input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>io_scene_xray/level/exp.py
# standart modules
import os
import math
import struct
# blender modules
import bpy
import bmesh
import mathutils
# addon modules
from . import fmt
from .. import text
from .. import utils
from .. import log
from .. import version_utils
from .. import xray_io
from .. import ogf
class VertexBuffer(object):
    """Accumulator for per-vertex attribute streams of one vertex buffer.

    Each attribute is kept as a raw little-endian byte stream that the
    exporter appends to; the streams are interleaved later by
    write_level_geom_vb() according to *vertex_format*.
    """

    def __init__(self):
        # number of vertices currently held in the streams below
        self.vertex_count = 0
        # 12 bytes per vertex (three floats)
        self.position = bytearray()
        # 3 bytes per vertex (packed components)
        self.normal = bytearray()
        self.tangent = bytearray()
        self.binormal = bytearray()
        # 1 byte per vertex
        self.color_hemi = bytearray()
        self.color_light = bytearray()
        self.color_sun = bytearray()
        # 4 bytes per vertex (two packed texture coordinates)
        self.uv = bytearray()
        # 2 bytes per vertex: correction values stored in the spare
        # bytes of tangent/binormal
        self.uv_fix = bytearray()
        self.uv_lmap = bytearray()
        # format-specific extra data (e.g. tree wind coefficients)
        self.shader_data = bytearray()
        # one of 'NORMAL', 'TREE', 'COLOR', 'FASTPATH' (set by the exporter)
        self.vertex_format = None
TWO_MEGABYTES = 1024 * 1024 * 2  # 2 MiB; NOTE(review): used as a buffer size threshold — usage is outside this chunk, confirm
class Visual(object):
    """Minimal per-visual state used during export."""

    def __init__(self):
        # index into the level's shader table (see write_visual_header,
        # which writes shader_index + 1 to skip the first empty shader)
        self.shader_index = None
class VisualsCache:
    """Caches per-object bounds and the parent -> children relations
    of all Blender objects, so they are computed once per export."""

    def __init__(self):
        self.bounds = {}
        self.children = {}
        self._find_children()

    def _find_children(self):
        # every object gets an entry, even when it has no children
        self.children = {obj.name: [] for obj in bpy.data.objects}
        for obj in bpy.data.objects:
            if obj.parent:
                self.children[obj.parent.name].append(obj.name)
class Level(object):
    """Mutable state shared across the passes of a level export."""

    def __init__(self):
        # material -> shader index (inverted by write_shaders)
        self.materials = {}
        self.visuals = []
        self.active_material_index = 0
        # chunk offsets for the main and fast-path geometry buffers
        self.vbs_offsets = []
        self.ibs_offsets = []
        self.fp_vbs_offsets = []
        self.fp_ibs_offsets = []
        self.saved_visuals = {}
        self.sectors_indices = {}
        # per-object bounding data keyed by object name
        # (filled by write_visual_header)
        self.visuals_bbox = {}
        self.visuals_center = {}
        self.visuals_radius = {}
        self.visuals_cache = VisualsCache()
        self.cform_objects = {}
def write_level_geom_swis():
    """Pack the SWIS chunk payload (currently always empty — see TODO)."""
    packed_writer = xray_io.PackedWriter()
    # TODO: export swis data
    packed_writer.putf('<I', 0) # swis count
    return packed_writer
def write_level_geom_ib(ibs):
    """Pack all index buffers into the IB chunk payload.

    Each buffer is a byte string of 16-bit indices; triangles are
    re-emitted with their 2nd and 3rd indices swapped (winding flip).
    """
    packed_writer = xray_io.PackedWriter()
    packed_writer.putf('<I', len(ibs)) # indices buffers count
    for ib in ibs:
        indices_count = len(ib) // 2 # index size = 2 byte
        packed_writer.putf('<I', indices_count) # indices count
        for index in range(0, indices_count, 3):
            # write triangle as (v0, v2, v1) to flip the winding order
            packed_writer.data.extend(ib[index * 2 : index * 2 + 2])
            packed_writer.data.extend(ib[index * 2 + 4 : index * 2 + 6])
            packed_writer.data.extend(ib[index * 2 + 2 : index * 2 + 4])
    return packed_writer
def _vb_declaration_info(vb):
    # Return (offsets, usage_indices, vertex_type) describing the vertex
    # declaration for the buffer's logical format.
    if vb.vertex_format == 'NORMAL':
        offsets = (0, 12, 16, 20, 24, 28) # normal visual vertex buffer offsets
        usage_indices = (0, 0, 0, 0, 0, 1)
        vertex_type = fmt.VERTEX_TYPE_BRUSH_14
    elif vb.vertex_format == 'TREE':
        offsets = (0, 12, 16, 20, 24)
        usage_indices = (0, 0, 0, 0, 0)
        vertex_type = fmt.VERTEX_TYPE_TREE
    elif vb.vertex_format == 'COLOR':
        offsets = (0, 12, 16, 20, 24, 28)
        usage_indices = (0, 0, 0, 0, 0, 0)
        vertex_type = fmt.VERTEX_TYPE_COLOR_14
    elif vb.vertex_format == 'FASTPATH':
        offsets = (0, )
        usage_indices = (0, )
        vertex_type = fmt.VERTEX_TYPE_FASTPATH
    else:
        # kept as BaseException for backward compatibility with callers
        raise BaseException('Unknown VB format:', vb.vertex_format)
    return offsets, usage_indices, vertex_type


def _write_vb_declaration(packed_writer, vb):
    # Write the D3D9-style vertex element declaration for one buffer,
    # terminated by the conventional end element (stream 255, type 17).
    offsets, usage_indices, vertex_type = _vb_declaration_info(vb)
    for index, (usage, type_) in enumerate(vertex_type):
        packed_writer.putf('<H', 0) # stream
        packed_writer.putf('<H', offsets[index]) # offset
        packed_writer.putf('<B', type_) # type
        packed_writer.putf('<B', 0) # method
        packed_writer.putf('<B', usage) # usage
        packed_writer.putf('<B', usage_indices[index]) # usage_index
    # terminator element
    packed_writer.putf('<H', 255) # stream
    packed_writer.putf('<H', 0) # offset
    packed_writer.putf('<B', 17) # type UNUSED
    packed_writer.putf('<B', 0) # method
    packed_writer.putf('<B', 0) # usage
    packed_writer.putf('<B', 0) # usage_index


def _write_vb_vertex_base(packed_writer, vb, vertex_index):
    # Write the attributes shared by the NORMAL, TREE and COLOR formats:
    # position, normal + hemi, tangent and binormal.  The spare fourth
    # bytes of tangent/binormal carry the uv correction values.
    vertex_pos = vb.position[vertex_index * 12 : vertex_index * 12 + 12]
    packed_writer.data.extend(vertex_pos)
    packed_writer.putf(
        '<4B',
        vb.normal[vertex_index * 3],
        vb.normal[vertex_index * 3 + 1],
        vb.normal[vertex_index * 3 + 2],
        vb.color_hemi[vertex_index]
    ) # normal, hemi
    uv_fix = vb.uv_fix[vertex_index * 2 : vertex_index * 2 + 2]
    # tangent
    packed_writer.putf(
        '<4B',
        vb.tangent[vertex_index * 3],
        vb.tangent[vertex_index * 3 + 1],
        vb.tangent[vertex_index * 3 + 2],
        uv_fix[0]
    )
    # binormal
    packed_writer.putf(
        '<4B',
        vb.binormal[vertex_index * 3],
        vb.binormal[vertex_index * 3 + 1],
        vb.binormal[vertex_index * 3 + 2],
        uv_fix[1]
    )


def write_level_geom_vb(vbs):
    """Pack all vertex buffers into the VB chunk payload.

    For each buffer the vertex declaration is written first, then the
    vertex count, then the interleaved vertex data in the layout
    selected by VertexBuffer.vertex_format.
    """
    packed_writer = xray_io.PackedWriter()
    packed_writer.putf('<I', len(vbs)) # vertex buffers count
    for vb in vbs:
        _write_vb_declaration(packed_writer, vb)
        packed_writer.putf('<I', vb.vertex_count) # vertices count
        if vb.vertex_format == 'NORMAL':
            for vertex_index in range(vb.vertex_count):
                _write_vb_vertex_base(packed_writer, vb, vertex_index)
                # texture coordinate
                packed_writer.data.extend(vb.uv[vertex_index * 4 : vertex_index * 4 + 4])
                # light map texture coordinate
                packed_writer.data.extend(vb.uv_lmap[vertex_index * 4 : vertex_index * 4 + 4])
        elif vb.vertex_format == 'TREE':
            for vertex_index in range(vb.vertex_count):
                _write_vb_vertex_base(packed_writer, vb, vertex_index)
                # texture coordinate
                packed_writer.data.extend(vb.uv[vertex_index * 4 : vertex_index * 4 + 4])
                # tree shader data (wind coefficient and unused 2 bytes)
                frac = vb.shader_data[vertex_index * 2 : vertex_index * 2 + 2]
                frac.extend((0, 0))
                packed_writer.data.extend(frac)
        elif vb.vertex_format == 'COLOR':
            for vertex_index in range(vb.vertex_count):
                _write_vb_vertex_base(packed_writer, vb, vertex_index)
                # vertex color
                packed_writer.putf(
                    '<4B',
                    vb.color_light[vertex_index * 3],
                    vb.color_light[vertex_index * 3 + 1],
                    vb.color_light[vertex_index * 3 + 2],
                    vb.color_sun[vertex_index]
                )
                # texture coordinate
                packed_writer.data.extend(vb.uv[vertex_index * 4 : vertex_index * 4 + 4])
        elif vb.vertex_format == 'FASTPATH':
            # fast-path buffers carry positions only
            for vertex_index in range(vb.vertex_count):
                vertex_pos = vb.position[vertex_index * 12 : vertex_index * 12 + 12]
                packed_writer.data.extend(vertex_pos)
    return packed_writer
def write_level_geom(chunked_writer, vbs, ibs):
    """Write the header, VB, IB and SWIS chunks of level geometry."""
    # each packed writer is dropped as soon as it is stored,
    # keeping peak memory low for large levels
    writer = write_header()
    chunked_writer.put(fmt.HEADER, writer)
    del writer
    writer = write_level_geom_vb(vbs)
    chunked_writer.put(fmt.Chunks13.VB, writer)
    del writer
    writer = write_level_geom_ib(ibs)
    chunked_writer.put(fmt.Chunks13.IB, writer)
    del writer
    writer = write_level_geom_swis()
    chunked_writer.put(fmt.Chunks13.SWIS, writer)
    del writer
def write_sector_root(root_index):
    """Pack the index of the sector's root visual."""
    writer = xray_io.PackedWriter()
    writer.putf('<I', root_index)
    return writer
def write_sector_portals(sectors_map, sector_name):
    """Pack the portal indices attached to the given sector."""
    writer = xray_io.PackedWriter()
    # the map has no entry (or an empty one) when there are no portals
    portals = sectors_map.get(sector_name)
    if portals:
        for portal in portals:
            writer.putf('<H', portal)
    return writer
def write_sector(root_index, sectors_map, sector_name):
    """Build the chunked writer for a single sector: portals then root."""
    writer = xray_io.ChunkedWriter()
    writer.put(
        fmt.SectorChunks.PORTALS,
        write_sector_portals(sectors_map, sector_name)
    )
    writer.put(
        fmt.SectorChunks.ROOT,
        write_sector_root(root_index)
    )
    return writer
def get_light_map_image(material, lmap_prop):
    """Resolve the light-map image referenced by a material property.

    Returns an (image, base_name) pair; both are None when the property
    is unset.  Raises utils.AppError when the referenced image does not
    exist or its file is not a *.dds.
    """
    image_name = getattr(material.xray, lmap_prop, None)
    if not image_name:
        return None, None
    image = bpy.data.images.get(image_name, None)
    if not image:
        raise utils.AppError(
            text.error.level_no_lmap,
            log.props(
                light_map=image_name,
                material=material.name
            )
        )
    file_name = os.path.basename(image.filepath)
    base_name, ext = os.path.splitext(file_name)
    if ext != '.dds':
        raise utils.AppError(
            text.error.level_lmap_no_dds,
            log.props(
                image=image.name,
                path=image.filepath,
                extension=ext
            )
        )
    return image, base_name
def _get_material_image(material):
    # Collect candidate images from the material's nodes (2.8+) or
    # texture slots (2.7x) and require exactly one of them.
    images = []
    if version_utils.IS_28:
        if not material.node_tree:
            raise utils.AppError(
                text.error.mat_not_use_nodes,
                log.props(material=material.name)
            )
        for node in material.node_tree.nodes:
            if node.type not in version_utils.IMAGE_NODES:
                continue
            if not node.image:
                continue
            images.append(node.image)
    else:
        for texture_slot in material.texture_slots:
            if not texture_slot:
                continue
            texture = texture_slot.texture
            if not texture:
                continue
            image = getattr(texture, 'image', None)
            if not image:
                continue
            images.append(image)
    if not images:
        raise utils.AppError(
            text.error.mat_no_img,
            log.props(material=material.name)
        )
    if len(images) > 1:
        raise utils.AppError(
            text.error.mat_many_img,
            log.props(material=material.name)
        )
    return images[0]


def write_shaders(level):
    """Pack the shaders chunk.

    Writes one '<eshader>/<texture>[,<lmap>[,<lmap>]]' string per
    material, preceded by the count and a first, empty shader entry.
    """
    texture_folder = version_utils.get_preferences().textures_folder_auto
    # invert material -> index so shaders are written in index order
    materials = {}
    for material, shader_index in level.materials.items():
        materials[shader_index] = material
    materials_count = len(materials)
    packed_writer = xray_io.PackedWriter()
    packed_writer.putf('<I', materials_count + 1) # shaders count
    packed_writer.puts('') # first empty shader
    for shader_index in range(materials_count):
        material = materials[shader_index]
        image = _get_material_image(material)
        texture_path = utils.gen_texture_name(
            image, texture_folder, level_folder=level.source_level_path
        )
        eshader = material.xray.eshader
        lmap_1_image, lmap_1_name = get_light_map_image(material, 'lmap_0')
        lmap_2_image, lmap_2_name = get_light_map_image(material, 'lmap_1')
        if lmap_1_image and lmap_2_image:
            # two baked light maps: referenced by base file name
            packed_writer.puts('{0}/{1},{2},{3}'.format(
                eshader, texture_path, lmap_1_name, lmap_2_name
            ))
        elif lmap_1_image and not lmap_2_image:
            # single light map: referenced by generated texture path
            lmap_1_path = utils.gen_texture_name(
                lmap_1_image,
                texture_folder,
                level_folder=level.source_level_path
            ) # terrain\terrain_name_lm.dds file
            packed_writer.puts('{0}/{1},{2}'.format(
                eshader, texture_path, lmap_1_path
            ))
        else:
            packed_writer.puts('{0}/{1}'.format(
                eshader, texture_path
            ))
    return packed_writer
def write_visual_bounding_sphere(packed_writer, bpy_obj, center, radius):
    """Write a bounding sphere: center with Y and Z swapped, then radius.

    The axis swap presumably converts Blender to engine coordinates —
    verify against the importer.  *bpy_obj* is unused; kept for
    signature parity with the other writer helpers.
    """
    swapped = (center[0], center[2], center[1])
    packed_writer.putf('<3f', *swapped) # center
    packed_writer.putf('<f', radius) # radius
def write_visual_bounding_box(packed_writer, bpy_obj, bbox):
    """Write an axis-aligned bounding box: min corner then max corner,
    each with Y and Z swapped (see write_visual_bounding_sphere).
    *bpy_obj* is unused; kept for signature parity."""
    for corner in (bbox[0], bbox[1]): # min, then max
        packed_writer.putf('<3f', corner[0], corner[2], corner[1])
def write_visual_header(level, bpy_obj, visual=None, visual_type=0, shader_id=0):
    """Pack the common OGF visual header: format version, visual type,
    shader index, bounding box and bounding sphere.

    Also stores the object's bbox/center/radius on *level* (their later
    use is outside this chunk).
    """
    packed_writer = xray_io.PackedWriter()
    packed_writer.putf('<B', ogf.fmt.FORMAT_VERSION_4) # format version
    packed_writer.putf('<B', visual_type)
    if visual:
        # +1 - skip first empty shader
        packed_writer.putf('<H', visual.shader_index + 1)
    else:
        packed_writer.putf('<H', shader_id) # shader id
    bbox, (center, radius) = ogf.exp.calculate_bbox_and_bsphere(
        bpy_obj, apply_transforms=True, cache=level.visuals_cache
    )
    # cache bounding data per object name for reuse elsewhere
    level.visuals_bbox[bpy_obj.name] = bbox
    level.visuals_center[bpy_obj.name] = center
    level.visuals_radius[bpy_obj.name] = radius
    write_visual_bounding_box(packed_writer, bpy_obj, bbox)
    write_visual_bounding_sphere(packed_writer, bpy_obj, center, radius)
    return packed_writer
def get_bbox_center(bbox):
    """Return the center point of an 8-corner bounding box.

    Only two opposite corners are read: bbox[0] and bbox[6] —
    presumably Blender's bound_box corner layout (confirm at call
    sites outside this chunk).
    """
    corner_min = bbox[0]
    corner_max = bbox[6]
    # keep the exact `max - (max - min) / 2` form so float results
    # stay bit-for-bit identical to previous exports
    return [
        mx - (mx - mn) / 2
        for mn, mx in zip(corner_min, corner_max)
    ]
def find_distance(vertex_1, vertex_2):
distance = (
(vertex_2[0] - vertex_1[0]) ** 2 + \
(vertex_2[1] - vertex_1[1]) ** 2 + \
(vertex_2[2] - vertex_1[2]) ** 2
) ** (1 / | |
mask
@property
def flow_direction(self):
"""The information necessary to move any variable laterally
(i.e. along Y and/or X) to its nearest receiving neighbour in
the `Grid` given as a `cf.Field` and returned as a processed
`numpy.ndarray`.
:Parameters:
directions: `cf.Field`
The field containing flow direction. The supported kinds
of directional information are listed in the table
below. The shape of the array must be the same as the
Grid, except for the relative kind, where an additional
trailing axis of size two holding the pairs must be
present.
================= =====================================
kind information
================= =====================================
cardinal The field data contains the direction
using `str` for the eight following
cardinal points: 'N' for North, 'NE'
for North-East, 'E' for East,
'SE' for South East, 'S' for South,
'SW' for South West, 'W' for West,
'NW' for North West.
digits The field data contains the direction
using `int` for the eight following
cardinal points: 1 for North, 2 for
North-East, 3 for East, 4 for South
East, 5 for South, 6 for South West,
7 for West, 8 for North West.
relative The field data contains the direction
using pairs of `int` (Y, X) for the
eight following cardinal points:
(1, 0) for North, (1, 1) for
North-East, (0, 1) for East, (-1, 1)
for South East, (-1, 0) for South,
(-1, -1) for South West, (0, -1)
for West, (1, -1) for North West.
================= =====================================
:Returns:
`numpy.ndarray`
The information to route any variable to its destination
in the `Grid` in the relative format (see table above).
If not set, return `None`.
**Examples**
Assigning flow direction to grid using cardinal values:
>>> import numpy
>>> grid = LatLonGrid.from_extent_and_resolution(
... latitude_extent=(51, 55),
... latitude_resolution=1,
... longitude_extent=(-2, 1),
... longitude_resolution=1
... )
>>> print(grid.flow_direction)
None
>>> directions = grid.to_field()
>>> directions.set_data(numpy.array([['SE', 'S', 'E'],
... ['NE', 'E', 'N'],
... ['S', 'S', 'W'],
... ['NW', 'E', 'SW']]))
>>> grid.flow_direction = directions
>>> print(grid.flow_direction)
[[[-1 1]
[-1 0]
[ 0 1]]
<BLANKLINE>
[[ 1 1]
[ 0 1]
[ 1 0]]
<BLANKLINE>
[[-1 0]
[-1 0]
[ 0 -1]]
<BLANKLINE>
[[ 1 -1]
[ 0 1]
[-1 -1]]]
>>> print(grid)
LatLonGrid(
shape {Y, X}: (4, 3)
Y, latitude (4,): [51.5, ..., 54.5] degrees_north
X, longitude (3,): [-1.5, -0.5, 0.5] degrees_east
Y_bounds (4, 2): [[51.0, ..., 55.0]] degrees_north
X_bounds (3, 2): [[-2.0, ..., 1.0]] degrees_east
flow_direction (4, 3, 2): [[[-1, ..., -1]]]
)
Assigning flow direction to grid using digits:
>>> flow_direction = grid.flow_direction
>>> directions.set_data(numpy.array([[4, 5, 3],
... [2, 3, 1],
... [5, 5, 7],
... [8, 3, 6]]))
>>> grid.flow_direction = directions
>>> numpy.array_equal(flow_direction, grid.flow_direction)
True
Assigning flow direction to grid using relative values:
>>> import cf
>>> ax = directions.set_construct(cf.DomainAxis(2))
>>> dim = cf.DimensionCoordinate(
... properties={'name': 'relative flow direction along y and x components',
... 'units': '1'},
... data=cf.Data(['y_rel', 'x_rel'])
... )
>>> dim = directions.set_construct(dim, axes=ax)
>>> directions.set_data(
... numpy.array([[[-1, 1], [-1, 0], [0, 1]],
... [[1, 1], [0, 1], [1, 0]],
... [[-1, 0], [-1, 0], [0, -1]],
... [[1, -1], [0, 1], [-1, -1]]]
... ),
... axes=('Y', 'X', dim)
... )
>>> grid.flow_direction = directions
>>> numpy.array_equal(flow_direction, grid.flow_direction)
True
Assigning masked flow direction to grid:
>>> directions = grid.to_field()
>>> directions.set_data(
... numpy.ma.array(
... [['SE', 'S', 'E'],
... ['NE', 'E', 'N'],
... ['S', 'S', 'W'],
... ['NW', 'E', 'SW']],
... mask=[[1, 0, 0],
... [1, 0, 0],
... [1, 1, 0],
... [0, 0, 0]]
... )
... )
>>> grid.flow_direction = directions
>>> print(grid.flow_direction)
[[[-- --]
[-1 0]
[0 1]]
<BLANKLINE>
[[-- --]
[0 1]
[1 0]]
<BLANKLINE>
[[-- --]
[-- --]
[0 -1]]
<BLANKLINE>
[[1 -1]
[0 1]
[-1 -1]]]
"""
return self._flow_direction
    @flow_direction.setter
    def flow_direction(self, directions):
        """Validate the given `cf.Field`, convert its data to relative
        (Y, X) movements, and precompute the routing masks used by
        *route*."""
        error_valid = RuntimeError("flow direction contains invalid data")
        error_dim = RuntimeError(
            f"flow direction dimensions not compatible "
            f"with {self.__class__.__name__}"
        )
        # check type
        if not isinstance(directions, cf.Field):
            raise TypeError("flow direction not a cf.Field")
        # store given field for config file
        self._flow_direction_field = directions
        # drop potential size-1 Z axis since flow direction is
        # only relevant horizontally
        if directions.domain_axis(self.vertical_axis, key=True, default=False):
            directions.squeeze(self.vertical_axis, inplace=True)
        # check that directions and spacedomain are compatible
        grid = self.to_horizontal_grid()
        try:
            directions = grid.subset_and_compare(directions)
        except RuntimeError:
            raise error_dim
        # get field's data array
        directions = directions.array
        # determine horizontal-only shape of spacedomain
        shp = grid.shape
        # initialise info array by extending by one trailing axis of
        # size 2 (for relative Y movement, and relative X movement)
        # if masked array, use same mask on info
        # -9 is a sentinel marking "no direction assigned yet"
        if np.ma.is_masked(directions):
            if (shp + (2,)) == directions.shape:
                # relative kind: mask already has the trailing axis
                info = np.ma.masked_array(
                    np.zeros(shp + (2,), int),
                    mask=directions.mask
                )
            elif shp == directions.shape:
                # cardinal/digits kind: duplicate mask along new axis
                info = np.ma.masked_array(
                    np.zeros(shp + (2,), int),
                    mask=np.tile(directions.mask[..., np.newaxis], 2)
                )
            else:
                raise error_dim
            info[~info.mask] = -9
        else:
            info = np.zeros(shp + (2,), int)
            info[:] = -9
        # convert directions to relative Y X movement
        if directions.dtype == np.dtype('<U2'):
            # cardinal
            if not directions.shape == shp:
                raise error_dim
            # strip and capitalise strings
            if np.ma.is_masked(directions):
                directions[~directions.mask] = np.char.strip(
                    np.char.upper(directions[~directions.mask])
                )
            else:
                directions = np.char.strip(np.char.upper(directions))
            for card, yx_rel in self._routing_cardinal_map.items():
                info[directions == card] = yx_rel
        elif issubclass(directions.dtype.type, np.integer):
            if info.shape == directions.shape:
                # relative
                if np.amin(directions) < -1 or np.amax(directions) > 1:
                    raise error_valid
                info[:] = directions
            elif shp == directions.shape:
                # digits
                for digit, yx_rel in self._routing_digits_map.items():
                    info[directions == digit] = yx_rel
            else:
                raise error_dim
        else:
            raise error_valid
        # check that match found for everywhere in grid
        # (any remaining -9 means an unrecognised direction value)
        if not np.sum(info == -9) == 0:
            raise error_valid
        # assign main routing mask
        self._flow_direction = info
        # find outflow towards outside domain
        # to set relative direction special value 9
        info_ = np.zeros(shp + (2,), int)
        info_[:] = info
        if not (self._Y_limits_contiguous
                and (self.Y_bounds.array[0, 0] in self._Y_limits)
                and (self.Y_bounds.array[-1, -1] in self._Y_limits)):
            # northwards on north edge
            info_[..., 0, :, 0][info_[..., 0, :, 0] == -1] = 9
            # southwards on south edge
            info_[..., -1, :, 0][info_[..., -1, :, 0] == 1] = 9
        if not (self._X_limits_contiguous
                and (self.X_bounds.array[0, 0] in self._X_limits)
                and (self.X_bounds.array[-1, -1] in self._X_limits)):
            # eastwards on east edge
            info_[..., :, -1, 1][info_[..., :, -1, 1] == 1] = 9
            # westwards on west edge
            info_[..., :, 0, 1][info_[..., :, 0, 1] == -1] = 9
        # create mask for location with outflow towards outside domain
        to_out = (info_[..., 0] == 9) | (info_[..., 1] == 9)
        # find outflow towards masked location
        if np.ma.is_masked(directions):
            # get absolute destination from relative directions
            y, x = to_out.shape[-2:]
            abs_dst = np.zeros(info_.shape, dtype=int)
            abs_dst[..., 0][~to_out] = (np.arange(y, dtype=int)[:, np.newaxis]
                                        + info_[..., 0])[~to_out]
            abs_dst[..., 1][~to_out] = (np.arange(x, dtype=int)[np.newaxis, :]
                                        + info_[..., 1])[~to_out]
            # avoid IndexError by arbitrarily setting (0, 0) where domain outflow
            abs_dst[..., 0][to_out] = 0
            abs_dst[..., 1][to_out] = 0
            # use destination on mask to determine if towards masked location
            to_msk = directions.mask[tuple(abs_dst.T)].T
            # eliminate previous arbitrary action that avoided IndexError
            to_msk[to_out] = False
            # set relative direction to special value 9
            # where outflow towards masked location
            info_[..., 0][to_msk] = 9
            info_[..., 1][to_msk] = 9
        else:
            to_msk = np.zeros(shp, dtype=bool)
        # pre-process some convenience masks out of main routing mask
        # to avoid generating them every time *route* method is called
        # Y-wards movement
        for j in [-1, 0, 1]:
            # X-wards movement
            for i in [-1, 0, 1]:
                # note: special value 9 set previously allows here to
                # ignore them in the routing masks
                self._routing_masks[(j, i)] = (
                    (info_[..., 0] == j) & (info_[..., 1] == i)
                )
        # OUT-wards movement
        # (i.e. towards outside domain or towards masked location)
        self._routing_out_mask = to_out | to_msk
def route(self, values_to_route):
"""Move the given values from their current location in the
`Grid` to their downstream/downslope location according to the
*flow_direction* property of `Grid`.
:Parameters:
values_to_route: `numpy.ndarray`
The values to route following the flow direction, e.g.
how river discharge to route. The shape of this array
| |
<reponame>HQSquantumsimulations/pyquest
"""Measurement function in PyQuest-cffi"""
# Copyright 2019 HQS Quantum Simulations GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyquest_cffi.questlib import quest, _PYQUEST, tqureg, ffi_quest, qreal, tquestenv, paulihamil
import numpy as np
from typing import Sequence, Union, List, Tuple
from pyquest_cffi import cheat
class calcFidelity(_PYQUEST):
    r"""Calculate Fidelity of a quantum register

    Determine the fidelity of a qureg (wavefunction
    :math:`\left| \psi \right\rangle` or density matrix :math:`\rho`)
    with respect to a reference_qureg of a wavefunction
    :math:`\left| \psi_{ref} \right\rangle`

    Fidelity is defined as:

    .. math::
        \mathcal{F} &= \left\langle \psi | \psi_{ref} \right \rangle \\
        \mathcal{F} &= \left\langle \psi_{ref}| \rho | \psi_{ref} \right \rangle

    Args:
        qureg: a qureg containing a wavefunction or a density matrix
        qureg_reference: a qureg containing a wavefunction
        readout: the readout register for static compilation
    """

    def call_interactive(self, qureg: tqureg, qureg_reference: tqureg) -> float:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a wavefunction or a density matrix
            qureg_reference: a qureg containing a wavefunction

        Returns:
            float

        Raises:
            RuntimeError: Reference qureg has to be a wavefunction qureg
                but density matrix qureg was used
        """
        # guard: the reference must be a wavefunction register
        if qureg_reference.isDensityMatrix:
            raise RuntimeError("Reference qureg has to be a wavefunction qureg but "
                               + "density matrix qureg was used")
        return quest.calcFidelity(qureg, qureg_reference)
class calcInnerProduct(_PYQUEST):
    r"""Calculate the inner-product/overlap of two wavefunction quregs

    Inner product defined as:

    .. math::
        \left\langle \psi_{qureg1} | \psi_{qureg2} \right \rangle

    Args:
        qureg1: a qureg containing a wavefunction
        qureg2: a qureg containing a wavefunction
        readout: the readout register for static compilation
    """

    def call_interactive(self, qureg1: tqureg, qureg2: tqureg) -> float:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg1: a qureg containing a wavefunction
            qureg2: a qureg containing a wavefunction

        Returns:
            float

        Raises:
            RuntimeError: Qureg1 has to be a wavefunction qureg but density matrix qureg was used
            RuntimeError: Qureg2 has to be a wavefunction qureg but density matrix qureg was used
        """
        # both operands must be wavefunction registers
        if qureg1.isDensityMatrix:
            raise RuntimeError("Qureg1 has to be a wavefunction qureg but "
                               + "density matrix qureg was used")
        if qureg2.isDensityMatrix:
            raise RuntimeError("Qureg2 has to be a wavefunction qureg but "
                               + "density matrix qureg was used")
        return quest.calcInnerProduct(qureg1, qureg2)
class calcProbOfOutcome(_PYQUEST):
    r"""Calculate the probability that qubit #qubit of qureg is measured in state outcome

    Args:
        qureg: a qureg containing a wavefunction or density matrix
        qubit: the index of the qubit for which the probability is determined
        outcome: the outcome of the measurement
        readout: the readout register for static compilation
    """

    def call_interactive(self, qureg: tqureg, qubit: int, outcome: int) -> float:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a wavefunction or density matrix
            qubit: the index of the qubit for which the probability is determined
            outcome: the outcome of the measurement (presumably 0 or 1 — see QuEST docs)

        Returns:
            float
        """
        # thin wrapper around the QuEST C API
        return quest.calcProbOfOutcome(qureg, qubit, outcome)
class calcPurity(_PYQUEST):
    r"""Calculate the purity of a density matrix in qureg

    Purity defined as:

    .. math::
        \mathcal{Tr}\left(\rho^2\right)

    Args:
        qureg: a qureg containing a density matrix
        readout: the readout register for static compilation
    """

    def call_interactive(self, qureg: tqureg) -> float:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a density matrix

        Returns:
            float

        Raises:
            RuntimeError: Qureg has to be a density matrix qureg but wavefunction qureg was used
        """
        # guard: purity is only defined here for density matrices
        if not qureg.isDensityMatrix:
            raise RuntimeError("Qureg has to be a density matrix qureg but "
                               + "wavefunction qureg was used")
        return quest.calcPurity(qureg)
class calcTotalProb(_PYQUEST):
    r"""Calculate total probability

    Check physicality of system by calculating probability of system to be in any state.
    In other words check that trace of density matrix or norm of state vector is one.

    Args:
        qureg: a qureg containing a density matrix or wavefunction
        readout: the readout register for static compilation
    """

    def call_interactive(self, qureg: tqureg) -> float:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a density matrix or wavefunction

        Returns:
            float
        """
        # thin wrapper around the QuEST C API; accepts either register kind
        return quest.calcTotalProb(qureg)
class getStateVectoratIndex(_PYQUEST):
    r"""Get the value of a wavefunction/state vector in qureg at index

    Args:
        qureg: a qureg containing a wavefunction
        index: The index either as an int or as a sequence
            of 0 and 1 referencing the corresponding basis state
        readout: the readout register for static compilation
    """

    def call_interactive(self, qureg: tqureg, index: Union[int, Sequence[int]]) -> complex:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a wavefunction
            index: The index either as an int or as a sequence
                of 0 and 1 referencing the corresponding basis state

        Returns:
            complex: the complex amplitude at the given index
                (the annotation previously said float, but the method
                always builds a Python complex)

        Raises:
            RuntimeError: Qureg has to be a wavefunction qureg but density matrix qureg was used
        """
        # accept a basis-state bit sequence as well as a plain index
        if hasattr(index, '__len__'):
            index = basis_state_to_index(index)
        if qureg.isDensityMatrix:
            raise RuntimeError("Qureg has to be a wavefunction qureg but "
                               + "density matrix qureg was used")
        # quest.getAmp returns a C complex struct; rebuild a Python complex
        cComplex = quest.getAmp(qureg, index)
        return cComplex.real + 1j * cComplex.imag


getAmp = getStateVectoratIndex
class getDensityMatrixatRowColumn(_PYQUEST):
    r"""Get the value of the density matrix in qureg at row and column

    Args:
        qureg: a qureg containing a density matrix
        row: The row index either as an int or as a sequence
            of 0 and 1 referencing the corresponding basis state
        column: The column index either as an int or as a sequence
            of 0 and 1 referencing the corresponding basis state
        readout: The readout register for static compilation
    """

    def call_interactive(self, qureg: tqureg,
                         row: Union[int, Sequence[int]],
                         column: Union[int, Sequence[int]]) -> complex:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a density matrix
            row: The row index either as an int or as a sequence
                of 0 and 1 referencing the corresponding basis state
            column: The column index either as an int or as a sequence
                of 0 and 1 referencing the corresponding basis state

        Returns:
            complex: the complex matrix element at (row, column)
                (the annotation previously said float, but the method
                always builds a Python complex)

        Raises:
            RuntimeError: Qureg has to be a density matrix qureg but wavefunction qureg was used
        """
        # accept basis-state bit sequences as well as plain indices
        if hasattr(row, '__len__'):
            row = basis_state_to_index(row)
        if hasattr(column, '__len__'):
            column = basis_state_to_index(column)
        if qureg.isDensityMatrix:
            # quest.getDensityAmp returns a C complex struct;
            # rebuild a Python complex from its parts
            cComplex = quest.getDensityAmp(qureg, row, column)
            return cComplex.real + 1j * cComplex.imag
        else:
            raise RuntimeError("Qureg has to be a density matrix qureg but "
                               + "wavefunction qureg was used")


getDensityAmp = getDensityMatrixatRowColumn
class getAbsoluteValSquaredatIndex(_PYQUEST):
    r"""Get the absolute value squared of a wavefunction/state vector in a quantum register at index

    Args:
        qureg: a qureg containing a wavefunction
        index: The index either as an int or as a sequence
            of 0 and 1 referencing the corresponding basis state
    """

    def call_interactive(self, qureg: tqureg, index: Union[int, Sequence[int]]) -> float:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a wavefunction
            index: The index either as an int or as a sequence
                of 0 and 1 referencing the corresponding basis state

        Returns:
            float

        Raises:
            RuntimeError: Qureg has to be a wavefunction qureg but density matrix qureg was used
        """
        # accept a basis-state bit sequence as well as a plain index
        if hasattr(index, '__len__'):
            index = basis_state_to_index(index)
        if qureg.isDensityMatrix:
            raise RuntimeError("Qureg has to be a wavefunction qureg but "
                               + "density matrix qureg was used")
        return quest.getProbAmp(qureg, index)


getProbAmp = getAbsoluteValSquaredatIndex
class getRealAmp(_PYQUEST):
    r"""Get the real value of a wavefunction/state vector in qureg at index

    Args:
        qureg: a qureg containing a wavefunction
        index: The index either as an int of as a sequence
            of 0 and 1 referencing the corresponding basis state
        readout: The readout register for static compilation
    """

    def call_interactive(self, qureg: tqureg, index: Union[int, Sequence[int]]) -> float:
        r"""Interactive call of PyQuest-cffi

        Args:
            qureg: a qureg containing a wavefunction
            index: The index either as an int of as a sequence
                of 0 and 1 referencing the corresponding basis state

        Returns:
            float

        Raises:
            RuntimeError: Qureg has to be a wavefunction qureg but density matrix qureg was used
        """
        # accept a basis-state bit sequence as well as a plain index
        if hasattr(index, '__len__'):
            index = basis_state_to_index(index)
        if qureg.isDensityMatrix:
            raise RuntimeError("Qureg has to be a wavefunction qureg but "
                               + "density matrix qureg was used")
        return quest.getRealAmp(qureg, index)
class getImagAmp(_PYQUEST):
r"""Get the imaginary value of a wavefunction/state vector in qureg | |
default=(objects.collection_manager.CollectionIdentifier.Unindentified),
invalid_enums=(
objects.collection_manager.CollectionIdentifier.Unindentified,))),
locked_args={'test_type': COLLECTION_TYPE}),
specific_items=TunableTuple(description='\n If selected we will check that the collected item is from a\n specific list of collectable items that we are looking for.\n ',
specific_items=TunableList(description='\n List of allowed objects within a collection that we want to\n check.\n ',
tunable=TunableReference(description='\n Object reference to each collectible object.\n ',
manager=(services.definition_manager()))),
locked_args={'test_type': SPECIFIC_ITEMS}),
default='collection_type')}
def get_expected_args(self):
return {'collection_id':event_testing.test_constants.FROM_EVENT_DATA,
'collected_item_id':event_testing.test_constants.FROM_EVENT_DATA}
    @cached_test
    def __call__(self, collection_id=None, collected_item_id=None):
        # dispatch on the tuned test kind: collection type vs specific items
        if self.test_type.test_type == self.COLLECTION_TYPE:
            if collection_id is None:
                return TestResult(False, 'Collected Item is None, valid during zone load.')
            # an empty tuned list of collection types means "any type passes"
            if self.test_type.collection_types and collection_id not in self.test_type.collection_types:
                return TestResult(False, 'Collected Item is of wrong collection type.')
        elif self.test_type.test_type == self.SPECIFIC_ITEMS:
            if collected_item_id is None:
                return TestResult(False, 'Collected Item is None, valid during zone load.')
            if collected_item_id not in set((specific_item.id for specific_item in self.test_type.specific_items)):
                return TestResult(False, 'Collected item is not in in the list of collected items that we are looking for.')
        return TestResult.TRUE
class TopicTest(event_testing.test_base.BaseTest):
    """Gate on the topics currently attached to the subject Sim(s).

    A subject passes when it has at least one whitelisted topic (when a
    whitelist is tuned) and, otherwise, none of the blacklisted topics.

    Fixes (decompilation artifacts):
    * ``__call__`` ended with ``return result or result`` (an unconditional
      return), so only the first subject was ever examined — inconsistent
      with the ``targets_to_match`` branch, which only returns early on
      failure.  Every subject is now checked.
    * ``get_sim_instance`` was called twice per subject; it is now called
      once and reused.
    """
    test_events = ()
    FACTORY_TUNABLES = {'description':'Gate topics of the actor or target Sim.',
     'subject':TunableEnumEntry(ParticipantType, ParticipantType.Actor, description='Who or what to apply this test to'),
     'target_sim':TunableEnumEntry(ParticipantType, ParticipantType.Invalid, description='Set if topic needs a specfic target. If no target, keep as Invalid.'),
     'whitelist_topics':TunableList(TunableReference(services.topic_manager()), description='The Sim must have any topic in this list to pass this test.'),
     'blacklist_topics':TunableList(TunableReference(services.topic_manager()), description='The Sim cannot have any topic contained in this list to pass this test.')}

    def __init__(self, subject, target_sim, whitelist_topics, blacklist_topics, **kwargs):
        super().__init__(safe_to_skip=True, **kwargs)
        self.subject = subject
        self.target_sim = target_sim
        self.whitelist_topics = whitelist_topics
        self.blacklist_topics = blacklist_topics

    def get_expected_args(self):
        """Only request targets when a specific target participant is tuned."""
        if self.target_sim == ParticipantType.Invalid:
            return {'subjects': self.subject}
        return {'subjects': self.subject, 'targets_to_match': self.target_sim}

    def _topic_exists(self, sim, target):
        """Evaluate whitelist/blacklist topic membership for one sim/target pair.

        The whitelist, when tuned, takes precedence; the blacklist is only
        consulted when no whitelist is tuned (mirrors the original logic).
        """
        if self.whitelist_topics:
            if any(t.topic_exist_in_sim(sim, target=target) for t in self.whitelist_topics):
                return TestResult.TRUE
            return TestResult(False, "{} doesn't have any topic in white list", sim, tooltip=(self.tooltip))
        if self.blacklist_topics:
            if any(t.topic_exist_in_sim(sim, target=target) for t in self.blacklist_topics):
                return TestResult(False, '{} has topic in black list', sim, tooltip=(self.tooltip))
        return TestResult.TRUE

    @cached_test
    def __call__(self, subjects=None, targets_to_match=None):
        for subject in subjects:
            if subject.is_sim:
                # Topics live on the sim instance, so an uninstantiated sim fails.
                sim_instance = subject.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
                if sim_instance is None:
                    return TestResult(False, '{} failed topic check: It is not an instantiated sim.', subject, tooltip=(self.tooltip))
                subject = sim_instance
            if targets_to_match is not None:
                for target_to_match in targets_to_match:
                    result = self._topic_exists(subject, target_to_match)
                    if not result:
                        return result
            else:
                result = self._topic_exists(subject, None)
                if not result:
                    return result
        return TestResult.TRUE
# Tunable factory wrapper so TopicTest can be referenced directly from tuning.
TunableTopicTest = TunableSingletonFactory.create_auto_factory(TopicTest)
class UseDefaultOfflotToleranceFactory(TunableSingletonFactory):
    """Singleton factory resolving to the globally tuned default off-lot
    tolerance from the statistic component's global tuning."""

    @staticmethod
    def factory():
        # Defer to the global tuning so all users share one source of truth.
        return objects.components.statistic_types.StatisticComponentGlobalTuning.DEFAULT_OFF_LOT_TOLERANCE
    FACTORY_TYPE = factory
class LotOwnerTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
    """Test whether the subject participant(s) own the current lot.

    Fixes (decompilation artifacts in the original control flow):
    * ``_is_lot_owner``: the business-lot check was under
      ``elif self.consider_rented_lot_as_owned`` ... ``elif``, so with both
      options at their default (True) the business-lot check never ran.
      Both checks are now independent.
    * ``__call__``: the ``elif self.owns_lot`` branch did not re-test
      ownership, so a target that DID own the lot failed the test whenever
      ``owns_lot`` was tuned True.  Ownership is now evaluated once per
      target and both mismatch directions are checked explicitly.
    """
    test_events = ()
    FACTORY_TUNABLES = {'subject':TunableEnumEntry(description='\n Who or what to apply this test to\n ',
      tunable_type=ParticipantType,
      default=ParticipantType.Actor),
     'owns_lot':Tunable(description='\n If checked and subject owns the current lot then this test will\n pass. If unchecked, subject does not own lot, this test will pass.\n ',
      tunable_type=bool,
      default=True),
     'consider_rented_lot_as_owned':Tunable(description='\n If checked, rented lots are considered owned. If unchecked, rented\n lots are considered unowned.\n ',
      tunable_type=bool,
      default=True),
     'consider_business_lot_as_owned':Tunable(description='\n If checked, business lots are considered owned. If unchecked, business\n lots are considered unowned.\n ',
      tunable_type=bool,
      default=True),
     'invert':Tunable(description='\n If checked, this test will return the opposite of what it\'s tuned to\n return. For instance, if this test is tuned to return True if the\n active household owns the lot, but "Invert" is checked, it will\n actually return False.\n ',
      tunable_type=bool,
      default=False)}

    def get_expected_args(self):
        """Request the tuned subject participants as ``test_targets``."""
        return {'test_targets': self.subject}

    def _is_lot_owner(self, zone, target):
        """Return True when ``target`` counts as an owner of ``zone``.

        Home zone, rented zone (optional) and business-lot ownership
        (optional) are each sufficient.
        """
        if target.household.home_zone_id == zone.id:
            return True
        if self.consider_rented_lot_as_owned and target.is_renting_zone(zone.id):
            return True
        if self.consider_business_lot_as_owned:
            if zone.lot is not None and zone.lot.owner_household_id == target.household_id:
                return True
        return False

    @cached_test
    def __call__(self, test_targets=None):
        current_zone = services.current_zone()
        for target in test_targets:
            is_owner = self._is_lot_owner(current_zone, target)
            if is_owner and not self.owns_lot:
                if self.invert:
                    return TestResult.TRUE
                return TestResult(False, '{} owns the lot, but is not supposed to.', target, tooltip=(self.tooltip))
            if not is_owner and self.owns_lot:
                if self.invert:
                    return TestResult.TRUE
                return TestResult(False, '{} does not own the lot, but is supposed to.', target, tooltip=(self.tooltip))
        if self.invert:
            return TestResult(False, 'Test passed but is tuned to invert the result.')
        return TestResult.TRUE
class HasLotOwnerTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
    """Test whether the active lot currently has an owner (optionally
    counting a rented lot as owned)."""
    test_events = ()
    FACTORY_TUNABLES = {'description':'\n Test to check if the lot has an owner or not.\n ',
     'has_owner':Tunable(description='\n If checked then the test will return true if the lot has an\n owner.\n If unchecked then the test will return true if the lot does not\n have an owner.\n ',
      tunable_type=bool,
      default=True),
     'consider_rented_lot_as_owned':Tunable(description='\n If unchecked, test will not consider, renting as ownership. If\n checked and a sim is renting the current lot then the test will\n treat being rented as having an owner. If unchecked and a sim\n is renting the current lot then the test will not treat this\n lot as having an owner.\n ',
      tunable_type=bool,
      default=True)}

    def get_expected_args(self):
        """This test consumes no resolver-provided arguments."""
        return {}

    @cached_test
    def __call__(self):
        active_lot = services.active_lot()
        if not active_lot:
            return TestResult(False, 'HasLotOwnerTest: No active lot found.',
              tooltip=(self.tooltip))
        # Household id 0 means unowned; renting may optionally count as owned.
        owned = active_lot.owner_household_id != 0
        if not owned and self.consider_rented_lot_as_owned:
            owned = services.travel_group_manager().is_current_zone_rented()
        if self.has_owner and not owned:
            return TestResult(False, 'HasLotOwnerTest: Trying to check if the lot has an owner, but the lot does not have an owner.',
              tooltip=(self.tooltip))
        if not self.has_owner and owned:
            return TestResult(False, 'HasLotOwnerTest: Trying to check if the lot does not have an owner, but the lot has an owner.',
              tooltip=(self.tooltip))
        return TestResult.TRUE
class DuringWorkHoursTest(event_testing.test_base.BaseTest):
    """Test whether the current time is within the subject's work hours.

    Fix (decompilation artifact): the original success path was
    ``return self.fail_if_taking_day_off_during_work and taking_day_off or
    TestResult.TRUE`` which evaluates to the bare boolean ``True`` — a
    passing value — exactly when the tuning says the test should fail
    (taking a day off during work hours).  That case now returns a failing
    TestResult.
    """
    test_events = ()
    FACTORY_TUNABLES = {'description':'Returns True if run during a time that the subject Sim should be at work.',
     'subject':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
      tunable_type=ParticipantType,
      default=ParticipantType.Actor),
     'is_during_work':Tunable(description='\n Check to return True if during work hours.\n ',
      tunable_type=bool,
      default=False),
     'fail_if_taking_day_off_during_work':Tunable(description="\n If checked, this test will fail if the Sim is taking\n PTO/vacation/sick day during work hours and is_during_work is\n checked. If not checked, this test won't care about whether or not\n the Sim is taking the day off.\n ",
      tunable_type=bool,
      default=False),
     'career':OptionalTunable(description='\n If tuned, this test will run against a specific career instead of \n against any career.\n ',
      tunable=TunablePackSafeReference(description='\n The specific career to test against.\n ',
      manager=(services.get_instance_manager(sims4.resources.Types.CAREER))))}

    def __init__(self, subject, is_during_work, fail_if_taking_day_off_during_work, career, **kwargs):
        super().__init__(**kwargs)
        self.subject = subject
        self.is_during_work = is_during_work
        self.fail_if_taking_day_off_during_work = fail_if_taking_day_off_during_work
        self.career = career

    def get_expected_args(self):
        return {'subjects': self.subject}

    @cached_test
    def __call__(self, subjects=()):
        is_work_time = False
        taking_day_off = False
        for subject in subjects:
            career_tracker = subject.career_tracker
            if self.career is not None:
                # Test against the specific tuned career only.
                career = career_tracker.get_career_by_uid(self.career.guid64)
                if not career_tracker.career_during_work_hours(career):
                    continue
            else:
                career = career_tracker.career_currently_within_hours
                if career is None:
                    continue
            is_work_time = True
            taking_day_off = career.taking_day_off
            break
        if is_work_time:
            if self.is_during_work:
                if self.fail_if_taking_day_off_during_work and taking_day_off:
                    return TestResult(False, 'Sim is taking the day off during work hours.', tooltip=(self.tooltip))
                return TestResult.TRUE
            return TestResult(False, 'Current time is within career work hours.', tooltip=(self.tooltip))
        if self.is_during_work:
            return TestResult(False, 'Current time is not within career work hours.', tooltip=(self.tooltip))
        return TestResult.TRUE
# Tunable factory wrapper so DuringWorkHoursTest can be referenced from tuning.
TunableDuringWorkHoursTest = TunableSingletonFactory.create_auto_factory(DuringWorkHoursTest)
class AtWorkTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
    """Test whether any subject is currently at work, with an optional
    restriction on being at an active (playable) work event.

    Fixes (decompilation artifacts):
    * The active-work-restriction branch dereferenced
      ``career.is_at_active_event`` even when no subject was at work
      (``career is None``) — an AttributeError at runtime.  The restriction
      is now only evaluated when a career was found.
    * The 'Sim is not at work' failure was attached to the ``else`` of the
      restriction check, so it only ran when no restriction was tuned.  It
      now correctly handles the no-career case.
    """
    test_events = (
     TestEvent.WorkdayStart,)
    FACTORY_TUNABLES = {'subject':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
      tunable_type=ParticipantType,
      default=ParticipantType.Actor),
     'is_at_work':Tunable(description='\n Check to return True if any of the subjects are at work.\n ',
      tunable_type=bool,
      default=True),
     'active_work_restriction':OptionalTunable(description='\n If enabled, if this is set the test will only pass if the Sim is at\n an active event. If not set, the test will instead only pass if the\n Sim is not at an active event.\n ',
      tunable=Tunable(tunable_type=bool,
      default=True))}

    def get_expected_args(self):
        return {'subjects': self.subject}

    @cached_test
    def __call__(self, subjects=(), **kwargs):
        # Find the first subject that is currently at work (if any).
        career = None
        for subject in subjects:
            career = subject.career_tracker.get_currently_at_work_career()
            if career is not None:
                break
        if career is not None:
            if not self.is_at_work:
                return TestResult(False, 'Sim is at work {}', career, tooltip=(self.tooltip))
            if self.active_work_restriction is not None:
                if career.is_at_active_event != self.active_work_restriction:
                    return TestResult(False, '{} does not meet active work restriction: {}', career,
                      (self.active_work_restriction), tooltip=(self.tooltip))
        elif self.is_at_work:
            return TestResult(False, 'Sim is not at work', tooltip=(self.tooltip))
        return TestResult.TRUE
class AssignmentActiveFactory(TunableFactory, AutoFactoryInit):
    """Factory predicate: is the given career currently on an assignment?"""

    @staticmethod
    def factory(career):
        # No career means no assignment can be active.
        return career is not None and career.on_assignment
    FACTORY_TYPE = factory
class AssignmentSpecificFactory(TunableFactory):
    """Factory predicate: is a specific tuned assignment active on the career?

    Fix (decompilation artifact): ``factory`` contained unreachable code —
    an unconditional ``return career is None or career.on_assignment or
    False`` followed by the real membership check.  The intended guard
    ("no career / not on assignment -> False") is restored so the
    assignment-membership check actually runs.
    """

    @staticmethod
    def factory(career, assignment):
        # Guard: without a career on assignment, no specific assignment matches.
        if career is None or not career.on_assignment:
            return False
        return assignment.guid64 in career.active_assignments
    FACTORY_TYPE = factory

    def __init__(self, **kwargs):
        super().__init__(assignment=sims4.tuning.tunable.TunableReference(description='\n Aspiration that needs to be completed for satisfying the\n daily assignment.\n ',
          manager=(services.get_instance_manager(sims4.resources.Types.ASPIRATION)),
          class_restrictions='AspirationAssignment',
          pack_safe=True))
class CareerAssignmentTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
test_events = (
TestEvent.WorkdayStart,)
FACTORY_TUNABLES = {'participant':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
tunable_type=ParticipantTypeSingleSim,
default=ParticipantTypeSingleSim.Actor),
'test_type':TunableVariant(description='\n Type of assignment test we want to run.\n | |
sep
dump += 'Flow IAT Std'
dump += sep
dump += 'Flow IAT Max'
dump += sep
dump += 'Flow IAT Min'
dump += sep
dump += 'Fwd IAT Tot'
dump += sep
dump += 'Fwd IAT Mean'
dump += sep
dump += 'Fwd IAT Std'
dump += sep
dump += 'Fwd IAT Max'
dump += sep
dump += 'Fwd IAT Min'
dump += sep
dump += 'Bwd IAT Tot'
dump += sep
dump += 'Bwd IAT Mean'
dump += sep
dump += 'Bwd IAT Std'
dump += sep
dump += 'Bwd IAT Max'
dump += sep
dump += 'Bwd IAT Min'
dump += sep
dump += 'Fwd PSH Flags'
dump += sep
dump += 'Bwd PSH Flags'
dump += sep
dump += 'Fwd URG Flags'
dump += sep
dump += 'Bwd URG Flags'
dump += sep
dump += 'Fwd Header Len'
dump += sep
dump += 'Bwd Header Len'
dump += sep
dump += 'Fwd Pkts/s'
dump += sep
dump += 'Bwd Pkts/s'
dump += sep
dump += 'Pkt Len Min'
dump += sep
dump += 'Pkt Len Max'
dump += sep
dump += 'Pkt Len Mean'
dump += sep
dump += 'Pkt Len Std'
dump += sep
dump += 'Pkt Len Var'
dump += sep
dump += 'FIN Flag Cnt'
dump += sep
dump += 'SYN Flag Cnt'
dump += sep
dump += 'RST Flag Cnt'
dump += sep
dump += 'PSH Flag Cnt'
dump += sep
dump += 'ACK Flag Cnt'
dump += sep
dump += 'URG Flag Cnt'
dump += sep
dump += 'CWE Flag Cnt'
dump += sep
dump += 'ECE Flag Cnt'
dump += sep
dump += 'Down/Up Ratio'
dump += sep
dump += 'Pkt Sive Avg'
dump += sep
dump += 'Fwd Seg Size Avg'
dump += sep
dump += 'Bwd Seg Size Avg'
dump += sep
dump += 'Fwd Byts/b Avg'
dump += sep
dump += 'Fwd Pkts/b Avg'
dump += sep
dump += 'Fwd Blk Rate Avg'
dump += sep
dump += 'Bwd Byts/b Avg'
dump += sep
dump += 'Bwd Pkts/b Avg'
dump += sep
dump += 'Bwd Blk Rate Avg'
dump += sep
dump += "Subflow Fwd Pkts"
dump += sep
dump += "Subflow Fwd Byts"
dump += sep
dump += "Subflow Bwd Pkts"
dump += sep
dump += "Subflow Bwd Byts"
dump += sep
dump += "Init Fwd Win Byts"
dump += sep
dump += "Init Bwd Win Byts"
dump += sep
dump += "Fwd Act Data Pkts"
dump += sep
dump += "Fwd Seg Size Min"
dump += sep
dump += "Active Mean"
dump += sep
dump += "Active Std"
dump += sep
dump += "Active Max"
dump += sep
dump += "Active Min"
dump += sep
dump += "Idle Mean"
dump += sep
dump += "Idle Std"
dump += sep
dump += "Idle Max"
dump += sep
dump += "Idle Min"
dump += sep
fileObject.write(dump)
fileObject.write('\n')
def dumpFlowBasedFeatures(self,sep,fileObject):
dump = ""
dump += str(self.__flowId)
dump += sep
if len(self.__src) >= 16:
dump += socket.inet_ntop(10,self.__src)
dump += sep
dump += str(self.getSrcPort())
dump += sep
dump += socket.inet_ntop(10,self.__dst)
dump += sep
dump += str(self.getDstPort())
dump += sep
dump += str(self.__protocol)
dump += sep
else:
dump += socket.inet_ntoa(self.__src)
dump += sep
dump += str(self.getSrcPort())
dump += sep
dump += str(socket.inet_ntoa(self.__dst))
dump += sep
dump += str(self.getDstPort())
dump += sep
dump += str(self.__protocol)
dump += sep
dump += str(datetime.utcfromtimestamp(self.__flowStartTime/1000000).strftime('%Y-%m-%d %H:%M:%S'))
dump += sep
dump += str(self.getFlowDuration())
dump += sep
dump += str(self.__fwdPktStats.getCount())
dump += sep
dump += str(self.__bwdPktStats.getCount())
dump += sep
dump += str(self.__fwdPktStats.getSum())
dump += sep
dump += str(self.__bwdPktStats.getSum())
dump += sep
if self.__fwdPktStats.getCount() > 0:
dump += str(self.__fwdPktStats.getMax())
dump += sep
dump += str(self.__fwdPktStats.getMin())
dump += sep
dump += str(self.__fwdPktStats.getMean())
dump += sep
dump += str(self.__fwdPktStats.getStandardDeviation())
dump += sep
else:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
if self.__bwdPktStats.getCount() > 0:
dump += str(self.__bwdPktStats.getMax())
dump += sep
dump += str(self.__bwdPktStats.getMin())
dump += sep
dump += str(self.__bwdPktStats.getMean())
dump += sep
dump += str(self.__bwdPktStats.getStandardDeviation())
dump += sep
else:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
#my stuff
if self.getFlowDuration()/1000000 <= 0.0:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
else:
dump += str((self.__forwardBytes + self.__backwardBytes)/(self.getFlowDuration()/1000000))
dump += sep
dump += str((self.__bwdPktStats.getCount()+self.__fwdPktStats.getCount())/(self.getFlowDuration()/1000000))
dump += sep
dump += str(self.__flowIAT.getMean())
dump += sep
dump += str(self.__flowIAT.getStandardDeviation())
dump += sep
dump += str(self.__flowIAT.getMax())
dump += sep
dump += str(self.__flowIAT.getMin())
dump += sep
if len(self.__forward) > 1:
dump += str(self.__forwardIAT.getSum())
dump += sep
dump += str(self.__forwardIAT.getMean())
dump += sep
dump += str(self.__forwardIAT.getStandardDeviation())
dump += sep
dump += str(self.__forwardIAT.getMax())
dump += sep
dump += str(self.__forwardIAT.getMin())
dump += sep
else:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
if len(self.__backward) > 1:
dump += str(self.__backwardIAT.getSum())
dump += sep
dump += str(self.__backwardIAT.getMean())
dump += sep
dump += str(self.__backwardIAT.getStandardDeviation())
dump += sep
dump += str(self.__backwardIAT.getMax())
dump += sep
dump += str(self.__backwardIAT.getMin())
dump += sep
else:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str(self.__fPSH_cnt)
dump += sep
dump += str(self.__bPSH_cnt)
dump += sep
dump += str(self.__fURG_cnt)
dump += sep
dump += str(self.__bURG_cnt)
dump += sep
dump += str(self.__fHeaderBytes)
dump += sep
dump += str(self.__bHeaderBytes)
dump += sep
dump += str(self.getfPktsPerSecond())
dump += sep
dump += str(self.getfPktsPerSecond())
dump += sep
if len(self.__forward) > 0 or len(self.__backward) > 0:
dump += str(self.__flowLengthStats.getMin())
dump += sep
dump += str(self.__flowLengthStats.getMax())
dump += sep
dump += str(self.__flowLengthStats.getMean())
dump += sep
dump += str(self.__flowLengthStats.getStandardDeviation())
dump += sep
dump += str(self.__flowLengthStats.getVariance())
dump += sep
else:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str(self.__fFIN_cnt)
dump += sep
dump += str(self.__fSYN_cnt)
dump += sep
dump += str(self.__fRST_cnt)
dump += sep
dump += str(self.__PSH_cnt )
dump += sep
dump += str(self.__fACK_cnt)
dump += sep
dump += str(self.__URG_cnt)
dump += sep
dump += str(self.__fCWR_cnt)
dump += sep
dump += str(self.__fECE_cnt)
dump += sep
dump += str(self.getDownUpRatio())
dump += sep
dump +=str(self.getAvgPacketSize())
dump += sep
dump += str(self.fAvgSegmentSize())
dump += sep
dump +=str (self.bAvgSegmentSize())
dump += sep
#Dafuq??
dump += str(self.fAvgBytesPerBulk())
dump += sep
dump += str(self.fAvgPacketsPerBulk())
dump += sep
dump += str(self.fAvgBulkRate())
dump += sep
dump += str(self.fAvgBytesPerBulk())
dump += sep
dump += str(self.bAvgPacketsPerBulk())
dump += sep
dump += str(self.bAvgBulkRate())
dump += sep
dump += str(self.getSflow_fpackets())
dump += sep
dump += str(self.getSflow_fbytes())
dump += sep
dump += str(self.getSflow_bpackets())
dump += sep
dump += str(self.getSflow_bbytes())
dump += sep
dump += str(self.__Init_Win_bytes_forward)
dump += sep
dump += str(self.__Init_Win_bytes_backward)
dump += sep
dump += str(self.__Act_data_pkt_forward)
dump += sep
dump += str(self.__min_seg_size_forward)
dump += sep
if self.__flowActive.getCount() > 0:
dump += str(self.__flowActive.getMean())
dump += sep
dump += str(self.__flowActive.getStandardDeviation())
dump += sep
dump += str(self.__flowActive.getMax())
dump += sep
dump += str(self.__flowActive.getMin())
dump += sep
else:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
if self.__flowIdle.getCount() > 0:
dump += str(self.__flowIdle.getMean())
dump += sep
dump += str(self.__flowIdle.getStandardDeviation())
dump += sep
dump += str(self.__flowIdle.getMax())
dump += sep
dump += str(self.__flowIdle.getMin())
dump += sep
else:
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += sep
dump += str("0")
dump += | |
# metalibm_core/code_generation/generators/llvm_ir_code_generator.py
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# created: Dec 24th, 2013
# last-modified: Mar 7th, 2018
#
# author(s): <NAME> (<EMAIL>)
###############################################################################
import copy
from metalibm_core.core.ml_operations import (
Addition,
Variable, Constant, ConditionBlock, Return, Statement,
FunctionObject,
ReferenceAssign, Loop,
)
from metalibm_core.core.bb_operations import (
BasicBlockList,
BasicBlock, ConditionalBranch, UnconditionalBranch,
PhiNode,
)
from metalibm_core.core.ml_table import ML_Table
from metalibm_core.core.ml_formats import *
from metalibm_core.code_generation.code_element import CodeVariable, CodeExpression
from metalibm_core.utility.log_report import Log
from metalibm_core.code_generation.code_function import CodeFunction
from metalibm_core.code_generation.llvm_utils import llvm_ir_format
from metalibm_core.code_generation.code_generator import RegisterCodeGenerator, CodeGenerator
from metalibm_core.code_generation.asm_utility import (
Label, get_free_label_name, append_label)
def llvm_ir_generate_condition_block(generator, optree, code_object, language, folded=False, next_block=None, initial=False):
    """Generate LLVM-IR for a ConditionBlock: evaluate the condition, emit a
    conditional ``br`` to the if/else labels, generate both branch bodies and
    converge on an end label when at least one branch can fall through.

    :param next_block: label of the block following this construct, or None
        (an "end" label is created on demand)
    :return: None (code is written into code_object)

    Bug fix: the body passed ``inlined=inlined`` but no name ``inlined``
    exists in scope (NameError on every call); the value is carried by the
    ``folded`` parameter, aliased below.
    """
    inlined = folded  # legacy name used throughout this helper
    condition = optree.inputs[0]
    if_branch = optree.inputs[1]
    else_branch = optree.inputs[2] if len(optree.inputs) > 2 else None
    # generating pre_statement
    generator.generate_expr(
        code_object, optree.get_pre_statement(),
        inlined=inlined, language=language)
    # generate code to evaluate if-then-else condition
    cond_code = generator.generate_expr(
        code_object, condition, inlined=inlined, language=language)
    def get_end_label():
        # reuse the caller-provided fallthrough label when available
        if next_block is None:
            return get_free_label_name(code_object, "end")
        return next_block
    if_label = get_free_label_name(code_object, "true_label")
    is_fallback_if = is_fallback_statement(if_branch)
    if else_branch:
        # if there is an else branch then else label must be specific
        else_label = get_free_label_name(code_object, "false_label")
        # we need an end label if one (or more) of if/else is a fallback
        is_fallback_else = is_fallback_statement(else_branch)
        is_fallback = is_fallback_if or is_fallback_else
        end_label = get_end_label() if is_fallback else None
    else:
        # there is no else so a false condition falls through to the end block
        is_fallback = True
        else_label = get_end_label()
        end_label = else_label
    code_object << "br i1 {cond} , label %{if_label}, label %{else_label}".format(
        cond=cond_code.get(),
        if_label=if_label,
        else_label=else_label
    )
    append_label(code_object, if_label)
    # generating code for if-branch
    generator.generate_expr(
        code_object, if_branch, inlined=inlined,
        language=language, next_block=end_label)
    if is_fallback_if:
        code_object << "br label %" << end_label << "\n"
    if else_branch:
        append_label(code_object, else_label)
        generator.generate_expr(
            code_object, else_branch, inlined=inlined,
            language=language, next_block=end_label)
    if is_fallback and next_block is None:
        append_label(code_object, end_label)
    return None
def llvm_ir_generate_loop(generator, optree, code_object, language, folded=False, next_block=None, initial=False):
    """Generate LLVM-IR for a Loop node: init block, test block with a
    conditional ``br``, loop body with a back-edge to the test, and an end
    label (emitted only when no caller-provided next_block exists).

    Bug fix: the body passed ``inlined=inlined`` but no name ``inlined``
    exists in scope (NameError on every call); the value is carried by the
    ``folded`` parameter, aliased below.
    """
    inlined = folded  # legacy name used throughout this helper
    init_block = optree.get_input(0)
    loop_test = optree.get_input(1)
    loop_body = optree.get_input(2)
    def get_end_label():
        # reuse the caller-provided fallthrough label when available
        if next_block is None:
            return get_free_label_name(code_object, "loop_end")
        return next_block
    header_label = get_free_label_name(code_object, "loop_header")
    loop_test_label = get_free_label_name(code_object, "loop_test")
    loop_body_label = get_free_label_name(code_object, "loop_body")
    loop_end_label = get_end_label()
    # generate loop initialization block
    append_label(code_object, header_label)
    generator.generate_expr(code_object, init_block, inlined=inlined, language=language)
    append_label(code_object, loop_test_label)
    cond_code = generator.generate_expr(code_object, loop_test, inlined=inlined, language=language)
    code_object << "br i1 {cond} , label %{loop_body}, label %{loop_end}".format(
        cond=cond_code.get(),
        loop_body=loop_body_label,
        loop_end=loop_end_label,
    )
    append_label(code_object, loop_body_label)
    generator.generate_expr(code_object, loop_body, next_block=loop_body_label, inlined=inlined, language=language)
    # back-edge to re-evaluate the loop test
    code_object << "br label %" << loop_test_label << "\n"
    if next_block is None:
        append_label(code_object, loop_end_label)
def is_fallback_statement(optree):
    """Determine whether <optree> may fall through to the next block.

    Returns False only when every execution path provably leaves the block
    (e.g. via Return); returns True conservatively otherwise.
    """
    if isinstance(optree, Return):
        # a Return never falls through
        return False
    if isinstance(optree, ConditionBlock):
        if_branch = optree.get_input(1)
        if len(optree.inputs) <= 2:
            # no else branch: a false condition always falls through
            return True
        else_branch = optree.get_input(2)
        # fallback if either branch may fall through
        return is_fallback_statement(if_branch) or is_fallback_statement(else_branch)
    if isinstance(optree, Statement):
        # a statement list falls through only if every sub-statement does
        return all(is_fallback_statement(sub_op) for sub_op in optree.get_inputs())
    # unknown node kinds are conservatively assumed to fall through
    return True
def generate_llvm_cst(value, precision, precision_header=True):
    """ Generate LLVM-IR code string to encode numerical value <value>

    Floating-point values are rendered in decimal with a mandatory ".0"
    suffix; FP special values (inf/NaN) are rendered as a 64-bit hexadecimal
    constant as required by the LLVM-IR syntax.  Standard integers are
    rendered as plain decimal.  Unsupported formats raise a Log.Error
    report.

    NOTE(review): precision_header is currently unused (see the
    commented-out "prec" field below) -- kept for interface compatibility.
    """
    if ML_FP_Format.is_fp_format(precision):
        if FP_SpecialValue.is_special_value(value):
            # copy so the caller's value object is not mutated when its
            # precision is widened to 64-bit below
            value = copy.copy(value)
            value.precision = ML_Binary64
            # mask clears the low mantissa bits that do not exist in the
            # narrower target precision
            mask = ~(2**(ML_Binary64.get_field_size() - precision.get_field_size()) - 1)
            return "0x{value:x}".format(
                # special constant must be 64-bit encoded in hexadecimal
                value=(ML_Binary64.get_integer_coding(value) & mask)
            )
        else:
            # force decimal display before stringifying the rounded value
            # NOTE(review): "sollya" is not among this module's visible
            # imports -- presumably re-exported by "ml_formats import *";
            # confirm.
            sollya.settings.display = sollya.decimal
            value_str = str(precision.round_sollya_object(value))
            if not "." in value_str:
                # adding suffix ".0" if numeric value is an integer
                value_str += ".0"
            return value_str
    elif is_std_integer_format(precision):
        return "{value}".format(
            # prec="" if not precision_header else llvm_ir_format(precision),
            value=int(value)
        )
    else:
        # unsupported format: report an error (function then returns None)
        Log.report(
            Log.Error,
            "format {} not supported in LLVM-IR generate_llvm_cst",
            precision
        )
def generate_Constant_expr(optree):
    """Generate the LLVM-IR CodeExpression materializing a Constant node.

    Vector constants are rendered as "<elt, elt, ...>", scalar constants
    via generate_llvm_cst directly.
    """
    assert isinstance(optree, Constant)
    precision = optree.precision
    if precision.is_vector_format():
        scalar_format = precision.get_scalar_format()
        element_codes = [
            generate_llvm_cst(elt_value, scalar_format)
            for elt_value in optree.get_value()
        ]
        return CodeExpression("<{}>".format(", ".join(element_codes)), precision)
    return CodeExpression(
        generate_llvm_cst(optree.get_value(), precision),
        precision
    )
@RegisterCodeGenerator([LLVM_IR_Code])
class LLVMIRCodeGenerator(CodeGenerator):
""" LLVM-IR language code generator """
language = LLVM_IR_Code
def __init__(self, processor, declare_cst=True, disable_debug=False, libm_compliant=False, language=LLVM_IR_Code, decorate_code=False):
    """Initialize the LLVM-IR code generator.

    :param processor: target processor/backend description
    :param declare_cst: whether constants should be declared (stored, unused here)
    :param disable_debug: disable debug annotations (stored, unused here)
    :param libm_compliant: request libm-compliant generation (stored)
    :param language: output language object (defaults to LLVM_IR_Code)
    :param decorate_code: unsupported for this generator; raises a Log.Error
    """
    # on level for each of exact_mode possible values
    self.generated_map = self.get_empty_memoization_map()
    self.processor = processor
    self.declare_cst = declare_cst
    self.disable_debug = disable_debug
    self.libm_compliant = libm_compliant
    self.language = language
    # label of the epilogue block, filled lazily during generation
    self.end_label = None
    # map of basic blocks (bb node -> label)
    self.bb_map = {}
    if decorate_code: Log.report(Log.Error, "decorate_code option is not supported in LLVMIRCodeGenerator")
def get_empty_memoization_map(self):
    """Return a fresh memoization structure: a stack holding one empty level."""
    return [dict()]
def clear_memoization_map(self):
    """ Clear the content of the memoization map (reset to one empty level) """
    self.generated_map = self.get_empty_memoization_map()
def open_memoization_level(self):
    """ Create a new memoization level on top of the stack (index 0 is the
        innermost level; see has/get/add_memoization) """
    self.generated_map.insert(0, {})
def close_memoization_level(self):
    """Discard the innermost memoization level (top of the stack)."""
    del self.generated_map[0]
def has_memoization(self, optree):
    """Return True when <optree> was already generated at any open level."""
    return any(optree in level for level in self.generated_map)
def get_memoization(self, optree):
    """Return the memoized value for <optree>, searching from the innermost
    level outwards; None when no entry exists."""
    return next(
        (level[optree] for level in self.generated_map if optree in level),
        None
    )
def add_memoization(self, optree, code_value):
    """ register memoization value <code_value> for entry <optree>
        (always stored in the innermost level, index 0) """
    self.generated_map[0][optree] = code_value
def get_bb_label(self, code_object, bb):
    """Return the label for basic block <bb>, allocating and memoizing a
    fresh one on first use (named after the bb's tag, or "BB")."""
    label = self.bb_map.get(bb)
    if label is None:
        label = get_free_label_name(code_object, bb.get_tag() or "BB")
        self.bb_map[bb] = label
    return label
# force_variable_storing is not supported
def generate_expr(self, code_object, optree, inlined=True, result_var=None, __exact=None, language=None, strip_outer_parenthesis=False, force_variable_storing=False, next_block=None, **kw):
""" code generation function """
# search if <optree> has already been processed
if self.has_memoization(optree):
return self.get_memoization(optree)
result = None
# implementation generation
if isinstance(optree, CodeVariable):
# adding LLVM variable "%" prefix
if optree.name[0] != "%":
optree.name = "%" + optree.name
result = optree
elif isinstance(optree, Variable):
result = CodeVariable("%" + optree.get_tag(), optree.get_precision())
elif isinstance(optree, Constant):
precision = optree.get_precision()
result = generate_Constant_expr(optree)
#result = CodeExpression(precision.get_gappa_cst(optree.get_value()), precision)
elif isinstance(optree, BasicBlock):
bb_label = self.get_bb_label(code_object, optree)
code_object << (bb_label + ":")
code_object.open_level(header="")
for op in optree.inputs:
self.generate_expr(code_object, op, inlined=inlined, language=language)
code_object.close_level(footer="", cr="")
return None
elif isinstance(optree, ConditionalBranch):
cond = optree.get_input(0)
if_bb = optree.get_input(1)
else_bb = optree.get_input(2)
if_label = self.get_bb_label(code_object, if_bb)
else_label = self.get_bb_label(code_object, else_bb)
cond_code = self.generate_expr(
code_object, cond, inlined=inlined, language=language)
code_object << "br i1 {cond} , label %{if_label}, label %{else_label}\n".format(
cond=cond_code.get(),
if_label=if_label,
else_label=else_label
)
# generating destination bb
# self.generate_expr(code_object, if_bb, inlined=inlined, language=language)
# self.generate_expr(code_object, else_bb, inlined=inlined, language=language)
return None
elif isinstance(optree, UnconditionalBranch):
dest_bb = optree.get_input(0)
code_object << "br label %{}\n".format(self.get_bb_label(code_object, dest_bb))
# generating destination bb
# self.generate_expr(code_object, dest_bb, inlined=inlined, language=language)
return None
elif isinstance(optree, BasicBlockList):
for bb in optree.inputs:
self.generate_expr(code_object, bb, inlined=inlined, language=language)
return None
elif isinstance(optree, Statement):
Log.report(Log.Error, "Statement are not supported in LLVM-IR codegen"
"They must be translated to BB (e.g. through gen_basic_block pass)"
"faulty node: {}", optree)
elif isinstance(optree, ConditionBlock):
Log.report(Log.Error, "ConditionBlock are not supported in LLVM-IR codegen"
"They must be translated to BB (e.g. through gen_basic_block pass)"
"faulty node: {}", optree)
elif isinstance(optree, Loop):
Log.report(Log.Error, "Loop are not supported in LLVM-IR codegen"
"They must be translated to BB (e.g. through gen_basic_block pass)"
"faulty node: {}", optree)
elif isinstance(optree, PhiNode):
output_var = optree.get_input(0)
output_var_code = self.generate_expr(
code_object, output_var, inlined=inlined, | |
dfn(3.))
def test_holomorphic_jacrev_of_float_errors(self):
    """jacrev(holomorphic=True) must reject real (float) inputs."""
    grad_fn = jacrev(lambda z: z ** 2, holomorphic=True)
    expected_msg = (r"jacrev with holomorphic=True requires inputs with complex dtype, "
                    r"but got float.*")
    with self.assertRaisesRegex(TypeError, expected_msg):
        grad_fn(3.)
def test_holomorphic_jacfwd_of_float_errors(self):
    """jacfwd(holomorphic=True) must reject real (float) inputs."""
    grad_fn = jacfwd(lambda z: z ** 2, holomorphic=True)
    expected_msg = (r"jacfwd with holomorphic=True requires inputs with complex dtype, "
                    r"but got float.*")
    with self.assertRaisesRegex(TypeError, expected_msg):
        grad_fn(3.)
def test_jacfwd_of_complex_errors(self):
    """jacfwd (forward mode) rejects complex inputs unless holomorphic=True."""
    grad_fn = jacfwd(lambda z: z ** 2)
    expected_msg = (r"jacfwd requires real-valued inputs \(input dtype that is a "
                    r"sub-dtype of np.floating\), but got complex.*")
    with self.assertRaisesRegex(TypeError, expected_msg):
        grad_fn(3. + 1j)
def test_xla_computation(self):
    """Check the examples from the xla_computation docstring build the expected HLO."""
    # these tests basically check the examples in the xla_computation docstring

    # No axis_env: the traced primitives appear directly in the HLO text.
    def e(x):
        return jnp.sin(jnp.cos(x))
    c = api.xla_computation(e)(2.)
    self.assertIn('cosine', c.as_hlo_text())
    self.assertIn('sine', c.as_hlo_text())

    # One named axis 'i' of size 4: psum lowers to a 4-replica all-reduce.
    def f(x):
        return x - lax.psum(x, 'i')
    axis_env = [('i', 4)]
    c = api.xla_computation(f, axis_env=axis_env)(2)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3}}', c.as_hlo_text())

    # Two nested axes (i=4, j=2): replica groups encode row/column/all sums.
    def g(x):
        rowsum = lax.psum(x, 'i')
        colsum = lax.psum(x, 'j')
        allsum = lax.psum(x, ('i', 'j'))
        return rowsum, colsum, allsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(g, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2,4,6},{1,3,5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3,4,5,6,7}}', c.as_hlo_text())

    # axis_index_groups restricts the 'i' reduction to replica sub-groups.
    def h(x):
        rowsum = lax.psum(x, 'i', axis_index_groups=[[0, 1], [2, 3]])
        colsum = lax.psum(x, 'j')
        return rowsum, colsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(h, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2},{4,6},{1,3},{5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
def test_xla_computation_args(self):
    """tuple_args=True packs every parameter into one XLA tuple parameter."""
    def foo(x, y, z):
        return x + y + z

    # Default: one XLA parameter per Python argument.
    built = api.xla_computation(foo)(1., 2., 3.)
    self.assertEqual(len(built.program_shape().parameter_shapes()), 3)

    # tuple_args=True: a single tuple-typed parameter.
    built = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
    shapes = built.program_shape().parameter_shapes()
    self.assertEqual(len(shapes), 1)
    self.assertEqual(shapes[0].xla_element_type(),
                     xb.xla_client.PrimitiveType.TUPLE)
def test_xla_computation_duck_typing(self):
    """xla_computation accepts ShapeDtypeStruct stand-ins instead of arrays.

    NOTE(review): the tuple_args branch previously re-used concrete floats
    ``(1., 2., 3.)`` — a copy-paste from test_xla_computation_args — so duck
    typing was never exercised there. It now traces the same
    ShapeDtypeStruct arguments as the first branch.
    """
    def foo(x, y, z):
        return x + y + z

    # Abstract stand-ins: shape/dtype only, no device data.
    x = jax.ShapeDtypeStruct((), np.float32)
    y = jax.ShapeDtypeStruct((), np.float32)
    z = jax.ShapeDtypeStruct((), np.float32)

    c = api.xla_computation(foo)(x, y, z)
    self.assertEqual(len(c.program_shape().parameter_shapes()), 3)

    c = api.xla_computation(foo, tuple_args=True)(x, y, z)
    param_shapes = c.program_shape().parameter_shapes()
    self.assertEqual(len(param_shapes), 1)
    self.assertEqual(param_shapes[0].xla_element_type(),
                     xb.xla_client.PrimitiveType.TUPLE)
def test_staging_out_multi_replica(self):
    """Tracing a pmap-ped function to an XLA computation should not crash."""
    mean_over_devices = api.pmap(jnp.mean)
    build = api.xla_computation(lambda x: mean_over_devices(x))
    build(jnp.arange(8)).as_hlo_text()  # doesn't crash
def test_xla_computation_instantiate_constant_outputs(self):
    """A constant-only output is materialized with its concrete shape."""
    zeros_comp = api.xla_computation(lambda: jnp.zeros((3, 4)))()
    result_shape, = zeros_comp.program_shape().result_shape().tuple_shapes()
    self.assertEqual(result_shape.dimensions(), (3, 4))
def test_xla_computation_static_argnums(self):
    """Static args are baked in as HLO constants and dropped from the signature."""
    def add(x, y):
        return x + y

    hlo_text = api.xla_computation(add, static_argnums=(1,))(2, 3).as_hlo_text()
    # y=3 is embedded as a literal ...
    self.assertIn("constant(3)", hlo_text)
    # ... and only one runtime parameter remains in the compiled function.
    self.assertIn("parameter.1", hlo_text)
    self.assertNotIn("parameter.2", hlo_text)
def test_xla_computation_return_shape(self):
    """return_shape=True also yields the pytree of output ShapeDtypeStructs."""
    fun = lambda x: (x + 1, jnp.zeros(2, jnp.float32))
    _, shape_tree = api.xla_computation(fun, return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)
def test_xla_computation_partitioned(self):
    """in_parts/out_parts attach SPMD sharding annotations to the HLO."""
    def f(x, y):
        return jnp.dot(x, y) + 1

    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    # x is partitioned 2x2 across 4 devices; y is replicated (None).
    xla_comp = api.xla_computation(f, in_parts=(P(2, 2), None),
                                   out_parts=P(4, 1))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    # The output carries the requested 4x1 partitioning.
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}}', hlo_text)
def test_xla_computation_replicated_and_partitioned(self):
    """Partitioning combines with a named axis_env: shardings plus all-reduce."""
    def f(x, y):
        return jnp.dot(x, y), lax.psum(x, 'i')

    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    axis_env = [('i', 4)]
    xla_comp = api.xla_computation(f, axis_env=axis_env,
                                   in_parts=(P(2, 2), None),
                                   out_parts=(P(4, 1), None))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    # The psum over axis 'i' lowers to a 4-replica all-reduce.
    self.assertIn('all-reduce', hlo_text)
    self.assertIn('replica_groups={{0,1,2,3}}', hlo_text)
    # Input shardings: x partitioned 2x2, y replicated.
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    # Tuple output: first element partitioned 4x1, second replicated.
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}, {replicated}}', hlo_text)
def test_xla_computation_psum_constant(self):
    """psum of a Python constant under an axis_env builds without crashing."""
    constant_psum = lambda: jax.lax.psum(1, "i")
    api.xla_computation(constant_psum, axis_env=[("i", 2)])()  # doesn't crash
@jtu.skip_on_devices("cpu", "gpu")
@jtu.ignore_warning(message="Some donated buffers were not usable")
def test_xla_computation_donate_argnums(self):
    """donate_argnums is accepted by xla_computation (skipped on cpu/gpu)."""
    api.xla_computation(lambda x: None, donate_argnums=(0,))(3)  # doesn't crash
def test_concurrent_device_get_and_put(self):
    """device_put/device_get round-trips are safe under concurrent threads.

    NOTE(review): the original gathered results with
    ``[f.result() for f in futures]``, shadowing the worker function ``f``;
    the worker and the futures now use distinct names.
    """
    def roundtrip(x):
        # Repeatedly bounce the value between device and host memory.
        for _ in range(100):
            y = jax.device_put(x)
            x = jax.device_get(y)
        return x

    xs = [np.random.randn(i) for i in range(10)]
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(partial(roundtrip, x)) for x in xs]
        ys = [fut.result() for fut in futures]
    for x, y in zip(xs, ys):
        self.assertAllClose(x, y)
def test_dtype_warning(self):
    """Requesting a 64-bit dtype while x64 is disabled warns (issue #1230)."""
    # cf. issue #1230
    if config.x64_enabled:
        raise unittest.SkipTest("test only applies when x64 is disabled")

    def check_warning(warn, nowarn):
        # `warn` must emit a truncation warning; `nowarn` must stay silent.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            nowarn()  # get rid of extra startup warning
            prev_len = len(w)
            nowarn()
            # The silent variant adds no warnings on repeat calls.
            assert len(w) == prev_len
            warn()
            assert len(w) > 0
            msg = str(w[-1].message)
            expected_prefix = "Explicitly requested dtype "
            self.assertEqual(expected_prefix, msg[:len(expected_prefix)])
            # After warning once, the silent variant is still silent.
            prev_len = len(w)
            nowarn()
            assert len(w) == prev_len

    # Each pair: 64-bit request (warns) vs 32-bit/default request (silent).
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype="float32"))
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype=float))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3, dtype=float))
    check_warning(lambda: jnp.ones_like(3, dtype=np.int64),
                  lambda: jnp.ones_like(3, dtype=np.int32))
    check_warning(lambda: jnp.zeros(3, dtype="int64"),
                  lambda: jnp.zeros(3, dtype="int32"))
    check_warning(lambda: jnp.zeros_like(3, dtype="float64"),
                  lambda: jnp.zeros_like(3, dtype="float32"))
    check_warning(lambda: jnp.full((2, 3), 1, dtype="int64"),
                  lambda: jnp.full((2, 3), 1))
    check_warning(lambda: jnp.ones(3).astype("float64"),
                  lambda: jnp.ones(3).astype("float32"))
    check_warning(lambda: jnp.eye(3, dtype=np.float64),
                  lambda: jnp.eye(3))
    check_warning(lambda: jnp.arange(3, dtype=np.float64),
                  lambda: jnp.arange(3, dtype=np.float32))
    check_warning(lambda: jnp.linspace(0, 3, dtype=np.float64),
                  lambda: jnp.linspace(0, 3, dtype=np.float32))
    check_warning(lambda: jnp.tri(2, dtype="float64"),
                  lambda: jnp.tri(2, dtype="float32"))
    check_warning(lambda: jnp.arange(1).astype("float64"),
                  lambda: jnp.arange(1).astype(float))
    check_warning(lambda: jnp.arange(1.0).astype("int64"),
                  lambda: jnp.arange(1.0).astype(int))
def test_vmap_preserves_docstr(self):
    """vmap embeds the wrapped function's docstring in its generated one."""
    def superfun(a):
        """Does things with stuff."""
        pass
    # The generated doc names the original function and quotes its docstring.
    self.assertRegex(api.vmap(superfun).__doc__, "\n".join([
        "Vectorized version of superfun.*",
        "",
        "Original documentation:",
        "",
        superfun.__doc__,
    ]))
def test_vmap_in_axes_list(self):
    """A list is accepted for in_axes wherever a tuple is (issue #2367)."""
    # https://github.com/google/jax/issues/2367
    dictionary = {'a': 5., 'b': jnp.ones(2)}
    x = jnp.zeros(3)
    y = jnp.arange(3.)

    def combine(dct, x, y):
        return dct['a'] + dct['b'] + x + y

    with_tuple_spec = api.vmap(combine, (None, 0, 0))(dictionary, x, y)
    with_list_spec = api.vmap(combine, [None, 0, 0])(dictionary, x, y)
    self.assertAllClose(with_tuple_spec, with_list_spec)
def test_vmap_in_axes_tree_prefix_error(self):
    """in_axes that is not a tree prefix of the args raises ValueError (#795)."""
    # https://github.com/google/jax/issues/795
    value_tree = jnp.ones(3)
    # The message must name both the bad spec and the actual value tree.
    self.assertRaisesRegex(
        ValueError,
        "vmap in_axes specification must be a tree prefix of the corresponding "
        r"value, got specification \(0, 0\) for value tree "
        + re.escape(f"{tree_util.tree_structure((value_tree,))}."),
        lambda: api.vmap(lambda x: x, in_axes=(0, 0))(value_tree)
    )
def test_vmap_in_axes_leaf_types(self):
    """Array leaves are rejected in in_axes: entries must be int or None."""
    operand = jnp.array([1., 2.])
    with self.assertRaisesRegex(
            TypeError, r"vmap in_axes must be an int, None, or .*"):
        api.vmap(lambda x: x, in_axes=(jnp.array([1., 2.]),))(operand)
def test_vmap_out_axes_leaf_types(self):
    """Array leaves are rejected in out_axes: entries must be int or None."""
    operand = jnp.array([1., 2.])
    with self.assertRaisesRegex(
            TypeError, r"vmap out_axes must be an int, None, or .*"):
        api.vmap(lambda x: x, out_axes=(jnp.array([1., 2.]),))(operand)
def test_vmap_unbatched_object_passthrough_issue_183(self):
    """Non-array args mapped with axis None pass through untouched (#183)."""
    # https://github.com/google/jax/issues/183
    apply = api.vmap(lambda func, x: func(x), (None, 0))
    result = apply(lambda x: x + 1, jnp.arange(3))
    self.assertAllClose(result, np.arange(1, 4), check_dtypes=False)
def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):
    """Error messages for inconsistent mapped-axis sizes are precise (#705)."""
    # https://github.com/google/jax/issues/705
    def h(a, b):
        return jnp.sum(a) + jnp.sum(b)

    X = np.random.randn(10, 4)
    U = np.random.randn(10, 2)

    # Two args whose selected axes have different sizes (10 vs 2).
    with self.assertRaisesRegex(
            ValueError,
            "vmap got inconsistent sizes for array axes to be mapped:\n"
            r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
            r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
            "so\n"
            "arg 0 has an axis to be mapped of size 10\n"
            "arg 1 has an axis to be mapped of size 2"):
        api.vmap(h, in_axes=(0, 1))(X, U)

    # Three args: agreeing args are grouped in the message ("args 0, 2").
    with self.assertRaisesRegex(
            ValueError,
            "vmap got inconsistent sizes for array axes to be mapped:\n"
            r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
            r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
            r"arg 2 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
            "so\n"
            "args 0, 2 have axes to be mapped of size 10\n"
            "arg 1 has an axis to be mapped of size 2"):
        api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X)

    # Nested pytree arg: the message falls back to the tree of axis sizes.
    with self.assertRaisesRegex(
            ValueError,
            "vmap got inconsistent sizes for array axes to be mapped:\n"
            "the tree of axis sizes is:\n"
            r"\(10, \[2, 2\]\)"):
        api.vmap(h, in_axes=(0, 1))(X, [U, U])

    with self.assertRaisesRegex(
            ValueError, "vmap got arg 0 of rank 0 but axis to be mapped 0"):
        # The mapped inputs cannot be scalars
        api.vmap(lambda x: x)(1.)

    with self.assertRaisesRegex(
            ValueError, "vmap must have at least one non-None value in in_axes"):
        # If the output is mapped, there must be a non-None in_axes
        api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.]))

    with self.assertRaisesRegex(
            ValueError, "vmap got arg 0 of rank 1 but axis to be mapped 1"):
        api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.]))

    # Error is: TypeError: only integer scalar arrays can be converted to a scalar index
    with self.assertRaisesRegex(
            ValueError,
            "vmap out_axes specification must be a tree prefix of the "
            "corresponding value.*"):
        api.vmap(lambda x: x, in_axes=0, out_axes=(2, 3))(jnp.array([1., 2.]))

    with self.assertRaisesRegex(
            ValueError,
            r"vmap has mapped output \(axis_name=foo\) but out_axes is None"):
        # If the output is mapped (user-named axis), then there must be some
        # out_axes specified.
        api.vmap(lambda x: x, out_axes=None, axis_name="foo")(jnp.array([1., 2.]))

    with self.assertRaisesRegex(
            ValueError,
            "vmap has mapped output but out_axes is None"):
        # If the output is mapped (unnamed axis), then there must be some out_axes
        # specified.
        api.vmap(lambda x: x, out_axes=None)(jnp.array([1., 2.]))
def test_vmap_structured_in_axes(self):
A, B, C, D = 2, 3, 4, 5
K = 6 # batch size
x = np.ones((K, A, B)) # batch axis in different locations
y = | |
from schematics import Model
from schematics.types import ModelType, ListType, StringType, IntType, BooleanType, NumberType, DateTimeType, \
TimestampType, UTCDateTimeType, TimedeltaType, FloatType
class Tags(Model):
    """A single key/value Azure resource tag."""
    key = StringType(serialize_when_none=False)
    value = StringType(serialize_when_none=False)
class SubResource(Model):
    """Reference to another Azure resource by its ARM id."""
    id = StringType()
class ExtendedLocation(Model):
    """Extended location (name/type) of a resource."""
    name = StringType(serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class ApplicationSecurityGroup(Model):
    """Azure application security group attached to security rules."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    location = ModelType(ExtendedLocation, serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    resource_guid = StringType(serialize_when_none=False)
    tags = ModelType(Tags, serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class SecurityRule(Model):
    """A single NSG security rule (direction, protocol, source/destination scope)."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    access = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
    description = StringType(serialize_when_none=False)
    destination_address_prefix = StringType(serialize_when_none=False)
    destination_address_prefixes = ListType(StringType, serialize_when_none=False)
    destination_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
    destination_port_range = StringType(serialize_when_none=False)
    destination_port_ranges = ListType(StringType, serialize_when_none=False)
    direction = StringType(choices=('Inbound', 'Outbound'), serialize_when_none=False)
    priority = IntType(serialize_when_none=False)
    protocol = StringType(choices=('*', 'Ah', 'Esp', 'Icmp', 'Tcp', 'Udp'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    source_address_prefix = StringType(serialize_when_none=False)
    source_address_prefixes = ListType(StringType, serialize_when_none=False)
    source_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
    source_port_range = StringType(serialize_when_none=False)
    source_port_ranges = ListType(StringType, serialize_when_none=False)
class TrafficAnalyticsConfigurationProperties(Model):
    """Traffic analytics settings for a flow log (workspace target + interval)."""
    enabled = BooleanType(serialize_when_none=False)
    traffic_analytics_interval = IntType(serialize_when_none=False)
    workspace_id = StringType(serialize_when_none=False)
    workspace_region = StringType(serialize_when_none=False)
    workspace_resource_id = StringType(serialize_when_none=False)
class TrafficAnalyticsProperties(Model):
    """Wrapper holding the Network Watcher traffic analytics configuration."""
    network_watcher_flow_analytics_configuration = ModelType(TrafficAnalyticsConfigurationProperties,
                                                             serialize_when_none=False)
class FlowLogFormatType(Model):
    """Flow log file format type (JSON)."""
    json = StringType(serialize_when_none=False)
class FlowLogFormatParameters(Model):
    """Format (type + version) of a flow log."""
    type = ModelType(FlowLogFormatType, serialize_when_none=False)
    version = IntType(serialize_when_none=False)
class RetentionPolicyParameters(Model):
    """Retention policy (days + enabled flag) for flow log data."""
    days = IntType(serialize_when_none=False)
    enabled = BooleanType(serialize_when_none=False)
class FlowLog(Model):
    """NSG flow log resource: target, storage account, format and retention."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    location = ModelType(ExtendedLocation, serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    enable = BooleanType(serialize_when_none=False)
    flow_analytics_configuration = ModelType(TrafficAnalyticsProperties, serialize_when_none=False)
    format = ModelType(FlowLogFormatParameters, serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    retention_policy = ModelType(RetentionPolicyParameters, serialize_when_none=False)
    storage_id = StringType(serialize_when_none=False)
    target_resource_guid = StringType(serialize_when_none=False)
    target_resource_id = StringType(serialize_when_none=False)
    tags = ModelType(Tags, serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class NetworkInterfaceDnsSettings(Model):
    """DNS settings applied to a network interface."""
    applied_dns_servers = ListType(StringType, serialize_when_none=False)
    dns_servers = ListType(StringType, serialize_when_none=False)
    internal_dns_name_label = StringType(serialize_when_none=False)
    internal_domain_name_suffix = StringType(serialize_when_none=False)
    internal_fqdn = StringType(serialize_when_none=False)
class NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties(Model):
    """Private-link connection details exposed on a NIC IP configuration."""
    fqdns = ListType(StringType, serialize_when_none=False)
    group_id = StringType(serialize_when_none=False)
    required_member_name = StringType(serialize_when_none=False)
class PublicIPAddressSku(Model):
    """SKU (name/tier) of a public IP address."""
    name = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
    tier = StringType(choices=('Global', 'Regional'), serialize_when_none=False)
class IpTag(Model):
    """IP tag (type + value) associated with a public IP address."""
    ip_tag_type = StringType(serialize_when_none=False)
    tag = StringType(serialize_when_none=False)
class DdosSettings(Model):
    """DDoS protection settings of a public IP address."""
    ddos_custom_policy = ModelType(SubResource, serialize_when_none=False)
    protected_ip = BooleanType(serialize_when_none=False)
    protection_coverage = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
class PublicIPAddressDnsSettings(Model):
    """DNS settings (label/FQDN) of a public IP address."""
    domain_name_label = StringType(serialize_when_none=False)
    fqdn = StringType(serialize_when_none=False)
    reverse_fqdn = StringType(serialize_when_none=False)
class IPConfiguration(Model):
    """Generic IP configuration; public IP / subnet are flattened to id strings."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    private_ip_address = StringType(serialize_when_none=False)
    private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    public_ip_address = StringType(serialize_when_none=False)  # Change to Public IP Address's ID
    subnet = StringType(serialize_when_none=False)
class NatGatewaySku(Model):
    """SKU of a NAT gateway ('Standard' or unset)."""
    name = StringType(choices=('Standard', None), serialize_when_none=False)
class NatGateway(Model):
    """NAT gateway resource referenced by public IP addresses."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    location = ModelType(ExtendedLocation, serialize_when_none=False)
    idle_timeout_in_minutes = IntType(serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    public_ip_addresses = ListType(ModelType(SubResource), serialize_when_none=False)
    public_ip_prefixes = ListType(ModelType(SubResource), serialize_when_none=False)
    resource_guid = StringType(serialize_when_none=False)
    subnets = ListType(ModelType(SubResource), serialize_when_none=False)
    sku = ModelType(NatGatewaySku, serialize_when_none=False)
    tags = ModelType(Tags, serialize_when_none=False)
    type = StringType(serialize_when_none=False)
    zones = ListType(StringType, serialize_when_none=False)
class PublicIPAddress(Model):
    """Public IP address resource (SKU, DNS, DDoS, allocation, NAT gateway)."""
    etag = StringType(serialize_when_none=False)
    extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    location = ModelType(ExtendedLocation, serialize_when_none=False)
    ddos_settings = ModelType(DdosSettings, serialize_when_none=False)
    dns_settings = ModelType(PublicIPAddressDnsSettings, serialize_when_none=False)
    idle_timeout_in_minutes = IntType(serialize_when_none=False)
    ip_address = StringType(serialize_when_none=False)
    ip_configuration = ModelType(IPConfiguration, serialize_when_none=False)
    ip_tags = ListType(ModelType(IpTag), serialize_when_none=False)
    # linked_public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
    migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
    nat_gateway = ModelType(NatGateway, serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    public_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
    public_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
    public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
    resource_guid = StringType(serialize_when_none=False)
    sku = ModelType(PublicIPAddressSku, serialize_when_none=False)
    tags = ModelType(Tags, serialize_when_none=False)
    type = StringType(serialize_when_none=False)
    zones = ListType(StringType, serialize_when_none=False)
class NetworkInterfaceIPConfiguration(Model):  # ip configuration in a network interface
    """IP configuration of a network interface (full PublicIPAddress embedded)."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
    primary = BooleanType(serialize_when_none=False)
    private_ip_address = StringType(serialize_when_none=False)
    private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
    private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
    private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
                                                   serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
    subnet = StringType(serialize_when_none=False)  # Change to Subnet ID
    virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
class NetworkSecurityGroup(Model):
    """Network security group: rules, flow logs and attached NICs/subnets (by id)."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    location = ModelType(ExtendedLocation, serialize_when_none=False)
    # default '-' keeps a placeholder when the API omits the name
    name = StringType(default='-', serialize_when_none=False)
    default_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
    flow_logs = ListType(ModelType(FlowLog), serialize_when_none=False)
    network_interfaces = StringType(serialize_when_none=False)  # Change to Network interfaces' Id
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    resource_guid = StringType(serialize_when_none=False)
    security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
    subnets = ListType(StringType, serialize_when_none=False)  # Change to Subnet IDs
    tags = ModelType(Tags, serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class PrivateLinkServiceConnectionState(Model):
    """Approval state of a private link service connection."""
    # NOTE(review): this class is re-defined identically later in the module;
    # the later bindings shadow this one with the same fields.
    actions_required = StringType(serialize_when_none=False)
    description = StringType(serialize_when_none=False)
    status = StringType(serialize_when_none=False)
class PrivateLinkServiceConnection(Model):
    """A connection from a private endpoint to a private link service."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    group_ids = ListType(StringType, serialize_when_none=False)
    private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
    private_link_service_id = StringType(serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    request_message = StringType(serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class CustomDnsConfigPropertiesFormat(Model):
    """Custom DNS entry (FQDN + resolved IPs) of a private endpoint."""
    fqdn = StringType(serialize_when_none=False)
    ip_addresses = ListType(StringType, serialize_when_none=False)
class PrivateEndpointRef(Model):
    """Private endpoint reference; NICs and subnet are flattened to id strings."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    location = ModelType(ExtendedLocation, serialize_when_none=False)
    extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    custom_dns_configs = ListType(ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False)
    manual_private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection),
                                                       serialize_when_none=False)
    network_interfaces = ListType(StringType(), serialize_when_none=False)  # Change to network interfaces id
    private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    subnet = StringType(serialize_when_none=False)  # Change to subnet ID
    tags = ModelType(Tags, serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class AutoApproval(Model):
    """Subscriptions auto-approved for a private link service."""
    subscriptions = ListType(StringType, serialize_when_none=False)
class PrivateLinkServiceIpConfiguration(Model):
    """IP configuration of a private link service."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    primary = BooleanType(serialize_when_none=False)
    private_ip_address = StringType(serialize_when_none=False)
    private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
    private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    subnet = StringType(serialize_when_none=False)  # Change to Subnet ID
    type = StringType(serialize_when_none=False)
class InboundNatPool(Model):
    """Load balancer inbound NAT pool (frontend port range -> backend port)."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    backend_port = IntType(serialize_when_none=False)
    enable_floating_ip = BooleanType(serialize_when_none=False)
    enable_tcp_reset = BooleanType(serialize_when_none=False)
    frontend_ip_configuration = ModelType(SubResource, serialize_when_none=False)
    frontend_port_range_end = IntType(serialize_when_none=False)
    frontend_port_range_start = IntType(serialize_when_none=False)
    idle_timeout_in_minutes = IntType(serialize_when_none=False)
    protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class ApplicationSecurityGroupRef(Model):
    """Lightweight application security group reference (same shape as
    ApplicationSecurityGroup) used inside NIC IP-configuration refs."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    location = ModelType(ExtendedLocation, serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    resource_guid = StringType(serialize_when_none=False)
    tags = ModelType(Tags, serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class NetworkInterfaceIPConfigurationRef(Model):  # ip configuration in a network interface
    """NIC IP-configuration variant with public IP / subnet flattened to ids."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    application_security_groups = ListType(ModelType(ApplicationSecurityGroupRef), serialize_when_none=False)
    primary = BooleanType(serialize_when_none=False)
    private_ip_address = StringType(serialize_when_none=False)
    private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
    private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
    private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
                                                   serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    public_ip_address = StringType(default='', serialize_when_none=False)  # Change Public IP Address to id
    subnet = StringType(default='', serialize_when_none=False)  # Change Subnet to id
    virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
class InboundNatRule(Model):
    """Load balancer inbound NAT rule, with display helpers for the console."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    backend_ip_configurations = ListType(ModelType(NetworkInterfaceIPConfigurationRef), serialize_when_none=False)
    target_virtual_machine = ListType(StringType, serialize_when_none=False)
    backend_port = IntType(serialize_when_none=False)
    enable_floating_ip = BooleanType(serialize_when_none=False)
    enable_tcp_reset = BooleanType(serialize_when_none=False)
    frontend_ip_configuration = ModelType(SubResource, serialize_when_none=False)
    frontend_ip_configuration_display = StringType(serialize_when_none=False)
    frontend_port = IntType(serialize_when_none=False)
    port_mapping_display = StringType(serialize_when_none=False)
    idle_timeout_in_minutes = IntType(serialize_when_none=False)
    protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class LoadBalancingRule(Model):
    """Load balancing rule (frontend/backend ports, distribution, probe)."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    backend_address_pool = ModelType(SubResource, serialize_when_none=False)
    backend_address_pool_display = StringType(serialize_when_none=False)
    backend_port = IntType(serialize_when_none=False)
    disable_outbound_s_nat = BooleanType(serialize_when_none=False)
    enable_floating_ip = BooleanType(serialize_when_none=False)
    enable_tcp_reset = BooleanType(serialize_when_none=False)
    frontend_ip_configuration = ModelType(SubResource, serialize_when_none=False)
    frontend_ip_configuration_display = StringType(serialize_when_none=False)
    frontend_port = IntType(serialize_when_none=False)
    idle_timeout_in_minutes = IntType(serialize_when_none=False)
    load_distribution = StringType(choices=('Default', 'SourceIP', 'SourceIPProtocol'), serialize_when_none=False)
    load_distribution_display = StringType(serialize_when_none=False)
    probe = ModelType(SubResource, serialize_when_none=False)
    protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class OutboundRule(Model):
    """Load balancer outbound rule (SNAT port allocation per backend pool)."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    allocated_outbound_ports = IntType(serialize_when_none=False)
    backend_address_pool = ModelType(SubResource, serialize_when_none=False)
    enable_tcp_reset = BooleanType(serialize_when_none=False)
    frontend_ip_configurations = ListType(ModelType(SubResource), serialize_when_none=False)
    idle_timeout_in_minutes = IntType(serialize_when_none=False)
    protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class FrontendIPConfiguration(Model):
    """Load balancer frontend IP configuration and the rules that use it."""
    etag = StringType(serialize_when_none=False)
    id = StringType(serialize_when_none=False)
    name = StringType(serialize_when_none=False)
    inbound_nat_pools = ListType(ModelType(InboundNatPool), serialize_when_none=False)
    inbound_nat_rules = ListType(ModelType(InboundNatRule), serialize_when_none=False)
    load_balancing_rules = ListType(ModelType(LoadBalancingRule), serialize_when_none=False)
    outbound_rules = ListType(ModelType(OutboundRule), serialize_when_none=False)
    private_ip_address = StringType(serialize_when_none=False)
    private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
    private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    public_ip_address = StringType(serialize_when_none=False)
    public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
    subnet = StringType(serialize_when_none=False)  # Change to Subnet ID
    type = StringType(serialize_when_none=False)
    zones = ListType(StringType, serialize_when_none=False)
class PrivateLinkServiceConnectionState(Model):
    """Approval state of a private link service connection.

    Mirrors the Azure API connection-state object (status such as
    Approved/Pending/Rejected — TODO confirm exact values against the API).
    """
    actions_required = StringType(serialize_when_none=False)
    description = StringType(serialize_when_none=False)
    status = StringType(serialize_when_none=False)
# NOTE: a byte-identical duplicate of the class above used to follow here;
# it only re-bound the same name and has been removed.
class PrivateEndpointConnection(Model):
    """Schematics model of a private endpoint connection to a service."""
    etag = StringType(serialize_when_none=False)
    # id is always serialized (no serialize_when_none guard), unlike the
    # other fields of this model
    id = StringType()
    name = StringType(serialize_when_none=False)
    link_identifier = StringType(serialize_when_none=False)
    private_endpoint = ModelType(PrivateEndpointRef)
    private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
    provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
    type = StringType(serialize_when_none=False)
class Visibility(Model):
    """Visibility settings: list of subscription IDs."""
    subscriptions = ListType(StringType, serialize_when_none=False)
class PrivateLinkService(Model):
    """Schematics model of an Azure Private Link Service resource."""
    etag = StringType(serialize_when_none=False)
    # id is always serialized (no serialize_when_none guard)
    id = StringType()
    name = StringType(serialize_when_none=False)
    extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
    alias = StringType(serialize_when_none=False)
    auto_approval = ModelType(AutoApproval, serialize_when_none=False)
    enable_proxy_protocol = BooleanType(serialize_when_none=False)
    fqdns = ListType(StringType, serialize_when_none=False)
    ip_configurations = ListType(ModelType(PrivateLinkServiceIpConfiguration), serialize_when_none=False)
    # NOTE(review): mixed camelCase/snake_case attribute name — renaming would
    # change the serialized field name, so it is left as-is
    loadBalancer_frontend_ip_configurations = ListType(ModelType(FrontendIPConfiguration), serialize_when_none=False)
    network_interfaces = ListType(StringType, serialize_when_none=False)  # TODO: change to network interface IDs (original comment garbled)
# Repository: gieses/xiRT
"""Module to build the xiRT-network."""
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
import yaml
from tensorflow.keras import backend as K
from tensorflow.keras import losses
from tensorflow.keras import regularizers, optimizers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger, TensorBoard, \
ReduceLROnPlateau
from tensorflow.keras.layers import Embedding, GRU, BatchNormalization, \
Input, concatenate, Dropout, Dense, LSTM, Bidirectional, Add, Maximum, Multiply, Average, \
Concatenate
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import plot_model
from tensorflow.python.keras.layers import CuDNNGRU, CuDNNLSTM
from tqdm.keras import TqdmCallback
# pragma: no cover
def loss_ordered(y_true, y_pred):  # pragma: no cover
    """
    Compute the loss for ordered logistic regression for neural networks.

    The categorical cross-entropy is scaled by (1 + w), where w is the
    class-index distance between prediction and observation, normalized by
    the number of classes minus one — predictions far from the true
    (ordered) class are penalized more.

    Args:
        y_true: ar-like, observed
        y_pred: ar-like, predictions

    Returns:
        float, loss value
    """
    n_classes = K.int_shape(y_pred)[1]
    class_distance = K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1))
    weights = K.cast(class_distance / (n_classes - 1), dtype='float32')
    return (1.0 + weights) * losses.categorical_crossentropy(y_true, y_pred)
# Configure all visible GPUs to allocate memory on demand rather than grabbing
# the whole device up front. Memory growth must be set identically for every
# GPU before any of them is initialized.
gpus = tf.config.experimental.list_physical_devices('GPU')  # pragma: no cover
if gpus:  # pragma: no cover
    try:  # pragma: no cover
        for physical_gpu in gpus:  # pragma: no cover
            tf.config.experimental.set_memory_growth(physical_gpu, True)
    except RuntimeError as runtime_err:  # pragma: no cover
        # raised when GPUs were already initialized; report and continue
        print(runtime_err)  # pragma: no cover
class xiRTNET:
    """Build, train and modify xiRT networks for retention time (RT) prediction.

    Customized (optionally Siamese) networks are assembled from the parsed
    YAML parameterization.

    Attributes:
    ----------
    model: keras Model or None, the assembled network (None until built)
    input_dim: int, number of input dimensions for the first layer
    LSTM_p, dense_p, embedding_p, learning_p, output_p, siamese_p,
    callback_p: dict, sub-sections of the parameter file
    tasks: list, lower-cased prediction task names (fractions + continues)

    Methods:
    --------
    build_model builds the network; the private _add_*/_build_* helpers
    assemble the individual pieces.
    """
    def __init__(self, params, input_dim):
        """
        Construct the xiRTNET.

        Args:
            params: dict, parsed yaml file
            input_dim: int, number of input dimensions for the first layer

        Returns:
            None
        """
        self.model = None
        self.input_dim = input_dim
        # shortcuts into the individual sections of the parameter file
        self.LSTM_p = params["LSTM"]
        self.dense_p = params["dense"]
        self.embedding_p = params["embedding"]
        self.learning_p = params["learning"]
        self.output_p = params["output"]
        self.siamese_p = params["siamese"]
        self.callback_p = params["callbacks"]
        # prediction tasks: fraction tasks first, then continuous ones, each
        # sorted alphabetically; names are normalized to lower case
        self.tasks = np.concatenate([sorted(params["predictions"]["fractions"]),
                                     sorted(params["predictions"]["continues"])])
        self.tasks = [i.lower() for i in self.tasks]
    def build_model(self, siamese=False, alphabet=50):
        """
        Build xiRTNET.

        Function can either be used to build the Siamese network, pseudolinear
        (concatenated cross links) or a normal network with a single input for
        linear peptides. The result is stored in self.model.

        Args:
            siamese: bool, if True siamese architecture is used.
            alphabet: int, alphabet size

        Returns:
            None
        """
        inlayer, net = self._build_base_network(alphabet)
        if siamese:
            # for crosslinks: the same (weight-tied) branch processes both
            # peptides of a crosslink
            base_network = Model(inlayer, net, name="siamese")
            input_a = Input(shape=self.input_dim)
            input_b = Input(shape=self.input_dim)
            # init the base network with shared parameters
            processed_a = base_network(input_a)
            processed_b = base_network(input_b)
            # merge the lower and upper part of the network
            merge_func = self._add_siamese_connector()
            merger_layer = merge_func()([processed_a, processed_b])
            net = Model([input_a, input_b], merger_layer)
            # create the individual prediction networks:
            # one task-specific dense stack + output layer per prediction task
            # shortcut to access output parameters
            act_conf = self.output_p
            tasks_ar = []
            for task_i in self.tasks:
                tmp_task = self._add_task_dense_layers(merger_layer, None)
                tmp_task = Dense(act_conf[task_i + "-dimension"],
                                 activation=act_conf[task_i + "-activation"],
                                 name=task_i)(tmp_task)
                tasks_ar.append(tmp_task)
            model_full = Model(inputs=net.input, outputs=tasks_ar)
        else:
            model_full = self._build_task_network(inlayer, input_meta=None, net=net)
        self.model = model_full
def _add_siamese_connector(self):
"""
Add the siamese layer to connect the individual data from the two crosslink branches.
Returns:
layer, merging layer, e.g. add, multiply, average, concatenate, maximum, minimum
"""
if self.siamese_p["merge_type"].lower() == "add":
merge_func = Add
elif self.siamese_p["merge_type"].lower() == "multiply":
merge_func = Multiply
elif self.siamese_p["merge_type"].lower() == "average":
merge_func = Average
elif self.siamese_p["merge_type"].lower() == "concatenate":
merge_func = Concatenate
elif self.siamese_p["merge_type"].lower() == "maximum":
merge_func = Maximum
else:
raise KeyError("Merging operation not supported ({})".
format(self.siamese_p["merge_type"]))
return merge_func
def _build_base_network(self, alphabet_size=50):
"""
Construct a simple network that consists of an input, embedding, and recurrent-layers.
Function can be used to build a scaffold for siamese networks.
Parameter:
alphabet_size: int, alphabet size embedding
Returns:
tuple, (input, network): the input data structure and the network structure from tf 2.0
"""
# init the input layer
inlayer = Input(shape=self.input_dim, name="main_input")
# translate labels into continuous space
net = Embedding(input_dim=alphabet_size,
output_dim=self.embedding_p["length"],
embeddings_initializer="he_normal", name="main_embedding",
input_length=self.input_dim)(inlayer)
# sequence layers (LSTM-type) + batch normalization if in config
for i in np.arange(self.LSTM_p["nlayers"]):
if self.LSTM_p["nlayers"] > 1:
net = self._add_recursive_layer(net, i, name="shared{}_".format(i))
else:
# return only sequence when there are more than 1 recurrent layers
net = self._add_recursive_layer(net, 1, name="shared{}_".format(i))
return inlayer, net
    def _build_task_network(self, inlayer, input_meta, net):
        """
        Build task specific, dense layers in the xiRT architecture.

        Parameters:
            inlayer: layer, previous input layers
            input_meta: int or None, number of precomputed meta features to
                expose as an additional input (None disables the meta branch)
            net: keras tensor, network built so far

        Returns
            model, keras Model mapping the input(s) to one output per task
        """
        # if desired the sequence input can be supplemented by precomputed features
        if input_meta is not None:
            in_meta = Input(shape=(input_meta,))
            net_meta = Model(inputs=in_meta, outputs=in_meta)
            net = Model(inputs=inlayer, outputs=net)
        else:
            net_meta = None
        # create the individual prediction networks:
        # one task-specific dense stack + output layer per prediction task
        act_conf = self.output_p
        tasks_ar = []
        for task_i in self.tasks:
            # NOTE(review): net_meta is not forwarded here (second arg is
            # None), so the meta input is declared but never concatenated —
            # verify against callers before relying on the meta branch
            tmp_task = self._add_task_dense_layers(net, None)
            tmp_task = Dense(act_conf[task_i + "-dimension"],
                             activation=act_conf[task_i + "-activation"],
                             name=task_i)(tmp_task)
            tasks_ar.append(tmp_task)
        if input_meta is None:
            model = Model(inputs=inlayer, outputs=tasks_ar)
        else:
            model = Model(inputs=[net.input, net_meta.input],
                          outputs=tasks_ar)
        return model
def _add_recursive_layer(self, prev_layer, n_layer=0, name=""):
"""
Add recursive layers to network.
Depending on the parameters adds GRU/LSTM/Cu*** layers to the network architecture.
Regularization parameters are taken from the initiliazed options.
Parameters:
----------
prev_layer: keras model, a base model that should be extended
n_layer: int, current number of layer to add
name: str, name of the layer
Return:
-------
"""
# adjust parameter for 1 or more recurrent layers
return_seqs = True if n_layer == 0 else False
# add regularizer
reg_kernel = self._init_regularizer(regularizer=self.LSTM_p["kernel_regularization"],
reg_value=self.LSTM_p["kernelregularizer_value"])
reg_act = self._init_regularizer(regularizer=self.LSTM_p["activity_regularization"],
reg_value=self.LSTM_p["activityregularizer_value"])
# set the RNN Function to be used
if self.LSTM_p["type"] == "GRU":
f_rnn = GRU
f_name = "GRU"
elif self.LSTM_p["type"] == "LSTM":
f_rnn = LSTM
f_name = "LSTM"
elif self.LSTM_p["type"] == "CuDNNGRU": # pragma: no cover
f_rnn = CuDNNGRU # pragma: no cover
f_name = "CuGRU" # pragma: no cover
elif self.LSTM_p["type"] == "CuDNNLSTM": # pragma: no cover
f_rnn = CuDNNLSTM # pragma: no cover
f_name = "CuLSTM" # pragma: no cover
else:
raise KeyError("Recurrent type option not found ({})".format(
self.LSTM_p["activity_regularization"]))
if self.LSTM_p["bidirectional"]:
# GRU implementations do not support activiation
# activation = self.LSTM_p["activation"], disabled fo rnow
lstm = Bidirectional(f_rnn(self.LSTM_p["units"],
activation=self.LSTM_p["activation"],
activity_regularizer=reg_act,
kernel_regularizer=reg_kernel,
return_sequences=return_seqs),
name=name + "Bi" + f_name)(prev_layer)
else:
lstm = f_rnn(self.LSTM_p["units"],
activation=self.LSTM_p["activation"],
kernel_regularizer=reg_kernel,
return_sequences=return_seqs,
name=name + f_name)(prev_layer)
# add batch normalization
if self.LSTM_p["lstm_bn"]:
lstm = BatchNormalization(name=name + "lstm_bn_" + str(n_layer))(lstm)
return lstm
def _add_task_dense_layers(self, net, net_meta=None):
"""
Add task specific dense layers.
If net_meta is set also adds the meta information as input for each individual layer.
Parameters:
net, keras network model
net_meta: None or df, if df features should be stored there
"""
task = None
for i in np.arange(self.dense_p["nlayers"]):
# the first layer requires special handling, it takes the input from the shared
# sequence layers
if i == 0:
if net_meta is not None:
task = concatenate([net.output, net_meta.output])
task = self._add_dense_layer(i, task)
else:
task = self._add_dense_layer(i, net)
else:
task = self._add_dense_layer(i, task)
return task
def _add_dense_layer(self, idx, prev_layer):
"""
Add a dense layer.
Parameters:
idx: int,
integer indicating the idx'th layer that was added
prev_layer: keras layer,
Functional API object from the definition of the network.
Returns:
layer, a densely connected layer with dropout
"""
# add regularizer
reg_ = self._init_regularizer(self.dense_p["kernel_regularizer"][idx],
self.dense_p["regularizer_value"][idx])
# dense layer
dense = Dense(self.dense_p["neurons"][idx], kernel_regularizer=reg_)(prev_layer)
if self.dense_p["dense_bn"][idx]:
dense = BatchNormalization()(dense)
# this can be used for uncertainty estimation
# dense = Dropout(tmp_conf["dropout"][idx])(dense, training=True)
dense = Dropout(self.dense_p["dropout"][idx])(dense)
return dense
@staticmethod
def _init_regularizer(regularizer, reg_value):
"""
Create a regularizer (l1, l2, l1l2) to be used in an layer of choice.
Args:
regularizer: regularizers, type of regularizer to be used
reg_value: float, lambda value
Returns:
function, regularizer object to be used in model building.
"""
if regularizer == "l1":
regularizer_tmp = regularizers.l1(reg_value)
elif regularizer == "l2":
regularizer_tmp = regularizers.l2(reg_value)
elif regularizer == "l1l2":
regularizer_tmp = regularizers.l1_l2(reg_value, reg_value)
else:
raise KeyError("Regularizer not defined ({})".format(regularizer))
return regularizer_tmp
def export_model_visualization(self, fig_path):
"""
Visualize model architecture in pdf.
Args:
fig_path: str, file location where the model should be stored.
Returns:
None
"""
try:
plot_model(self.model, to_file=fig_path + "xiRT_model.pdf", show_shapes=True,
show_layer_names=True, dpi=300, expand_nested=True)
except ValueError as err:
print("Encountered an ValueError, PDF is still written. ({})".format(err))
def compile(self):
"""
Wrappaer to compile xiRTNETwork.
Loss, Metrics, Weights are all retrieved from the parameter file together with the
optimizer and the network is | |
== u"○"
if self.invert:
success = not success
if success:
return u"%sが【%s】である" % (s, s2)
else:
return u"%sが【%s】でない" % (s, s2)
class BranchGossipContent(BranchContent):
    """Branch content that tests whether a gossip entry exists at the inn."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
        self.gossip = self.data.get("gossip", "")
        self.spchars = self.data.getbool(".", "spchars", False) # expand special characters (Wsn.4)
    def action(self):
        u"""Gossip branch: pick the child matching gossip existence."""
        if self.spchars:
            gossip, _namelist = cw.sprite.message.rpl_specialstr(self.gossip, localvariables=True)
        else:
            gossip = self.gossip
        flag = cw.cwpy.ydata.has_gossip(gossip)
        return self.get_boolean_index(flag)
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        return u"ゴシップ分岐コンテント"
    def get_childname(self, child, event):
        s = self.gossip
        spchars = u"(特殊文字を展開)" if self.spchars else u""
        if self.get_contentname(child) == u"○":
            return u"ゴシップ『%s』が宿屋にある%s" % (s, spchars)
        else:
            return u"ゴシップ『%s』が宿屋にない%s" % (s, spchars)
class BranchCompleteStampContent(BranchContent):
    """Branch content that tests for a scenario completion stamp."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
    def action(self):
        u"""Completed-scenario branch: choose the child by completion stamp."""
        stamp_name = self.data.get("scenario", "")
        has_stamp = cw.cwpy.ydata.has_compstamp(stamp_name)
        return self.get_boolean_index(has_stamp)
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        stamp_name = self.data.get("scenario", "")
        if not stamp_name:
            return u"終了シナリオが指定されていません"
        return u"終了シナリオ『%s』分岐" % (stamp_name)
    def get_childname(self, child, event):
        stamp_name = self.data.get("scenario", "")
        completed = self.get_contentname(child) == u"○"
        if completed:
            return u"シナリオ『%s』が終了済である" % (stamp_name)
        return u"シナリオ『%s』が終了済ではない" % (stamp_name)
class BranchPartyNumberContent(BranchContent):
    """Branch content that compares the party size against a threshold."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
    def action(self):
        u"""Party-size branch: succeed when the party has >= value members."""
        threshold = self.data.getint(".", "value", 0)
        enough = len(cw.cwpy.get_pcards()) >= threshold
        return self.get_boolean_index(bool(enough))
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        return u"人数 = " + self.data.get("value", "0")
    def get_childname(self, child, event):
        threshold = self.data.get("value", "0")
        if self.get_contentname(child) == u"○":
            return u"パーティ人数が%s人以上" % (threshold)
        return u"パーティ人数が%s人未満" % (threshold)
class BranchLevelContent(BranchContent):
    """Branch content that compares a character level against a threshold."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
    def action(self):
        u"""Level branch: compare the party average or the selected member's level."""
        average = self.data.getbool(".", "average", False)
        value = self.data.getint(".", "value", 0)
        if average:
            # average level of all unreversed party members
            pcards = cw.cwpy.get_pcards("unreversed")
            if not pcards:
                return self.get_boolean_index(False)
            level = sum([pcard.level for pcard in pcards]) / len(pcards)
        else:
            # level of the currently selected member
            pcard = cw.cwpy.event.get_targetmember("Selected")
            if not pcard:
                return self.get_boolean_index(False)
            level = pcard.level
        flag = bool(level >= value)
        return self.get_boolean_index(flag)
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        return u"レベル分岐コンテント"
    def get_childname(self, child, event):
        if self.data.getbool(".", "average", False):
            s = u"全員の平均値"
        else:
            s = u"選択中のキャラ"
        if self.get_contentname(child) == u"○":
            return u"%sがレベル%s以上" % (s, self.data.get("value", ""))
        else:
            return u"%sがレベル%s未満" % (s, self.data.get("value", ""))
class BranchCouponContent(BranchContent):
    """Branch content that tests coupon (title) ownership within a scope.

    NOTE: this file is Python 2 code (uses the ``<>`` operator).
    """
    def __init__(self, data):
        BranchContent.__init__(self, data)
        self.scope = self.data.get("targets")
        self.scope, self.someone, self.unreversed = _get_couponscope(self.scope)
        # Wsn.1 style (single coupon name, same as 1.50)
        coupon = self.data.get("coupon","")
        # Wsn.2 style (multiple coupon names)
        self.matchingtype = self.data.get("matchingtype")
        names = [coupon] if coupon else []
        for e in self.data.getfind("Coupons", raiseerror=False):
            if e.text:
                names.append(e.text)
        self.couponnames = names
        self.invert = self.data.getbool(".", "invert", False) # invert the condition (Wsn.4)
        self.spchars = self.data.getbool(".", "spchars", False) # expand special characters (Wsn.4)
    def action(self):
        u"""Coupon existence branch: choose the child by ownership test."""
        # NOTE(review): true_index is computed but never used
        true_index = self.get_boolean_index(True)
        false_index = self.get_boolean_index(False)
        if not self.couponnames:
            return false_index
        if self.spchars:
            names = []
            for name in self.couponnames:
                name, _namelist = cw.sprite.message.rpl_specialstr(name, localvariables=True)
                names.append(name)
        else:
            # shallow copy
            names = self.couponnames[:]
        # must all names match?
        allmatch = self.matchingtype == "And"
        scope, someone, unreversed = self.scope, self.someone, self.unreversed
        # compatibility: in 1.20, testing ownership for the "selected member"
        # while no member is selected behaves like "someone in the party"
        if not cw.cwpy.event.has_selectedmember() and scope == "Selected":
            if cw.cwpy.sdata and cw.cwpy.sct.lessthan("1.20", cw.cwpy.sdata.get_versionhint()):
                scope = "Party"
        # ownership check
        targets = cw.cwpy.event.get_targetscope(scope, unreversed)
        # BUG: in CardWirth, conditions with multiple candidate targets
        # (anything other than "selected member") always succeed when all
        # targets are hidden and the target list is empty. This seems to
        # happen because the check only turns False the moment a single
        # member fails it.
        if len(targets) == 0:
            cw.cwpy.event.clear_selectedmember()
            return self.get_boolean_index(scope <> "Selected")
        return self.get_boolean_index(_has_coupon(targets, names, scope, someone, allmatch, False, invert=self.invert))
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        names = self.couponnames
        if len(names) > 0 and names[0] <> "":
            s = u"」「".join(names)
            type = u""
            if len(names) > 1:
                if self.matchingtype == "And":
                    type = u"全ての"
                else:
                    type = u"どれか一つの"
            if self.invert:
                resulttype = u"不所有"
            else:
                resulttype = u"所有"
            spchars = u"(特殊文字を展開)" if self.spchars else u""
            return u"称号「%s」の%s%sで分岐%s" % (s, type, resulttype, spchars)
        else:
            return u"称号が指定されていません"
    def get_childname(self, child, event):
        names = self.couponnames
        scope = self.data.get("targets")
        s2 = self.textdict.get(scope.lower(), "")
        s = u""
        if len(names) > 0 and names[0] <> "":
            s = u"」「".join(names)
        type = u""
        if len(names) > 1:
            if self.matchingtype == "And":
                type = u"の全て"
            else:
                type = u"のどれか一つ"
        success = self.get_contentname(child) == u"○"
        if self.invert:
            # inverted condition (Wsn.4)
            success = not success
        spchars = u"(特殊文字を展開)" if self.spchars else u""
        if success:
            return u"%sが称号「%s」%sを所有している%s" % (s2, s, type, spchars)
        else:
            return u"%sが称号「%s」%sを所有していない%s" % (s2, s, type, spchars)
class BranchSelectContent(BranchContent):
    """Branch content that selects a party member (manual/random/valued)."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
        self._init_values = False
        self.targetall = self.data.getbool(".", "targetall", True)
        self.method = self.data.getattr(".", "method", "")
        # legacy data has no "method" attribute; fall back to the old
        # boolean "random" attribute
        if not self.method:
            if self.data.getbool(".", "random", False):
                self.method = "Random"
            else:
                self.method = "Manual"
    def action(self):
        u"""Member-selection branch: select a member by the configured method."""
        if self.targetall:
            mode = "unreversed"
        else:
            mode = "active"
        index = -1
        if self.method == "Random":
            pcards = cw.cwpy.get_pcards(mode)
            if pcards:
                pcard = cw.cwpy.dice.choice(pcards)
                cw.cwpy.event.set_selectedmember(pcard)
                index = 0
        elif self.method == "Valued":
            # selection by valued condition (Wsn.1);
            # get_valuedmember is defined elsewhere (presumably on
            # BranchContent) — not visible here
            pcard = self.get_valuedmember(mode)
            if pcard:
                cw.cwpy.event.set_selectedmember(pcard)
                index = 0
        else:
            # manual selection via a member-select dialog
            pcards = cw.cwpy.get_pcards(mode)
            mwin = cw.sprite.message.MemberSelectWindow(pcards)
            index = cw.cwpy.show_message(mwin)
        flag = bool(index == 0)
        return self.get_boolean_index(flag)
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        return u"選択分岐コンテント"
    def get_childname(self, child, event):
        # init_values/initvalue/coupons are defined elsewhere — TODO confirm
        self.init_values()
        if self.targetall:
            s = u"パーティ全員から"
        else:
            s = u"動けるメンバから"
        if self.method == "Random":
            s += u"ランダムで "
        elif self.method == "Valued":
            values = [u"初期値 = %s" % (self.initvalue)]
            for key in cw.util.sorted_by_attr(self.coupons.iterkeys()):
                values.append(u"%s = %s" % (key, self.coupons[key]))
            s += u"評価条件(%s)で" % (", ".join(values))
        else:
            s += u"手動で "
        if self.get_contentname(child) == u"○":
            s += u"キャラクターを選択"
        else:
            if self.method == "Manual":
                s += u"の選択をキャンセル"
            else:
                s += u"の選択に失敗"
        return s
class BranchMoneyContent(BranchContent):
    """Branch content that checks the party's money."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
    def action(self):
        u"""Money branch: succeed when the party owns at least value sp."""
        required = self.data.getint(".", "value", 0)
        rich_enough = cw.cwpy.ydata.party.money >= required
        return self.get_boolean_index(bool(rich_enough))
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        return u"金額 = " + self.data.get("value", "0")
    def get_childname(self, child, event):
        amount = self.data.get("value", "0")
        if self.get_contentname(child) == u"○":
            return amount + u" sp以上所持している"
        return amount + u" sp以上所持していない"
class BranchFlagContent(BranchContent):
    """Branch content that branches on a scenario flag's boolean value."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
        self.flag = self.data.get("flag")
    def action(self):
        u"""Flag branch: choose the child by the flag value."""
        diffsc = self.is_differentscenario()
        flag = cw.cwpy.sdata.find_flag(self.flag, diffsc, cw.cwpy.event.get_nowrunningevent())
        if not flag is None:
            index = self.get_boolean_index(flag)
        elif self.get_children_num():
            # when the flag does not exist, the first child content is
            # always chosen
            index = 0
        else:
            index = cw.IDX_TREEEND
        return index
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        flag = cw.cwpy.sdata.find_flag(self.flag, self.is_differentscenario(event), event)
        if not flag is None:
            return u"フラグ『%s』分岐" % (flag.name)
        else:
            return u"フラグが指定されていません"
    def get_childname(self, child, event):
        flag = cw.cwpy.sdata.find_flag(self.flag, self.is_differentscenario(event), event)
        if flag:
            valuename = flag.get_valuename(self.get_contentname(child) == u"○")
            return u"%s = %s" % (flag.name, valuename)
        else:
            return u"フラグが指定されていません"
class BranchStepContent(BranchContent):
    """Branch content that compares a step variable against a threshold."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
        self.step = self.data.get("step")
        self.value = self.data.getint(".", "value", 0)
        self.nextlen = self.get_children_num()
    def action(self):
        u"""Step comparison branch: succeed when the step value >= value."""
        step = cw.cwpy.sdata.find_step(self.step, self.is_differentscenario(), cw.cwpy.event.get_nowrunningevent())
        if not step is None:
            flag = step.value >= self.value
            index = self.get_boolean_index(flag)
        elif self.nextlen:
            # when the step does not exist, the first child content is
            # always chosen
            index = 0
        else:
            index = cw.IDX_TREEEND
        return index
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        step = cw.cwpy.sdata.find_step(self.step, self.is_differentscenario(event), event)
        if not step is None:
            return u"ステップ『%s』分岐" % (step.name)
        else:
            return u"ステップが指定されていません"
    def get_childname(self, child, event):
        step = cw.cwpy.sdata.find_step(self.step, self.is_differentscenario(event), event)
        if not step is None:
            valuename = step.get_valuename(self.value)
            if self.get_contentname(child) == u"○":
                return u"ステップ『%s』が『%s』以上" % (step.name, valuename)
            else:
                return u"ステップ『%s』が『%s』未満" % (step.name, valuename)
        else:
            return u"ステップが指定されていません"
class BranchMultiStepContent(BranchContent):
    """Branch content that branches multi-way on a step variable's value."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
        self.step = self.data.get("step")
        self.nextlen = self.get_children_num()
    def action(self):
        u"""Multi-way step branch: pick the child matching the step value."""
        step = cw.cwpy.sdata.find_step(self.step, self.is_differentscenario(), cw.cwpy.event.get_nowrunningevent())
        if not step is None:
            value = step.value
            index = self.get_value_index(value)
        elif self.nextlen:
            # when the step does not exist, the first child content is
            # always chosen
            index = 0
        else:
            index = cw.IDX_TREEEND
        return index
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        step = cw.cwpy.sdata.find_step(self.step, self.is_differentscenario(event), event)
        if not step is None:
            return u"ステップ『%s』多岐分岐" % (step.name)
        else:
            return u"ステップが指定されていません"
    def get_childname(self, child, event):
        step = cw.cwpy.sdata.find_step(self.step, self.is_differentscenario(event), event)
        if not step is None:
            # child content names are the step values; anything non-numeric
            # falls back to the "Default" (other) branch
            try:
                value = int(self.get_contentname(child, "Default"))
            except:
                value = "Default"
            if value == "Default":
                valuename = u"その他"
            else:
                valuename = step.get_valuename(value)
            return u"%s = %s" % (step.name, valuename)
        else:
            return u"ステップが指定されていません"
class BranchRandomContent(BranchContent):
    """Branch content that succeeds with a configured percentage chance."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
    def action(self):
        u"""Random branch: roll 1d100 against the configured percentage."""
        chance = self.data.getint(".", "value", 0)
        compat_128 = cw.cwpy.sdata and cw.cwpy.sct.lessthan("1.28", cw.cwpy.sdata.get_versionhint())
        if compat_128:
            # compatibility: a bug in 1.28 and earlier adds +1 to the chance
            threshold = chance + 1
        else:
            threshold = chance
        success = cw.cwpy.dice.roll(1, 100) <= threshold
        return self.get_boolean_index(bool(success))
    def get_status(self, event):
        # Japanese UI strings below are runtime output and must stay as-is
        return u"確率 = %s%%" % (self.data.get("value", "0"))
    def get_childname(self, child, event):
        percent = self.data.get("value", "")
        if self.get_contentname(child) == u"○":
            return percent + u" %成功"
        return percent + u" %失敗"
class BranchAbilityContent(BranchContent):
    """Branch content that performs an ability (physical/mental) check."""
    def __init__(self, data):
        BranchContent.__init__(self, data)
        self.level = self.data.getint(".", "value", 0)
        self.vocation = self.data.get("physical"), self.data.get("mental")
        self.targetm = self.data.get("targetm")
        self.invert = self.data.getbool(".", "invert", False)
        # normalize the target scope: a trailing "Sleep" suffix means
        # sleeping members are included in the check
        if self.targetm.endswith("Sleep"):
            self.targetm = self.targetm.replace("Sleep", "")
            self.sleep = True
        else:
            self.sleep = False
        # "Random" means one random party member; plain "Party" means
        # everyone must pass, any other scope means someone must pass
        if self.targetm == "Random":
            self.targetm = "Party"
            self.someone = True
        elif self.targetm == "Party":
            self.someone = False
        else:
            self.someone = True
def action(self):
"""能力判定分岐コンテント。"""
level = self.level
vocation = self.vocation
targetm = self.targetm
sleep = self.sleep
someone = self.someone
# 対象メンバ取得
targets | |
# -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :ref:`Audit <audit_definition>` may be launched several times with the same
settings (:ref:`Goal <goal_definition>`, thresholds, ...). Therefore it makes
sense to save those settings in some sort of Audit preset object, which is
known as an :ref:`Audit Template <audit_template_definition>`.
An :ref:`Audit Template <audit_template_definition>` contains at least the
:ref:`Goal <goal_definition>` of the :ref:`Audit <audit_definition>`.
It may also contain some error handling settings indicating whether:
- :ref:`Watcher Applier <watcher_applier_definition>` stops the
entire operation
- :ref:`Watcher Applier <watcher_applier_definition>` performs a rollback
and how many retries should be attempted before failure occurs (also the latter
can be complex: for example the scenario in which there are many first-time
failures on ultimately successful :ref:`Actions <action_definition>`).
Moreover, an :ref:`Audit Template <audit_template_definition>` may contain some
settings related to the level of automation for the
:ref:`Action Plan <action_plan_definition>` that will be generated by the
:ref:`Audit <audit_definition>`.
A flag will indicate whether the :ref:`Action Plan <action_plan_definition>`
will be launched automatically or will need a manual confirmation from the
:ref:`Administrator <administrator_definition>`.
"""
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import context as context_utils
from watcher.common import exception
from watcher.common import policy
from watcher.common import utils as common_utils
from watcher.decision_engine.scope import default
from watcher import objects
class AuditTemplatePostType(wtypes.Base):
    """Request body type for creating an audit template."""

    _ctx = context_utils.make_context()

    name = wtypes.wsattr(wtypes.text, mandatory=True)
    """Name of this audit template"""

    description = wtypes.wsattr(wtypes.text, mandatory=False)
    """Short description of this audit template"""

    goal = wtypes.wsattr(wtypes.text, mandatory=True)
    """Goal UUID or name of the audit template"""

    strategy = wtypes.wsattr(wtypes.text, mandatory=False)
    """Strategy UUID or name of the audit template"""

    scope = wtypes.wsattr(types.jsontype, mandatory=False, default=[])
    """Audit Scope"""

    def as_audit_template(self):
        """Convert this POST payload into an AuditTemplate API object."""
        return AuditTemplate(
            name=self.name,
            description=self.description,
            goal_id=self.goal,  # Dirty trick ...
            goal=self.goal,
            strategy_id=self.strategy,  # Dirty trick ...
            strategy_uuid=self.strategy,
            scope=self.scope,
        )

    @staticmethod
    def validate(audit_template):
        """Validate goal, strategy and scope of *audit_template*.

        Resolves goal/strategy given by UUID or name, checks the scope
        against the default schema and rejects scopes that both include
        and exclude host aggregates. On success the goal/strategy fields
        are normalized to UUIDs and the template is returned.

        Raises:
            exception.InvalidGoal / InvalidStrategy / Invalid on failure.
        """
        available_goals = objects.Goal.list(AuditTemplatePostType._ctx)
        available_goal_uuids_map = {g.uuid: g for g in available_goals}
        available_goal_names_map = {g.name: g for g in available_goals}

        if audit_template.goal in available_goal_uuids_map:
            goal = available_goal_uuids_map[audit_template.goal]
        elif audit_template.goal in available_goal_names_map:
            goal = available_goal_names_map[audit_template.goal]
        else:
            raise exception.InvalidGoal(goal=audit_template.goal)

        common_utils.Draft4Validator(
            default.DefaultScope.DEFAULT_SCHEMA).validate(audit_template.scope)

        include_host_aggregates = False
        exclude_host_aggregates = False
        for rule in audit_template.scope:
            if 'host_aggregates' in rule:
                include_host_aggregates = True
            elif 'exclude' in rule:
                for resource in rule['exclude']:
                    if 'host_aggregates' in resource:
                        exclude_host_aggregates = True
        if include_host_aggregates and exclude_host_aggregates:
            raise exception.Invalid(
                message=_(
                    "host_aggregates can't be "
                    "included and excluded together"))

        if audit_template.strategy:
            available_strategies = objects.Strategy.list(
                AuditTemplatePostType._ctx)
            available_strategies_map = {
                s.uuid: s for s in available_strategies}
            if audit_template.strategy not in available_strategies_map:
                raise exception.InvalidStrategy(
                    strategy=audit_template.strategy)

            strategy = available_strategies_map[audit_template.strategy]
            # Check that the strategy we indicate is actually related to the
            # specified goal
            if strategy.goal_id != goal.id:
                choices = ["'%s' (%s)" % (s.uuid, s.name)
                           for s in available_strategies]
                # BUGFIX: the message used to read "does relate" although it
                # is raised exactly when the strategy does NOT relate to the
                # goal
                raise exception.InvalidStrategy(
                    message=_(
                        "'%(strategy)s' strategy does not relate to the "
                        "'%(goal)s' goal. Possible choices: %(choices)s")
                    % dict(strategy=strategy.name, goal=goal.name,
                           choices=", ".join(choices)))
            audit_template.strategy = strategy.uuid

        # We force the UUID so that we do not need to query the DB with the
        # name afterwards
        audit_template.goal = goal.uuid
        return audit_template
class AuditTemplatePatchType(types.JsonPatchType):
    """JSON-patch type for audit templates: validates goal/strategy patches."""

    _ctx = context_utils.make_context()

    @staticmethod
    def mandatory_attrs():
        """No attributes are mandatory for a patch."""
        return []

    @staticmethod
    def validate(patch):
        """Validate a single patch operation before applying it."""
        if patch.path == "/goal":
            if patch.op == "remove":
                # the goal is a mandatory property of an audit template
                raise exception.OperationNotPermitted(
                    _("Cannot remove 'goal' attribute "
                      "from an audit template"))
            AuditTemplatePatchType._validate_goal(patch)
        if patch.path == "/strategy":
            AuditTemplatePatchType._validate_strategy(patch)
        return types.JsonPatchType.validate(patch)

    @staticmethod
    def _validate_goal(patch):
        """Resolve a goal given by UUID or name to its database id."""
        patch.path = "/goal_id"
        goal = patch.value
        if not goal:
            return
        goals = objects.Goal.list(AuditTemplatePatchType._ctx)
        goals_by_uuid = {g.uuid: g for g in goals}
        goals_by_name = {g.name: g for g in goals}
        # UUID match takes precedence over name match
        if goal in goals_by_uuid:
            patch.value = goals_by_uuid[goal].id
        elif goal in goals_by_name:
            patch.value = goals_by_name[goal].id
        else:
            raise exception.InvalidGoal(goal=goal)

    @staticmethod
    def _validate_strategy(patch):
        """Resolve a strategy given by UUID or name to its database id."""
        patch.path = "/strategy_id"
        strategy = patch.value
        if not strategy:
            return
        strategies = objects.Strategy.list(AuditTemplatePatchType._ctx)
        strategies_by_uuid = {s.uuid: s for s in strategies}
        strategies_by_name = {s.name: s for s in strategies}
        # UUID match takes precedence over name match
        if strategy in strategies_by_uuid:
            patch.value = strategies_by_uuid[strategy].id
        elif strategy in strategies_by_name:
            patch.value = strategies_by_name[strategy].id
        else:
            raise exception.InvalidStrategy(strategy=strategy)
class AuditTemplate(base.APIBase):
    """API representation of a audit template.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    audit template.
    """
    # Caches backing the wsme properties declared below; they are lazily
    # (re)resolved by the setters from whatever identifier (UUID, name or
    # numeric DB id) the caller supplied.
    _goal_uuid = None
    _goal_name = None
    _strategy_uuid = None
    _strategy_name = None
    def _get_goal(self, value):
        """Resolve *value* (UUID, int id or name) to a Goal, or None.

        Side effect: on success the goal's numeric DB id is stored on
        ``self.goal_id``.  Returns None when *value* is unset or when no
        matching goal exists.
        """
        if value == wtypes.Unset:
            return None
        goal = None
        try:
            if (common_utils.is_uuid_like(value) or
                    common_utils.is_int_like(value)):
                goal = objects.Goal.get(
                    pecan.request.context, value)
            else:
                goal = objects.Goal.get_by_name(
                    pecan.request.context, value)
        except exception.GoalNotFound:
            pass
        if goal:
            self.goal_id = goal.id
        return goal
    def _get_strategy(self, value):
        """Resolve *value* (UUID, int id or name) to a Strategy, or None.

        Side effect: on success the strategy's numeric DB id is stored on
        ``self.strategy_id``.
        """
        if value == wtypes.Unset:
            return None
        strategy = None
        try:
            if (common_utils.is_uuid_like(value) or
                    common_utils.is_int_like(value)):
                strategy = objects.Strategy.get(
                    pecan.request.context, value)
            else:
                strategy = objects.Strategy.get_by_name(
                    pecan.request.context, value)
        except exception.StrategyNotFound:
            pass
        if strategy:
            self.strategy_id = strategy.id
        return strategy
    def _get_goal_uuid(self):
        """wsme getter for ``goal_uuid``."""
        return self._goal_uuid
    def _set_goal_uuid(self, value):
        """wsme setter: accept any goal identifier and cache its UUID."""
        if value and self._goal_uuid != value:
            self._goal_uuid = None
            goal = self._get_goal(value)
            if goal:
                self._goal_uuid = goal.uuid
    def _get_strategy_uuid(self):
        """wsme getter for ``strategy_uuid``."""
        return self._strategy_uuid
    def _set_strategy_uuid(self, value):
        """wsme setter: accept any strategy identifier and cache its UUID."""
        if value and self._strategy_uuid != value:
            self._strategy_uuid = None
            strategy = self._get_strategy(value)
            if strategy:
                self._strategy_uuid = strategy.uuid
    def _get_goal_name(self):
        """wsme getter for ``goal_name``."""
        return self._goal_name
    def _set_goal_name(self, value):
        """wsme setter: accept any goal identifier and cache its name."""
        if value and self._goal_name != value:
            self._goal_name = None
            goal = self._get_goal(value)
            if goal:
                self._goal_name = goal.name
    def _get_strategy_name(self):
        """wsme getter for ``strategy_name``."""
        return self._strategy_name
    def _set_strategy_name(self, value):
        """wsme setter: accept any strategy identifier and cache its name."""
        if value and self._strategy_name != value:
            self._strategy_name = None
            strategy = self._get_strategy(value)
            if strategy:
                self._strategy_name = strategy.name
    uuid = wtypes.wsattr(types.uuid, readonly=True)
    """Unique UUID for this audit template"""
    name = wtypes.text
    """Name of this audit template"""
    description = wtypes.wsattr(wtypes.text, mandatory=False)
    """Short description of this audit template"""
    goal_uuid = wsme.wsproperty(
        wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True)
    """Goal UUID the audit template refers to"""
    goal_name = wsme.wsproperty(
        wtypes.text, _get_goal_name, _set_goal_name, mandatory=False)
    """The name of the goal this audit template refers to"""
    strategy_uuid = wsme.wsproperty(
        wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False)
    """Strategy UUID the audit template refers to"""
    strategy_name = wsme.wsproperty(
        wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)
    """The name of the strategy this audit template refers to"""
    audits = wsme.wsattr([link.Link], readonly=True)
    """Links to the collection of audits contained in this audit template"""
    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated audit template links"""
    scope = wsme.wsattr(types.jsontype, mandatory=False)
    """Audit Scope"""
    def __init__(self, **kwargs):
        """Populate the exposed fields from an AuditTemplate field dict."""
        super(AuditTemplate, self).__init__()
        self.fields = []
        fields = list(objects.AuditTemplate.fields)
        for k in fields:
            # Skip fields we do not expose.
            if not hasattr(self, k):
                continue
            self.fields.append(k)
            setattr(self, k, kwargs.get(k, wtypes.Unset))
        self.fields.append('goal_id')
        self.fields.append('strategy_id')
        setattr(self, 'strategy_id', kwargs.get('strategy_id', wtypes.Unset))
        # goal_uuid & strategy_uuid are not part of
        # objects.AuditTemplate.fields because they're API-only attributes.
        self.fields.append('goal_uuid')
        self.fields.append('goal_name')
        self.fields.append('strategy_uuid')
        self.fields.append('strategy_name')
        # Seed the API-only properties from the DB ids: the wsme setters
        # above resolve a numeric id into the matching UUID/name.
        setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset))
        setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset))
        setattr(self, 'strategy_uuid',
                kwargs.get('strategy_id', wtypes.Unset))
        setattr(self, 'strategy_name',
                kwargs.get('strategy_id', wtypes.Unset))
    @staticmethod
    def _convert_with_links(audit_template, url, expand=True):
        """Attach self/bookmark links, hide internal ids, trim if collapsed."""
        if not expand:
            audit_template.unset_fields_except(
                ['uuid', 'name', 'goal_uuid', 'goal_name',
                 'scope', 'strategy_uuid', 'strategy_name'])
        # The numeric ID should not be exposed to
        # the user, it's internal only.
        audit_template.goal_id = wtypes.Unset
        audit_template.strategy_id = wtypes.Unset
        audit_template.links = [link.Link.make_link('self', url,
                                                    'audit_templates',
                                                    audit_template.uuid),
                                link.Link.make_link('bookmark', url,
                                                    'audit_templates',
                                                    audit_template.uuid,
                                                    bookmark=True)]
        return audit_template
    @classmethod
    def convert_with_links(cls, rpc_audit_template, expand=True):
        """Build an API representation (with links) of an RPC audit template."""
        audit_template = AuditTemplate(**rpc_audit_template.as_dict())
        return cls._convert_with_links(audit_template, pecan.request.host_url,
                                       expand)
    @classmethod
    def sample(cls, expand=True):
        """Return a static sample audit template for API documentation."""
        sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
                     name='My Audit Template',
                     description='Description of my audit template',
                     goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6',
                     strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986',
                     created_at=datetime.datetime.utcnow(),
                     deleted_at=None,
                     updated_at=datetime.datetime.utcnow(),
                     scope=[],)
        return cls._convert_with_links(sample, 'http://localhost:9322', expand)
class AuditTemplateCollection(collection.Collection):
    """API representation of a collection of audit templates."""
    audit_templates = [AuditTemplate]
    """A list containing audit templates objects"""
    def __init__(self, **kwargs):
        """Initialize the collection; the wsme collection key is fixed."""
        super(AuditTemplateCollection, self).__init__()
        self._type = 'audit_templates'
    @staticmethod
    def convert_with_links(rpc_audit_templates, limit, url=None, expand=False,
                           **kwargs):
        """Build an API collection from RPC objects, with pagination links."""
        converted = [AuditTemplate.convert_with_links(rpc_at, expand)
                     for rpc_at in rpc_audit_templates]
        at_collection = AuditTemplateCollection()
        at_collection.audit_templates = converted
        at_collection.next = at_collection.get_next(limit, url=url, **kwargs)
        return at_collection
    @classmethod
    def sample(cls):
        """Return a sample collection holding one collapsed audit template."""
        collection_sample = cls()
        collection_sample.audit_templates = [
            AuditTemplate.sample(expand=False)]
        return collection_sample
class AuditTemplatesController(rest.RestController):
"""REST controller for AuditTemplates."""
def __init__(self):
super(AuditTemplatesController, self).__init__()
from_audit_templates = False
"""A flag to indicate if | |
__init__(self, *args, **kwargs):
"""
See also
--------
`~proplot.subplots.subplots`, `Axes`
"""
# Set tick length to zero so azimuthal labels are not too offset
# Change default radial axis formatter but keep default theta one
super().__init__(*args, **kwargs)
formatter = axistools.Formatter('auto')
self.yaxis.set_major_formatter(formatter)
self.yaxis.isDefault_majfmt = True
for axis in (self.xaxis, self.yaxis):
axis.set_tick_params(which='both', size=0)
    def format(self, *args,
               r0=None, theta0=None, thetadir=None,
               thetamin=None, thetamax=None, thetalim=None,
               rmin=None, rmax=None, rlim=None,
               rlabelpos=None, rscale=None, rborder=None,
               thetalocator=None, rlocator=None, thetalines=None, rlines=None,
               thetaformatter=None, rformatter=None,
               thetalabels=None, rlabels=None,
               thetalocator_kw=None, rlocator_kw=None,
               thetaformatter_kw=None, rformatter_kw=None,
               **kwargs):
        """
        Calls `Axes.format` and `Axes.context`, formats radial gridline
        locations, gridline labels, limits, and more. All ``theta`` arguments
        are specified in *degrees*, not radians. The below parameters are
        specific to `PolarAxes`.

        Parameters
        ----------
        r0 : float, optional
            The radial origin.
        theta0 : {'N', 'NW', 'W', 'SW', 'S', 'SE', 'E', 'NE'}
            The zero azimuth location.
        thetadir : {-1, 1, 'clockwise', 'anticlockwise', 'counterclockwise'}, \
optional
            The positive azimuth direction. Clockwise corresponds to ``-1``
            and anticlockwise corresponds to ``1``. Default is ``1``.
        thetamin, thetamax : float, optional
            The lower and upper azimuthal bounds in degrees. If
            ``thetamax != thetamin + 360``, this produces a sector plot.
        thetalim : (float, float), optional
            Specifies `thetamin` and `thetamax` at once.
        rmin, rmax : float, optional
            The inner and outer radial limits. If ``r0 != rmin``, this
            produces an annular plot.
        rlim : (float, float), optional
            Specifies `rmin` and `rmax` at once.
        rborder : bool, optional
            Toggles the polar axes border on and off. Visibility of the "inner"
            radial spine and "start" and "end" azimuthal spines is controlled
            automatically by matplotlib.
        thetalocator, rlocator : float or list of float, optional
            Used to determine the azimuthal and radial gridline positions.
            Passed to the `~proplot.axistools.Locator` constructor.
        thetalines, rlines
            Aliases for `thetalocator`, `rlocator`.
        thetalocator_kw, rlocator_kw : dict-like, optional
            The azimuthal and radial locator settings. Passed to
            `~proplot.axistools.Locator`.
        rlabelpos : float, optional
            The azimuth at which radial coordinates are labeled.
        rscale : optional
            The radial axis scale, applied with
            `~matplotlib.projections.polar.PolarAxes.set_rscale`.
        thetaformatter, rformatter : formatter spec, optional
            Used to determine the azimuthal and radial label format.
            Passed to the `~proplot.axistools.Formatter` constructor.
            Use ``[]`` or ``'null'`` for no ticks.
        thetalabels, rlabels : optional
            Aliases for `thetaformatter`, `rformatter`.
        thetaformatter_kw, rformatter_kw : dict-like, optional
            The azimuthal and radial label formatter settings. Passed to
            `~proplot.axistools.Formatter`.
        **kwargs
            Passed to `Axes.format` and `Axes.context`

        See also
        --------
        :py:obj:`Axes.format`, :py:obj:`Axes.context`
        """
        context, kwargs = self.context(**kwargs)
        with context:
            # Not mutable default args
            thetalocator_kw = thetalocator_kw or {}
            thetaformatter_kw = thetaformatter_kw or {}
            rlocator_kw = rlocator_kw or {}
            rformatter_kw = rformatter_kw or {}
            # Flexible input
            if rlim is not None:
                if rmin is not None or rmax is not None:
                    _warn_proplot(
                        f'Conflicting keyword args rmin={rmin}, rmax={rmax}, '
                        f'and rlim={rlim}. Using "rlim".')
                rmin, rmax = rlim
            if thetalim is not None:
                if thetamin is not None or thetamax is not None:
                    _warn_proplot(
                        f'Conflicting keyword args thetamin={thetamin}, '
                        f'thetamax={thetamax}, and thetalim={thetalim}. '
                        f'Using "thetalim".')
                thetamin, thetamax = thetalim
            thetalocator = _notNone(
                thetalines, thetalocator, None,
                names=('thetalines', 'thetalocator'))
            thetaformatter = _notNone(
                thetalabels, thetaformatter, None,
                names=('thetalabels', 'thetaformatter'))
            rlocator = _notNone(rlines, rlocator, None,
                                names=('rlines', 'rlocator'))
            rformatter = _notNone(rlabels, rformatter,
                                  None, names=('rlabels', 'rformatter'))
            # Special radius settings
            if r0 is not None:
                self.set_rorigin(r0)
            if rlabelpos is not None:
                self.set_rlabel_position(rlabelpos)
            if rscale is not None:
                self.set_rscale(rscale)
            if rborder is not None:
                self.spines['polar'].set_visible(bool(rborder))
            # Special azimuth settings
            if theta0 is not None:
                self.set_theta_zero_location(theta0)
            if thetadir is not None:
                self.set_theta_direction(thetadir)
            # Iterate over the azimuthal ('theta') and radial ('r') axes and
            # apply limits, spine, gridline, locator and formatter settings.
            for (
                x, r, axis,
                min_, max_,
                locator, formatter,
                locator_kw, formatter_kw,
            ) in zip(
                ('x', 'y'), ('theta', 'r'), (self.xaxis, self.yaxis),
                (thetamin, rmin), (thetamax, rmax),
                (thetalocator, rlocator), (thetaformatter, rformatter),
                (thetalocator_kw, rlocator_kw),
                (thetaformatter_kw, rformatter_kw)
            ):
                # Axis limits
                # Try to use public API where possible
                if min_ is not None:
                    getattr(self, 'set_' + r + 'min')(min_)
                else:
                    min_ = getattr(self, 'get_' + r + 'min')()
                if max_ is not None:
                    getattr(self, 'set_' + r + 'max')(max_)
                else:
                    max_ = getattr(self, 'get_' + r + 'max')()
                # Spine settings
                kw = rc.fill({
                    'linewidth': 'axes.linewidth',
                    'color': 'axes.edgecolor',
                })
                sides = ('inner', 'polar') if r == 'r' else ('start', 'end')
                spines = [self.spines[s] for s in sides]
                for spine, side in zip(spines, sides):
                    spine.update(kw)
                # Grid and grid label settings
                # NOTE: Not sure if polar lines inherit tick or grid props
                kw = rc.fill({
                    'color': x + 'tick.color',
                    'labelcolor': 'tick.labelcolor',  # new props
                    'labelsize': 'tick.labelsize',
                    'grid_color': 'grid.color',
                    'grid_alpha': 'grid.alpha',
                    'grid_linewidth': 'grid.linewidth',
                    'grid_linestyle': 'grid.linestyle',
                })
                axis.set_tick_params(which='both', **kw)
                # Label settings that can't be controlled with set_tick_params
                kw = rc.fill({
                    'fontfamily': 'font.family',
                    'weight': 'tick.labelweight'
                })
                for t in axis.get_ticklabels():
                    t.update(kw)
                # Tick locator, which in this case applies to gridlines
                # NOTE: Must convert theta locator input to radians, then back
                # to degrees.
                if locator is not None:
                    if r == 'theta' and (
                            not isinstance(locator, (str, mticker.Locator))):
                        # real axis limits are rad
                        locator = np.deg2rad(locator)
                    locator = axistools.Locator(locator, **locator_kw)
                    locator.set_axis(axis)  # this is what set_locator does
                    grids = np.array(locator())
                    if r == 'r':
                        grids = grids[(grids >= min_) & (grids <= max_)]
                        self.set_rgrids(grids)
                    else:
                        grids = np.rad2deg(grids)
                        grids = grids[(grids >= min_) & (grids <= max_)]
                        # NOTE(review): assumes at least one gridline survives
                        # the limit filter; grids[-1] raises IndexError on an
                        # empty array -- confirm upstream guarantees this.
                        if grids[-1] == min_ + 360:  # exclusive if 360 degrees
                            grids = grids[:-1]
                        self.set_thetagrids(grids)
                # Tick formatter and toggling
                if formatter is not None:
                    formatter = axistools.Formatter(formatter, **formatter_kw)
                    axis.set_major_formatter(formatter)
        # Parent method
        super().format(*args, **kwargs)
# Disabled methods suitable only for cartesian axes
_disable = _disable_decorator(
'Invalid plotting method {!r} for polar axes.')
twinx = _disable(Axes.twinx)
twiny = _disable(Axes.twiny)
matshow = _disable(Axes.matshow)
imshow = _disable(Axes.imshow)
spy = _disable(Axes.spy)
hist = _disable(Axes.hist)
hist2d = _disable(Axes.hist2d)
boxplot = _disable(Axes.boxplot)
violinplot = _disable(Axes.violinplot)
step = _disable(Axes.step)
stem = _disable(Axes.stem)
stackplot = _disable(Axes.stackplot)
table = _disable(Axes.table)
eventplot = _disable(Axes.eventplot)
pie = _disable(Axes.pie)
xcorr = _disable(Axes.xcorr)
acorr = _disable(Axes.acorr)
psd = _disable(Axes.psd)
csd = _disable(Axes.csd)
cohere = _disable(Axes.cohere)
specgram = _disable(Axes.specgram)
angle_spectrum = _disable(Axes.angle_spectrum)
phase_spectrum = _disable(Axes.phase_spectrum)
magnitude_spectrum = _disable(Axes.magnitude_spectrum)
def _circle_path(N=100):
    """Return a circle `~matplotlib.path.Path` used as the outline
    for polar stereographic, azimuthal equidistant, and Lambert
    conformal projections. This was developed from `this cartopy example \
<https://scitools.org.uk/cartopy/docs/v0.15/examples/always_circular_stereo.html>`__.""" # noqa
    # Unit circle sampled at N angles, then scaled and shifted so it spans
    # the (0, 1) x (0, 1) axes coordinates.
    angles = np.linspace(0, 2 * np.pi, N)
    radius = 0.5
    center = [0.5, 0.5]
    vertices = np.column_stack((np.sin(angles), np.cos(angles)))
    return mpath.Path(vertices * radius + center)
class ProjAxes(Axes):
"""Intermediate class, shared by `GeoAxes` and
`BasemapAxes`. Disables methods that are inappropriate for map
projections and adds `ProjAxes.format`, so that arguments
passed to `Axes.format` are identical for `GeoAxes`
and `BasemapAxes`."""
def __init__(self, *args, **kwargs):
"""
See also
--------
`~proplot.subplots.subplots`, `Axes`, `GeoAxes`, `BasemapAxes`
"""
# Store props that let us dynamically and incrementally modify
# line locations and settings like with Cartesian axes
self._boundinglat = None
self._latmax = None
self._latlines = None
self._lonlines = None
self._lonlines_values = None
self._latlines_values = None
self._lonlines_labels = None
self._latlines_labels = None
super().__init__(*args, **kwargs)
def format(self, *,
lonlim=None, latlim=None, boundinglat=None, grid=None,
lonlines=None, lonlocator=None,
latlines=None, latlocator=None, latmax=None,
labels=None, latlabels=None, lonlabels=None,
patch_kw=None, **kwargs,
):
"""
Calls `Axes.format` and `Axes.context`, formats the meridian
and parallel labels, longitude and latitude map limits, geographic
features, and more.
Parameters
----------
lonlim, latlim : (float, float), optional
Longitude and latitude limits of projection, applied
with `~cartopy.mpl.geoaxes.GeoAxes.set_extent`.
For cartopy axes only.
boundinglat : float, optional
The edge latitude for the circle bounding North Pole and
South Pole-centered projections. For cartopy axes only.
grid : bool, optional
Toggles meridian and parallel gridlines on and off. Default is
:rc:`geogrid`.
lonlines, latlines : float or list of float, optional
If float, indicates the *spacing* of meridian and parallel
gridlines. Otherwise, must be a list of floats indicating specific
meridian and parallel gridlines to draw.
lonlocator, latlocator : optional
Aliases for `lonlines`, `latlines`.
latmax : float, optional
The maximum absolute latitude for meridian gridlines. Default is
:rc:`geogrid.latmax`.
labels : bool, optional
Toggles meridian and parallel gridline labels on and off. Default
is :rc:`geogrid.labels`.
lonlabels, latlabels
Whether to label longitudes and latitudes, and on which sides
of the map. There are four different options:
1. Boolean ``True``. Indicates left side for latitudes,
bottom for longitudes.
2. A string, e.g. ``'lr'`` | |
<filename>pynrm/aoinstrument.py
# -*- coding: utf-8 -*-
"""Useful utilities that are not telescope-dependent.
"""
import csv
import os

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import astropy.io.fits as pyfits
class AOInstrument:
"""The AOInstrument Class
"""
#A blank dictionary on startup.
csv_dict = dict()
#Blank reduction, cube. and data directories on startup.
rdir = ''
ddir = ''
cdir = ''
def read_summary_csv(self, filename='datainfo.csv',ddir=''):
"""Read the data from local file into a csv_dict structure.
Notes
-----
At the moment, all data types are strings. It would work better if
the second line of the CSV file was the data type.
"""
#Allow over-riding default data directory.
if (ddir == ''):
ddir = self.ddir
try:
f = open(ddir + filename)
except:
print ("Error: file doesn't exist " + ddir + filename)
raise UserWarning
r = csv.DictReader(f, delimiter=',')
#Read first line to initiate the dictionary
line = next(r)
d = dict()
for k in line.keys():
d[k] = [line[k]]
#Read the rest of the file
for line in r:
for k in line.keys():
d[k].append(line[k])
#Convert to numpy arrays
for k in line.keys():
d[k] = np.array(d[k])
f.close()
self.csv_dict = d
def make_all_darks(self, ddir='', rdir=''):
"""Make all darks in a current directory. This skeleton routine assumes that
keywords "SHRNAME", "NAXIS1" and "NAXIS2" exist.
"""
#Allow over-riding default reduction and data directories.
if (rdir == ''):
rdir = self.rdir
if (ddir == ''):
ddir = self.ddir
darks = np.where(np.array(n2.csv_dict['SHRNAME']) == 'closed')[0]
#Now we need to find unique values of the following:
#NAXIS1, NAXIS2 (plus for nirc2... ITIME, COADDS, MULTISAM)
codes = []
for d in darks:
codes.append(self.csv_dict['NAXIS1'][d] + self.csv_dict['NAXIS2'][d])
codes = np.array(codes)
#For each unique code, find all dark files and call make_dark.
for c in np.unique(codes):
w = np.where(codes == c)[0]
#Only bother if there are at least 3 files.
if (len(w) >= 3):
files = [ddir + self.csv_dict['FILENAME'][darks[ww]] for ww in w]
self.make_dark(files, rdir=rdir)
def make_dark(self,in_files, out_file='dark.fits', rdir=''):
"""This is a basic method to make a dark from several files. It is
generally expected to be over-ridden in derived classes.
Parameters
----------
in_files : array_like (dtype=string). A list if input filenames.
out_file: string
The file to write to.
"""
#Allow over-riding default reduction directory.
if (rdir == ''):
rdir = self.rdir
nf = len(in_files)
in_fits = pyfits.open(in_files[0], ignore_missing_end=True)
adark = in_fits[0].data
in_fits.close()
s = adark.shape
darks = np.zeros((nf,s[0],s[1]))
for i in range(nf):
#Read in the data, linearizing as a matter of principle, and also because
#this routine is used for
in_fits = pyfits.open(in_files[i], ignore_missing_end=True)
adark = in_fits[0].data
in_fits.close()
darks[i,:,:] = adark
med_dark = np.median(darks, axis=0)
pyfits.writeto(rdir + out_file, med_dark, output_verify='ignore')
def info_from_header(self, h):
"""Find important information from the fits header and store in a common format
Prototype function only - to be over-ridden in derived classes.
Parameters
----------
h: The fits header
Returns
-------
(dark_file, flat_file, filter, wave, rad_pixel)
"""
try: filter = h['FILTER']
except:
print ("No FILTER in header")
try: wave = h['WAVE']
except:
print ("No WAVE in header")
try: rad_pix = h['RAD_PIX']
except:
print ("No RAD_PIX in header")
try: targname = h['TARGET']
except:
print ("No TARGET in header")
return {'dark_file':'dark.fits', 'flat_file':'flat.fits', 'filter':filter,
'wave':wave, 'rad_pixel':rad_pixel,'targname':targname,'pupil_type':'circ','pupil_params':dict()}
def mod2pi(self,angle):
""" Convert an angle to the range (-pi,pi)
Parameters
----------
angle: float
input angle
Returns
-------
angle: float
output angle after the mod2pi operation
"""
return np.remainder(angle + np.pi,2*np.pi) - np.pi
    def make_flat(self, in_files, rdir='', out_file='', dark_file='', wave=0.0):
        """Create a flat frame and save to a fits file,
        with an attached bad pixel map as the first fits extension.

        Parameters
        ----------
        in_files : array_like (dtype=string). A list if input filenames.
        dark_file: string
            The dark file, previously created with make_dark
        out_file: string
            The file to write to
        rdir: Over-writing the default reduction directory.
        wave: float
            Wavelength; unused directly here (the header wavelength comes
            from info_from_header).

        Returns
        -------
        Nothing.
        """
        #Allow over-riding default reduction directory.
        if (rdir == ''):
            rdir = self.rdir
        #Create a default flat filename from the input files
        try:
            in_fits = pyfits.open(in_files[0], ignore_missing_end=True)
        except:
            # Fall back to a gzipped copy of the first input file.
            in_fits = pyfits.open(in_files[0]+'.gz', ignore_missing_end=True)
        h = in_fits[0].header
        hinfo = self.info_from_header(h)
        if (out_file == ''):
            out_file = hinfo['flat_file']
        #We use the make_dark function to average our flats. NB we don't destripe.
        # NOTE(review): the base-class make_dark does not accept these keyword
        # arguments -- this call presumably relies on a derived-class
        # override; confirm.
        self.make_dark(in_files, rdir=rdir, out_file=out_file, subtract_median=False, destripe=False, med_threshold=15.0)
        #Now extract the key parts.
        h = pyfits.getheader(rdir + out_file)
        #Add a wavelength to the header
        h['WAVE'] = hinfo['wave']
        if (dark_file ==''):
            dark_file=self.get_dark_filename(h)
        #FIXME: A hack if get_dark_filename returns a non existant file.
        #FIXME: This should be incorporated into get_dark_filename if it is necessary, or
        # otherwise give an error.
        # NOTE(review): this branch needs "import os" at module level.
        if not os.path.isfile(rdir + dark_file):
            # Pick the existing dark whose exposure time is closest; the time
            # is encoded in the filename between the second underscore and
            # ".fits".
            allDarks = [f for f in os.listdir(rdir) if 'dark' in f]
            if 'EXPTIME' in h.keys():
                exptime = h['EXPTIME']*100
            elif 'ITIME' in h.keys():
                exptime = h['ITIME']*100
            allTimes = []
            for ii in range(len(allDarks)):
                count = 0
                # Locate the character just after the second underscore.
                for jj in range(len(allDarks[ii])):
                    if allDarks[ii][jj]=='_':
                        count+=1
                        if count==2:
                            index = jj+1
                allTimes.append(int(allDarks[ii][index:allDarks[ii].find('.fits')]))
            allTimes = np.array(allTimes)
            diffTimes = abs(allTimes-exptime)
            dark_file = allDarks[np.argmin(diffTimes)]
        #Subtract the dark and normalise the flat.
        # NOTE(review): extension 1 is read as a bad pixel map; the base-class
        # make_dark writes no such extension, so this also presumes a
        # derived-class make_dark -- confirm.
        flat = pyfits.getdata(rdir + out_file,0) - pyfits.getdata(rdir + dark_file,0)
        bad = np.logical_or(pyfits.getdata(rdir + out_file,1),pyfits.getdata(rdir + dark_file,1))
        flat[np.where(bad)] = np.median(flat)
        flat /= np.median(flat)
        #Write this to a file
        hl = pyfits.HDUList()
        hl.append(pyfits.ImageHDU(flat,h))
        hl.append(pyfits.ImageHDU(np.uint8(bad)))
        # NOTE(review): "clobber" is deprecated in recent astropy in favour
        # of "overwrite".
        hl.writeto(rdir + out_file,clobber=True)
        plt.figure(1)
        plt.imshow(flat, cmap=cm.gray, interpolation='nearest')
        plt.title('Flat')
    def fix_bad_pixels(self,im,bad,fmask):
        """Fix the bad pixels, using a Fourier technique that adapts to the
        sampling of each particular pupil/filter combination.

        Parameters
        ----------
        im : (N,N) array (dtype=float)
            An image, already chopped to the subarr x subarr size.
            NOTE: modified in place (bad pixels zeroed, then corrected).
        bad: (N,N) array (dtype=int)
            A bad pixel map
        fmask: (N,N) array (dtype=int)
            A mask containing the region in the Fourier plane where there is
            no expected signal.

        Returns
        -------
        The image with bad pixel values optimally corrected.
        """
        # fmask defines the null space of the image Fourier transform.
        wft = np.where(fmask)
        # Where the bad pixel array is non-zero.
        w = np.where(bad)
        # The bad matrix should map the bad pixels to the real and imaginary
        # parts of the null space of the image Fourier transform
        badmat = np.zeros((len(w[0]),len(wft[0])*2))
        #print("Bad matrix shape: " + str(badmat.shape))
        # Create a uv grid. Math should be correct here, but the second vector could be
        # 2*np.pi*np.arange(im.shape[0])/float(im.shape[0]) and it would still work.
        xy = np.meshgrid(2*np.pi*np.arange(im.shape[1]//2 + 1)/float(im.shape[1]),
            2*np.pi*(((np.arange(im.shape[0]) + im.shape[0]//2) % im.shape[0]) - im.shape[0]//2)/float(im.shape[0]))
        for i in range(len(w[0])):
            # Avoiding the fft is marginally faster here...
            # Each row is the DFT basis evaluated at one bad-pixel position,
            # restricted to the masked ("null") Fourier elements.
            bft = np.exp(-1j*(w[0][i]*xy[1] + w[1][i]*xy[0]))
            badmat[i,:] = np.append(bft[wft].real, bft[wft].imag)
        #A dodgy pseudo-inverse that needs an "invert" is faster than the la.pinv function
        #Unless things are really screwed, the matrix shouldn't be singular.
        hb = np.transpose(np.conj(badmat))
        ibadmat = np.dot(hb,np.linalg.inv(np.dot(badmat,hb)))
        #Now find the image Fourier transform on the "zero" region in the Fourier plane
        #To minimise numerical errors, set the bad pixels to zero at the start.
        im[w]=0
        ftimz = (np.fft.rfft2(im))[wft]
        # Now compute the bad pixel corrections. (NB a sanity check here is
        # that the imaginary part really is 0)
        addit = -np.real(np.dot(np.append(ftimz.real, ftimz.imag),ibadmat))
        #FIXIT
        #We would rather use linalg.solve than an inversion!
        #addit2 =
        #import pdb; pdb.set_trace()
        # ibadmat = np.solve(
        # plt.clf()
        # plt.plot(np.real(np.dot(ftimz,ibadmat)), np.imag(np.dot(ftimz,ibadmat)))
        # raise UserWarning
        im[w] += addit
        return im
def regrid_fft(self,im,new_shape, fmask=[]):
"""Regrid onto a larger number of pixels using an fft. This is optimal
for Nyquist sampled data.
Parameters
----------
im: array
The input image.
new_shape: (new_y,new_x)
The new shape
Notes
------
TODO: This should work with an arbitrary number of dimensions
"""
ftim = np.fft.rfft2(im)
if len(fmask) > 0:
ftim[np.where(fmask)] = 0
new_ftim = np.zeros((new_shape[0], new_shape[1]/2 + 1),dtype='complex')
new_ftim[0:ftim.shape[0]/2,0:ftim.shape[1]] = \
ftim[0:ftim.shape[0]/2,0:ftim.shape[1]]
new_ftim[new_shape[0]-ftim.shape[0]/2:,0:ftim.shape[1]] = \
ftim[ftim.shape[0]/2:,0:ftim.shape[1]]
return np.fft.irfft2(new_ftim)
def hexagon(self, dim, width):
"""This function creates a hexagon.
Parameters
----------
dim: int
Size of the 2D array
width: int
flat-to-flat width of the hexagon
Returns
-------
pupil: float array (sz,sz)
2D array hexagonal pupil mask
"""
x = np.arange(dim)-dim/2.0
xy = np.meshgrid(x,x)
xx = xy[1]
yy = xy[0]
w = np.where( (yy | |
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.hybrid_shape_interfaces.hybrid_shape_circle import HybridShapeCircle
from pycatia.in_interfaces.reference import Reference
class HybridShapeCircleBitangentPoint(HybridShapeCircle):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| CATGSMIDLItf.HybridShapeCircle
| HybridShapeCircleBitangentPoint
|
| Represents the hybrid shape circle object defined using a point and tangent to
| two curves.
| Role: To access the data of the hybrid shape circle object.
|
| This data includes:
|
| The circle passing point
| The two curves to which the circle is tangent
| The surface that supports the circle
| The orientation of each curve
|
| Use the CATIAHybridShapeFactory to create a HybridShapeCircleBitangentPoint
| object.
|
| See also:
| HybridShapeFactory
"""
    def __init__(self, com_object):
        """Wrap *com_object*, the underlying CATIA COM object."""
        super().__init__(com_object)
        # Keep a descriptively-named handle so the properties below read
        # naturally.
        self.hybrid_shape_circle_bitangent_point = com_object
    @property
    def begin_of_circle(self) -> int:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property BeginOfCircle() As long
                |
                |     Return or Set the number of the beginning curve of the
                |     circle. This parameter is used to stabilize the resulting
                |     circle
                |
                |     Example:
                |
                |           This example set the beginning wire index of
                |          the hybShpcircle hybrid shape circle
                |
                |          hybShpcircle.BeginOfCircle = 1

        :return: int
        :rtype: int
        """
        return self.hybrid_shape_circle_bitangent_point.BeginOfCircle
    @begin_of_circle.setter
    def begin_of_circle(self, value: int):
        """
        Set the number of the beginning curve of the circle (see the
        property getter above).

        :param int value:
        """
        self.hybrid_shape_circle_bitangent_point.BeginOfCircle = value
    @property
    def curve1(self) -> Reference:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property Curve1() As Reference
                |
                |     Returns or sets the first curve to which the circle is or
                |     will be tangent.
                |     Sub-element(s) supported (see Boundary object):
                |     TriDimFeatEdge or BiDimFeatEdge.
                |
                |     Example:
                |         This example retrieves in HybShpCircleFirstCurve the
                |         first curve to which the HybShpCircle hybrid shape
                |         circle is tangent.
                |
                |         Dim HybShpCircleFirstCurve As Reference
                |         HybShpCircleFirstCurve = HybShpCircle.Curve1

        :return: Reference
        :rtype: Reference
        """
        return Reference(self.hybrid_shape_circle_bitangent_point.Curve1)
    @curve1.setter
    def curve1(self, reference_curve: Reference):
        """
        Set the first tangency curve; the underlying COM property receives
        the wrapped com_object.

        :param Reference reference_curve:
        """
        self.hybrid_shape_circle_bitangent_point.Curve1 = reference_curve.com_object
    @property
    def curve2(self) -> Reference:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property Curve2() As Reference
                |
                |     Returns or sets the second curve to which the circle is
                |     or will be tangent.
                |     Sub-element(s) supported (see Boundary object):
                |     TriDimFeatEdge or BiDimFeatEdge.
                |
                |     Example:
                |         This example sets the second curve to which the
                |         HybShpCircle hybrid shape circle will be tangent to
                |         Crv5.
                |
                |         HybShpCircle.Curve2 Crv5

        :return: Reference
        :rtype: Reference
        """
        return Reference(self.hybrid_shape_circle_bitangent_point.Curve2)
    @curve2.setter
    def curve2(self, reference_curve: Reference):
        """
        Set the second tangency curve; the underlying COM property receives
        the wrapped com_object.

        :param Reference reference_curve:
        """
        self.hybrid_shape_circle_bitangent_point.Curve2 = reference_curve.com_object
    @property
    def discrimination_index(self) -> int:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property DiscriminationIndex() As long
                |
                |     Return or set the discrimination index of the current
                |     circle. Several resulting solutions produced by the
                |     operator can be same oriented regarding to the input wire
                |     bodies. In such a case, they are sorted in order to
                |     distinguish them. The Sequence FirstOrientation -
                |     SecondOrientation - DiscriminationIndex allows you to
                |     identify a unique one-domain solution.
                |
                |     Example:
                |
                |           This example set the discrimination index of
                |          the hybShpcircle hybrid shape circle
                |
                |          hybShpcircle.DiscriminationIndex = 2

        :return: int
        :rtype: int
        """
        return self.hybrid_shape_circle_bitangent_point.DiscriminationIndex
    @discrimination_index.setter
    def discrimination_index(self, value: int):
        """
        Set the discrimination index used to disambiguate same-oriented
        solutions (see the property getter above).

        :param int value:
        """
        self.hybrid_shape_circle_bitangent_point.DiscriminationIndex = value
    @property
    def orientation1(self) -> int:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property Orientation1() As long
                |
                |     Returns or sets the orientation of the first curve to
                |     which the circle is tangent.
                |     Role: The orientation of the first curve determines the
                |     side of this curve taken into account to find the point
                |     where the circle is tangent to the curve. This side is
                |     determined by the cross product of the normal to the
                |     support and a tangent to the curve oriented using the
                |     curve orientation.
                |     Legal values: 1 to state that the side of the curve to be
                |     taken into account is the side shown by the vector
                |     resulting from this cross product, and -1 otherwise.
                |
                |     Example:
                |         This example sets the orientation of the first curve
                |         to which the HybShpCircle hybrid shape circle is
                |         tangent to reverse.
                |
                |         HybShpCircle.Orientation1 -1

        :return: int
        :rtype: int
        """
        return self.hybrid_shape_circle_bitangent_point.Orientation1
    @orientation1.setter
    def orientation1(self, value: int):
        """
        Set the orientation (1 or -1) of the first tangency curve (see the
        property getter above).

        :param int value:
        """
        self.hybrid_shape_circle_bitangent_point.Orientation1 = value
@property
def orientation2(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Orientation2() As long
|
| Returns or sets the orientation of the second curve to which the circle is
| tangent.
| Role: The orientation of the second curve determines the side of this curve
| taken into account to find the point where the circle is tangent to the curve.
| This side is determined by the cross product of the normal to the support and a
| tangent to the curve oriented using the curve orientation.
| Legal values: 1 to state that the side of the curve to be taken into
| account is the side shown by the vector resulting from this cross product, and
| -1 otherwise.
|
| Example:
| This example retrieves in HybShpCircleOrientation the orientation of
| the second curve to which the HybShpCircle hybrid shape circle is
| tangent.
|
| HybShpCircleOrientation = HybShpCircle.Orientation2
:return: int
:rtype: int
"""
return self.hybrid_shape_circle_bitangent_point.Orientation2
@orientation2.setter
def orientation2(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_circle_bitangent_point.Orientation2 = value
@property
def pt(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Pt() As Reference
|
| Returns or sets the circle passing point. This point must lie on second
| curve.
| Sub-element(s) supported (see Boundary object): Vertex.
|
| Example:
| This example retrieves the passing point of the HybShpCircle hybrid
| shape circle in HybShpCirclePassingPoint point.
|
| Dim HybShpCirclePassingPoint As Reference
| Set HybShpCirclePassingPoint = HybShpCircle.Pt
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_circle_bitangent_point.Pt)
@pt.setter
def pt(self, reference_point: Reference):
"""
:param Reference reference_point:
"""
self.hybrid_shape_circle_bitangent_point.Pt = reference_point.com_object
@property
def support(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Support() As Reference
|
| Returns or sets the circle support surface.
| Sub-element(s) supported (see Boundary object): Face.
|
| Example:
| This example retrieves in HybShpCircleSupportSurf the support surface
| of the HybShpCircle hybrid shape circle.
|
| Dim HybShpCircleSupportSurf As Reference
| HybShpCircleSupportSurf = HybShpCircle.Support
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_circle_bitangent_point.Support)
@support.setter
def support(self, reference_support: Reference):
"""
:param Reference reference_support:
"""
self.hybrid_shape_circle_bitangent_point.Support = reference_support.com_object
@property
def tangent_orientation1(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property TangentOrientation1() As long
|
| Returns or sets the tangent orientation of the circle first reference
| element. compared to the circle itself
|
| Example:
|
| This example retrieves the tangent orientation of first reference
| element of
| the hybShpcircle hybrid shape circle in firstOrient.
|
|
| Dim firstOrient As long
| firstOrient = hybShpcircle.FirstTangentOrientation
:return: int
:rtype: int
"""
return self.hybrid_shape_circle_bitangent_point.TangentOrientation1
| |
# Repository: jpapadakis/gdal
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test /vsizip/vsimem/
# Author: <NAME> <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2010-2014, <NAME> <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import random
import gdaltest
from osgeo import gdal
import pytest
###############################################################################
# Test writing a ZIP with multiple files and directories
def test_vsizip_1():
    """Write a ZIP with multiple files and directories via /vsizip/vsimem/,
    then read it back, including the /vsizip/{...} alternate URI syntax and
    nested zip-in-zip access. The write/close ordering below is significant:
    only one in-archive file may be open for writing at a time."""
    # We can keep the handle open during all the ZIP writing
    hZIP = gdal.VSIFOpenL("/vsizip/vsimem/test.zip", "wb")
    assert hZIP is not None, 'fail 1'
    # One way to create a directory (trailing slash)
    f = gdal.VSIFOpenL("/vsizip/vsimem/test.zip/subdir2/", "wb")
    assert f is not None, 'fail 2'
    gdal.VSIFCloseL(f)
    # A more natural one
    gdal.Mkdir("/vsizip/vsimem/test.zip/subdir1", 0)
    # Create 1st file
    f2 = gdal.VSIFOpenL("/vsizip/vsimem/test.zip/subdir3/abcd", "wb")
    assert f2 is not None, 'fail 3'
    gdal.VSIFWriteL("abcd", 1, 4, f2)
    gdal.VSIFCloseL(f2)
    # Test that we cannot read a zip file being written
    gdal.ErrorReset()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    f = gdal.VSIFOpenL("/vsizip/vsimem/test.zip/subdir3/abcd", "rb")
    gdal.PopErrorHandler()
    assert gdal.GetLastErrorMsg() == 'Cannot read a zip file being written', \
        'expected error'
    assert f is None, 'should not have been successful 1'
    # Create 2nd file
    f3 = gdal.VSIFOpenL("/vsizip/vsimem/test.zip/subdir3/efghi", "wb")
    assert f3 is not None, 'fail 4'
    gdal.VSIFWriteL("efghi", 1, 5, f3)
    # Try creating a 3rd file while the 2nd is still open for writing
    gdal.ErrorReset()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    f4 = gdal.VSIFOpenL("/vsizip/vsimem/test.zip/that_wont_work", "wb")
    gdal.PopErrorHandler()
    assert gdal.GetLastErrorMsg() == 'Cannot create that_wont_work while another file is being written in the .zip', \
        'expected error'
    assert f4 is None, 'should not have been successful 2'
    gdal.VSIFCloseL(f3)
    # Now we can close the main handle
    gdal.VSIFCloseL(hZIP)
    # ERROR 6: Support only 1 file in archive file /vsimem/test.zip when no explicit in-archive filename is specified
    gdal.ErrorReset()
    with gdaltest.error_handler():
        f = gdal.VSIFOpenL('/vsizip/vsimem/test.zip', 'rb')
    if f is not None:
        gdal.VSIFCloseL(f)
    assert gdal.GetLastErrorMsg() != '', 'expected error'
    f = gdal.VSIFOpenL("/vsizip/vsimem/test.zip/subdir3/abcd", "rb")
    assert f is not None, 'fail 5'
    data = gdal.VSIFReadL(1, 4, f)
    gdal.VSIFCloseL(f)
    assert data.decode('ASCII') == 'abcd'
    # Test alternate uri syntax: /vsizip/{outer_path}/inner_path
    gdal.Rename("/vsimem/test.zip", "/vsimem/test.xxx")
    f = gdal.VSIFOpenL("/vsizip/{/vsimem/test.xxx}/subdir3/abcd", "rb")
    assert f is not None
    data = gdal.VSIFReadL(1, 4, f)
    gdal.VSIFCloseL(f)
    assert data.decode('ASCII') == 'abcd'
    # With a trailing slash
    f = gdal.VSIFOpenL("/vsizip/{/vsimem/test.xxx}/subdir3/abcd/", "rb")
    assert f is not None
    gdal.VSIFCloseL(f)
    # Test ReadDir()
    assert len(gdal.ReadDir("/vsizip/{/vsimem/test.xxx}")) == 3
    # Unbalanced curls
    f = gdal.VSIFOpenL("/vsizip/{/vsimem/test.xxx", "rb")
    assert f is None
    # Non existing mainfile
    f = gdal.VSIFOpenL("/vsizip/{/vsimem/test.xxx}/bla", "rb")
    assert f is None
    # Non existing subfile
    f = gdal.VSIFOpenL("/vsizip/{/vsimem/test.zzz}/bla", "rb")
    assert f is None
    # Wrong syntax
    f = gdal.VSIFOpenL("/vsizip/{/vsimem/test.xxx}.aux.xml", "rb")
    assert f is None
    # Test nested { { } }: copy the zip into another zip, then read through both
    hZIP = gdal.VSIFOpenL("/vsizip/{/vsimem/zipinzip.yyy}", "wb")
    assert hZIP is not None, 'fail 1'
    f = gdal.VSIFOpenL("/vsizip/{/vsimem/zipinzip.yyy}/test.xxx", "wb")
    f_src = gdal.VSIFOpenL("/vsimem/test.xxx", "rb")
    data = gdal.VSIFReadL(1, 10000, f_src)
    gdal.VSIFCloseL(f_src)
    gdal.VSIFWriteL(data, 1, len(data), f)
    gdal.VSIFCloseL(f)
    gdal.VSIFCloseL(hZIP)
    f = gdal.VSIFOpenL("/vsizip/{/vsizip/{/vsimem/zipinzip.yyy}/test.xxx}/subdir3/abcd/", "rb")
    assert f is not None
    data = gdal.VSIFReadL(1, 4, f)
    gdal.VSIFCloseL(f)
    assert data.decode('ASCII') == 'abcd'
    gdal.Unlink("/vsimem/test.xxx")
    gdal.Unlink("/vsimem/zipinzip.yyy")
    # Test VSIStatL on a non existing file
    assert gdal.VSIStatL('/vsizip//vsimem/foo.zip') is None
    # Test ReadDir on a non existing file
    assert gdal.ReadDir('/vsizip//vsimem/foo.zip') is None
###############################################################################
# Test writing 2 files in the ZIP by closing it completely between the 2
def test_vsizip_2():
    """Write 2 files into the same ZIP by fully closing it between the two
    (append mode), and check both the listing and the contents survive."""
    zip_name = '/vsimem/test2.zip'
    fmain = gdal.VSIFOpenL("/vsizip/" + zip_name + "/foo.bar", "wb")
    assert fmain is not None, 'fail 1'
    gdal.VSIFWriteL("12345", 1, 5, fmain)
    gdal.VSIFCloseL(fmain)
    content = gdal.ReadDir("/vsizip/" + zip_name)
    assert content == ['foo.bar'], 'bad content 1'
    # Now append a second file
    fmain = gdal.VSIFOpenL("/vsizip/" + zip_name + "/bar.baz", "wb")
    assert fmain is not None, 'fail 2'
    gdal.VSIFWriteL("67890", 1, 5, fmain)
    # Listing must fail while the archive is being written to
    gdal.ErrorReset()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    content = gdal.ReadDir("/vsizip/" + zip_name)
    gdal.PopErrorHandler()
    assert gdal.GetLastErrorMsg() == 'Cannot read a zip file being written', \
        'expected error'
    assert content is None, 'bad content 2'
    gdal.VSIFCloseL(fmain)
    content = gdal.ReadDir("/vsizip/" + zip_name)
    assert content == ['foo.bar', 'bar.baz'], 'bad content 3'
    # Read both files back
    fmain = gdal.VSIFOpenL("/vsizip/" + zip_name + "/foo.bar", "rb")
    assert fmain is not None, 'fail 3'
    data = gdal.VSIFReadL(1, 5, fmain)
    gdal.VSIFCloseL(fmain)
    assert data.decode('ASCII') == '12345'
    fmain = gdal.VSIFOpenL("/vsizip/" + zip_name + "/bar.baz", "rb")
    assert fmain is not None, 'fail 4'
    data = gdal.VSIFReadL(1, 5, fmain)
    gdal.VSIFCloseL(fmain)
    assert data.decode('ASCII') == '67890'
    gdal.Unlink(zip_name)
###############################################################################
# Test opening in write mode a file inside a zip archive whose content has been listed before (testcase for fix of r22625)
def test_vsizip_3():
    """Open in write mode a file inside a zip archive whose content has been
    listed before (regression test for the fix in r22625)."""
    fmain = gdal.VSIFOpenL("/vsizip/vsimem/test3.zip", "wb")
    f = gdal.VSIFOpenL("/vsizip/vsimem/test3.zip/foo", "wb")
    gdal.VSIFWriteL("foo", 1, 3, f)
    gdal.VSIFCloseL(f)
    f = gdal.VSIFOpenL("/vsizip/vsimem/test3.zip/bar", "wb")
    gdal.VSIFWriteL("bar", 1, 3, f)
    gdal.VSIFCloseL(f)
    gdal.VSIFCloseL(fmain)
    # list the archive, then append another file and list again
    gdal.ReadDir("/vsizip/vsimem/test3.zip")
    f = gdal.VSIFOpenL("/vsizip/vsimem/test3.zip/baz", "wb")
    gdal.VSIFWriteL("baz", 1, 3, f)
    gdal.VSIFCloseL(f)
    res = gdal.ReadDir("/vsizip/vsimem/test3.zip")
    gdal.Unlink("/vsimem/test3.zip")
    assert res == ['foo', 'bar', 'baz']
###############################################################################
# Test ReadRecursive on valid zip
def test_vsizip_4():
    """ReadDirRecursive on a valid zip must list files and directories depth-first."""
    # read recursive and validate content
    res = gdal.ReadDirRecursive("/vsizip/data/testzip.zip")
    assert res is not None, 'fail read'
    assert (res == ['subdir/', 'subdir/subdir/', 'subdir/subdir/uint16.tif',
                    'subdir/subdir/test_rpc.txt', 'subdir/test_rpc.txt',
                    'test_rpc.txt', 'uint16.tif']), 'bad content'
###############################################################################
# Test ReadRecursive on deep zip
def test_vsizip_5():
    """ReadDirRecursive on a zip with a 1000-level-deep directory tree."""
    # make file in memory: a/a/a/... nested 1000 times
    fmain = gdal.VSIFOpenL('/vsizip/vsimem/bigdepthzip.zip', 'wb')
    assert fmain is not None
    filename = "a" + "/a" * 1000
    finside = gdal.VSIFOpenL('/vsizip/vsimem/bigdepthzip.zip/' + filename, 'wb')
    assert finside is not None
    gdal.VSIFCloseL(finside)
    gdal.VSIFCloseL(fmain)
    # read recursive and validate content
    res = gdal.ReadDirRecursive("/vsizip/vsimem/bigdepthzip.zip")
    assert res is not None, 'fail read'
    assert len(res) == 1001, ('wrong size: ' + str(len(res)))
    assert res[10] == 'a/a/a/a/a/a/a/a/a/a/a/', ('bad content: ' + res[10])
    gdal.Unlink("/vsimem/bigdepthzip.zip")
###############################################################################
# Test writing 2 files with same name in a ZIP (#4785)
def test_vsizip_6():
    """Writing 2 files with the same name in a ZIP must fail (#4785), both
    while the main archive handle is kept open and when it is closed each time."""
    # Maintain ZIP file opened
    fmain = gdal.VSIFOpenL("/vsizip/vsimem/test6.zip", "wb")
    f = gdal.VSIFOpenL("/vsizip/vsimem/test6.zip/foo.bar", "wb")
    assert f is not None
    gdal.VSIFWriteL("12345", 1, 5, f)
    gdal.VSIFCloseL(f)
    f = None
    # second open for write with the same name must fail
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    f = gdal.VSIFOpenL("/vsizip/vsimem/test6.zip/foo.bar", "wb")
    gdal.PopErrorHandler()
    if f is not None:
        gdal.VSIFCloseL(f)
        pytest.fail()
    gdal.VSIFCloseL(fmain)
    fmain = None
    gdal.Unlink("/vsimem/test6.zip")
    # Now close it each time
    f = gdal.VSIFOpenL("/vsizip/vsimem/test6.zip/foo.bar", "wb")
    assert f is not None
    gdal.VSIFWriteL("12345", 1, 5, f)
    gdal.VSIFCloseL(f)
    f = None
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    f = gdal.VSIFOpenL("/vsizip/vsimem/test6.zip/foo.bar", "wb")
    gdal.PopErrorHandler()
    if f is not None:
        gdal.VSIFCloseL(f)
        pytest.fail()
    gdal.Unlink("/vsimem/test6.zip")
###############################################################################
# Test that we use the extended field for UTF-8 filenames (#5361).
def test_vsizip_7():
    """Check that UTF-8 filenames stored in the ZIP extended field are decoded (#5361)."""
    content = gdal.ReadDir("/vsizip/data/cp866_plus_utf8.zip")
    # The original exec()/bare-except dance was Python-2 compatibility cruft
    # (u'' literals); on Python 3 a direct comparison is equivalent.
    expected = ['\u0430\u0431\u0432\u0433\u0434\u0435',
                '\u0436\u0437\u0438\u0439\u043a\u043b']
    if content != expected:
        print(content)
        pytest.fail('bad content')
###############################################################################
# Basic test for ZIP64 support (5 GB file that compresses in less than 4 GB)
def test_vsizip_8():
    """ZIP64: a 5 GB file that compresses to less than 4 GB must report its full size."""
    expected_size = 5000 * 1000 * 1000 + 1
    stat = gdal.VSIStatL('/vsizip/vsizip/data/zero.bin.zip.zip/zero.bin.zip')
    assert stat.size == expected_size
###############################################################################
# Basic test for ZIP64 support (5 GB file that is stored)
def test_vsizip_9():
    """ZIP64: a 5 GB stored (uncompressed) member must be seekable to its last byte."""
    big_path = '/vsizip//vsisparse/data/zero_stored.bin.xml.zip/zero.bin'
    small_path = '/vsizip//vsisparse/data/zero_stored.bin.xml.zip/hello.txt'
    big_size = 5000 * 1000 * 1000 + 1
    assert gdal.VSIStatL(big_path).size == big_size
    assert gdal.VSIStatL(small_path).size == 6
    # read the very last byte of the big member
    f = gdal.VSIFOpenL(big_path, 'rb')
    gdal.VSIFSeekL(f, 5000 * 1000 * 1000, 0)
    data = gdal.VSIFReadL(1, 1, f)
    gdal.VSIFCloseL(f)
    assert data.decode('ascii') == '\x03'
    # the small member must be fully readable too
    f = gdal.VSIFOpenL(small_path, 'rb')
    data = gdal.VSIFReadL(1, 6, f)
    gdal.VSIFCloseL(f)
    assert data.decode('ascii') == 'HELLO\n'
###############################################################################
# Test that we recode filenames in ZIP (#5361)
def test_vsizip_10():
    """Check that ZIP filenames are recoded from CP866 when requested (#5361)."""
    gdal.SetConfigOption('CPL_ZIP_ENCODING', 'CP866')
    content = gdal.ReadDir("/vsizip/data/cp866.zip")
    gdal.SetConfigOption('CPL_ZIP_ENCODING', None)
    # The original exec()/bare-except dance was Python-2 compatibility cruft
    # (u'' literals); on Python 3 a direct comparison is equivalent.
    expected = ['\u0430\u0431\u0432\u0433\u0434\u0435',
                '\u0436\u0437\u0438\u0439\u043a\u043b']
    if content != expected:
        if gdal.GetLastErrorMsg().find('Recode from CP866 to UTF-8 not supported') >= 0:
            pytest.skip()
        print(content)
        pytest.fail('bad content')
###############################################################################
# Test that we don't do anything with ZIP with filenames in UTF-8 | |
price_df = self._get_price_df()
return self._get_data_list(multivariate_df=price_df)
def _get_price_return_df_list(self):
price_return_df = self._get_price_return_df()
return self._get_data_list(multivariate_df=price_return_df)
def _get_data_list(self, multivariate_df: pd.DataFrame):
"""
Returns a list of DataFrames. The ith DataFrame represents the data to be used for the signal observed on the
ith observation date of the ith set of eligible tickers
:param multivariate_df: pd.DataFrame
:return: list of pd.DataFrame
"""
# each element (which is a list of str) in this list represents the tickers that has a defined signal for the
# ith observation date
eligible_tickers_list = self._get_eligible_tickers_list()
# adjust the observation date to only include dates that exists in the price DataFrame
signal_obs_calendar = self.signal_df.index
data_obs_calendar = multivariate_df.index
adj_obs_date_i_list = [data_obs_calendar.get_loc(obs_date, method='ffill')
for obs_date in signal_obs_calendar]
# this looks messy but all we are doing is to create a list of DataFrames where the column and row selection is
# dictated by the adjusted observation dates, eligible tickers and observation window
num_obs_dates = len(signal_obs_calendar)
if self.observation_window is None:
# use no lower limit for the observation (starting from the oldest available observation date)
df_list = [multivariate_df.iloc[:adj_obs_date_i_list[i] + 1, :][eligible_tickers_list[i]]
for i in range(num_obs_dates)]
elif isinstance(self.observation_window, int):
df_list = [multivariate_df.iloc[adj_obs_date_i_list[i] - self.observation_window + 1:
adj_obs_date_i_list[i] + 1, :][eligible_tickers_list[i]]
for i in range(num_obs_dates)]
else:
raise ValueError('observation_window needs to be an int or None')
return df_list
def _filter_price_based_on_frequency(self, price_df: pd.DataFrame) -> pd.DataFrame:
"""
Select prices based on a particular weekday (represented by a str) or a specific interval (int)
:param price_df: pd.DataFrame
:return: pd.DataFrame
"""
# filter out rows if you have specified certain observation intervals or weekdays
if isinstance(self.price_obs_freq, str):
price_df = price_df[price_df.index.weekday == self._weekday_i_dict[self.price_obs_freq]]
elif isinstance(self.price_obs_freq, int):
# sort index in descending order. this is done to have the count start from the latest observation date
price_df = price_df.sort_index(ascending=False).iloc[::self.price_obs_freq, :].sort_index()
return price_df
def get_annualization_factor(self) -> float:
"""
Calculate an annualization factor as the ratio of 252 and the return lag.
:return: float
"""
if isinstance(self.price_obs_freq, str):
annualization_factor = 252 / 5
else:
annualization_factor = 252 / self.price_obs_freq
return annualization_factor
@staticmethod
def _clean_price_df(raw_price_df: pd.DataFrame, ffill: bool, bfill: bool):
"""
Replaces nan with other available prices
:param raw_price_df: pd.DataFrame
:param ffill: bool -> if price is nan, replace it with the closest price available in the past
:param bfill: bool -> if price is nan, replace it with the closest price available in the future
:return:
"""
clean_price_df = raw_price_df.copy()
if ffill:
clean_price_df.fillna(method='ffill', inplace=True)
if bfill:
clean_price_df.fillna(method='bfill', inplace=True)
return clean_price_df
# ------------------------------------------------------------------------------------------------------------------
# get and setter methods
    @property
    def price_obs_freq(self):
        """Price observation frequency: a lower-case weekday name (str) or a day interval (int >= 1)."""
        return self._price_obs_freq
@price_obs_freq.setter
def price_obs_freq(self, price_obs_freq: {str, int}):
if isinstance(price_obs_freq, str) and price_obs_freq.lower() in list(self._weekday_i_dict.keys()):
self._price_obs_freq = price_obs_freq.lower()
elif isinstance(price_obs_freq, int) and price_obs_freq >= 1:
self._price_obs_freq = price_obs_freq
elif price_obs_freq is None:
self._price_obs_freq = 1
else:
raise ValueError("price_obs_freq needs to be an int larger or equal to 1 or a string equal to '%s'."
% "' or '".join(self._weekday_i_dict.keys()))
class _PriceBasedProportionalWeight(_PriceBasedWeight, _ProportionalWeight):
    """Weights that are (inversely) proportional to a price-derived metric.
    Combines _PriceBasedWeight and _ProportionalWeight."""

    def __init__(self, signal_df: pd.DataFrame, total_return: bool, currency: str, price_obs_freq: {str, int},
                 non_daily_price_obs: bool, observation_window: {int, list, None}, clean_data: bool,
                 inversely: bool, max_instrument_weight: {float, None}, min_instrument_weight: {float, None}):
        # initialise both parents explicitly
        _PriceBasedWeight.__init__(
            self, signal_df=signal_df, total_return=total_return, currency=currency,
            price_obs_freq=price_obs_freq, non_daily_price_obs=non_daily_price_obs,
            observation_window=observation_window, clean_data=clean_data,
            max_instrument_weight=max_instrument_weight, min_instrument_weight=min_instrument_weight)
        _ProportionalWeight.__init__(
            self, signal_df=signal_df, inversely=inversely,
            max_instrument_weight=max_instrument_weight,
            min_instrument_weight=min_instrument_weight)
class VolatilityWeight(_PriceBasedProportionalWeight):
    """Weights instruments by realized volatility; by default inversely, so less
    volatile instruments receive a larger weight. Subclass of
    _PriceBasedProportionalWeight."""

    def __init__(self, volatility_observation_period: {int, list}, inversely: bool = True,
                 signal_df: pd.DataFrame = None,
                 total_return: bool = True, currency: str = None, price_obs_freq: {str, int}=None,
                 non_daily_price_obs: bool = False, max_instrument_weight: float = None, min_instrument_weight: float = None,
                 clean_data: bool = False):
        super().__init__(signal_df=signal_df, total_return=total_return, currency=currency,
                         price_obs_freq=price_obs_freq, non_daily_price_obs=non_daily_price_obs,
                         observation_window=volatility_observation_period, clean_data=clean_data,
                         inversely=inversely, max_instrument_weight=max_instrument_weight,
                         min_instrument_weight=min_instrument_weight)
        self.volatility_observation_period = volatility_observation_period

    def _get_dataframe(self):
        """Return the realized-volatility DataFrame used as the weighting metric."""
        annualization_factor = self.get_annualization_factor()
        price_df = self._get_price_df()
        # with non-daily observations or a weekday frequency the returns are
        # one step apart; otherwise lag by the integer observation frequency
        if self.non_daily_price_obs or isinstance(self.price_obs_freq, str):
            return_lag = 1
        else:
            return_lag = self.price_obs_freq
        return realized_volatility(multivariate_price_df=price_df, vol_lag=self.volatility_observation_period,
                                   annualized_factor=annualization_factor, return_lag=return_lag)

    def get_weight_desc(self):
        """Return a human-readable description of the weighting scheme."""
        if self.inversely:
            return 'weight is inversely proportional to realized volatility'
        return 'weight is proportional to realized volatility'

    # ------------------------------------------------------------------------------------------------------------------
    # get and setter methods
    @property
    def volatility_observation_period(self):
        """Alias of the generic observation_window attribute."""
        return self.observation_window

    @volatility_observation_period.setter
    def volatility_observation_period(self, volatility_observation_period: {int, list}):
        self.observation_window = volatility_observation_period
class _OptimizedWeight(_PriceBasedWeight):
    """Class definition of _OptimizedWeight.

    Base class for weighting schemes that solve a portfolio optimization per
    signal observation date, using per-date covariance matrices (and mean
    returns when requested). Subclasses supply the numerical optimizer via
    _optimizer and, when one exists, a closed-form solution via
    _theoretical_optimizer.
    """

    def __init__(self, signal_df: pd.DataFrame, total_return: bool, currency: str, max_instrument_weight: {float, None},
                 min_instrument_weight: {float, None}, max_total_weight: {float, None}, min_total_weight: {float, None},
                 price_return_lag: int, non_daily_price_obs: bool, observation_window: int, calculate_mean_returns: bool,
                 has_analytical_solution: bool, clean_data: bool):
        super().__init__(signal_df=signal_df, total_return=total_return, currency=currency, price_obs_freq=price_return_lag,
                         non_daily_price_obs=non_daily_price_obs, clean_data=clean_data, observation_window=observation_window,
                         max_instrument_weight=max_instrument_weight, min_instrument_weight=min_instrument_weight)
        self.max_total_weight = max_total_weight
        self.min_total_weight = min_total_weight
        self.calculate_mean_returns = calculate_mean_returns
        self.has_analytical_solution = has_analytical_solution

    def _optimizer(self, cov_matrix: np.array, mean_returns: {np.array, None}, initial_guess: np.array) -> np.array:
        # to be overridden by subclasses with a numerical (constrained) optimizer
        raise ValueError('an optimizer has not been specified')

    def _theoretical_optimizer(self, cov_matrix: np.array, mean_returns: {np.array, None}) -> np.array:
        # to be overridden by subclasses that have a closed-form solution
        raise ValueError('an optimizer with an analytical solution has not been specified')

    def _calculate_weight(self):
        """
        Return a DataFrame with the same index and columns as the signal DataFrame containing the optimized weights.
        A list of price return DataFrames is used to calculate a list of covariance matrices and mean return vectors
        when applicable. If specified and when applicable, the script will use an analytical solution of the optimization
        problem.
        :return: pd.DataFrame
        """
        # retrieve a list of DataFrames containing price returns
        return_df_list = self._get_price_return_df_list()
        # using the price returns calculate the inputs to be used in the optimizer
        # inputs should all be numpy arrays
        cov_matrix_list = self._get_covariance_matrix_list(return_df_list)
        if self.calculate_mean_returns:
            mean_return_list = self._get_mean_return_array_list(return_df_list)
        else:
            mean_return_list = self.signal_df.shape[0] * [None]
        # only use the theoretical optimizer when there are no constraints and if an analytical solution is available
        use_theoretical_optimizer = (com.count_not_none(self.min_instrument_weight, self.max_instrument_weight) == 0) \
            and self.min_total_weight == self.max_total_weight and self.has_analytical_solution
        # list of list of eligible tickers (i.e. having a defined signal) for each signal observation date
        eligible_tickers_list = self._get_eligible_tickers_list()
        # loop through each observation date and use the optimizer to calculate the weights
        prev_eligible_tickers = eligible_tickers_list[0]
        optimized_weight_list = []  # the solved weight arrays are stored here
        self._counter = 0  # used to display progress
        self._total = len(return_df_list)  # used to display progress
        for i in range(self.signal_df.shape[0]):
            if use_theoretical_optimizer:
                # no initial guess is needed when using a theoretical/analytical solution
                optimized_weight = self._theoretical_optimizer(
                    cov_matrix=cov_matrix_list[i],
                    mean_returns=mean_return_list[i]
                )
            else:
                # an initial guess is needed when using an optimizer with constraints
                # this initial guess is the previous solved weights except at the start or when the instruments changes
                new_eligible_tickers = eligible_tickers_list[i]
                if i == 0 or prev_eligible_tickers != new_eligible_tickers:
                    initial_guess = None  # equal weights by default inside the optimization function
                else:
                    initial_guess = optimized_weight_list[i - 1]  # previous solved weights
                optimized_weight = self._optimizer(
                    cov_matrix=cov_matrix_list[i],
                    mean_returns=mean_return_list[i],
                    initial_guess=initial_guess
                )
                prev_eligible_tickers = new_eligible_tickers
            # display progress
            self._counter += 1
            logger.info('progress: {}%...'.format(round(100 * self._counter / self._total, 2)))
            optimized_weight_list.append(optimized_weight)
        # reformat result as a DataFrame; tickers that were not eligible on a date keep a 0.0 weight
        optimized_weight_df = pd.DataFrame(np.zeros((self.signal_df.shape[0], self.signal_df.shape[1])),
                                           index=self.signal_df.index, columns=self.signal_df.columns)
        obs_calendar = self.signal_df.index
        for i in range(self.signal_df.shape[0]):
            obs_date = obs_calendar[i]
            eligible_tickers = eligible_tickers_list[i]
            optimized_weight_df.loc[obs_date, eligible_tickers] = optimized_weight_list[i]
        return optimized_weight_df

    @staticmethod
    def _get_covariance_matrix_list(return_df_list: list):
        """
        Calculates a covariance matrix for each price return DataFrame in the given list. Returns a list of numpy arrays
        :param return_df_list: list of DataFrames
        :return: list of numpy arrays
        """
        # np.cov expects variables in rows, hence the transpose
        cov_matrix_list = [np.cov(np.transpose(return_df.values).astype(float)) for return_df in return_df_list]
        return cov_matrix_list

    @staticmethod
    def _get_mean_return_array_list(return_df_list: list):
        """
        Calculates the mean returns for each price return DataFrame in the given list. Returns a list of numpy arrays
        :param return_df_list: list of DataFrames
        :return: list of numpy arrays
        """
        mean_return_list = [return_df.mean().values for return_df in return_df_list]
        return mean_return_list

    # ------------------------------------------------------------------------------------------------------------------
    # get and setter methods
    @property
    def price_return_lag(self):
        """Lag used when computing price returns; alias of price_obs_freq."""
        return self.price_obs_freq

    @price_return_lag.setter
    def price_return_lag(self, price_return_lag: {int, str}):
        self.price_obs_freq = price_return_lag
class MinimumVarianceWeight(_OptimizedWeight):
"""Class definition for MinimumVarianceWeight"""
def __init__(self, observation_window: int, price_return_lag: int = 1, max_instrument_weight: float = None,
min_instrument_weight: float = None, total_allocation: float = 1.0, signal_df: pd.DataFrame = None,
total_return: bool = False, currency: str = None, non_daily_price_obs: bool = False,
clean_data: bool = True):
super().__init__(signal_df=signal_df, total_return=total_return, currency=currency,
max_instrument_weight=max_instrument_weight, min_instrument_weight=min_instrument_weight,
max_total_weight=total_allocation, min_total_weight=total_allocation,
price_return_lag=price_return_lag, observation_window=observation_window,
calculate_mean_returns=False, has_analytical_solution=True,
non_daily_price_obs=non_daily_price_obs, clean_data=clean_data)
self._total_allocation = total_allocation
def _optimizer(self, cov_matrix: np.array, mean_returns: np.array, initial_guess: np.array):
annualization_factor = self.get_annualization_factor()
return minimum_variance_portfolio_weights_with_constraints(
covariance_matrix=cov_matrix, initial_guess=initial_guess, max_total_weight=self.total_allocation,
max_instrument_weight=self.max_instrument_weight, min_instrument_weight=self.min_instrument_weight,
annualizing_factor=annualization_factor
| |
= None
self.systemTags = OptionalList()
self.userTags = OptionalList()
APIQUERYVOLUMESNAPSHOTREPLY_FULL_NAME = 'org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotReply'
class APIQueryVolumeSnapshotReply(object):
    """Python mirror of the Java API class named by FULL_NAME (reply to a volume snapshot query)."""
    FULL_NAME='org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotReply'
    def __init__(self):
        self.inventories = OptionalList()  # query results
        self.total = None  # presumably the total match count when requested — confirm against API docs
        self.success = None
        self.error = None
APIQUERYVOLUMESNAPSHOTTREEMSG_FULL_NAME = 'org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotTreeMsg'
class APIQueryVolumeSnapshotTreeMsg(object):
    """Python mirror of the Java API class named by FULL_NAME (query for volume snapshot trees)."""
    FULL_NAME='org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotTreeMsg'
    def __init__(self):
        #mandatory field
        self.conditions = NotNoneList()
        # pagination / shaping options
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYVOLUMESNAPSHOTTREEREPLY_FULL_NAME = 'org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotTreeReply'
class APIQueryVolumeSnapshotTreeReply(object):
    """Python mirror of the Java API class named by FULL_NAME (reply to a snapshot tree query)."""
    FULL_NAME='org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotTreeReply'
    def __init__(self):
        self.inventories = OptionalList()  # query results
        self.total = None  # presumably the total match count when requested — confirm against API docs
        self.success = None
        self.error = None
APIREVERTVOLUMEFROMSNAPSHOTMSG_FULL_NAME = 'org.zstack.header.storage.snapshot.APIRevertVolumeFromSnapshotMsg'
class APIRevertVolumeFromSnapshotMsg(object):
    """Message to revert a volume from the snapshot identified by `uuid`."""
    FULL_NAME='org.zstack.header.storage.snapshot.APIRevertVolumeFromSnapshotMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIUPDATEVOLUMESNAPSHOTMSG_FULL_NAME = 'org.zstack.header.storage.snapshot.APIUpdateVolumeSnapshotMsg'
class APIUpdateVolumeSnapshotMsg(object):
    """Message to update name/description of the snapshot given by `uuid`."""
    FULL_NAME='org.zstack.header.storage.snapshot.APIUpdateVolumeSnapshotMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.name = None
        self.description = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICREATESYSTEMTAGMSG_FULL_NAME = 'org.zstack.header.tag.APICreateSystemTagMsg'
class APICreateSystemTagMsg(object):
    """Message to create a system tag; resourceType/resourceUuid/tag are mandatory."""
    FULL_NAME='org.zstack.header.tag.APICreateSystemTagMsg'
    def __init__(self):
        #mandatory field
        self.resourceType = NotNoneField()
        #mandatory field
        self.resourceUuid = NotNoneField()
        #mandatory field
        self.tag = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICREATEUSERTAGMSG_FULL_NAME = 'org.zstack.header.tag.APICreateUserTagMsg'
class APICreateUserTagMsg(object):
    """Message to create a user tag; resourceType/resourceUuid/tag are mandatory."""
    FULL_NAME='org.zstack.header.tag.APICreateUserTagMsg'
    def __init__(self):
        #mandatory field
        self.resourceType = NotNoneField()
        #mandatory field
        self.resourceUuid = NotNoneField()
        #mandatory field
        self.tag = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETETAGMSG_FULL_NAME = 'org.zstack.header.tag.APIDeleteTagMsg'
class APIDeleteTagMsg(object):
    """Message to delete the tag identified by `uuid`."""
    FULL_NAME='org.zstack.header.tag.APIDeleteTagMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.deleteMode = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYSYSTEMTAGMSG_FULL_NAME = 'org.zstack.header.tag.APIQuerySystemTagMsg'
class APIQuerySystemTagMsg(object):
    """Query message for system tags; `conditions` is mandatory."""
    FULL_NAME='org.zstack.header.tag.APIQuerySystemTagMsg'
    def __init__(self):
        #mandatory field
        self.conditions = NotNoneList()
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYSYSTEMTAGREPLY_FULL_NAME = 'org.zstack.header.tag.APIQuerySystemTagReply'
class APIQuerySystemTagReply(object):
    """Reply holding queried system-tag inventories."""

    FULL_NAME = 'org.zstack.header.tag.APIQuerySystemTagReply'

    def __init__(self):
        # Standard query-reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.total = None
        self.inventories = OptionalList()
APIQUERYTAGMSG_FULL_NAME = 'org.zstack.header.tag.APIQueryTagMsg'
class APIQueryTagMsg(object):
    """Query message for tags; `conditions` is mandatory, `systemTag` optional."""
    FULL_NAME='org.zstack.header.tag.APIQueryTagMsg'
    def __init__(self):
        self.systemTag = None
        #mandatory field
        self.conditions = NotNoneList()
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYTAGREPLY_FULL_NAME = 'org.zstack.header.tag.APIQueryTagReply'
class APIQueryTagReply(object):
    """Reply holding queried tag inventories."""

    FULL_NAME = 'org.zstack.header.tag.APIQueryTagReply'

    def __init__(self):
        # Standard query-reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.total = None
        self.inventories = OptionalList()
APIQUERYUSERTAGMSG_FULL_NAME = 'org.zstack.header.tag.APIQueryUserTagMsg'
class APIQueryUserTagMsg(object):
    """Query message for user tags; `conditions` is mandatory."""
    FULL_NAME='org.zstack.header.tag.APIQueryUserTagMsg'
    def __init__(self):
        #mandatory field
        self.conditions = NotNoneList()
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYUSERTAGREPLY_FULL_NAME = 'org.zstack.header.tag.APIQueryUserTagReply'
class APIQueryUserTagReply(object):
    """Reply holding queried user-tag inventories."""

    FULL_NAME = 'org.zstack.header.tag.APIQueryUserTagReply'

    def __init__(self):
        # Standard query-reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.total = None
        self.inventories = OptionalList()
APIUPDATESYSTEMTAGMSG_FULL_NAME = 'org.zstack.header.tag.APIUpdateSystemTagMsg'
class APIUpdateSystemTagMsg(object):
    """Message to update the system tag given by `uuid`; `tag` is mandatory."""
    FULL_NAME='org.zstack.header.tag.APIUpdateSystemTagMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        #mandatory field
        self.tag = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEVIPQOSMSG_FULL_NAME = 'org.zstack.header.vipQos.APIDeleteVipQosMsg'
class APIDeleteVipQosMsg(object):
    """Message to delete VIP QoS for `uuid`, optionally scoped to `port`."""
    FULL_NAME='org.zstack.header.vipQos.APIDeleteVipQosMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.port = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETVIPQOSMSG_FULL_NAME = 'org.zstack.header.vipQos.APIGetVipQosMsg'
class APIGetVipQosMsg(object):
    """Message to fetch VIP QoS settings for an optional `uuid`."""
    FULL_NAME='org.zstack.header.vipQos.APIGetVipQosMsg'
    def __init__(self):
        self.uuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETVIPQOSREPLY_FULL_NAME = 'org.zstack.header.vipQos.APIGetVipQosReply'
class APIGetVipQosReply(object):
    """Reply holding VIP QoS inventories."""

    FULL_NAME = 'org.zstack.header.vipQos.APIGetVipQosReply'

    def __init__(self):
        # Reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.inventories = OptionalList()
APISETVIPQOSMSG_FULL_NAME = 'org.zstack.header.vipQos.APISetVipQosMsg'
class APISetVipQosMsg(object):
    """Message to set in/outbound bandwidth QoS on the VIP given by `uuid`."""
    FULL_NAME='org.zstack.header.vipQos.APISetVipQosMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.port = None
        self.outboundBandwidth = None
        self.inboundBandwidth = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIATTACHISOTOVMINSTANCEMSG_FULL_NAME = 'org.zstack.header.vm.APIAttachIsoToVmInstanceMsg'
class APIAttachIsoToVmInstanceMsg(object):
    """Message to attach ISO `isoUuid` to VM `vmInstanceUuid` (both mandatory)."""
    FULL_NAME='org.zstack.header.vm.APIAttachIsoToVmInstanceMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        #mandatory field
        self.isoUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIATTACHL3NETWORKTOVMMSG_FULL_NAME = 'org.zstack.header.vm.APIAttachL3NetworkToVmMsg'
class APIAttachL3NetworkToVmMsg(object):
    """Message to attach L3 network `l3NetworkUuid` to VM `vmInstanceUuid`."""
    FULL_NAME='org.zstack.header.vm.APIAttachL3NetworkToVmMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        #mandatory field
        self.l3NetworkUuid = NotNoneField()
        self.staticIp = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICHANGEINSTANCEOFFERINGMSG_FULL_NAME = 'org.zstack.header.vm.APIChangeInstanceOfferingMsg'
class APIChangeInstanceOfferingMsg(object):
    """Message to change a VM's instance offering (both uuids mandatory)."""
    FULL_NAME='org.zstack.header.vm.APIChangeInstanceOfferingMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        #mandatory field
        self.instanceOfferingUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICHANGEVMIMAGEMSG_FULL_NAME = 'org.zstack.header.vm.APIChangeVmImageMsg'
class APIChangeVmImageMsg(object):
    """Message to change VM `vmInstanceUuid` to image `imageUuid` (both mandatory)."""
    FULL_NAME='org.zstack.header.vm.APIChangeVmImageMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        #mandatory field
        self.imageUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICHANGEVMPASSWORDMSG_FULL_NAME = 'org.zstack.header.vm.APIChangeVmPasswordMsg'
class APIChangeVmPasswordMsg(object):
    """Message to change the password of `account` on VM `uuid` (all mandatory)."""
    FULL_NAME='org.zstack.header.vm.APIChangeVmPasswordMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        #mandatory field
        #valid regex values: [\da-zA-Z-`=\\\[\];',./~!@#$%^&*()_+|{}:"<>?]{1,}
        self.password = NotNoneField()
        #mandatory field
        self.account = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICLONEVMINSTANCEMSG_FULL_NAME = 'org.zstack.header.vm.APICloneVmInstanceMsg'
class APICloneVmInstanceMsg(object):
    """Message to clone VM `vmInstanceUuid` into one clone per entry in `names`."""
    FULL_NAME='org.zstack.header.vm.APICloneVmInstanceMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        #valid values: [InstantStart, JustCreate]
        self.strategy = None
        #mandatory field
        self.names = NotNoneList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICREATEVMINSTANCEMSG_FULL_NAME = 'org.zstack.header.vm.APICreateVmInstanceMsg'
class APICreateVmInstanceMsg(object):
    """Message to create a VM; name, offering, image and L3 networks are mandatory."""
    FULL_NAME='org.zstack.header.vm.APICreateVmInstanceMsg'
    def __init__(self):
        #mandatory field
        self.name = NotNoneField()
        #mandatory field
        self.instanceOfferingUuid = NotNoneField()
        #mandatory field
        self.imageUuid = NotNoneField()
        #mandatory field
        self.l3NetworkUuids = NotNoneList()
        #valid values: [UserVm, ApplianceVm]
        self.type = None
        self.rootDiskOfferingUuid = None
        self.dataDiskOfferingUuids = OptionalList()
        self.zoneUuid = None
        self.clusterUuid = None
        self.hostUuid = None
        self.primaryStorageUuidForRootVolume = None
        self.description = None
        self.defaultL3NetworkUuid = None
        #valid values: [InstantStart, JustCreate]
        self.strategy = None
        self.resourceUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETENICQOSMSG_FULL_NAME = 'org.zstack.header.vm.APIDeleteNicQosMsg'
class APIDeleteNicQosMsg(object):
    """Message to delete NIC QoS on `uuid` for `direction` (in/out)."""
    FULL_NAME='org.zstack.header.vm.APIDeleteNicQosMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        #mandatory field
        #valid values: [in, out]
        self.direction = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEVMCONSOLEPASSWORDMSG_FULL_NAME = 'org.zstack.header.vm.APIDeleteVmConsolePasswordMsg'
class APIDeleteVmConsolePasswordMsg(object):
    """Message to delete the console password of the VM given by `uuid`."""
    FULL_NAME='org.zstack.header.vm.APIDeleteVmConsolePasswordMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEVMHOSTNAMEMSG_FULL_NAME = 'org.zstack.header.vm.APIDeleteVmHostnameMsg'
class APIDeleteVmHostnameMsg(object):
    """Message to delete the hostname of the VM given by `uuid`."""
    FULL_NAME='org.zstack.header.vm.APIDeleteVmHostnameMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.deleteMode = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEVMSSHKEYMSG_FULL_NAME = 'org.zstack.header.vm.APIDeleteVmSshKeyMsg'
class APIDeleteVmSshKeyMsg(object):
    """Message to delete the SSH key of the VM given by optional `uuid`."""
    FULL_NAME='org.zstack.header.vm.APIDeleteVmSshKeyMsg'
    def __init__(self):
        self.uuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEVMSTATICIPMSG_FULL_NAME = 'org.zstack.header.vm.APIDeleteVmStaticIpMsg'
class APIDeleteVmStaticIpMsg(object):
    """Message to delete a VM's static IP on L3 network `l3NetworkUuid`."""
    FULL_NAME='org.zstack.header.vm.APIDeleteVmStaticIpMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        #mandatory field
        self.l3NetworkUuid = NotNoneField()
        self.deleteMode = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDESTROYVMINSTANCEMSG_FULL_NAME = 'org.zstack.header.vm.APIDestroyVmInstanceMsg'
class APIDestroyVmInstanceMsg(object):
    """Message to destroy the VM given by `uuid`."""
    FULL_NAME='org.zstack.header.vm.APIDestroyVmInstanceMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.deleteMode = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDETACHISOFROMVMINSTANCEMSG_FULL_NAME = 'org.zstack.header.vm.APIDetachIsoFromVmInstanceMsg'
class APIDetachIsoFromVmInstanceMsg(object):
    """Message to detach an ISO (optional `isoUuid`) from VM `vmInstanceUuid`."""
    FULL_NAME='org.zstack.header.vm.APIDetachIsoFromVmInstanceMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        self.isoUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDETACHL3NETWORKFROMVMMSG_FULL_NAME = 'org.zstack.header.vm.APIDetachL3NetworkFromVmMsg'
class APIDetachL3NetworkFromVmMsg(object):
    """Message to detach the NIC given by `vmNicUuid` from its VM."""
    FULL_NAME='org.zstack.header.vm.APIDetachL3NetworkFromVmMsg'
    def __init__(self):
        #mandatory field
        self.vmNicUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIEXPUNGEVMINSTANCEMSG_FULL_NAME = 'org.zstack.header.vm.APIExpungeVmInstanceMsg'
class APIExpungeVmInstanceMsg(object):
    """Message to expunge the VM given by `uuid`."""
    FULL_NAME='org.zstack.header.vm.APIExpungeVmInstanceMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETCANDIDATEISOFORATTACHINGVMMSG_FULL_NAME = 'org.zstack.header.vm.APIGetCandidateIsoForAttachingVmMsg'
class APIGetCandidateIsoForAttachingVmMsg(object):
    """Message to list ISOs attachable to VM `vmInstanceUuid`."""
    FULL_NAME='org.zstack.header.vm.APIGetCandidateIsoForAttachingVmMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETCANDIDATEISOFORATTACHINGVMREPLY_FULL_NAME = 'org.zstack.header.vm.APIGetCandidateIsoForAttachingVmReply'
class APIGetCandidateIsoForAttachingVmReply(object):
    """Reply holding candidate-ISO inventories."""

    FULL_NAME = 'org.zstack.header.vm.APIGetCandidateIsoForAttachingVmReply'

    def __init__(self):
        # Reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.inventories = OptionalList()
APIGETCANDIDATEPRIMARYSTORAGESFORCREATINGVMMSG_FULL_NAME = 'org.zstack.header.vm.APIGetCandidatePrimaryStoragesForCreatingVmMsg'
class APIGetCandidatePrimaryStoragesForCreatingVmMsg(object):
    """Message to list candidate primary storages for a planned VM creation."""
    FULL_NAME='org.zstack.header.vm.APIGetCandidatePrimaryStoragesForCreatingVmMsg'
    def __init__(self):
        #mandatory field
        self.imageUuid = NotNoneField()
        self.backupStorageUuid = None
        #mandatory field
        self.l3NetworkUuids = NotNoneList()
        self.rootDiskOfferingUuid = None
        self.dataDiskOfferingUuids = OptionalList()
        self.zoneUuid = None
        self.clusterUuid = None
        self.defaultL3NetworkUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETCANDIDATEPRIMARYSTORAGESFORCREATINGVMREPLY_FULL_NAME = 'org.zstack.header.vm.APIGetCandidatePrimaryStoragesForCreatingVmReply'
class APIGetCandidatePrimaryStoragesForCreatingVmReply(object):
    """Reply holding candidate primary storages for root and data volumes."""

    FULL_NAME = 'org.zstack.header.vm.APIGetCandidatePrimaryStoragesForCreatingVmReply'

    def __init__(self):
        # Reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.rootVolumePrimaryStorages = OptionalList()
        self.dataVolumePrimaryStorages = OptionalMap()
APIGETCANDIDATEVMFORATTACHINGISOMSG_FULL_NAME = 'org.zstack.header.vm.APIGetCandidateVmForAttachingIsoMsg'
class APIGetCandidateVmForAttachingIsoMsg(object):
    """Message to list VMs that can attach the ISO given by `isoUuid`."""
    FULL_NAME='org.zstack.header.vm.APIGetCandidateVmForAttachingIsoMsg'
    def __init__(self):
        #mandatory field
        self.isoUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETCANDIDATEVMFORATTACHINGISOREPLY_FULL_NAME = 'org.zstack.header.vm.APIGetCandidateVmForAttachingIsoReply'
class APIGetCandidateVmForAttachingIsoReply(object):
    """Reply holding candidate-VM inventories."""

    FULL_NAME = 'org.zstack.header.vm.APIGetCandidateVmForAttachingIsoReply'

    def __init__(self):
        # Reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.inventories = OptionalList()
APIGETCANDIDATEZONESCLUSTERSHOSTSFORCREATINGVMMSG_FULL_NAME = 'org.zstack.header.vm.APIGetCandidateZonesClustersHostsForCreatingVmMsg'
class APIGetCandidateZonesClustersHostsForCreatingVmMsg(object):
    """Message to list zones/clusters/hosts usable for a planned VM creation."""
    FULL_NAME='org.zstack.header.vm.APIGetCandidateZonesClustersHostsForCreatingVmMsg'
    def __init__(self):
        #mandatory field
        self.instanceOfferingUuid = NotNoneField()
        #mandatory field
        self.imageUuid = NotNoneField()
        #mandatory field
        self.l3NetworkUuids = NotNoneList()
        self.rootDiskOfferingUuid = None
        self.dataDiskOfferingUuids = OptionalList()
        self.zoneUuid = None
        self.clusterUuid = None
        self.defaultL3NetworkUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETCANDIDATEZONESCLUSTERSHOSTSFORCREATINGVMREPLY_FULL_NAME = 'org.zstack.header.vm.APIGetCandidateZonesClustersHostsForCreatingVmReply'
class APIGetCandidateZonesClustersHostsForCreatingVmReply(object):
    """Reply listing candidate zones, clusters and hosts."""

    FULL_NAME = 'org.zstack.header.vm.APIGetCandidateZonesClustersHostsForCreatingVmReply'

    def __init__(self):
        # Reply envelope: payload plus success/error status.
        self.error = None
        self.success = None
        self.zones = OptionalList()
        self.clusters = OptionalList()
        self.hosts = OptionalList()
APIGETIMAGECANDIDATESFORVMTOCHANGEMSG_FULL_NAME = 'org.zstack.header.vm.APIGetImageCandidatesForVmToChangeMsg'
class APIGetImageCandidatesForVmToChangeMsg(object):
| |
# -*- coding: utf-8 -*-
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for fake_filesystem module."""
import contextlib
import errno
import os
import stat
import sys
import time
import unittest
from pyfakefs import fake_filesystem
from pyfakefs.fake_filesystem import set_uid, set_gid, is_root, reset_ids
from pyfakefs.helpers import IS_WIN
from pyfakefs.tests.test_utils import DummyTime, TestCase, RealFsTestCase
class FakeDirectoryUnitTest(TestCase):
    """Tests for FakeFile/FakeDirectory entry management, paths and sizing."""
    def setUp(self):
        # Patch time.time with a deterministic counter (starts at 10,
        # step 1) so timestamp assertions below are reproducible.
        self.orig_time = time.time
        time.time = DummyTime(10, 1)
        self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        self.os = fake_filesystem.FakeOsModule(self.filesystem)
        self.fake_file = fake_filesystem.FakeFile(
            'foobar', contents='dummy_file', filesystem=self.filesystem)
        self.fake_dir = fake_filesystem.FakeDirectory(
            'somedir', filesystem=self.filesystem)
    def tearDown(self):
        # Restore the real clock patched out in setUp.
        time.time = self.orig_time
    def test_new_file_and_directory(self):
        self.assertTrue(stat.S_IFREG & self.fake_file.st_mode)
        self.assertTrue(stat.S_IFDIR & self.fake_dir.st_mode)
        self.assertEqual({}, self.fake_dir.contents)
        self.assertEqual(10, self.fake_file.st_ctime)
    def test_add_entry(self):
        self.fake_dir.add_entry(self.fake_file)
        self.assertEqual({'foobar': self.fake_file}, self.fake_dir.contents)
    def test_get_entry(self):
        self.fake_dir.add_entry(self.fake_file)
        self.assertEqual(self.fake_file, self.fake_dir.get_entry('foobar'))
    def test_path(self):
        self.filesystem.root.add_entry(self.fake_dir)
        self.fake_dir.add_entry(self.fake_file)
        self.assertEqual('/somedir/foobar', self.fake_file.path)
        self.assertEqual('/somedir', self.fake_dir.path)
    def test_path_with_drive(self):
        self.filesystem.is_windows_fs = True
        dir_path = 'C:/foo/bar/baz'
        self.filesystem.create_dir(dir_path)
        dir_object = self.filesystem.get_object(dir_path)
        self.assertEqual(dir_path, dir_object.path)
    def test_path_after_chdir(self):
        dir_path = '/foo/bar/baz'
        self.filesystem.create_dir(dir_path)
        self.os.chdir(dir_path)
        dir_object = self.filesystem.get_object(dir_path)
        self.assertEqual(dir_path, dir_object.path)
    def test_path_after_chdir_with_drive(self):
        self.filesystem.is_windows_fs = True
        dir_path = 'C:/foo/bar/baz'
        self.filesystem.create_dir(dir_path)
        self.os.chdir(dir_path)
        dir_object = self.filesystem.get_object(dir_path)
        self.assertEqual(dir_path, dir_object.path)
    def test_remove_entry(self):
        self.fake_dir.add_entry(self.fake_file)
        self.assertEqual(self.fake_file, self.fake_dir.get_entry('foobar'))
        self.fake_dir.remove_entry('foobar')
        with self.assertRaises(KeyError):
            self.fake_dir.get_entry('foobar')
    def test_should_throw_if_set_size_is_not_integer(self):
        def set_size():
            self.fake_file.size = 0.1
        self.assert_raises_os_error(errno.ENOSPC, set_size)
    def test_should_throw_if_set_size_is_negative(self):
        def set_size():
            self.fake_file.size = -1
        self.assert_raises_os_error(errno.ENOSPC, set_size)
    def test_produce_empty_file_if_set_size_is_zero(self):
        self.fake_file.size = 0
        self.assertEqual('', self.fake_file.contents)
    # NOTE(review): identical body to
    # test_produce_empty_file_if_set_size_is_zero above — consider
    # removing one of the two.
    def test_sets_content_empty_if_set_size_is_zero(self):
        self.fake_file.size = 0
        self.assertEqual('', self.fake_file.contents)
    def test_truncate_file_if_size_is_smaller_than_current_size(self):
        self.fake_file.size = 6
        self.assertEqual('dummy_', self.fake_file.contents)
    def test_leave_file_unchanged_if_size_is_equal_to_current_size(self):
        self.fake_file.size = 10
        self.assertEqual('dummy_file', self.fake_file.contents)
    def test_set_contents_to_dir_raises(self):
        # Regression test for #276
        self.filesystem.is_windows_fs = True
        self.assert_raises_os_error(
            errno.EISDIR, self.fake_dir.set_contents, 'a')
        self.filesystem.is_windows_fs = False
        self.assert_raises_os_error(
            errno.EISDIR, self.fake_dir.set_contents, 'a')
    def test_pads_with_nullbytes_if_size_is_greater_than_current_size(self):
        self.fake_file.size = 13
        self.assertEqual('dummy_file\0\0\0', self.fake_file.contents)
    def test_set_m_time(self):
        self.assertEqual(10, self.fake_file.st_mtime)
        self.fake_file.st_mtime = 13
        self.assertEqual(13, self.fake_file.st_mtime)
        self.fake_file.st_mtime = 131
        self.assertEqual(131, self.fake_file.st_mtime)
    def test_file_inode(self):
        filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        fake_os = fake_filesystem.FakeOsModule(filesystem)
        file_path = 'some_file1'
        filesystem.create_file(file_path, contents='contents here1')
        self.assertLess(0, fake_os.stat(file_path)[stat.ST_INO])
        file_obj = filesystem.get_object(file_path)
        file_obj.st_ino = 43
        self.assertEqual(43, fake_os.stat(file_path)[stat.ST_INO])
    def test_directory_inode(self):
        filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        fake_os = fake_filesystem.FakeOsModule(filesystem)
        dirpath = 'testdir'
        filesystem.create_dir(dirpath)
        self.assertLess(0, fake_os.stat(dirpath)[stat.ST_INO])
        dir_obj = filesystem.get_object(dirpath)
        dir_obj.st_ino = 43
        self.assertEqual(43, fake_os.stat(dirpath)[stat.ST_INO])
    def test_ordered_dirs(self):
        filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        filesystem.create_dir('/foo')
        filesystem.create_file('/foo/2')
        filesystem.create_file('/foo/4')
        filesystem.create_file('/foo/1')
        filesystem.create_file('/foo/3')
        fake_dir = filesystem.get_object('/foo')
        # Entries are expected in insertion order, not sorted order.
        self.assertEqual(['2', '4', '1', '3'], fake_dir.ordered_dirs)
class SetLargeFileSizeTest(TestCase):
    """Tests for FakeFile.set_large_file_size (size without real contents)."""
    def setUp(self):
        filesystem = fake_filesystem.FakeFilesystem()
        self.fake_file = fake_filesystem.FakeFile('foobar',
                                                  filesystem=filesystem)
    def test_should_throw_if_size_is_not_integer(self):
        self.assert_raises_os_error(errno.ENOSPC,
                                    self.fake_file.set_large_file_size, 0.1)
    def test_should_throw_if_size_is_negative(self):
        self.assert_raises_os_error(errno.ENOSPC,
                                    self.fake_file.set_large_file_size, -1)
    def test_sets_content_none_if_size_is_non_negative_integer(self):
        # Large files keep st_size but store no contents (None).
        self.fake_file.set_large_file_size(1000000000)
        self.assertEqual(None, self.fake_file.contents)
        self.assertEqual(1000000000, self.fake_file.st_size)
class NormalizePathTest(TestCase):
    """Tests for FakeFilesystem.absnormpath."""
    def setUp(self):
        self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        self.root_name = '/'
    def test_empty_path_should_get_normalized_to_root_path(self):
        self.assertEqual(self.root_name, self.filesystem.absnormpath(''))
    def test_root_path_remains_unchanged(self):
        self.assertEqual(self.root_name,
                         self.filesystem.absnormpath(self.root_name))
    def test_relative_path_forced_to_cwd(self):
        path = 'bar'
        self.filesystem.cwd = '/foo'
        self.assertEqual('/foo/bar', self.filesystem.absnormpath(path))
    def test_absolute_path_remains_unchanged(self):
        path = '/foo/bar'
        self.assertEqual(path, self.filesystem.absnormpath(path))
    def test_dotted_path_is_normalized(self):
        path = '/foo/..'
        self.assertEqual('/', self.filesystem.absnormpath(path))
        path = 'foo/../bar'
        self.assertEqual('/bar', self.filesystem.absnormpath(path))
    def test_dot_path_is_normalized(self):
        path = '.'
        self.assertEqual('/', self.filesystem.absnormpath(path))
class GetPathComponentsTest(TestCase):
    """Tests for FakeFilesystem._path_components path splitting."""
    def setUp(self):
        self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        self.root_name = '/'
    def test_root_path_should_return_empty_list(self):
        self.assertEqual([], self.filesystem._path_components(self.root_name))
    def test_empty_path_should_return_empty_list(self):
        self.assertEqual([], self.filesystem._path_components(''))
    def test_relative_path_with_one_component_should_return_component(self):
        self.assertEqual(['foo'], self.filesystem._path_components('foo'))
    def test_absolute_path_with_one_component_should_return_component(self):
        self.assertEqual(['foo'], self.filesystem._path_components('/foo'))
    def test_two_level_relative_path_should_return_components(self):
        self.assertEqual(['foo', 'bar'],
                         self.filesystem._path_components('foo/bar'))
    def test_two_level_absolute_path_should_return_components(self):
        self.assertEqual(['foo', 'bar'],
                         self.filesystem._path_components('/foo/bar'))
class FakeFilesystemUnitTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.root_name = '/'
self.fake_file = fake_filesystem.FakeFile(
'foobar', filesystem=self.filesystem)
self.fake_child = fake_filesystem.FakeDirectory(
'foobaz', filesystem=self.filesystem)
self.fake_grandchild = fake_filesystem.FakeDirectory(
'quux', filesystem=self.filesystem)
def test_new_filesystem(self):
self.assertEqual('/', self.filesystem.path_separator)
self.assertTrue(stat.S_IFDIR & self.filesystem.root.st_mode)
self.assertEqual(self.root_name, self.filesystem.root.name)
self.assertEqual({}, self.filesystem.root.contents)
def test_none_raises_type_error(self):
with self.assertRaises(TypeError):
self.filesystem.exists(None)
def test_empty_string_does_not_exist(self):
self.assertFalse(self.filesystem.exists(''))
def test_exists_root(self):
self.assertTrue(self.filesystem.exists(self.root_name))
def test_exists_unadded_file(self):
self.assertFalse(self.filesystem.exists(self.fake_file.name))
def test_not_exists_subpath_named_like_file_contents(self):
# Regression test for #219
file_path = "/foo/bar"
self.filesystem.create_file(file_path, contents='baz')
self.assertFalse(self.filesystem.exists(file_path + "/baz"))
def test_get_root_object(self):
self.assertEqual(self.filesystem.root,
self.filesystem.get_object(self.root_name))
def test_add_object_to_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual({'foobar': self.fake_file},
self.filesystem.root.contents)
def test_exists_added_file(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertTrue(self.filesystem.exists(self.fake_file.name))
def test_exists_relative_path_posix(self):
self.filesystem.is_windows_fs = False
self.filesystem.create_file('/a/b/file_one')
self.filesystem.create_file('/a/c/file_two')
self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
self.assertFalse(self.filesystem.exists('a/b/../z/d'))
self.assertFalse(self.filesystem.exists('a/b/../z/../c/file_two'))
self.filesystem.cwd = '/a/c'
self.assertTrue(self.filesystem.exists('../b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
self.assertFalse(self.filesystem.exists('../z/file_one'))
self.assertFalse(self.filesystem.exists('../z/../c/file_two'))
def test_exists_relative_path_windows(self):
self.filesystem.is_windows_fs = True
self.filesystem.is_macos = False
self.filesystem.create_file('/a/b/file_one')
self.filesystem.create_file('/a/c/file_two')
self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
self.assertFalse(self.filesystem.exists('a/b/../z/d'))
self.assertTrue(self.filesystem.exists('a/b/../z/../c/file_two'))
self.filesystem.cwd = '/a/c'
self.assertTrue(self.filesystem.exists('../b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
self.assertFalse(self.filesystem.exists('../z/file_one'))
self.assertTrue(self.filesystem.exists('../z/../c/file_two'))
def test_get_object_from_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))
def test_get_nonexistent_object_from_root_error(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))
self.assert_raises_os_error(
errno.ENOENT, self.filesystem.get_object, 'some_bogus_filename')
def test_remove_object_from_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.filesystem.remove_object(self.fake_file.name)
self.assert_raises_os_error(
errno.ENOENT, self.filesystem.get_object, self.fake_file.name)
def test_remove_nonexisten_object_from_root_error(self):
self.assert_raises_os_error(
errno.ENOENT, self.filesystem.remove_object, 'some_bogus_filename')
def test_exists_removed_file(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.filesystem.remove_object(self.fake_file.name)
self.assertFalse(self.filesystem.exists(self.fake_file.name))
def test_add_object_to_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assertEqual(
{self.fake_file.name: self.fake_file},
self.filesystem.root.get_entry(self.fake_child.name).contents)
def test_add_object_to_regular_file_error_posix(self):
self.filesystem.is_windows_fs = False
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_os_error(errno.ENOTDIR,
self.filesystem.add_object,
self.fake_file.name, self.fake_file)
def test_add_object_to_regular_file_error_windows(self):
self.filesystem.is_windows_fs = True
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_os_error(errno.ENOENT,
self.filesystem.add_object,
self.fake_file.name, self.fake_file)
def test_exists_file_added_to_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.assertTrue(self.filesystem.exists(path))
def test_get_object_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assertEqual(self.fake_file,
self.filesystem.get_object(
self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)))
def test_get_nonexistent_object_from_child_error(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assert_raises_os_error(errno.ENOENT, self.filesystem.get_object,
self.filesystem.joinpaths(
self.fake_child.name,
'some_bogus_filename'))
def test_remove_object_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
target_path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.filesystem.remove_object(target_path)
self.assert_raises_os_error(errno.ENOENT, self.filesystem.get_object,
target_path)
def test_remove_object_from_child_error(self):
    """Removing a name that never existed in a child dir raises ENOENT."""
    self.filesystem.add_object(self.root_name, self.fake_child)
    missing_path = self.filesystem.joinpaths(
        self.fake_child.name, 'some_bogus_filename')
    self.assert_raises_os_error(
        errno.ENOENT, self.filesystem.remove_object, missing_path)
def test_remove_object_from_non_directory_error(self):
    """Using a regular file as a path prefix in remove_object()
    fails with ENOTDIR."""
    self.filesystem.add_object(self.root_name, self.fake_file)
    self.assert_raises_os_error(
        errno.ENOTDIR, self.filesystem.remove_object,
        self.filesystem.joinpaths(
            # the parent component is a file, so the leaf name is irrelevant
            '%s' % self.fake_file.name,
            'file_does_not_matter_since_parent_not_a_directory'))
def test_exists_file_removed_from_child(self):
    """After removal from a child directory, exists() returns False."""
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_file)
    child_file_path = self.filesystem.joinpaths(
        self.fake_child.name, self.fake_file.name)
    self.filesystem.remove_object(child_file_path)
    self.assertFalse(self.filesystem.exists(child_file_path))
def test_operate_on_grandchild_directory(self):
    """Full add/get/exists/remove round-trip on a file two levels deep.

    Each step depends on the filesystem state left by the previous one,
    so the call order here is load-bearing.
    """
    self.filesystem.add_object(self.root_name, self.fake_child)
    self.filesystem.add_object(self.fake_child.name, self.fake_grandchild)
    grandchild_directory = self.filesystem.joinpaths(
        self.fake_child.name, self.fake_grandchild.name)
    grandchild_file = self.filesystem.joinpaths(
        grandchild_directory, self.fake_file.name)
    # not added yet: lookup must fail
    with self.assertRaises(OSError):
        self.filesystem.get_object(grandchild_file)
    self.filesystem.add_object(grandchild_directory, self.fake_file)
    self.assertEqual(self.fake_file,
                     self.filesystem.get_object(grandchild_file))
    self.assertTrue(self.filesystem.exists(grandchild_file))
    self.filesystem.remove_object(grandchild_file)
    # removed again: lookup must fail once more
    with self.assertRaises(OSError):
        self.filesystem.get_object(grandchild_file)
    self.assertFalse(self.filesystem.exists(grandchild_file))
def test_create_directory_in_root_directory(self):
    """create_dir() makes a directory entry directly under the root."""
    dir_path = 'foo'
    self.filesystem.create_dir(dir_path)
    created = self.filesystem.get_object(dir_path)
    self.assertEqual(os.path.basename(dir_path), created.name)
    self.assertTrue(stat.S_IFDIR & created.st_mode)
def test_create_directory_in_root_directory_already_exists_error(self):
    """Re-creating an existing root-level directory raises EEXIST."""
    dir_path = 'foo'
    self.filesystem.create_dir(dir_path)
    self.assert_raises_os_error(
        errno.EEXIST, self.filesystem.create_dir, dir_path)
def test_create_directory(self):
    """create_dir() builds nested directories, creating parents on demand."""
    def assert_dir_created(dir_path):
        # shared check: entry exists, carries the leaf name, is a directory
        entry = self.filesystem.get_object(dir_path)
        self.assertEqual(os.path.basename(dir_path), entry.name)
        self.assertTrue(stat.S_IFDIR & entry.st_mode)

    path = 'foo/bar/baz'
    self.filesystem.create_dir(path)
    assert_dir_created(path)
    # Create second directory to make sure first is OK.
    path = '%s/quux' % path
    self.filesystem.create_dir(path)
    assert_dir_created(path)
def test_create_directory_already_exists_error(self):
    """Re-creating an existing nested directory raises EEXIST."""
    nested_path = 'foo/bar/baz'
    self.filesystem.create_dir(nested_path)
    self.assert_raises_os_error(
        errno.EEXIST, self.filesystem.create_dir, nested_path)
def test_create_file_in_read_only_directory_raises_in_posix(self):
    """On POSIX, creating a file inside a 0o555 (read-only) directory
    raises EACCES — unless running as root, where it succeeds."""
    self.filesystem.is_windows_fs = False
    dir_path = '/foo/bar'
    self.filesystem.create_dir(dir_path, perm_bits=0o555)
    file_path = dir_path + '/baz'
    if not is_root():
        self.assert_raises_os_error(errno.EACCES,
                                    self.filesystem.create_file,
                                    file_path)
    else:
        # root bypasses directory write-permission checks
        self.filesystem.create_file(file_path)
        self.assertTrue(self.filesystem.exists(file_path))
def test_create_file_in_read_only_directory_possible_in_windows(self):
    """Windows semantics: creating a file in a 0o555 directory succeeds."""
    self.filesystem.is_windows_fs = True
    parent_dir = 'C:/foo/bar'
    self.filesystem.create_dir(parent_dir, perm_bits=0o555)
    new_file_path = parent_dir + '/baz'
    self.filesystem.create_file(new_file_path)
    self.assertTrue(self.filesystem.exists(new_file_path))
def test_create_file_in_current_directory(self):
    """A bare relative filename is created without materializing a parent
    entry; only the explicit './' spelling has an existing dirname."""
    path = 'foo'
    contents = 'dummy data'
    self.filesystem.create_file(path, contents=contents)
    self.assertTrue(self.filesystem.exists(path))
    # os.path.dirname('foo') == '' — not an existing entry
    self.assertFalse(self.filesystem.exists(os.path.dirname(path)))
    path = './%s' % path
    # os.path.dirname('./foo') == '.' — the current directory, which exists
    self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
def test_create_file_in_root_directory(self):
    """create_file() under '/' yields a regular file holding the data."""
    file_path = '/foo'
    data = 'dummy data'
    self.filesystem.create_file(file_path, contents=data)
    created = self.filesystem.get_object(file_path)
    self.assertTrue(self.filesystem.exists(file_path))
    self.assertTrue(self.filesystem.exists(os.path.dirname(file_path)))
    self.assertEqual(os.path.basename(file_path), created.name)
    self.assertTrue(stat.S_IFREG & created.st_mode)
    self.assertEqual(data, created.contents)
def test_create_file_with_size_but_no_content_creates_large_file(self):
    """st_size can be set without contents; contents then stays None."""
    large_path = 'large_foo_bar'
    self.filesystem.create_file(large_path, st_size=100000000)
    large_file = self.filesystem.get_object(large_path)
    self.assertEqual(None, large_file.contents)
    self.assertEqual(100000000, large_file.st_size)
def test_create_file_in_root_directory_already_exists_error(self):
    """Re-creating an existing root-level file raises EEXIST."""
    file_path = 'foo'
    self.filesystem.create_file(file_path)
    self.assert_raises_os_error(
        errno.EEXIST, self.filesystem.create_file, file_path)
def test_create_file(self):
    """create_file() creates missing parents, sets ownership, and returns
    the created file object.

    The fake uid/gid are 1 on Windows; on POSIX they mirror the real
    process ids.
    """
    path = 'foo/bar/baz'
    retval = self.filesystem.create_file(path, contents='dummy_data')
    self.assertTrue(self.filesystem.exists(path))
    self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
    new_file = self.filesystem.get_object(path)
    self.assertEqual(os.path.basename(path), new_file.name)
    if IS_WIN:
        self.assertEqual(1, new_file.st_uid)
        self.assertEqual(1, new_file.st_gid)
    else:
        self.assertEqual(os.getuid(), new_file.st_uid)
        self.assertEqual(os.getgid(), new_file.st_gid)
    self.assertEqual(new_file, retval)
def test_create_file_with_changed_ids(self):
    """New files pick up uid/gid overridden via set_uid()/set_gid()."""
    path = 'foo/bar/baz'
    set_uid(42)
    set_gid(2)
    self.filesystem.create_file(path)
    self.assertTrue(self.filesystem.exists(path))
    new_file = self.filesystem.get_object(path)
    self.assertEqual(42, new_file.st_uid)
    self.assertEqual(2, new_file.st_gid)
    # restore the real ids so later tests are unaffected
    reset_ids()
def test_empty_file_created_for_none_contents(self):
    """contents=None still creates a file that reads back as empty."""
    opener = fake_filesystem.FakeFileOpen(self.filesystem)
    file_path = 'foo/bar/baz'
    self.filesystem.create_file(file_path, contents=None)
    with opener(file_path) as opened_file:
        self.assertEqual('', opened_file.read())
def test_create_file_with_incorrect_mode_type(self):
    """A non-int mode argument to create_file() is rejected with TypeError."""
    with self.assertRaises(TypeError):
        self.filesystem.create_file('foo', 'bar')
def test_create_file_already_exists_error(self):
    """Re-creating an existing nested file raises EEXIST."""
    nested_path = 'foo/bar/baz'
    self.filesystem.create_file(nested_path, contents='dummy_data')
    self.assert_raises_os_error(
        errno.EEXIST, self.filesystem.create_file, nested_path)
def test_create_link(self):
    """A dangling symlink exists as a link object (S_IFLNK) but is not
    reported by exists() until its target is created."""
    path = 'foo/bar/baz'
    target_path = 'foo/bar/quux'
    new_file = self.filesystem.create_symlink(path, 'quux')
    # Neither the path nor the final target exists before we actually
    # write to one of them, even though the link appears in the file
    # system.
    self.assertFalse(self.filesystem.exists(path))
    self.assertFalse(self.filesystem.exists(target_path))
    self.assertTrue(stat.S_IFLNK & new_file.st_mode)
    # but once we write the linked to file, they both will exist.
    self.filesystem.create_file(target_path)
    self.assertTrue(self.filesystem.exists(path))
    self.assertTrue(self.filesystem.exists(target_path))
def test_resolve_object(self):
    """resolve() follows a symlink through to the target object."""
    target_path = 'dir/target'
    target_contents = '0123456789ABCDEF'
    link_name = 'x'
    self.filesystem.create_dir('dir')
    self.filesystem.create_file('dir/target', contents=target_contents)
    self.filesystem.create_symlink(link_name, target_path)
    resolved = self.filesystem.resolve(link_name)
    self.assertEqual('target', resolved.name)
    self.assertEqual(target_contents, resolved.contents)
def check_lresolve_object(self):
    """Shared check: lresolve() returns the link itself, not its target."""
    target_path = 'dir/target'
    target_contents = '0123456789ABCDEF'
    link_name = 'x'
    self.filesystem.create_dir('dir')
    self.filesystem.create_file('dir/target', contents=target_contents)
    self.filesystem.create_symlink(link_name, target_path)
    link_obj = self.filesystem.lresolve(link_name)
    self.assertEqual(link_name, link_obj.name)
    self.assertEqual(target_path, link_obj.contents)
def test_lresolve_object_windows(self):
    """Run the shared lresolve check under Windows path semantics."""
    self.filesystem.is_windows_fs = True
    self.check_lresolve_object()
def test_lresolve_object_posix(self):
    """Run the shared lresolve check under POSIX path semantics."""
    self.filesystem.is_windows_fs = False
    self.check_lresolve_object()
def check_directory_access_on_file(self, error_subtype):
    """Shared check: using a regular file as a path prefix fails with the
    OS-specific errno passed in as *error_subtype*."""
    self.filesystem.create_file('not_a_dir')
    self.assert_raises_os_error(
        error_subtype, self.filesystem.resolve, 'not_a_dir/foo')
    self.assert_raises_os_error(
        error_subtype, self.filesystem.lresolve, 'not_a_dir/foo/bar')
def test_directory_access_on_file_windows(self):
    """Windows reports ENOENT when a file is used as a directory."""
    self.filesystem.is_windows_fs = True
    self.check_directory_access_on_file(errno.ENOENT)
def test_directory_access_on_file_posix(self):
    """POSIX reports ENOTDIR when a file is used as a directory."""
    self.filesystem.is_windows_fs = False
    self.check_directory_access_on_file(errno.ENOTDIR)
def test_pickle_fs(self):
    """Regression test for #445"""
    import pickle
    # presumably open file objects are not picklable — cleared so that
    # pickle.dumps() on the filesystem succeeds (TODO confirm)
    self.filesystem.open_files = []
    p = pickle.dumps(self.filesystem)
    fs = pickle.loads(p)
    # round-tripped filesystem keeps the same tree and mount points
    self.assertEqual(str(fs.root), str(self.filesystem.root))
    self.assertEqual(fs.mount_points, self.filesystem.mount_points)
class CaseInsensitiveFakeFilesystemTest(TestCase):
def setUp(self):
    # fresh case-insensitive fake filesystem (plus fake os/os.path
    # wrappers) for every test
    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
    self.filesystem.is_case_sensitive = False
    self.os = fake_filesystem.FakeOsModule(self.filesystem)
    self.path = self.os.path
def test_get_object(self):
    """get_object() lookups succeed regardless of path case."""
    self.filesystem.create_dir('/foo/bar')
    self.filesystem.create_file('/foo/bar/baz')
    self.assertTrue(self.filesystem.get_object('/Foo/Bar/Baz'))
def test_remove_object(self):
    """remove_object() with different case deletes the real entry."""
    self.filesystem.create_dir('/foo/bar')
    self.filesystem.create_file('/foo/bar/baz')
    self.filesystem.remove_object('/Foo/Bar/Baz')
    self.assertFalse(self.filesystem.exists('/foo/bar/baz'))
def test_exists(self):
    """exists() matches paths case-insensitively in every component."""
    self.filesystem.create_dir('/Foo/Bar')
    self.assertTrue(self.filesystem.exists('/Foo/Bar'))
    self.assertTrue(self.filesystem.exists('/foo/bar'))
    # mixed-case file creation under an existing mixed-case directory
    self.filesystem.create_file('/foo/Bar/baz')
    self.assertTrue(self.filesystem.exists('/Foo/bar/BAZ'))
    self.assertTrue(self.filesystem.exists('/foo/bar/baz'))
def test_create_directory_with_different_case_root(self):
    """Differently-cased dir paths resolve to the same directory object."""
    self.filesystem.create_dir('/Foo/Bar')
    self.filesystem.create_dir('/foo/bar/baz')
    upper_cased = self.filesystem.get_object('/Foo/Bar')
    lower_cased = self.filesystem.get_object('/foo/bar')
    self.assertEqual(upper_cased, lower_cased)
def test_create_file_with_different_case_dir(self):
    """Creating a file under a differently-cased dir reuses that dir."""
    self.filesystem.create_dir('/Foo/Bar')
    self.filesystem.create_file('/foo/bar/baz')
    upper_cased = self.filesystem.get_object('/Foo/Bar')
    lower_cased = self.filesystem.get_object('/foo/bar')
    self.assertEqual(upper_cased, lower_cased)
def test_resolve_path(self):
self.filesystem.create_dir('/foo/baz')
| |
via
http.
"""
global director_service_thread
if director_service_thread is not None:
print(LOG_PREFIX + 'Sorry: there is already a Director service thread '
'listening.')
return
# Create server
server = xmlrpc_server.SimpleXMLRPCServer(
(demo.DIRECTOR_SERVER_HOST, demo.DIRECTOR_SERVER_PORT),
requestHandler=RequestHandler, allow_none=True)
# Register function that can be called via XML-RPC, allowing a Primary to
# submit a vehicle version manifest.
server.register_function(
#director_service_instance.register_vehicle_manifest,
register_vehicle_manifest_wrapper, # due to XMLRPC.Binary() for DER
'submit_vehicle_manifest')
server.register_function(
director_service_instance.register_ecu_serial, 'register_ecu_serial')
# Interface available for the demo website frontend.
server.register_function(
director_service_instance.add_new_vehicle, 'add_new_vehicle')
# Have decided that a function to add an ecu is unnecessary.
# Just add targets for it. It'll be registered when that ecu registers itself.
# Eventually, we'll want there to be an add ecu function here that takes
# an ECU's public key, but that's not reasonable right now.
# Provide absolute path for this, or path relative to the Director's repo
# directory.
server.register_function(add_target_to_director, 'add_target_to_director')
server.register_function(write_to_live, 'write_director_repo')
server.register_function(
inventory.get_last_vehicle_manifest, 'get_last_vehicle_manifest')
server.register_function(
inventory.get_last_ecu_manifest, 'get_last_ecu_manifest')
server.register_function(
director_service_instance.register_ecu_serial, 'register_ecu_serial')
server.register_function(clear_vehicle_targets, 'clear_vehicle_targets')
# Attack 1: Arbitrary Package Attack on Director Repository without
# Compromised Keys.
# README.md section 3.1
server.register_function(mitm_arbitrary_package_attack,
'mitm_arbitrary_package_attack')
server.register_function(undo_mitm_arbitrary_package_attack,
'undo_mitm_arbitrary_package_attack')
# Attack 2: Replay Attack without Compromised Keys
# README.md section 3.3
server.register_function(prepare_replay_attack_nokeys,
'prepare_replay_attack_nokeys')
server.register_function(replay_attack_nokeys, 'replay_attack_nokeys')
server.register_function(undo_replay_attack_nokeys,
'undo_replay_attack_nokeys')
# Attack 3: Arbitrary Package Attack with a Compromised Director Key
# README.md section 3.4. Recovery in section 3.6
server.register_function(keyed_arbitrary_package_attack,
'keyed_arbitrary_package_attack')
server.register_function(undo_keyed_arbitrary_package_attack,
'undo_keyed_arbitrary_package_attack')
# Attack 4: Arbitrary Package with Revoked Keys
# (README.md section 3.7)
server.register_function(sign_with_compromised_keys_attack,
'sign_with_compromised_keys_attack')
server.register_function(undo_sign_with_compromised_keys_attack,
'undo_sign_with_compromised_keys_attack')
print(LOG_PREFIX + 'Starting Director Services Thread: will now listen on '
'port ' + str(demo.DIRECTOR_SERVER_PORT))
director_service_thread = threading.Thread(target=server.serve_forever)
director_service_thread.setDaemon(True)
director_service_thread.start()
def mitm_arbitrary_package_attack(vin, target_filepath):
    """
    Simulate an arbitrary package attack by a Man in the Middle, without
    compromising any keys. Move an evil target file into place on the Director
    repository without updating metadata.

    Steps:
      1. Validate that the target exists in at least one repository and that
         no backup copy (i.e. an attack already in progress) exists.
      2. Back up the Director's copy of the target, if present.
      3. Hide the Image repository's copy so clients cannot fetch a clean one.
      4. Overwrite the Director's target file with malicious contents.

    Raises Exception if the preconditions in step 1 do not hold.
    """
    print(LOG_PREFIX + 'ATTACK: arbitrary package, no keys, on VIN ' +
        repr(vin) + ', target_filepath ' + repr(target_filepath))

    full_target_filepath = os.path.join(demo.DIRECTOR_REPO_DIR, vin,
        'targets', target_filepath)

    # TODO: NOTE THAT THIS ATTACK SCRIPT BREAKS IF THE TARGET FILE IS IN A
    # SUBDIRECTORY IN THE REPOSITORY.
    backup_target_filepath = os.path.join(demo.DIRECTOR_REPO_DIR, vin,
        'targets', 'backup_' + target_filepath)

    image_repo_full_target_filepath = os.path.join(demo.IMAGE_REPO_TARGETS_DIR,
        target_filepath)
    image_repo_backup_full_target_filepath = os.path.join(demo.IMAGE_REPO_TARGETS_DIR,
        'backup_' + target_filepath)

    if not os.path.exists(full_target_filepath) and not os.path.exists(image_repo_full_target_filepath):
        raise Exception('The provided target file is not already in either the '
            'Director or Image repositories. This attack is intended to be run on '
            'an existing target that is already set to be delivered to a client.')

    elif os.path.exists(backup_target_filepath):
        raise Exception('The attack is already in progress, or was never recovered '
            'from. Not running twice. Please check state and if everything is '
            'otherwise okay, delete ' + repr(backup_target_filepath))

    # If the image file already exists on the Director repository (not
    # necessary), then back it up.
    if os.path.exists(full_target_filepath):
        shutil.copy(full_target_filepath, backup_target_filepath)

    # Hide the image file on the image repository so that the client doesn't just
    # grab an intact file from there, making the attack moot.
    if os.path.exists(image_repo_full_target_filepath):
        os.rename(image_repo_full_target_filepath,
            image_repo_backup_full_target_filepath)

    with open(full_target_filepath, 'w') as file_object:
        file_object.write('EVIL UPDATE: ARBITRARY PACKAGE ATTACK TO BE'
            ' DELIVERED FROM MITM (no keys compromised).')

    print(LOG_PREFIX + 'COMPLETED ATTACK')
def undo_mitm_arbitrary_package_attack(vin, target_filepath):
    """
    Undo the arbitrary package attack launched by
    mitm_arbitrary_package_attack(). Move evil target file out and normal
    target file back in.

    Restores the Director repository's backup over the malicious file, and
    restores (or cleans up) the hidden Image repository copy.

    Raises Exception if the backup or attacked files are missing, i.e. there
    is no attack in progress to undo.
    """
    print(LOG_PREFIX + 'UNDO ATTACK: arbitrary package, no keys, on VIN ' +
        repr(vin) + ', target_filepath ' + repr(target_filepath))

    full_target_filepath = os.path.join(demo.DIRECTOR_REPO_DIR, vin,
        'targets', target_filepath)

    # TODO: NOTE THAT THIS ATTACK SCRIPT BREAKS IF THE TARGET FILE IS IN A
    # SUBDIRECTORY IN THE REPOSITORY.
    backup_full_target_filepath = os.path.join(demo.DIRECTOR_REPO_DIR, vin,
        'targets', 'backup_' + target_filepath)

    image_repo_full_target_filepath = os.path.join(demo.IMAGE_REPO_TARGETS_DIR, target_filepath)
    image_repo_backup_full_target_filepath = os.path.join(demo.IMAGE_REPO_TARGETS_DIR,
        'backup_' + target_filepath)

    if not os.path.exists(backup_full_target_filepath) or not os.path.exists(full_target_filepath):
        raise Exception('The expected backup or attacked files do not exist. No '
            'attack is in progress to undo, or manual manipulation has '
            'broken the expected state.')

    # In the case of the Director repository, we expect there to be a malicious
    # image file, so we restore the backup over it.
    os.rename(backup_full_target_filepath, full_target_filepath)

    # If the file existed on the image repository, was backed up and hidden by
    # the attack, and hasn't since been replaced (by some other attack or manual
    # manipulation), restore that file to its place. Either way, delete the
    # backup so that it's not there the next time to potentially confuse this.
    if os.path.exists(image_repo_backup_full_target_filepath) and not os.path.exists(image_repo_full_target_filepath):
        os.rename(image_repo_backup_full_target_filepath, image_repo_full_target_filepath)

    elif os.path.exists(image_repo_backup_full_target_filepath):
        os.remove(image_repo_backup_full_target_filepath)

    print(LOG_PREFIX + 'COMPLETED UNDO ATTACK')
"""
Simulating a replay attack can be done with instructions in README.md,
using the functions below.
"""
def backup_timestamp(vin):
    """
    Copy timestamp.der to backup_timestamp.der
    Example:
    >>> import demo.demo_director as dd
    >>> dd.clean_slate()
    >>> dd.backup_timestamp('111')
    """
    fname = 'timestamp.' + tuf.conf.METADATA_FORMAT
    vehicle_dir = os.path.join(demo.DIRECTOR_REPO_DIR, vin)
    source = os.path.join(vehicle_dir, 'metadata', fname)
    destination = os.path.join(vehicle_dir, 'backup_' + fname)
    shutil.copyfile(source, destination)
def replay_timestamp(vin):
    """
    Move 'backup_timestamp.der' to 'timestamp.der', effectively rolling back
    timestamp to a previous version. 'backup_timestamp.der' must already exist
    at the expected path (can be created via backup_timestamp(vin)).
    Prior to rolling back timestamp.der, the current timestamp is saved to
    'current_timestamp.der'.
    Example:
    >>> import demo.demo_director as dd
    >>> dd.clean_slate()
    >>> dd.backup_timestamp('111')
    >>> dd.replay_timestamp()
    """
    fname = 'timestamp.' + tuf.conf.METADATA_FORMAT
    vehicle_dir = os.path.join(demo.DIRECTOR_REPO_DIR, vin)
    outdated_copy = os.path.join(vehicle_dir, 'backup_' + fname)

    # Guard clause: the backup produced by backup_timestamp(vin) is required.
    if not os.path.exists(outdated_copy):
        raise Exception('Cannot replay the Timestamp'
            ' file. ' + repr(outdated_copy) + ' must already exist.'
            ' It can be created by calling backup_timestamp(vin).')

    live_path = os.path.join(vehicle_dir, 'metadata', fname)
    saved_current = os.path.join(vehicle_dir, 'current_' + fname)
    # First backup the current timestamp.
    shutil.move(live_path, saved_current)
    shutil.move(outdated_copy, live_path)
def restore_timestamp(vin):
    """
    # restore timestamp.der (first move current_timestamp.der to timestamp.der).
    Example:
    >>> import demo.demo_director as dd
    >>> dd.clean_slate()
    >>> dd.backup_timestamp('111')
    >>> dd.replay_timestamp()
    >>> dd.restore_timestamp()
    """
    fname = 'timestamp.' + tuf.conf.METADATA_FORMAT
    vehicle_dir = os.path.join(demo.DIRECTOR_REPO_DIR, vin)
    saved_current = os.path.join(vehicle_dir, 'current_' + fname)

    # Guard clause: replay_timestamp(vin) must have saved the current file.
    if not os.path.exists(saved_current):
        raise Exception('A backup copy of the timestamp file'
            ' could not be found. Missing: ' + repr(saved_current))

    live_path = os.path.join(vehicle_dir, 'metadata', fname)
    shutil.move(saved_current, live_path)
def prepare_replay_attack_nokeys(vin):
    """
    For exposure via XMLRPC to web frontend, attack script to prepare to execute a
    replay attack with no compromised keys against the Director.
    This attack is described in README.md, section 3.3.
    1. Back up the existing, soon-to-be-outdated timestamp file, so that it can
    be replayed in replay_attack_nokeys().
    2. Call write_to_live to issue a new timestamp file, so that the backed-up
    timestamp file is now outdated.
    After this is done, the Primary should update so that it has seen the new
    version of the timestamp data. Then, replay_attack_nokeys() should be run to
    actually perform the attack.
    """
    print(f'{LOG_PREFIX}PREPARE ATTACK: replay attack, no keys, on VIN {vin!r}')
    backup_timestamp(vin=vin)
    write_to_live(vin_to_update=vin)
    print(f'{LOG_PREFIX}COMPLETED ATTACK PREPARATION')
def replay_attack_nokeys(vin):
    """
    Actually perform the replay attack.
    This attack is described in README.md, section 3.3.
    prepare_replay_attack_nokeys should be called first, and then the Primary
    should have updated before this is called.
    """
    print(f'{LOG_PREFIX}ATTACK: replay attack, no keys, on VIN {vin!r}')
    replay_timestamp(vin=vin)
    print(f'{LOG_PREFIX}COMPLETED ATTACK')
def undo_replay_attack_nokeys(vin):
    """
    Undo the replay attack, putting the vehicle's Director repository back into
    a normal state.
    This attack is attack described in README.md, section 3.3.
    """
    print(f'{LOG_PREFIX}UNDO ATTACK: replay attack, no keys, on VIN {vin!r}')
    restore_timestamp(vin=vin)
    print(f'{LOG_PREFIX}COMPLETED UNDO ATTACK')
def keyed_arbitrary_package_attack(vin, ecu_serial, target_filepath):
"""
Add a new, malicious target to the Director repository for the vehicle,
assigning it to the given ECU Serial, and signing malicious metadata with
the valid Director timestamp, snapshot, and targets keys.
This attack is described in README.md, section 3.4.
"""
print(LOG_PREFIX + 'ATTACK: keyed_arbitrary_package_attack with parameters '
': vin ' + repr(vin) + '; ecu_serial ' + repr(ecu_serial) + '; '
'target_filepath ' + repr(target_filepath))
# TODO: Back up the image and then restore it in the undo function instead of
# hard-coding the contents it's changed back to in the undo function.
# That would require that we pick a temp file location.
# Determine the location the specified file would occupy in the repository.
| |
: xr.DataArray
Numbers to round.
p : int, optional
A positive number specifies the number of digits after the decimal point to round to.
A negative number means rounding to a power of ten, so for example -2 rounds to the nearest hundred.
Defaults to 0.
Returns
-------
xr.DataArray :
The rounded numbers.
"""
#error for float objects: 'float' object has no attribute 'round'
return x.round(p)
    @staticmethod
    def exec_da():
        """Placeholder — presumably the dask ('da') variant; no
        implementation provided yet."""
        pass
########################################################################################################################
# Exp Process
########################################################################################################################
@process
def exp():
    """
    Creates and returns an instance of `Exp`.

    See the `Exp` class for the concrete per-backend implementations.

    Returns
    -------
    Exp
        Class instance implementing all 'exp' processes.
    """
    return Exp()
class Exp:
    """
    Class implementing all 'exp' processes.
    """

    @staticmethod
    def exec_num(p):
        """
        Exponential function to the base e raised to the power of `p`.
        The no-data value None is passed through and therefore gets propagated.

        Parameters
        ----------
        p : int or float
            The numerical exponent.

        Returns
        -------
        float :
            The computed value for e raised to the power of `p`.
        """
        return np.exp(p) if p is not None else p

    @staticmethod
    def exec_np(p):
        """
        Exponential function to the base e raised to the power of `p`.
        The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        p : np.array
            The numerical exponent.

        Returns
        -------
        np.array :
            The computed values for e raised to the power of `p`.
        """
        return np.exp(p)

    @staticmethod
    def exec_xar(p):
        """
        Exponential function to the base e raised to the power of `p`.
        The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        p : xr.DataArray
            The numerical exponent.

        Returns
        -------
        xr.DataArray :
            The computed values for e raised to the power of `p`.
        """
        # np.exp dispatches to xarray objects via __array_ufunc__;
        # xr.ufuncs was deprecated and removed from modern xarray releases.
        return np.exp(p)

    @staticmethod
    def exec_da():
        """Placeholder — dask variant not implemented yet."""
        pass
########################################################################################################################
# Log Process
########################################################################################################################
@process
def log():
    """
    Creates and returns an instance of `Log`.

    See the `Log` class for the concrete per-backend implementations.

    Returns
    -------
    Log :
        Class instance implementing all 'log' processes.
    """
    return Log()
class Log:
    """
    Class implementing all 'log' processes.
    """

    @staticmethod
    def exec_num(x, base):
        """
        Logarithm to the base `base` of the number `x` is defined to be the inverse function of taking `base` to the
        power of `x`. The no-data value None is passed through and therefore gets propagated if any of the arguments is
        None. The computations follow IEEE Standard 754 whenever the processing environment supports it.
        Therefore, exec_num(0, 2) results in ±infinity.

        Parameters
        ----------
        x : int or float
            A number to compute the logarithm for.
        base : int or float
            The numerical base.

        Returns
        -------
        float :
            The computed logarithm.
        """
        return Log.exec_np(x, base) if x is not None and base is not None else None

    @staticmethod
    def exec_np(x, base):
        """
        Logarithm to the base `base` of the numbers `x` is defined to be the inverse function of taking `base` to the
        powers of `x`. The no-data value np.nan is passed through and therefore gets propagated if any of the arguments
        is None. The computations follow IEEE Standard 754 whenever the processing environment supports it.
        Therefore, exec_np(0, 2) results in ±infinity.

        Parameters
        ----------
        x : np.array
            Numbers to compute the logarithm for.
        base : int or float
            The numerical base.

        Returns
        -------
        np.array :
            The computed logarithm.
        """
        # change-of-base formula: log_base(x) = ln(x) / ln(base)
        return np.log(x) / np.log(base)

    @staticmethod
    def exec_xar(x, base):
        """
        Logarithm to the base `base` of the numbers `x` is defined to be the inverse function of taking `base` to the
        powers of `x`. The no-data value np.nan is passed through and therefore gets propagated if any of the arguments
        is None. The computations follow IEEE Standard 754 whenever the processing environment supports it.
        Therefore, exec_xar(0, 2) results in ±infinity.

        Parameters
        ----------
        x : xr.DataArray
            Numbers to compute the logarithm for.
        base : int or float
            The numerical base.

        Returns
        -------
        xr.DataArray :
            The computed logarithm.
        """
        # np.log dispatches to DataArray via __array_ufunc__; xr.ufuncs was
        # deprecated and removed from modern xarray releases.
        result = np.log(x) / np.log(base)
        if isinstance(x, xr.DataArray):
            # explicitly carry the input's attributes over to the result
            result.attrs = x.attrs
        return result

    @staticmethod
    def exec_da():
        """Placeholder — dask variant not implemented yet."""
        pass
########################################################################################################################
# Ln Process
########################################################################################################################
@process
def ln():
    """
    Creates and returns an instance of `Ln`.

    See the `Ln` class for the concrete per-backend implementations.

    Returns
    -------
    Ln :
        Class instance implementing all 'ln' processes.
    """
    return Ln()
class Ln:
    """
    Class implementing all 'ln' processes.
    """

    @staticmethod
    def exec_num(x):
        """
        The natural logarithm is the logarithm to the base e of the number `x`, which equals to using the log process
        with the base set to e. The natural logarithm is the inverse function of taking e to the power `x`. The no-data
        value None is passed through. The computations follow IEEE Standard 754.
        Therefore, exec_num(0) results in ±infinity.

        Parameters
        ----------
        x : int or float
            A number to compute the natural logarithm for.

        Returns
        -------
        float :
            The computed natural logarithm.
        """
        return np.log(x) if x is not None else x

    @staticmethod
    def exec_np(x):
        """
        The natural logarithm is the logarithm to the base e of the numbers `x`, which equals to using the log process
        with the base set to e. The natural logarithm is the inverse function of taking e to the powers `x`. The no-data
        value np.nan is passed through. The computations follow IEEE Standard 754.
        Therefore, exec_np(0) results in ±infinity.

        Parameters
        ----------
        x : np.array
            Numbers to compute the natural logarithm for.

        Returns
        -------
        np.array :
            The computed natural logarithms.
        """
        return np.log(x)

    @staticmethod
    def exec_xar(x):
        """
        The natural logarithm is the logarithm to the base e of the numbers `x`, which equals to using the log process
        with the base set to e. The natural logarithm is the inverse function of taking e to the powers `x`. The no-data
        value np.nan is passed through. The computations follow IEEE Standard 754.
        Therefore, exec_xar(0) results in ±infinity.

        Parameters
        ----------
        x : xr.DataArray
            Numbers to compute the natural logarithm for.

        Returns
        -------
        xr.DataArray :
            The computed natural logarithms.
        """
        # np.log dispatches to DataArray via __array_ufunc__; xr.ufuncs was
        # deprecated and removed from modern xarray releases.
        return np.log(x)

    @staticmethod
    def exec_da():
        """Placeholder — dask variant not implemented yet."""
        pass
########################################################################################################################
# Cos Process
########################################################################################################################
@process
def cos():
    """
    Creates and returns an instance of `Cos`.

    See the `Cos` class for the concrete per-backend implementations.

    Returns
    -------
    Cos :
        Class instance implementing all 'cos' processes.
    """
    return Cos()
class Cos:
    """
    Class implementing all 'cos' processes.
    """

    @staticmethod
    def exec_num(x):
        """
        Computes the cosine of `x`.
        Works on radians only. The no-data value None is passed through and therefore gets propagated.

        Parameters
        ----------
        x : int or float
            An angle in radians.

        Returns
        -------
        float :
            The computed cosine of `x`.
        """
        return np.cos(x) if x is not None else x

    @staticmethod
    def exec_np(x):
        """
        Computes the cosine of `x`.
        Works on radians only. The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        x : np.array
            Angles in radians.

        Returns
        -------
        np.array :
            The computed cosines of `x`.
        """
        return np.cos(x)

    @staticmethod
    def exec_xar(x):
        """
        Computes the cosine of `x`.
        Works on radians only. The no-data value np.nan is passed through and therefore gets propagated.

        Parameters
        ----------
        x : xr.DataArray
            Angles in radians.

        Returns
        -------
        xr.DataArray :
            The computed cosines of `x`.
        """
        # np.cos dispatches to DataArray via __array_ufunc__; xr.ufuncs was
        # deprecated and removed from modern xarray releases.
        return np.cos(x)

    @staticmethod
    def exec_da():
        """Placeholder — dask variant not implemented yet."""
        pass
########################################################################################################################
# Arccos Process
########################################################################################################################
@process
def arccos():
    """
    Creates and returns an instance of `Arccos`.

    See the `Arccos` class for the concrete per-backend implementations.

    Returns
    -------
    Arccos :
        Class instance implementing all 'arccos' processes.
    """
    return Arccos()
class Arccos:
"""
Class implementing all 'arccos' processes.
"""
@staticmethod
def exec_num(x):
"""
Computes the arc cosine of `x`. The arc cosine is the inverse function of the cosine so that
`arccos(cos(x)) = x`. Works on radians only. The no-data value None is passed through and therefore gets
propagated.
Parameters
----------
x : int or float
A number.
Returns
-------
float :
The computed angle in radians.
"""
return np.arccos(x) if x is not None else x
@staticmethod
def exec_np(x):
"""
Computes the arc cosine of `x`. The arc cosine is the inverse function of the cosine so that
`arccos(cos(x)) = x`. Works on radians only. The no-data value | |
# modules/xscaler.py (repository: deepsphere/deepsphere-weather)
#!/usr/bin/env python3
"""
Created on Mon Dec 14 13:25:46 2020
@author: ghiggi
"""
import xarray as xr
import numpy as np
import os
import time
# import xscaler
# xscaler.GlobalScaler.MinMaxScaler
# xscaler.GlobalScaler.StandardScaler
# GlobalScaler
# TemporalScaler
# xr.ALL_DIMS # ...
## Make "elapsed time" optional
## GitHub issues related to groupby(time)
# - https://github.com/pydata/xarray/issues/2237
##----------------------------------------------------------------------------.
## TODO
# - Robust standardization (IQR, MEDIAN) (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html#sklearn.preprocessing.RobustScaler)
# - feature_min, feature_max as dictionary per variable for MinMaxScaler ...
# - Add lw and up to std scalers (avoid outliers alter the distribution)
# - In TemporalScalers, when new_data contain new time_groupby indices values, insert NaN values
# in mean_, std_ for the missing time_groupby values
# - check_time_groups might also check that t_res specified is > to t_res data
##----------------------------------------------------------------------------.
# # Loop over each variable (for Datasets)
# gs = GlobalStandardScaler(data=ds)
# gs.fit()
# mean_ = gs.mean_
# ds['z500'] = ds['z500'] - mean_['z500']
# # Loop over each variable (for DataArray)
# gs = GlobalStandardScaler(data=da, variable_dim="feature")
# gs.fit()
# mean_ = gs.mean_
# da.loc[dict(feature='z500')] = da.loc[dict(feature='z500')] - mean_.loc[dict(feature='z500')]
# How to generalize to Dataset and DataArray:
# var = "z500"
# sel = "['" + var + "']"
# sel = ".loc[dict(" + variable_dim + "='" + var + "')]"
# exec_cmd = "x" + sel + " = x" + sel + " - mean_" + sel
# exec(exec_cmd)
##----------------------------------------------------------------------------.
#### Possible future improvements
## RollingScaler
# - No rolling yet implemented for groupby xarray object
## SpatialScaler
# - Requires a groupby_spatially(gpd_poly or xr.grid)
## In future: multidimensional groupby? :
# - http://xarray.pydata.org/en/stable/groupby.html
# - http://xarray.pydata.org/en/stable/generated/xarray.IndexVariable.html#xarray.IndexVariable
# - https://github.com/pydata/xarray/issues/324
# - https://github.com/pydata/xarray/issues/1569
## sklearn-xarray
# https://phausamann.github.io/sklearn-xarray/content/pipeline.html
##----------------------------------------------------------------------------.
#### Utils ####
def get_valid_scaler_class():
    """Return list of implemented xscaler objects."""
    return ['GlobalStandardScaler', 'TemporalStandardScaler',
            'GlobalMinMaxScaler', 'TemporalMinMaxScaler',
            'SequentialScaler']
def check_valid_scaler(scaler):
    """Raise ValueError if `scaler` is not a recognized xscaler object."""
    # TODO : Check type/class instead looking for attribute...
    if scaler.scaler_class in get_valid_scaler_class():
        return
    print(scaler.scaler_class)
    raise ValueError("Not a valid scaler")
def check_variable_dim(variable_dim, data):
    """Check that the correct variable dimension (for DataArray) is specified.

    Returns None when no variable dimension is requested, otherwise the
    validated dimension name as a string.
    """
    if variable_dim is None:
        return None
    # The dimension must be named with a string
    if not isinstance(variable_dim, str):
        raise TypeError("Provide 'variable_dim' as a string")
    # ... and must actually exist on the supplied xarray object
    if variable_dim not in list(data.dims):
        raise ValueError("'variable_dim' must be a dimension coordinate of the xarray object")
    return variable_dim
def check_groupby_dims(groupby_dims, data):
    """Check that valid groupby dimensions are specified.

    A single name is promoted to a one-element list; lists/tuples are
    returned unchanged after validation.
    """
    # Promote a single dimension name to a list
    if isinstance(groupby_dims, str):
        groupby_dims = [groupby_dims]
    if not isinstance(groupby_dims, (list, tuple)):
        raise TypeError("Provide 'groupby_dims' as a string, list or tuple")
    # Every requested dimension must exist on the xarray object
    data_dims = np.array(list(data.dims))
    if not np.all(np.isin(groupby_dims, data_dims)):
        raise ValueError("'groupby_dims' must be dimension coordinates of the xarray object")
    return groupby_dims
def check_rename_dict(data, rename_dict):
    """Check rename_dict validity.

    Accepts the mapping in either orientation and always returns it as
    {current_dim: new_name}.
    """
    if not isinstance(rename_dict, dict):
        raise ValueError("'rename_dict' must be a dictionary.")
    data_dims = list(data.dims)
    keys = list(rename_dict.keys())
    vals = list(rename_dict.values())
    if np.all(np.isin(keys, data_dims)):
        # Keys already name existing dimensions: use as-is
        return rename_dict
    if np.all(np.isin(vals, data_dims)):
        # Mapping was supplied inverted: flip it
        return {v: k for k, v in rename_dict.items()}
    raise ValueError("The specified dimensions in 'rename_dict' are not dimensions of the supplied data.")
def get_xarray_variables(data, variable_dim = None):
    """Return the variables of an xarray Dataset or DataArray.

    For a Dataset, the list of data variable names; for a DataArray,
    either its name or the values along `variable_dim`.
    """
    if isinstance(data, xr.Dataset):
        return list(data.data_vars.keys())
    if isinstance(data, xr.DataArray):
        if variable_dim is None:
            return data.name
        # Validate the requested variable dimension before indexing
        variable_dim = check_variable_dim(variable_dim=variable_dim, data=data)
        return data[variable_dim].values.tolist()
    raise TypeError("Provide an xarray Dataset or DataArray")
def _check_is_fitted(self):
# TODO: this could be assigned to a superclass of scalers
if not self.fitted:
raise ValueError("Please fit() the scaler before saving it!")
def _check_save_fpath(fpath, force):
# Check basepath exists
if not os.path.exists(os.path.dirname(fpath)):
# If not exist, create directory
os.makedirs(os.path.dirname(fpath))
print("The directory {} did not exist and has been created !".format(os.path.dirname(fpath)))
# Check end with .nc
if fpath[-3:] != ".nc":
fpath = fpath + ".nc"
print("Added .nc extension to the provided fpath.")
# If the netCDF file already exists, remove if force=True
if os.path.exists(fpath):
if force:
os.remove(fpath)
else:
raise ValueError("{} already exists on disk. Set force=True to overwrite.".format(fpath))
return fpath
#-----------------------------------------------------------------------------.
# ################################
#### Utils for TemporalScalers ###
# ################################
def check_time_dim(time_dim, data):
    """Check that the correct time dimension is specified and return it."""
    # The dimension must be named with a string
    if not isinstance(time_dim, str):
        raise TypeError("Specify 'time_dim' as a string.")
    # ... must exist on the xarray object
    if time_dim not in list(data.dims):
        raise ValueError("'time_dim' must specify the time dimension coordinate of the xarray object.")
    # ... and must hold np.datetime64 values
    if not isinstance(data[time_dim].values[0], np.datetime64):
        raise ValueError("'time_dim' must indicate a time dimension coordinate with np.datetime64 dtype.")
    return time_dim
def get_valid_time_groups():
    """Return valid time groups."""
    return ['year', 'season', 'quarter', 'month', 'day',
            'weekofyear', 'dayofweek', 'dayofyear',
            'hour', 'minute', 'second']
def get_dict_season():
    """Return the mapping from season acronym (DJF/MAM/JJA/SON) to integer code."""
    return {'DJF': 1, 'MAM': 2, 'JJA': 3, 'SON': 4}
def get_time_group_max(time_group):
    """Return the maximum value of the specified time group."""
    # Year is unbounded; a dummy large value is used.
    dict_time_max = {'year': 5000,
                     'season': 4,
                     'quarter': 4,
                     'month': 12,
                     'weekofyear': 53,
                     'dayofweek': 7,   # range 0..6
                     'dayofyear': 366,
                     'day': 31,
                     'hour': 24,       # range 0..23
                     'minute': 60,     # range 0..59
                     'second': 60}     # range 0..59
    return dict_time_max[time_group]
def get_time_group_min(time_group):
    """Return the minimum value of the specified time group."""
    dict_time_min = {'year': 0,
                     'season': 1,
                     'quarter': 1,
                     'month': 1,
                     'weekofyear': 1,
                     'dayofweek': 0,
                     'dayofyear': 1,
                     'day': 1,
                     'hour': 0,
                     'minute': 0,
                     'second': 0}
    return dict_time_min[time_group]
def get_time_groupby_name(time_groups):
    """Return a name reflecting the temporal groupby operation.

    An aggregation period of 1 contributes just the group name; larger
    periods are prefixed with the period (e.g. '6hour').
    """
    labels = [group if period == 1 else str(period) + group
              for group, period in time_groups.items()]
    return "Time_GroupBy " + '-'.join(labels)
def check_time_groups(time_groups):
    """Check validity of time_groups.

    Normalizes the input (str -> list -> dict with aggregation period 1)
    and validates every group name and aggregation period. Returns the
    normalized dict, or None when no grouping is requested.
    """
    if time_groups is None:
        return None
    # Normalize the input type
    if isinstance(time_groups, str):
        time_groups = [time_groups]
    if isinstance(time_groups, list):
        time_groups = {k: 1 for k in time_groups}
    if not isinstance(time_groups, dict):
        raise TypeError("Provide time_groups as string, list or dictionary.")
    ##------------------------------------------------------------------------.
    # Reject unknown group names
    names = np.array(list(time_groups.keys()))
    unvalid_names = names[np.isin(names, get_valid_time_groups(), invert=True)]
    if len(unvalid_names) > 0:
        raise ValueError("{} are not valid 'time_groups' keys".format(unvalid_names))
    ##------------------------------------------------------------------------.
    # Validate each aggregation period
    for group, period in time_groups.items():
        if period < 1:
            raise ValueError("The aggregation period of '{}' must be at least 1".format(group))
        max_val = get_time_group_max(time_group=group)
        if period > max_val:
            raise ValueError("The maximum aggregation period of '{}' is {}".format(group, max_val))
        # Warn when the period does not divide the group's range evenly
        if (max_val % period) != 0:
            print("Attention, the specified aggregation period ({}) does not allow uniform subdivision of '{}'".format(period, group))
    ##------------------------------------------------------------------------.
    return time_groups
def check_time_groupby_factors(time_groupby_factors, time_groups):
    """Check validity of time_groupby_factors.

    Parameters
    ----------
    time_groupby_factors : dict or None
        Mapping of time group name to groupby factor.
    time_groups : dict or None
        The requested time groups (as returned by check_time_groups).

    Returns
    -------
    dict
        `time_groupby_factors` when it covers every requested time group,
        otherwise an empty dict (when either argument is None).
    """
    if time_groupby_factors is None:
        return {}
    if time_groups is None:
        return {}
    # BUG FIX: np.isin(dict_keys, dict_keys) wraps each view in a 0-d object
    # array and compares the whole key sets for equality, so a valid
    # superset of factors was rejected. Use a plain subset test instead.
    if not set(time_groups).issubset(time_groupby_factors):
        raise ValueError("All time groups must be included in time_groupby_factors.")
    return time_groupby_factors
def check_new_time_groupby_idx(time_groupby_idx, scaler_stat):
    """Check that the fitted scaler contains all time_groupby_idx of new_data."""
    # Unique groupby indices the scaler was fitted on
    fitted_idx = np.unique(scaler_stat[time_groupby_idx.name].values)
    # Unique groupby indices required by the new data
    new_idx = np.unique(time_groupby_idx.values)
    if not np.all(np.isin(new_idx, fitted_idx)):
        raise ValueError("The TemporalScaler does not contain representative statistics for all time_groups indices of 'new_data'.")
##----------------------------------------------------------------------------.
def get_time_groupby_idx(data, time_dim, time_groups, time_groupby_factors=None):
"""Return a 1D array with unique index for temporal groupby operation."""
# Check time groups
time_groups_dict = check_time_groups(time_groups=time_groups)
# Check time_groupby_factors
time_groupby_factors = check_time_groupby_factors(time_groupby_factors, time_groups=time_groups_dict)
no_time_groupby_factors = len(time_groupby_factors) == 0
##------------------------------------------------------------------------.
# Retrieve groupby indices
if time_groups is not None:
tmp_min_interval = 0
l_time_groups_dims = []
for i, (time_group, time_agg) in enumerate(time_groups_dict.items()):
# Retrieve max time aggregation
time_agg_max = get_time_group_max(time_group=time_group)
# Retrieve time index (for specific time group)
# - dt.week, dt.weekofyear has been deprecated in Pandas ...
if time_group == "weekofyear":
idx = data[time_dim].dt.isocalendar().week
else:
idx = data[time_dim].dt.__getattribute__(time_group)
l_time_groups_dims.append(idx)
# Preprocessing if 'season' (string to integer)
if time_group == 'season':
dict_season = get_dict_season()
idx_values = [dict_season[s] for s in idx.values]
idx.values = idx_values
##----------------------------------------------------------------.
# | |
headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: float or ClientRawResponse if raw=true
:rtype: float or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodynumber.models.ErrorException>`
"""
# Construct URL
url = self.get_big_double_positive_decimal.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('float', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_big_double_positive_decimal.metadata = {'url': '/number/big/double/99999999.99'}
async def put_big_double_negative_decimal(
        self, *, custom_headers=None, raw=False, **operation_config):
    """Put big double value -99999999.99.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # Fixed payload mandated by this test route.
    number_body = -99999999.99

    # Endpoint URL comes from the method metadata.
    target_url = self.put_big_double_negative_decimal.metadata['url']

    # No query parameters; send a JSON body.
    params = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    payload = self._serialize.body(number_body, 'float')

    # Issue the PUT through the async pipeline.
    http_request = self._client.put(target_url, params, headers, payload)
    http_response = await self._client.async_send(
        http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, http_response)

    if raw:
        return ClientRawResponse(None, http_response)
put_big_double_negative_decimal.metadata = {'url': '/number/big/double/-99999999.99'}
async def get_big_double_negative_decimal(
        self, *, custom_headers=None, raw=False, **operation_config):
    """Get big double value -99999999.99.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: float or ClientRawResponse if raw=true
    :rtype: float or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # Endpoint URL comes from the method metadata.
    target_url = self.get_big_double_negative_decimal.metadata['url']

    # No query parameters; request a JSON payload.
    params = {}
    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    # Issue the GET through the async pipeline.
    http_request = self._client.get(target_url, params, headers)
    http_response = await self._client.async_send(
        http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, http_response)

    payload = None
    if http_response.status_code == 200:
        payload = self._deserialize('float', http_response)

    if raw:
        return ClientRawResponse(payload, http_response)
    return payload
get_big_double_negative_decimal.metadata = {'url': '/number/big/double/-99999999.99'}
async def put_big_decimal(
        self, number_body, *, custom_headers=None, raw=False, **operation_config):
    """Put big decimal value 2.5976931e+101.

    :param number_body:
    :type number_body: decimal.Decimal
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # Endpoint URL comes from the method metadata.
    target_url = self.put_big_decimal.metadata['url']

    # No query parameters; send a JSON body.
    params = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    payload = self._serialize.body(number_body, 'decimal')

    # Issue the PUT through the async pipeline.
    http_request = self._client.put(target_url, params, headers, payload)
    http_response = await self._client.async_send(
        http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, http_response)

    if raw:
        return ClientRawResponse(None, http_response)
put_big_decimal.metadata = {'url': '/number/big/decimal/2.5976931e+101'}
async def get_big_decimal(
        self, *, custom_headers=None, raw=False, **operation_config):
    """Get big decimal value 2.5976931e+101.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: decimal.Decimal or ClientRawResponse if raw=true
    :rtype: decimal.Decimal or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # Endpoint URL comes from the method metadata.
    target_url = self.get_big_decimal.metadata['url']

    # No query parameters; request a JSON payload.
    params = {}
    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    # Issue the GET through the async pipeline.
    http_request = self._client.get(target_url, params, headers)
    http_response = await self._client.async_send(
        http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, http_response)

    payload = None
    if http_response.status_code == 200:
        payload = self._deserialize('decimal', http_response)

    if raw:
        return ClientRawResponse(payload, http_response)
    return payload
get_big_decimal.metadata = {'url': '/number/big/decimal/2.5976931e+101'}
async def put_big_decimal_positive_decimal(
        self, *, custom_headers=None, raw=False, **operation_config):
    """Put big decimal value 99999999.99.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # BUG FIX: Decimal(99999999.99) inherits the binary-float rounding error
    # (99999999.9899999...); construct from a string so the exact value the
    # service route expects is serialized (see decimal module docs).
    number_body = Decimal('99999999.99')
    # Construct URL
    url = self.put_big_decimal_positive_decimal.metadata['url']
    # Construct parameters
    query_parameters = {}
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    # Construct body
    body_content = self._serialize.body(number_body, 'decimal')
    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    response = await self._client.async_send(request, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
put_big_decimal_positive_decimal.metadata = {'url': '/number/big/decimal/99999999.99'}
async def get_big_decimal_positive_decimal(
        self, *, custom_headers=None, raw=False, **operation_config):
    """Get big decimal value 99999999.99.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: decimal.Decimal or ClientRawResponse if raw=true
    :rtype: decimal.Decimal or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # Endpoint URL comes from the method metadata.
    target_url = self.get_big_decimal_positive_decimal.metadata['url']

    # No query parameters; request a JSON payload.
    params = {}
    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    # Issue the GET through the async pipeline.
    http_request = self._client.get(target_url, params, headers)
    http_response = await self._client.async_send(
        http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, http_response)

    payload = None
    if http_response.status_code == 200:
        payload = self._deserialize('decimal', http_response)

    if raw:
        return ClientRawResponse(payload, http_response)
    return payload
get_big_decimal_positive_decimal.metadata = {'url': '/number/big/decimal/99999999.99'}
async def put_big_decimal_negative_decimal(
        self, *, custom_headers=None, raw=False, **operation_config):
    """Put big decimal value -99999999.99.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # BUG FIX: Decimal(-99999999.99) inherits the binary-float rounding error;
    # construct from a string so the exact value the service route expects is
    # serialized (see decimal module docs).
    number_body = Decimal('-99999999.99')
    # Construct URL
    url = self.put_big_decimal_negative_decimal.metadata['url']
    # Construct parameters
    query_parameters = {}
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    # Construct body
    body_content = self._serialize.body(number_body, 'decimal')
    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    response = await self._client.async_send(request, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
put_big_decimal_negative_decimal.metadata = {'url': '/number/big/decimal/-99999999.99'}
async def get_big_decimal_negative_decimal(
        self, *, custom_headers=None, raw=False, **operation_config):
    """Get big decimal value -99999999.99.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: decimal.Decimal or ClientRawResponse if raw=true
    :rtype: decimal.Decimal or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # Endpoint URL comes from the method metadata.
    target_url = self.get_big_decimal_negative_decimal.metadata['url']

    # No query parameters; request a JSON payload.
    params = {}
    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    # Issue the GET through the async pipeline.
    http_request = self._client.get(target_url, params, headers)
    http_response = await self._client.async_send(
        http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, http_response)

    payload = None
    if http_response.status_code == 200:
        payload = self._deserialize('decimal', http_response)

    if raw:
        return ClientRawResponse(payload, http_response)
    return payload
get_big_decimal_negative_decimal.metadata = {'url': '/number/big/decimal/-99999999.99'}
async def put_small_float(
        self, number_body, *, custom_headers=None, raw=False, **operation_config):
    """Put small float value 3.402823e-20.

    :param number_body:
    :type number_body: float
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`ErrorException<bodynumber.models.ErrorException>`
    """
    # Endpoint URL comes from the method metadata.
    target_url = self.put_small_float.metadata['url']

    # No query parameters; send a JSON body.
    params = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    payload = self._serialize.body(number_body, 'float')

    # Issue the PUT through the async pipeline.
    http_request = self._client.put(target_url, params, headers, payload)
    http_response = await self._client.async_send(
        http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, http_response)

    if raw:
        return ClientRawResponse(None, http_response)
put_small_float.metadata = {'url': '/number/small/float/3.402823e-20'}
async def get_small_float(
self, *, custom_headers=None, raw=False, **operation_config):
"""Get big double value 3.402823e-20.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
| |
Sec. 9.4.1 of Faltinsen 2005.
"""
if runGeoLengths:
self.get_geo_lengths() #Calculated wetted lengths in get_eom_matrices()
W = self.weight
U = self.speed
rho = self.rho
b = self.beam
lcg = self.lcg
tau = self.tau
beta = self.beta
g = self.g
r_g = self.r_g
eta_5 = self.eta_5
L_K = self.L_K
L_C = self.L_C
lambda_W = self.lambda_W
x_s = self.x_s
z_max = self.z_max
pi = np.pi
def get_mass_matrix():
    """This function returns the added mass coefficients following Sec. 9.4.1 of Faltinsen 2005, including weight and moment of inertia

    Builds the 2x2 heave/pitch mass matrix [[A_33, A_35], [A_53, A_55]] and
    stores it in ``self.mass_matrix``. Reads the outer method's local
    variables (rho, b, lcg, W, g, r_g, ...) via closure.
    """
    #Distance of CG from keel-WL intersection
    x_G = L_K - lcg
    #K constant (Eq. 9.63 of Faltinsen 2005)
    # NOTE(review): `gamma` is presumably scipy.special.gamma imported
    # elsewhere in this file — confirm.
    K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
    kappa = (1 + z_max) * (pi/180)*(tau + eta_5) #User defined constant
    #Based on Faltinsen's
    # A1_* terms: contribution from the forward (chines-dry) region up to x_s
    A1_33 = rho * kappa**2 * K * x_s**3 / 3
    A1_35 = A1_33 * (x_G - x_s * 3/4)
    A1_53 = A1_35
    A1_55 = A1_33 * (x_G**2 - 3/2 * x_G * x_s + 3/5 * x_s**2)
    #Contribution from wet-chine region
    if L_C > 0:
        C_1 = 2 * np.tan(pi/180*beta)**2 / pi * K
        A2_33 = (rho * b**3) * C_1 * pi / 8 * L_C / b
        A2_35 = (rho * b**4) * (- C_1 * pi / 16 * ((L_K / b)**2 - (x_s / b)**2) + x_G / b * A2_33 / (rho * b**3))
        A2_53 = A2_35
        A2_55 = (rho * b**5) * (C_1 * pi / 24 * ((L_K / b)**3 - (x_s / b)**3) - C_1 / 8 * pi * (x_G / b) * ((L_K / b)**2 - (x_s / b)**2) + (x_G / b)**2 * A2_33 / (rho * b**3))
    else:
        # Chines dry over the whole wetted length: no wet-chine contribution
        A2_33 = 0
        A2_35 = 0
        A2_53 = 0
        A2_55 = 0
    #Total added mass & update values
    A_33 = A1_33 + A2_33 + W/g # kg, A_33 (vessel mass added to added mass)
    A_35 = A1_35 + A2_35 # kg*m/rad, A_35
    A_53 = A1_53 + A2_53 # kg*m, A_53
    A_55 = A1_55 + A2_55 + W/g*r_g**2 # kg*m^2/rad, A_55 (pitch inertia W/g*r_g^2 included)
    self.mass_matrix = np.array([[A_33, A_35], [A_53, A_55]])
def get_damping_matrix():
    """This function returns the damping coefficients following Sec. 9.4.1 of Faltinsen 2005

    Builds the 2x2 heave/pitch damping matrix [[B_33, B_35], [B_53, B_55]]
    and stores it in ``self.damping_matrix``. Reads ``self.mass_matrix``,
    so it must run after get_mass_matrix() (as done by the caller).
    """
    #Heave-heave added mass (need to substract W/g since it was added)
    A_33 = self.mass_matrix[0,0] - W/g
    # Transom draft: deadrise geometry when chines are wet, keel wetted length otherwise
    if L_C > 0:
        d = 0.5 * b * np.tan(pi/180*beta)
    else:
        d = (1 + z_max) * (pi/180)*(tau + eta_5) * L_K
    #K constant (Eq. 9.63 of Faltinsen 2005, P. 369)
    # NOTE(review): `gamma` is presumably scipy.special.gamma — confirm.
    K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
    #2D Added mass coefficient in heave
    a_33 = rho * d**2 * K
    #Infinite Fn lift coefficient
    C_L0 = (tau + eta_5)**1.1 * 0.012 * lambda_W**0.5
    #Derivative w.r.t. tau (rad) of inf. Fn C_L0
    dC_L0 = (180 / pi)**1.1 * 0.0132 * (pi/180*(tau + eta_5))**0.1 * lambda_W**0.5
    #Derivative w.r.t. tau (rad) of inf. Fn C_Lbeta
    dC_Lbeta = dC_L0 * (1 - 0.0039 * beta * C_L0**-0.4)
    #Damping coefficients & update values
    B_33 = rho / 2 * U * b**2 * dC_Lbeta # kg/s, B_33, Savitsky based
    B_35 = - U * (A_33 + lcg * a_33) # kg*m/(s*rad), B_35, Infinite frequency based
    B_53 = B_33 * (0.75 * lambda_W * b - lcg) # kg*m/s, B_53, Savitsky based
    B_55 = U * lcg**2 * a_33 # kg*m**2/(s*rad), B_55, Infinite frequency based
    self.damping_matrix = np.array([[B_33, B_35], [B_53, B_55]])
def get_restoring_matrix(diffType=1, step=10**-6.6):
    """This function returns the restoring coefficients following the approach in Sec. 9.4.1 of Faltinsen 2005

    The heave/pitch restoring matrix is obtained by numerically
    differentiating the net force/moment w.r.t. eta_3 and eta_5, and is
    stored in ``self.restoring_matrix``.

    Args:
        diffType (int, optional): 1 (recommended) = Complex step method, 2 = Forward step difference. Defaults to 1.
        step (float, optional): Step size if using diffType == 2. Defaults to 10**-6.6.
    """
    def _func(eta):
        # Evaluate net vertical force & pitch moment at the perturbed attitude
        self.eta_3 = eta[0]
        self.eta_5 = eta[1]
        self.get_forces()
        return self.net_force[1:3]
    # Save the current attitude so it can be restored after differentiation
    temp_eta_3 = self.eta_3
    temp_eta_5 = self.eta_5
    if diffType == 1:
        C_full = -ndmath.complexGrad(_func, [temp_eta_3, temp_eta_5])
    elif diffType == 2:
        # BUG FIX: the user-supplied `step` was ignored (a hard-coded
        # 10**-6.6 was passed); forward it to the finite-difference routine.
        C_full = -ndmath.finiteGrad(_func, [temp_eta_3, temp_eta_5], step)
    # Reset attitude and recompute forces at the unperturbed state
    self.eta_3 = temp_eta_3
    self.eta_5 = temp_eta_5
    self.get_forces()
    # Conversion deg to rad (degree in denominator)
    C_full[0,1] = C_full[0,1] / (pi/180) # N/rad, C_35
    C_full[1,1] = C_full[1,1] / (pi/180) # N*m/rad, C_55
    #Update values
    self.restoring_matrix = C_full
#Call functions
get_mass_matrix()
get_damping_matrix()
get_restoring_matrix()
def check_porpoising(self, stepEstimateType=1):
    """This function checks for porpoising.

    Two independent checks are performed:
    1. Eigenvalue analysis of the heave/pitch equations of motion
       (unstable if any eigenvalue has a non-negative real part).
    2. The Savitsky 1964 chart method (critical trim vs. lift coefficient).

    Adds/updates the following parameters:
    - :attr:`porpoising` (list): [[eig_porpoise, settling_time],
      [chart_porpoise, tau_crit]]

    Args:
        stepEstimateType (int, optional): Pitch step response settling time estimate type, 1 = -3/np.real(eigVals[0])], 2 = Time-domain simulation estimate. Defaults to 1.
    """
    # Eigenvalue analysis (requires the EOM matrices)
    try:
        self.mass_matrix
    except AttributeError:
        warnings.warn('No Equation Of Motion (EOM) matrices found. Running get_eom_matrices().', stacklevel=2)
        self.get_eom_matrices()
    M = self.mass_matrix
    C = self.damping_matrix
    K = self.restoring_matrix
    nDim = len(M)
    # State space representation: x' = A_ss x with x = [eta, eta']
    A_ss = np.concatenate((np.concatenate((np.zeros((nDim,nDim)), np.identity(nDim)), axis=1), np.concatenate((-np.linalg.solve(M,K), -np.linalg.solve(M,C)), axis=1)))
    eigVals = np.linalg.eigvals(A_ss)
    # BUG FIX: eigenvalues are generally complex, and ordering comparisons on
    # complex values are ill-defined (plain Python complex raises TypeError).
    # Dynamic stability is governed by the real part only.
    eig_porpoise = any(np.real(eigVal) >= 0 for eigVal in eigVals)
    if stepEstimateType == 1:
        # Analytical settling-time estimate from the first eigenvalue
        settling_time = -3/np.real(eigVals[0])
    elif stepEstimateType == 2:
        B_ss = np.array([[1],[0],[0],[0]]) #Pitch only
        C_ss = np.array([[1,0,0,0]]) #Pitch only
        D_ss = np.array([[0]])
        system = (A_ss,B_ss,C_ss,D_ss)
        t, y = signal.step(system)
        # First time (from the end) the step response leaves the +/-2% band
        settling_time = (t[next(len(y)-i for i in range(2,len(y)-1) if abs(y[-i]/y[-1])>1.02)]-t[0])
    #Savitsky '64 chart method
    C_L = self.weight/(1/2*self.rho*self.speed**2*self.beam**2)
    x = np.sqrt(C_L/2)
    #Warnings when outside the chart's range of applicability
    if x > 0.3 or x < 0.13:
        warnings.warn('Lift Coefficient = {0:.3f} outside of bounds (0.0338-0.18) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(C_L), stacklevel=2)
    if self.beta > 20:
        warnings.warn('Deadrise = {0:.3f} outside of bounds (0-20 deg) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(self.beta), stacklevel=2)
    # Critical trim polynomials at 0/10/20 deg deadrise, blended quadratically
    tau_crit_0 = -376.37*x**3 + 329.74*x**2 - 38.485*x + 1.3415
    tau_crit_10 = -356.05*x**3 + 314.36*x**2 - 41.674*x + 3.5786
    tau_crit_20 = -254.51*x**3 + 239.65*x**2 - 23.936*x + 3.0195
    tau_crit_func = interpolate.interp1d([0, 10, 20], [tau_crit_0, tau_crit_10, tau_crit_20], kind='quadratic', fill_value='extrapolate')
    tau_crit = tau_crit_func(self.beta)
    if self.tau > tau_crit:
        chart_porpoise = True
    else:
        chart_porpoise = False
    #Update values
    self.porpoising = [[eig_porpoise, settling_time], [chart_porpoise, float(tau_crit)]]
def get_seaway_behavior(self):
"""This function calculates the seaway behavior as stated in Savitsky & Brown '76.
Adds/updates the following parameters:
- :attr:`avg_impact_acc`
- :attr:`R_AW`
"""
if self.H_sig is None:
self.H_sig = self.beam*0.5 #Arbitrary wave height if no user-defined wave height
warnings.warn('Significant wave height has not been specified. Using beam*0.5 = {0:.3f} m.'.format(self.H_sig), stacklevel=2)
if self.length is None:
self.length = self.beam*3
warnings.warn('Vessel length has not been specified. Using beam*3 = {0:.3f} m.'.format(self.length), stacklevel=2)
H_sig = self.H_sig
W = self.weight
beta = self.beta
tau = self.tau
pi = np.pi
Delta_LT = W/9964 #Displacement in long tons
Delta = Delta_LT*2240 #Displacement in lbf
L = self.length*3.281 #Length in ft
b = self.beam*3.281 #Beam in ft
Vk = self.speed*1.944 #Speed in knots
Vk_L = Vk/np.sqrt(L) #Vk/sqrt(L)
H_sig = H_sig*3.281 #Significant wave height in ft
w = self.rho*self.g/(4.448*35.315) #Specific weight in lbf/ft^3
C_Delta = Delta/(w*b**3) #Static beam-loading coefficient
if self.seaway_drag_type == 1: #Savitsky '76
#Check that variables are inside range of applicability (P. 395 of Savitsky & Brown '76)
P1 = Delta_LT/(0.01*L)**3
P2 = L/b
P5 = H_sig/b
P6 = Vk_L
if P1 < 100 or P1 > 250:
warnings.warn('Vessel displacement coefficient = {0:.3f}, outside of range of applicability (100 <= Delta_LT/(0.01*L)^3 <= 250, with units LT/ft^3). Results are extrapolations.'.format(P1), stacklevel=2)
if P2 < 3 or P2 > 5:
warnings.warn('Vessel length/beam = {0:.3f}, outside of range of applicability (3 <= L/b <= 5). Results are extrapolations.'.format(P2), stacklevel=2)
if tau < 3 or tau > 7:
warnings.warn('Vessel trim = {0:.3f}, outside of range of applicability (3 deg <= tau <= 7 deg). Results are extrapolations.'.format(tau), stacklevel=2)
if beta < 10 or beta > 30:
warnings.warn('Vessel deadrise = {0:.3f}, outside of range of applicability (10 deg <= beta <= 30 deg). Results are extrapolations.'.format(beta), stacklevel=2)
if P5 < 0.2 or P5 > 0.7:
warnings.warn('Significant wave | |
is not empty
inmarkup = str(input("Insert markup: "))
markup = re.sub('[^A-Za-z0-9:]+', '', inmarkup)
print(markup)
print("---")
list_markup.append(add_markup(obj_cursor.pozicija, markup, obj_cursor.trajanje, 0))
# if list_chords:
# for i in list_chords:
# if i.pozicija >= obj_cursor.pozicija:
# i.pozicija += obj_cursor.trajanje + 1
# list_chords.append(add_chord(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 0))
# obj_cursor.pozicija += obj_cursor.trajanje + 1
# #if list jet is empty first time only
# else:
# lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 0))
# obj_cursor.pozicija += obj_cursor.trajanje + 1
#remove all chords under the cursor
if event.key == pygame.K_d:
x = [i for i in list_markup if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
if x:
for i in x:
if i in list_markup:
list_markup.remove(i)
#w: Move forward to the beginning of a word.
if event.key == pygame.K_w:
x = [ i.pozicija for i in list_markup if i.pozicija > obj_cursor.pozicija ]
if x:
x.sort()
obj_cursor.pozicija = x[0]
#obj_cursor.pozicija = min(x, key = lambda t: t[0])
#print(x)
#b: Move backward to the beginning of a word.
if event.key == pygame.K_b:
x = [ i.pozicija for i in list_markup if i.pozicija < obj_cursor.pozicija ]
if x:
x.sort()
obj_cursor.pozicija = x[-1]
#obj_cursor.pozicija = max(x, key = lambda t: t[0])
#print(x)
#print(max(x, key = lambda t: t[0]))
#Keyboard buttons with LSHIFT as mod
if pygame.key.get_mods() & pygame.KMOD_LSHIFT:
#modes defined
#no modes
if not modes():
pass
#I insert note at the beginning of the bar
if event.key == pygame.K_i:
insert_mode = 1
obj_cursor.trajanje = insert_mode_cursor_length
obj_cursor.pozicija = int(obj_cursor.pozicija / 16) * 16
#A append cursor to the end of current bar
if event.key == pygame.K_a:
insert_mode = 1
obj_cursor.pozicija = int(obj_cursor.pozicija / 16) * 16 + 15
#O Open up a new bar in front of the current bar and add notes there
if event.key == pygame.K_o:
insert_mode = 1
obj_cursor.pozicija = int(obj_cursor.pozicija / 16) * 16
#obj_cursor.bg_scroll_x = obj_cursor.pozicija - 2
if lista_nota:
for i in lista_nota:
if i.pozicija >= obj_cursor.pozicija:
i.pozicija += 16
broj_taktova += 1
if insert_mode:
#enlarge CURSOR.lenght by 1
if event.key == pygame.K_RIGHT:
if obj_cursor.trajanje < 15:
obj_cursor.trajanje += 1
#reduce CURSOR.lenght by 1
if event.key == pygame.K_LEFT:
if obj_cursor.trajanje > 0:
obj_cursor.trajanje -= 1
if old_mode:
obj_cursor.sprite = 1
if event.key == pygame.K_RIGHT:
if obj_cursor.trajanje < 15:
obj_cursor.trajanje += 1
if event.key == pygame.K_LEFT:
if obj_cursor.trajanje > 0:
obj_cursor.trajanje -= 1
if event.key == pygame.K_t:
if broj_taktova > 0:
broj_taktova -= 1
#add a sharp note
if event.key == pygame.K_RETURN:
if lista_nota:
x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
if x:
if ((obj_cursor.ton not in [y.ton for y in x]) and all(obj_cursor.pozicija == y.pozicija for y in x) and all(obj_cursor.trajanje == y.trajanje for y in x)):
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 1))
else:
obj_cursor.sprite = 2
else:
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 1))
else:
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 1))
#Keyboard buttons with LCTRL as mod
if ((pygame.key.get_mods() & pygame.KMOD_LCTRL)):
if not modes():
#save project to file
if event.key == pygame.K_s:
pickle.dump((lista_nota, list_chords, list_markup, broj_taktova, obj_cursor.pozicija), open( args.projectname, "wb" ))
#pickle.dump((lista_nota, list_chords), open( args.projectname, "wb" ))
print("save")
#load project from file
if event.key == pygame.K_l:
lista_nota, list_chords, list_markup, broj_taktova, obj_cursor.pozicija = pickle.load(open( args.projectname, "rb" ))
#lista_nota = pickle.load(open( args.projectname, "rb" ))
print("load")
#play only first voice
#midiout.send_message([144, nota2MidiNumber(i[0]), 100])
if event.key == pygame.K_1:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][3],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
#play only second voice
if event.key == pygame.K_2:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][2],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
#play only third voice
if event.key == pygame.K_3:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][1],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
#play only forth voice
if event.key == pygame.K_4:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
if event.key == pygame.K_0:
midi_notes = []
midiout.send_message([176, 123, 0])
if old_mode:
#obj_cursor.sprite = 1
if event.key == pygame.K_RIGHT:
obj_cursor.pozicija += 1
if event.key == pygame.K_LEFT:
if obj_cursor.pozicija > -15:
obj_cursor.pozicija -= 1
#if event.key == pygame.K_UP:
# if obj_cursor.ton < 40:
# obj_cursor.ton += 1
#if event.key == pygame.K_DOWN:
# if obj_cursor.ton > 0:
# obj_cursor.ton -= 1
#if event.key == pygame.K_RIGHT:
# if obj_cursor.trajanje > 0:
# obj_cursor.trajanje -= 1
# obj_cursor.pozicija += 1
#if event.key == pygame.K_LEFT:
# if obj_cursor.trajanje < 16:
# obj_cursor.trajanje += 1
# obj_cursor.pozicija -= 1
if event.key == pygame.K_RETURN:
if lista_nota:
x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
if x:
if ((obj_cursor.ton not in [y.ton for y in x]) and all(obj_cursor.pozicija == y.pozicija for y in x) and all(obj_cursor.trajanje == y.trajanje for y in x)):
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 2))
else:
obj_cursor.sprite = 2
else:
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 2))
else:
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 2))
#remove all notes under the cursor
if event.key == pygame.K_d:
x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
if x:
for i in x:
if i in lista_nota:
lista_nota.remove(i)
if event.key == pygame.K_0:
midi_notes = []
midiout.send_message([176, 123, 0])
#p play note as midi
if event.key == pygame.K_p:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
#midi_notes = [[i,time.clock(), 0] for i in lista_nota if findnote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
for i in lista_nota:
if (obj_cursor.pozicija+16*4) >= i.pozicija >= obj_cursor.pozicija:
midi_notes.append([i,time.clock(), 0])
#play only first voice
if event.key == pygame.K_1:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
odabrana_lista_nota = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in odabrana_lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in odabrana_lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][3],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
#play only second voice
if event.key | |
#! /usr/bin/env python
import logging
import threading
import time
import traceback
from urllib import urlopen
from autopyfactory.interfaces import WMSStatusInterface, _thread
from autopyfactory.info import WMSStatusInfo
from autopyfactory.info import WMSQueueInfo
from autopyfactory.info import SiteInfo
from autopyfactory.info import CloudInfo
import autopyfactory.utils as utils
import autopyfactory.external.panda.Client as Client
class _panda(_thread, WMSStatusInterface):
"""
-----------------------------------------------------------------------
PanDA-flavored version of WMSStatus class.
It queries the PanDA server to check the status
of the clouds, the sites and the jobs queues.
-----------------------------------------------------------------------
Public Interface:
the interfaces inherited from Thread and from WMSStatusInterface
-----------------------------------------------------------------------
"""
def __init__(self, apfqueue, config, section):
# NOTE:
# the **kw is not needed at this time,
# but we use it to keep compatibility with WMS Status Condor plugin
# However, it would allow for more than one PanDA server.
_thread.__init__(self)
apfqueue.factory.threadsregistry.add("plugin", self)
try:
self.apfqueue = apfqueue
self.log = logging.getLogger('autopyfactory.wmsstatus.%s' %apfqueue.apfqname)
self.log.debug("WMSStatusPlugin: Initializing object...")
self.maxage = self.apfqueue.fcl.generic_get('Factory', 'wmsstatus.panda.maxage', default_value=360)
self.sleeptime = self.apfqueue.fcl.getint('Factory', 'wmsstatus.panda.sleep')
self._thread_loop_interval = self.sleeptime
# current WMSStatusIfno object
self.currentcloudinfo = None
self.currentjobinfo = None
self.currentsiteinfo = None
# Using the Squid Cache when contacting the PanDA server
Client.useWebCache()
self.log.info('WMSStatusPlugin: Object initialized.')
except Exception as ex:
self.log.error("WMSStatusPlugin object initialization failed. Raising exception")
raise ex
# Using the Squid Cache when contacting the PanDA server
Client.useWebCache()
def _run(self):
self.log.debug('Starting.')
self._update()
self.log.debug('Leaving.')
def getInfo(self, queue=None):
"""
Returns current WMSStatusInfo object
If the info recorded is older than that maxage,
None is returned,
"""
self.log.debug('get: Starting with inputs maxtime=%s' % self.maxage)
if self.currentjobinfo is None:
self.log.debug('Info not initialized. Return None.')
return None
elif self.maxage > 0 and (int(time.time()) - self.currentjobinfo.lasttime) > self.maxage:
self.log.debug('Info is too old. Maxage = %d. Returning None' % self.maxage)
return None
else:
if queue:
return self.currentjobinfo[queue]
else:
self.log.debug('Leaving. Returning info with %d items' %len(self.currentjobinfo))
return self.currentjobinfo
def getCloudInfo(self, cloud=None):
"""
selects the entry corresponding to cloud
from the info retrieved from the PanDA server (as a dict)
using method userinterface.Client.getCloudSpecs()
"""
if self.currentcloudinfo is None:
self.log.debug('Info not initialized. Return None.')
return None
elif self.maxage > 0 and (int(time.time()) - self.currentcloudinfo.lasttime) > self.maxage:
self.log.debug('Info is too old. Maxage = %d. Returning None' % self.maxage)
return None
else:
if cloud:
return self.currentcloudinfo[cloud]
else:
self.log.debug('getInfo: Leaving. Returning info with %d items' %len(self.currentcloudinfo))
return self.currentcloudinfo
def getSiteInfo(self, site=None):
"""
selects the entry corresponding to sites
from the info retrieved from the PanDA server (as a dict)
using method userinterface.Client.getSiteSpecs(siteType='all')
"""
if self.currentsiteinfo is None:
self.log.debug('Info not initialized. Return None.')
return None
elif self.maxage > 0 and (int(time.time()) - self.currentsiteinfo.lasttime) > self.maxage:
self.log.debug('Info is too old. Maxage = %d. Returning None' % self.maxage)
return None
else:
if site:
return self.currentsiteinfo[site]
else:
self.log.debug('getInfo: Leaving. Returning info with %d items' %len(self.currentsiteinfo))
return self.currentsiteinfo
def _getmaxtimeinfo(self, infotype, maxtime):
"""
Grab requested info with maxtime.
Returns info if OK, None otherwise.
"""
self.log.debug('Start. infotype = %s, maxtime = %d' % (infotype, maxtime))
out = None
now = int(time.time())
delta = now - self.currentinfo.lasttime
if infotype in ['jobs','cloud','site']:
if delta < maxtime:
attrname = 'current%sinfo' % infotype
out = getattr(attrname)
else:
self.log.info("_getMaxtimeinfo: Info too old. Delta is %d maxtime is %d" % (delta,maxtime))
self.log.debug('Leaving.')
return out
def _update(self):
"""
Queries the PanDA server for updated information about
- Clouds configuration
- Sites configuration
- Jobs status per site
"""
self.log.debug('Starting.')
try:
newcloudinfo = self._updateclouds()
if newcloudinfo:
newcloudinfo.lasttime = int(time.time())
newsiteinfo = self._updatesites()
if newsiteinfo:
newsiteinfo.lasttime = int(time.time())
newjobinfo = self._updatejobs()
if newjobinfo:
newjobinfo.lasttime = int(time.time())
self.log.debug("Replacing old info with newly generated info.")
self.currentjobinfo = newjobinfo
self.currentcloudinfo = newcloudinfo
self.currentsiteinfo = newsiteinfo
except Exception as e:
self.log.error("Exception: %s" % str(e))
self.log.error("Exception: %s" % traceback.format_exc())
self.log.debug('Leaving.')
def _updateclouds(self):
"""
Client.getCloudSpecs() ->
{
'US': { 'countries': 'usatlas',
'dest': 'BNL_ATLAS_1',
'fasttrack': 'true',
'mcshare': 25,
'name': 'US',
'nprestage': 12000,
'pilotowners': '|<NAME>|<NAME>|<NAME>|<NAME>|/atlas/usatlas/Role=production|/atlas/Role=pilot|',
'relocation': '',
'server': 'pandasrv.usatlas.bnl.gov',
'sites': [ 'BNL_ATLAS_1',
'BU_ATLAS_Tier2o',
'BU_Atlas_Tier2o_Install',
'AGLT2_Install',
'IllinoisCC',
'SLACXRD_LMEM',
'OU_OCHEP_SWT2_Install',
'BNL_ATLAS_Install',
'SLACXRD',
'UTA_SWT2',
'UTA_SWT2_CVMFS',
'MWT2_UC_Install',
'BNL_ITB',
'IU_OSG',
'UC_ATLAS_MWT2_Install',
'OU_OSCER_ATLAS_Install',
'MWT2',
'Nebraska-Lincoln-red',
'BELLARMINE-ATLAS-T3',
'UC_ATLAS_MWT2',
'BNL_T3',
'SWT2_CPB',
'UC_ITB',
'UTA_SWT2_Install',
'HU_ATLAS_Tier2',
'IllinoisHEP',
'WT2_Install',
'HU_ATLAS_Tier2_Install',
'BNL_XRD',
'OU_OSCER_ATLAS_MPI',
'Nebraska-Lincoln-red_Install',
'UTD-HEP_Install',
'IllinoisHEP_Install',
'BNL_ITB_Install',
'IU_OSG_Install',
'Tufts_ATLAS_Tier3',
'OU_OCHEP_SWT2',
'MP_IllinoisHEP',
'Tufts_ATLAS_Tier3_Install',
'BNL_CVMFS_1',
'BNL_ATLAS_2',
'Nebraska-Omaha-ffgrid_Install',
'AGLT2',
'SWT2_CPB_Install',
'GLOW-ATLAS_Install',
'MWT2_UC',
'UTD-HEP',
'GLOW-ATLAS',
'Nebraska-Omaha-ffgrid',
'OU_OSCER_ATLAS',
'Hampton_T3'],
'source': 'BNL_ATLAS_1',
'status': 'online',
'tier1': 'BNL_ATLAS_1',
'tier1SE': [ 'BNLDISK',
'BNLTAPE',
'BNLPANDA',
'BNL-OSG2_MCDISK',
'BNL-OSG2_MCTAPE',
'BNL-OSG2_DATADISK',
'BNL-OSG2_DATATAPE',
'BNL-OSG2_HOTDISK'],
'transtimehi': 1,
'transtimelo': 4,
'validation': 'true',
'waittime': 0,
'weight': 2}
}
"""
before = time.time()
# get Clouds Specs
clouds_err, all_clouds_config = Client.getCloudSpecs()
delta = time.time() - before
self.log.debug('it took %s seconds to perform the query' %delta)
self.log.debug('%s seconds to perform query' %delta)
out = None
if clouds_err:
self.log.error('Client.getCloudSpecs() failed')
return None
else:
cloudsinfo = WMSStatusInfo()
for cloud in all_clouds_config.keys():
ci = CloudInfo()
cloudsinfo[cloud] = ci
attrdict = all_clouds_config[cloud]
ci.fill(attrdict)
return cloudsinfo
def _updatesites(self):
"""
Client.getSiteSpecs(siteType='all') ->
{
'BNL_ATLAS_1': { 'accesscontrol': '',
'allowdirectaccess': False,
'allowedgroups': '',
'cachedse': '',
'cloud': 'US',
'cloudlist': ['US'],
'cmtconfig': [ 'i686-slc4-gcc34-opt',
'i686-slc5-gcc43-opt'],
'comment': 'ELOG.31117',
'copysetup': '/usatlas/OSG/osg_wn_client/current/setup.sh',
'ddm': 'BNL-OSG2_DATADISK',
'defaulttoken': '',
'dq2url': '',
'gatekeeper': 'gridgk03.racf.bnl.gov',
'glexec': '',
'lfchost': 'lfc.usatlas.bnl.gov',
'lfcregister': '',
'localqueue': '',
'maxinputsize': 60000,
'maxtime': 0,
'memory': 0,
'nickname': 'BNL_ATLAS_1-condor',
'priorityoffset': '',
'queue': 'gridgk03.racf.bnl.gov/jobmanager-condor',
'releases': [ '10.0.1',
'10.0.4',
'11.0.0',
'11.0.1',
'11.0.2',
'11.0.3',
'11.0.4',
'11.0.42',
'11.0.5',
'11.3.0',
'11.5.0',
'12.0.0',
'12.0.1',
'12.0.2',
'12.0.3',
'12.0.31',
'12.0.4',
'12.0.5',
'12.0.6',
'12.0.7',
'12.0.8',
'12.0.95',
'13.0.10',
'13.0.20',
'13.0.25',
'13.0.25-slc3',
'13.0.26',
'13.0.30',
'13.0.35',
'13.0.30',
'13.0.35',
'13.0.35-slc3',
'13.0.40',
'13.2.0',
'14.0.0',
'14.0.10',
'14.1.0',
'14.2.0',
'14.2.10',
'14.2.11',
'14.2.20',
'14.2.20.bak',
'14.2.21',
'14.2.22',
'9.0.4'],
'retry': False,
'se': 'token:ATLASDATADISK:srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv2?SFN=',
'seprodpath': { 'ATLASDATADISK': '/pnfs/usatlas.bnl.gov/BNLT0D1/',
'ATLASDATATAPE': '/pnfs/usatlas.bnl.gov/BNLT1D0/',
'ATLASMCTAPE': '/pnfs/usatlas.bnl.gov/MCTAPE/'},
'setokens': { 'ATLASDATADISK': 'BNL-OSG2_DATADISK',
'ATLASDATATAPE': 'BNL-OSG2_DATATAPE',
'ATLASMCTAPE': 'BNL-OSG2_MCTAPE'},
'sitename': 'BNL_ATLAS_1',
'space': 481594,
'status': 'offline',
'statusmodtime': datetime.datetime(2011, 10, 18, 10, 45, 44),
'type': 'production',
'validatedreleases': ['True']},
}
-------------------------------------------------------------------------------------------------------
$ curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=queuedata&nickname=TEST2&pandasite=TEST2'
(lp1
(dp2
S'gatekeeper'
p3
S'gridgk01.racf.bnl.gov'
...
...
)
-------------------------------------------------------------------------------------------------------
This is what APF 1.X is getting from SchedConfig:
http://pandaserver.cern.ch:25085/cache/schedconfig/ANALY_BNL_ATLAS_1-condor.factory.json
{
"cloud": "US",
"depthboost": null,
"environ": "APP=/usatlas/OSG TMP=/tmp DATA=/usatlas/prodjob/share/",
"glexec": null,
"idlepilotsupression": null,
"jdl": "ANALY_BNL_ATLAS_1-condor",
"localqueue": null,
"maxtime": 0,
"memory": 0,
"nickname": "ANALY_BNL_ATLAS_1-condor",
"nqueue": 300,
"pilotlimit": null,
"proxy": "donothide",
"queue": "gridgk05.racf.bnl.gov/jobmanager-condor",
"site": "BNL",
"siteid": "ANALY_BNL_ATLAS_1",
"status": "online",
"system": "osg",
"transferringlimit": null
}
"""
before = time.time()
# get Sites Specs from Client.py
sites_err, all_sites_config = Client.getSiteSpecs(siteType='all')
delta = time.time() - before
self.log.debug('_updateSites: it took %s seconds to perform the query' %delta)
self.log.debug('_updateSites: %s seconds to perform query' %delta)
out = None
if sites_err:
self.log.error('Client.getSiteSpecs() failed.')
return None
else:
sitesinfo = WMSStatusInfo()
for site in all_sites_config.keys():
si = SiteInfo()
sitesinfo[site] = si
attrdict = all_sites_config[site]
si.fill(attrdict)
return sitesinfo
def _updatejobs(self):
"""
Client.getJobStatisticsPerSite(
countryGroup='',
workingGroup='',
jobType='test,prod,managed,user,panda,ddm,rc_test'
) ->
{ None: { 'activated': 0,
'assigned': 0,
'cancelled': 11632,
'defined': 2196,
'failed': 0,
'finished': 0,
'running': 0},
'AGLT2': { 'activated': 495,
'assigned': 170,
'cancelled': 1,
'failed': 15,
'finished': 114,
'holding': 9,
'running': 341,
'starting': 1,
'transferring': 16},
}
Client.getJobStatisticsWithLabel() ->
{'FZK-LCG2': {'prod_test': {'activated': 8,
'holding': 1 },
'managed': {'assigned': 98,
'running': 3541,
'transferring': 135,
'activated': 6684,
'holding': 70 },
'rc_test': {'activated': 1}
},
'BU_ATLAS_Tier2o': {'prod_test': {'running': 8,
'activated': 1,
'holding': 6},
'managed': {'defined': 33,
'transferring': 262,
'activated': 1362,
'assigned': 10,
'running': 746,
'holding': 7},
'rc_test': {'activated': 2}
}
}
"""
before = time.time()
# get Jobs Specs
#jobs_err, all_jobs_config = Client.getJobStatisticsPerSite(
# countryGroup='',
# workingGroup='',
# jobType='test,prod,managed,user,panda,ddm,rc_test,prod_test'
# )
jobs_err, all_jobs_config = Client.getJobStatisticsWithLabel()
# NOTE: reason to use getJobStatisticsWithLabel()
# is because by default PanDA does not give info on all labels.
# Jobs info for labels like "rc-test" is hidden,
# so we ask explicitly for all labels.
delta = time.time() - before
self.log.debug('_updateJobs: %s seconds to perform query' %delta)
out = None
if jobs_err:
self.log.error('Client.getJobStatisticsPerSite() failed.')
return None
self.jobsstatisticspersite2info = self.apfqueue.factory.mappingscl.section2dict('PANDAWMSSTATUS-JOBSSTATISTICSPERSITE2INFO')
self.log.debug('jobsstatisticspersite2info mappings are %s' %self.jobsstatisticspersite2info)
###self.jobsstatisticspersite2info = {'pending' : 'notready',
### 'defined' : 'notready',
### 'assigned' : 'notready',
### 'waiting' : 'notready',
### 'activated' : 'ready',
### 'starting' : 'running',
### 'sent' : 'running',
### | |
#!/usr/bin/env python
import datetime
import json
import os
import os.path
import shutil
import sys
import traceback
from distutils import log
from distutils.command.build import build as BuildCommand
from distutils.core import Command
from subprocess import check_output
from setuptools import find_packages, setup
from setuptools.command.develop import develop as DevelopCommand
from setuptools.command.sdist import sdist as SDistCommand
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
# modified from:
# https://raw.githubusercontent.com/getsentry/sentry/055cfe74bb88bbb2083f37f5df21b91d0ef4f9a7/src/sentry/utils/distutils/commands/base.py
class BaseBuildCommand(Command):
    """Shared plumbing for distutils commands that build JS/static assets.

    Subclasses implement _build() and override get_dist_paths() /
    get_manifest_additions() to describe what they generate. The command
    decides between in-place and build-lib output (see finalize_options),
    runs the npm build, and registers generated files with sdist.
    """

    user_options = [
        ("work-path=", "w", "The working directory for source files. Defaults to ."),
        ("build-lib=", "b", "directory for script runtime modules"),
        (
            "inplace",
            "i",
            "ignore build-lib and put compiled javascript files into the source "
            + "directory alongside your pure Python modules",
        ),
        (
            "force",
            "f",
            "Force rebuilding of static content. Defaults to rebuilding on version "
            "change detection.",
        ),
    ]

    boolean_options = ["force"]

    def initialize_options(self):
        # Defaults; finalize_options() resolves build_lib and inplace.
        self.build_lib = None
        self.force = None
        self.work_path = os.path.join(ROOT_PATH, "src/dispatch/static/dispatch")
        self.inplace = None

    def get_root_path(self):
        """Return the directory containing the entry-point script."""
        return os.path.abspath(os.path.dirname(sys.modules["__main__"].__file__))

    def get_dist_paths(self):
        """Directories produced by the build (none for the base class)."""
        return []

    def get_manifest_additions(self):
        """Generated files to add to the sdist manifest (none by default)."""
        return []

    def finalize_options(self):
        # This requires some explanation. Basically what we want to do
        # here is to control if we want to build in-place or into the
        # build-lib folder. Traditionally this is set by the `inplace`
        # command line flag for build_ext. However as we are a subcommand
        # we need to grab this information from elsewhere.
        #
        # An in-place build puts the files generated into the source
        # folder, a regular build puts the files into the build-lib
        # folder.
        #
        # The following situations we need to cover:
        #
        #   command                         default in-place
        #   setup.py build_js               0
        #   setup.py build_ext              value of in-place for build_ext
        #   setup.py build_ext --inplace    1
        #   pip install --editable .        1
        #   setup.py install                0
        #   setup.py sdist                  0
        #   setup.py bdist_wheel            0
        #
        # The way this is achieved is that build_js is invoked by two
        # subcommands: bdist_ext (which is in our case always executed
        # due to a custom distribution) or sdist.
        #
        # Note: at one point install was an in-place build but it's not
        # quite sure why. In case a version of install breaks again:
        # installations via pip from git URLs definitely require the
        # in-place flag to be disabled. So we might need to detect
        # that separately.
        #
        # To find the default value of the inplace flag we inspect the
        # sdist and build_ext commands.
        sdist = self.distribution.get_command_obj("sdist")
        build_ext = self.get_finalized_command("build_ext")
        # If we are not decided on in-place we are inplace if either
        # build_ext is inplace or we are invoked through the install
        # command (easiest check is to see if it's finalized).
        if self.inplace is None:
            self.inplace = (build_ext.inplace or sdist.finalized) and 1 or 0
        # If we're coming from sdist, clear the hell out of the dist
        # folder first.
        if sdist.finalized:
            for path in self.get_dist_paths():
                try:
                    shutil.rmtree(path)
                except (OSError, IOError):
                    pass
        # In place means build_lib is src. We also log this.
        if self.inplace:
            log.debug("in-place js building enabled")
            self.build_lib = "src"
        # Otherwise we fetch build_lib from the build command.
        else:
            self.set_undefined_options("build", ("build_lib", "build_lib"))
            log.debug("regular js build: build path is %s" % self.build_lib)
        if self.work_path is None:
            self.work_path = self.get_root_path()

    def _needs_built(self):
        """Return True when any expected dist directory is missing."""
        for path in self.get_dist_paths():
            if not os.path.isdir(path):
                return True
        return False

    def _setup_git(self):
        """Initialize/update git submodules when building from a checkout."""
        work_path = self.work_path
        if os.path.exists(os.path.join(work_path, ".git")):
            log.info("initializing git submodules")
            self._run_command(["git", "submodule", "init"])
            self._run_command(["git", "submodule", "update"])

    def _setup_js_deps(self):
        """Check for node, then install npm dependencies and run the build."""
        node_version = None
        try:
            node_version = self._run_command(["node", "--version"]).decode("utf-8").rstrip()
        except OSError:
            log.fatal("Cannot find node executable. Please install node" " and try again.")
            sys.exit(1)
        # BUG FIX: the old check was `node_version[2] is not None`, which
        # compares the third *character* of the version string to None -- it
        # is always true (and IndexErrors on strings shorter than 3 chars).
        # Test the string itself instead.
        if node_version:
            log.info("using node ({0})".format(node_version))
            self._run_npm_command(["install"])
            self._run_npm_command(["run", "build", "--quiet"])

    def _run_command(self, cmd, env=None):
        """Run *cmd* in the work path, returning its stdout; log failures."""
        cmd_str = " ".join(cmd)
        log.debug(f"running [{cmd_str}]")
        try:
            return check_output(cmd, cwd=self.work_path, env=env)
        except Exception:
            log.error(f"command failed [{cmd_str}] via [{self.work_path}]")
            raise

    def _run_npm_command(self, cmd, env=None):
        self._run_command(["npm"] + cmd, env=env)

    def update_manifests(self):
        # if we were invoked from sdist, we need to inform sdist about
        # which files we just generated. Otherwise they will be missing
        # in the manifest. This adds the files for what webpack generates
        # plus our own assets.json file.
        sdist = self.distribution.get_command_obj("sdist")
        if not sdist.finalized:
            return
        # The path down from here only works for sdist:
        # Use the underlying file list so that we skip the file-exists
        # check which we do not want here.
        files = sdist.filelist.files
        base = os.path.abspath(".")
        # We need to split off the local parts of the files relative to
        # the current folder. This will chop off the right path for the
        # manifest.
        for path in self.get_dist_paths():
            for dirname, _, filenames in os.walk(os.path.abspath(path)):
                for filename in filenames:
                    filename = os.path.join(dirname, filename)
                    files.append(filename[len(base):].lstrip(os.path.sep))
        for file in self.get_manifest_additions():
            files.append(file)

    def run(self):
        """Build only when forced or stale, then update the sdist manifest."""
        if self.force or self._needs_built():
            self._setup_git()
            self._setup_js_deps()
            self._build()
        self.update_manifests()
class BuildAssetsCommand(BaseBuildCommand):
    """Build the front-end static assets and record a version manifest.

    Assets are rebuilt when the package version/build changes (or when
    --force is given); an ``assets.json`` manifest describing what was
    built is written alongside the compiled output.
    """

    user_options = BaseBuildCommand.user_options + [
        (
            "asset-json-path=",
            None,
            "Relative path for JSON manifest. Defaults to {dist_name}/assets.json",
        ),
        (
            "inplace",
            "i",
            "ignore build-lib and put compiled javascript files into the source "
            + "directory alongside your pure Python modules",
        ),
        (
            "force",
            "f",
            "Force rebuilding of static content. Defaults to rebuilding on version "
            "change detection.",
        ),
    ]

    description = "build static media assets"

    def initialize_options(self):
        # Front-end sources live in the static dispatch package directory.
        self.work_path = os.path.join(ROOT_PATH, "src/dispatch/static/dispatch")
        self.asset_json_path = os.path.join(self.work_path, "assets.json")
        BaseBuildCommand.initialize_options(self)

    def get_dist_paths(self):
        # BUG FIX: this previously joined with the absolute path "/dist".
        # os.path.join discards every component before an absolute one,
        # so the result was literally "/dist" instead of the dist folder
        # under work_path.
        return [os.path.join(self.work_path, "dist")]

    def get_manifest_additions(self):
        # Our own manifest file must ship with the sdist as well.
        return (self.asset_json_path,)

    def _get_package_version(self):
        """
        Attempt to get the most correct current version of Dispatch.

        Tries the importable 'dispatch' module first and falls back to a
        previously written assets.json manifest.  Either value in the
        returned dict may be None if neither source is available.
        """
        pkg_path = os.path.join(ROOT_PATH, "src")
        sys.path.insert(0, pkg_path)
        try:
            import dispatch
        except Exception:
            version = None
            build = None
        else:
            log.info(f"pulled version information from 'dispatch' module. {dispatch.__file__}")
            version = self.distribution.get_version()
            build = dispatch.__build__
        finally:
            sys.path.pop(0)

        if not (version and build):
            json_path = self.get_asset_json_path()
            try:
                with open(json_path) as fp:
                    data = json.loads(fp.read())
            except Exception:
                # Best effort: leave version/build unset if the manifest
                # is missing or unreadable.
                pass
            else:
                log.info("pulled version information from '{}'".format(json_path))
                version, build = data["version"], data["build"]

        return {"version": version, "build": build}

    def _needs_static(self, version_info):
        """Return True when the built assets are missing or stale."""
        json_path = self.get_asset_json_path()
        if not os.path.exists(json_path):
            return True

        with open(json_path) as fp:
            data = json.load(fp)

        if data.get("version") != version_info.get("version"):
            return True
        if data.get("build") != version_info.get("build"):
            return True
        return False

    def _needs_built(self):
        """A build is needed if the base command says so or assets are stale."""
        if BaseBuildCommand._needs_built(self):
            return True
        version_info = self._get_package_version()
        return self._needs_static(version_info)

    def _build(self):
        """Compile the static assets and write the version manifest."""
        version_info = self._get_package_version()
        log.info(
            "building assets for {} v{} (build {})".format(
                self.distribution.get_name(),
                version_info["version"] or "UNKNOWN",
                version_info["build"] or "UNKNOWN",
            )
        )
        if not version_info["version"] or not version_info["build"]:
            # NOTE(review): deliberately does not abort -- the build
            # proceeds with UNKNOWN version information.
            log.fatal("Could not determine dispatch version or build")
            # sys.exit(1)

        try:
            self._build_static()
        except Exception:
            traceback.print_exc()
            log.fatal("unable to build Dispatch's static assets!")
            sys.exit(1)

        log.info("writing version manifest")
        manifest = self._write_version_file(version_info)
        log.info("recorded manifest\n{}".format(json.dumps(manifest, indent=2)))

    def _build_static(self):
        # By setting NODE_ENV=production, a few things happen
        # * Vue optimizes out certain code paths
        # * Webpack will add version strings to built/referenced assets
        env = dict(os.environ)
        env["DISPATCH_STATIC_DIST_PATH"] = self.dispatch_static_dist_path
        env["NODE_ENV"] = "production"
        # TODO: Our JS builds should not require 4GB heap space
        env["NODE_OPTIONS"] = (
            (env.get("NODE_OPTIONS", "") + " --max-old-space-size=4096")
        ).lstrip()
        # NOTE(review): the webpack invocation is commented out, so this
        # method currently only prepares the environment -- confirm this
        # is intentional.
        # self._run_npm_command(["webpack", "--bail"], env=env)

    def _write_version_file(self, version_info):
        """Write assets.json for this build; returns the manifest dict."""
        manifest = {
            "createdAt": datetime.datetime.utcnow().isoformat() + "Z",
            "version": version_info["version"],
            "build": version_info["build"],
        }
        with open(self.get_asset_json_path(), "w") as fp:
            json.dump(manifest, fp)
        return manifest

    @property
    def dispatch_static_dist_path(self):
        # NOTE(review): this path segment ("src/static/dispatch") differs
        # from work_path ("src/dispatch/static/dispatch") -- verify this
        # mismatch is intentional.
        return os.path.abspath(os.path.join(self.build_lib, "src/static/dispatch/dist"))

    def get_asset_json_path(self):
        # asset_json_path is absolute whenever ROOT_PATH is absolute, in
        # which case os.path.join ignores build_lib entirely.
        return os.path.abspath(os.path.join(self.build_lib, self.asset_json_path))
# Package version; the ".dev0" suffix marks an unreleased development build.
VERSION = "0.1.0.dev0"
# "Light" builds (DISPATCH_LIGHT_BUILD=1) skip the front-end asset pipeline.
IS_LIGHT_BUILD = os.environ.get("DISPATCH_LIGHT_BUILD") == "1"
def get_requirements(env):
    """Read requirements-<env>.txt and return its requirement specifiers.

    Blank lines and comment lines (starting with "#") are skipped; each
    remaining line is stripped of surrounding whitespace.  (The previous
    version kept empty strings from blank lines and tested for "#"
    before stripping, so indented comments leaked through.)
    """
    with open("requirements-{}.txt".format(env)) as fp:
        stripped = (line.strip() for line in fp)
        return [line for line in stripped if line and not line.startswith("#")]
# Resolved at import time; referenced by the setup() call below.
install_requires = get_requirements("base")
dev_requires = get_requirements("dev")
class DispatchSDistCommand(SDistCommand):
    """sdist variant that also builds front-end assets (unless light build)."""

    # If we are not a light build we want to also execute build_assets as
    # part of our source build pipeline.
    if not IS_LIGHT_BUILD:
        sub_commands = SDistCommand.sub_commands + [("build_assets", None)]
class DispatchBuildCommand(BuildCommand):
    """build variant that compiles static assets before the normal build."""

    def run(self):
        # Asset compilation is skipped entirely for light builds.
        if not IS_LIGHT_BUILD:
            self.run_command("build_assets")
        super().run()
class DispatchDevelopCommand(DevelopCommand):
    """develop variant that compiles static assets after installation."""

    def run(self):
        super().run()
        # Asset compilation is skipped entirely for light builds.
        if not IS_LIGHT_BUILD:
            self.run_command("build_assets")
# Map setuptools command names to our asset-aware overrides.
cmdclass = {
    "sdist": DispatchSDistCommand,
    "develop": DispatchDevelopCommand,
    "build": DispatchBuildCommand,
    "build_assets": BuildAssetsCommand,
}
# Get the long description from the README file
# (read as UTF-8 so non-ASCII characters in the README don't break setup).
with open(os.path.join(ROOT_PATH, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
setup(
name="dispatch",
version=VERSION,
long_description=long_description,
long_description_content_type="text/markdown",
author="Netflix, Inc.",
classifiers=[ # Optional
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
package_dir={"": "src"},
packages=find_packages("src"),
python_requires=">=3.7",
install_requires=install_requires,
extras_require={"dev": dev_requires},
cmdclass=cmdclass,
zip_safe=False,
include_package_data=True,
entry_points={
"console_scripts": ["dispatch = dispatch.cli:entrypoint"],
"dispatch.plugins": [
"kandbox_rllib_env_proxy = dispatch.plugins.kandbox_planner.env.rllib_env_job2slot:KPlannerJob2SlotEnvProxy",
"kandbox_heuristic_realtime_agent = dispatch.plugins.kandbox_planner.agent.kandbox_heuristic_realtime_agent:KandboxHeuristicRealtimeAgentPlugin",
"kandbox_rule_working_hour = dispatch.plugins.kandbox_planner.rule.working_hour:KandboxRulePluginWithinWorkingHour",
"kandbox_rule_travel_time = dispatch.plugins.kandbox_planner.rule.travel_time:KandboxRulePluginSufficientTravelTime",
"kandbox_rule_requested_skills = dispatch.plugins.kandbox_planner.rule.requested_skills:KandboxRulePluginRequestedSkills",
"kandbox_rule_lunch_break = dispatch.plugins.kandbox_planner.rule.lunch_break:KandboxRulePluginLunchBreak",
"ortools_n_days_planner | |
set of baseIds for the component instances here.
@param components: List of Component object instances with baseId, window, and max window size set
@return: List of tuples of form (instance name, base id, window size)
"""
# Assign the assembly baseId and window size or use default if none exists
if assembly_base_id is None:
assembly_base_id = self.__config.get("assembly", "baseID")
PRINT.info(
"WARNING: No assembly base Id set, defaulting to %s" % assembly_base_id
)
if assembly_window is None:
assembly_window = self.__config.get("assembly", "window")
PRINT.info(
"WARNING: No assembly base Id window size set, defaulting to %s"
% assembly_window
)
out_base_ids_list = []
assembly_base_id = int(assembly_base_id)
assembly_window = int(assembly_window)
initial_comp_with_ID = (
[]
) # List of component tuples that have base IDS specified in the topology model
initial_comp_without_ID = (
[]
) # Lit of component tuples that do not have base IDS specified in the topology model
id = assembly_base_id
if id <= 0:
id = 1
window = assembly_window
# Pass 1 - Populate initial_comp lists with their respective items
for inst in instances:
t = self.__set_base_id_list(id, window, inst)
if inst.get_base_id() is None:
initial_comp_without_ID.append(t)
else:
initial_comp_with_ID.append(t)
# Pass 2 - Sort with_ID list by base ID and without_ID list by window size
initial_comp_with_ID.sort(key=lambda x: x[1])
initial_comp_without_ID.sort(key=lambda x: x[2])
# Pass 3 - Check with_ID list to ensure no base / window IDS collide
prev_id = 0
prev_window = 0
prev_name = "NONE"
for t in initial_comp_with_ID:
if t[1] < prev_id + prev_window:
err = "Component {} has a base ID {} which collides with the allocated IDs for component {} (base ID {} , window ID {})".format(
t[0], t[1], prev_name, prev_id, prev_window
)
PRINT.info(err)
raise Exception(err)
# Code below auto adjusts user specified IDS
# if(t[1] < prev_id + prev_window):
# t[1] = prev_id + prev_window
prev_name = t[0]
prev_id = t[1]
prev_window = t[2]
# Pass 4 - Merge ID lists
prev_id = id
prev_window = 0
with_ID_obj = None
without_ID_obj = None
while True:
if (
len(initial_comp_with_ID) == 0
and len(initial_comp_without_ID) == 0
and not with_ID_obj
and not without_ID_obj
):
break
if len(initial_comp_with_ID) > 0 and with_ID_obj is None:
with_ID_obj = initial_comp_with_ID.pop(0)
if len(initial_comp_without_ID) > 0 and without_ID_obj is None:
without_ID_obj = initial_comp_without_ID.pop(0)
next_poss_id = (
prev_id + prev_window
) # The next possible id that can be taken
if (
with_ID_obj is None and without_ID_obj is not None
): # If there is nothing in the with ID list, but items exist in the without ID list
without_ID_obj[1] = next_poss_id
out_base_ids_list.append(without_ID_obj)
without_ID_obj = None
elif (
with_ID_obj is not None and without_ID_obj is None
): # If items exist in the with ID list but don't in the without ID list
out_base_ids_list.append(with_ID_obj)
with_ID_obj = None
else: # if both objs exist
if next_poss_id + without_ID_obj[2] <= with_ID_obj[1]:
without_ID_obj[1] = next_poss_id
out_base_ids_list.append(without_ID_obj)
without_ID_obj = None
else:
out_base_ids_list.append(with_ID_obj)
with_ID_obj = None
prev_id = out_base_ids_list[-1][1]
prev_window = out_base_ids_list[-1][2]
# Pass 5 - Save and Print table
save_buffer = ""
prev = None
act_wind = 0
for t in out_base_ids_list:
if prev is not None:
# pylint: disable=E1136
act_wind = t[1] - prev[1]
save_buffer = self.__print_base_id_table(prev, act_wind, save_buffer)
prev = t
save_buffer = self.__print_base_id_table(prev, "inf.", save_buffer)
save_buffer = self.__print_base_id_table_comments(save_buffer)
if generate_list_file:
csv_removed_from_path_name = xml_file_path.replace(".XML", "")
csv_removed_from_path_name = csv_removed_from_path_name.replace(".xml", "")
save_log_file_path = csv_removed_from_path_name + "_IDTableLog.txt"
save_log_file = open(save_log_file_path, "w")
save_log_file.write(save_buffer)
save_log_file.close()
return out_base_ids_list
def __print_base_id_table_comments(self, save_buffer):
    """Print (and append to save_buffer) the legend below the ID table.

    Renders one "header - description" entry per column from
    self.__table_info, word-wrapping long descriptions to the table
    width, framed above and below by dashed rules.  Returns the updated
    save_buffer.
    """
    # First find the table length and the largest length of a column header
    tableSize = 0
    largestColHeader = 0
    for header in self.__table_info:
        headerLen = len(header[0])
        # Each column occupies its header text plus header[1] padding
        # spaces on both sides.
        tableSize += headerLen + 2 * header[1]
        if headerLen > largestColHeader:
            largestColHeader = headerLen
    print_item = "-" * (tableSize + 4)
    PRINT.info(print_item)
    save_buffer += print_item + "\n"
    # Continuation lines are indented past the longest header name.
    tabLen = largestColHeader + 3
    for header in self.__table_info:
        headerLen = len(header[0])
        desc = header[0] + " " * (largestColHeader - headerLen) + " - " + header[2]
        firstRun = True
        # Consume desc one table-width slice at a time until empty.
        while desc != "":
            if firstRun:
                # The first slice fills the whole table width.
                outString = desc[0:tableSize]
                desc = desc[tableSize:]
                firstRun = False
            else:
                # Later slices are shortened to leave room for the indent.
                newSize = tableSize - tabLen
                outString = " " * tabLen + desc[0:newSize]
                desc = desc[newSize:]
            print_item = (
                "| " + outString + ((tableSize - len(outString)) * " ") + " |"
            )
            PRINT.info(print_item)
            save_buffer += print_item + "\n"
    print_item = "-" * (tableSize + 4)
    PRINT.info(print_item)
    save_buffer += print_item + "\n"
    return save_buffer
def __print_base_id_table(self, base_id_tuple, actual_window_size, save_buffer):
    """
    Routine prints one base-ID tuple as a table row.
    If base_id_tuple is None, the routine prints the header row instead.
    Returns save_buffer with the printed line(s) appended.
    """
    if base_id_tuple is None:
        # Header row: each column name padded by header[1] spaces on
        # both sides.
        print_item = " | ".join(
            header[1] * " " + header[0] + header[1] * " "
            for header in self.__table_info
        )
        save_buffer += print_item + "\n"
        PRINT.info(print_item)
    else:
        ns = ""
        # "(D)" marks rows whose instance had no explicit max-ID window
        # (i.e. a default/calculated window was used).
        if base_id_tuple[3].get_base_max_id_window() is None:
            ns = " (D)"
        data_row = []
        data_row.append(str(base_id_tuple[0]))
        data_row.append(
            str(base_id_tuple[1]) + " (" + str(hex(base_id_tuple[1])) + ")"
        )
        data_row.append(str(base_id_tuple[2]) + ns)
        data_row.append(str(actual_window_size))
        data_row.append(str(base_id_tuple[4]))
        data_row.append(str(base_id_tuple[5]))
        # Width of each column: header text plus padding on both sides.
        table_header_size = [
            len(header[0]) + 2 * header[1] for header in self.__table_info
        ]
        # Emit physical rows until every cell's text is fully consumed,
        # wrapping any cell wider than its column.
        while True:
            # Check if the items in the data_row have a length of zero
            all_items_length_zero = True
            for data_item in data_row:
                if len(data_item) != 0:
                    all_items_length_zero = False
            if all_items_length_zero:
                break
            row_string = ""
            for i in range(len(self.__table_info)):
                # Take the next column-width slice of this cell and keep
                # the remainder for the following wrapped row.
                curr_write_string = data_row[i][0 : table_header_size[i]]
                data_row[i] = data_row[i][table_header_size[i] :]
                if i != 0:
                    row_string += " | "
                format_string = "{0:^" + str(table_header_size[i]) + "}"
                row_string += format_string.format(curr_write_string)
            save_buffer += row_string + "\n"
            PRINT.info(row_string)
    return save_buffer
def __set_base_id_list(self, id, size, inst):
"""
Routine to set up the base id and window size with actual or instance set values.
Routine also checks window size against component max IDs needed if they are found.
"""
comp = inst.get_component_object()
# set instance name
n = inst.get_name()
#
"""
Logic for calculating base ids
1) If the user did not specify the base ID within an instance, set it to None
2) If the user did specify the base ID within an instance, check if it is greater than the base ID for the entire topology
a) if it is greater, use the base ID from the instance
b) if it is not greater, add the base ID from the instance and the base ID from the topology model and use the sum
"""
if inst.get_base_id() is None:
b = None
else:
if id > abs(int(inst.get_base_id())):
b = abs(int(inst.get_base_id())) + id
PRINT.info(
"WARNING: {} instance adding instance supplied base ID to the topology supplied base ID (New ID is {}) because instance supplied base ID is smaller than the topology supplied base ID.".format(
n, b
)
)
else:
b = abs(int(inst.get_base_id()))
PRINT.info("WARNING: %s instance resetting base id to %d" % (n, b))
#
# set window size or override it on instance basis
component_calculated_window_range = self.__compute_component_base_id_range(comp)
"""
Note: The calculated window range is really the largest ID (plus one) found in the component object.
Logic for calculating window size
1) If user specifies window size in instance tag, use that.
2) If the user does not specify the window size in the instance tag, use the larger of the default window size and the calculated window size
3) If the calculated window size is larger than the new window size, thrown an error
"""
if inst.get_base_id_window() is not None:
w = abs(int(inst.get_base_id_window()))
PRINT.info(
"{} instance resetting base id window range to instance specified size ({})".format(
n, w
)
)
else:
if size > component_calculated_window_range:
w = size
PRINT.info(
"{} instance resetting base id window range to default topology size ({})".format(
n, w
)
)
else:
w = component_calculated_window_range
PRINT.info(
"{} instance resetting base id window range to size calculated from the component XML file ({})".format(
n, w
)
)
if (
component_calculated_window_range is not None
and w < component_calculated_window_range
):
PRINT.info(
"ERROR: The specified window range for component {} is {}, which is smaller than the calculated window | |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import scipy.spatial
import numpy as np
import scipy as sp
import sys
class SelfOrganizingMap:
    """
    A self-organizing map: the weights of the output neurons adapt to
    the input vectors presented from the input neurons.

    Attributes:
        input_count: Number of input neurons (vector dimensionality).
        output_count: Number of output neurons.
        weights: (output_count x input_count) weight matrix.
        distance: Distance function used to compare vectors.
    """

    def __init__(self, input_count, output_count):
        """
        The constructor.
        :param input_count: Number of input neurons
        :param output_count: Number of output neurons
        """
        self.input_count = input_count
        self.output_count = output_count
        self.weights = np.zeros([self.output_count, self.input_count])
        self.distance = sp.spatial.distance.euclidean

    def calculate_error(self, data):
        """
        Estimate the network error as the worst best-matching-unit
        distance over the data set, scaled down by 100.
        :param data: Iterable of input vectors.
        :return: The error estimate.
        """
        bmu = BestMatchingUnit(self)
        # BUG FIX: the ported BestMatchingUnit did not define reset();
        # clear the tracked worst distance directly instead of calling a
        # nonexistent method.
        bmu.worst_distance = 0

        # Determine the BMU for each training element.
        for input in data:
            bmu.calculate_bmu(input)

        # update the error
        return bmu.worst_distance / 100.0

    def classify(self, input):
        """
        Return the index of the output neuron whose weight vector is
        closest to the given input vector.
        :param input: The input vector.
        :return: The winning output neuron index.
        :raises Exception: If the input vector is longer than input_count.
        """
        if len(input) > self.input_count:
            raise Exception("Can't classify SOM with input size of {} "
                            "with input data of count {}".format(self.input_count, len(input)))

        # BUG FIX: sys.maxfloat does not exist; start from +inf so any
        # real distance replaces it.
        min_dist = float("inf")
        result = -1

        for i in range(self.output_count):
            # BUG FIX: self.distance is a plain callable
            # (scipy.spatial.distance.euclidean); it has no .calculate().
            dist = self.distance(input, self.weights[i])
            if dist < min_dist:
                min_dist = dist
                result = i

        return result

    def reset(self):
        """Randomize the weight matrix uniformly in [-1, 1)."""
        self.weights = (np.random.rand(self.weights.shape[0], self.weights.shape[1]) * 2.0) - 1
class BestMatchingUnit:
    """
    The "Best Matching Unit" or BMU is a very important concept in the training
    for a SOM. The BMU is the output neuron that has weight connections to the
    input neurons that most closely match the current input vector. This neuron
    (and its "neighborhood") are the neurons that will receive training.

    This class also tracks the worst distance (of all BMU's). This gives some
    indication of how well the network is trained, and thus becomes the "error"
    of the entire network.
    """

    def __init__(self, som):
        """
        Construct a BestMatchingUnit class. The training class must be provided.
        :param som: The SOM to evaluate.
        """
        # The owner of this class.
        self.som = som

        # What is the worst BMU distance so far, this becomes the error for the
        # entire SOM.
        self.worst_distance = 0

    def reset(self):
        """
        Clear the tracked worst distance.  BUG FIX: this method was
        missing from the port even though SelfOrganizingMap.calculate_error
        calls bmu.reset().
        """
        self.worst_distance = 0

    def calculate_bmu(self, input):
        """
        Calculate the best matching unit (BMU). This is the output neuron that
        has the lowest Euclidean distance to the input vector.
        :param input: The input vector.
        :return: The output neuron number that is the BMU.
        :raises Exception: If the input vector is longer than the SOM's
            input count.
        """
        result = 0

        if len(input) > self.som.input_count:
            raise Exception(
                "Can't train SOM with input size of {} with input data of count {}.".format(self.som.input_count,
                                                                                            len(input)))

        # Track the lowest distance so far.
        lowest_distance = float("inf")

        for i in range(self.som.output_count):
            distance = self.calculate_euclidean_distance(self.som.weights, input, i)

            # Track the lowest distance, this is the BMU.
            if distance < lowest_distance:
                lowest_distance = distance
                result = i

        # Track the worst distance, this is the error for the entire network.
        if lowest_distance > self.worst_distance:
            self.worst_distance = lowest_distance

        return result

    def calculate_euclidean_distance(self, matrix, input, output_neuron):
        """
        Calculate the Euclidean distance for the specified output neuron and the
        input vector. This is the square root of the squares of the differences
        between the weight and input vectors.
        :param matrix: The matrix to get the weights from.
        :param input: The input vector.
        :param output_neuron: The neuron we are calculating the distance for.
        :return: The Euclidean distance.
        """
        # (Removed an unused "result = 0" local left over from the port.)
        diff = input - matrix[output_neuron]
        return np.sqrt(sum(diff * diff))
class BasicTrainSOM:
"""
This class implements competitive training, which would be used in a
winner-take-all neural network, such as the self organizing map (SOM). This
is an unsupervised training method, no ideal data is needed on the training
set. If ideal data is provided, it will be ignored.
Training is done by looping over all of the training elements and calculating
a "best matching unit" (BMU). This BMU output neuron is then adjusted to
better "learn" this pattern. Additionally, this training may be applied to
other "nearby" output neurons. The degree to which nearby neurons are update
is defined by the neighborhood function.
A neighborhood function is required to determine the degree to which
neighboring neurons (to the winning neuron) are updated by each training
iteration.
Because this is unsupervised training, calculating an error to measure
progress by is difficult. The error is defined to be the "worst", or longest,
Euclidean distance of any of the BMU's. This value should be minimized, as
learning progresses.
Because only the BMU neuron and its close neighbors are updated, you can end
up with some output neurons that learn nothing. By default these neurons are
not forced to win patterns that are not represented well. This spreads out
the workload among all output neurons. This feature is not used by default,
but can be enabled by setting the "forceWinner" property.
"""
def __init__(self, network, learning_rate, training, neighborhood):
    """
    Construct the competitive (winner-take-all) trainer.
    :param network: The SOM network being trained.
    :param learning_rate: Degree to which weight changes are applied.
    :param training: The training data.
    :param neighborhood: Neighborhood function giving the degree to which
        neurons near the winner are also trained.
    """
    # The neighborhood function to use to determine to what degree a neuron
    # should be "trained".
    self.neighborhood = neighborhood

    # The learning rate. To what degree should changes be applied.
    self.learning_rate = learning_rate

    # The network being trained.
    self.network = network

    # How many neurons in the input layer.
    self.input_neuron_count = network.input_count

    # How many neurons in the output layer.
    self.output_neuron_count = network.output_count

    # Utility class used to determine the BMU.
    self.bmu_util = BestMatchingUnit(network)

    # Correction matrix.
    self.correction_matrix = np.zeros([network.output_count, network.input_count])

    # True if a winner is to be forced; see the class description or the
    # _force_winners method.  (Note: contrary to the original comment,
    # this is initialized to False.)
    self.force_winner = False

    # When used with autodecay, this is the starting learning rate.
    self.start_rate = 0

    # When used with autodecay, this is the ending learning rate.
    self.end_rate = 0

    # When used with autodecay, this is the starting radius.
    self.start_radius = 0

    # When used with autodecay, this is the ending radius.
    self.end_radius = 0

    # This is the current autodecay learning rate.
    self.auto_decay_rate = 0

    # This is the current autodecay radius.
    self.auto_decay_radius = 0

    # The current radius.
    self.radius = 0

    # Training data.
    self.training = training
def _apply_correction(self):
    """
    Apply the corrections computed during this training iteration to
    the network's weight matrix, in place.
    """
    self.network.weights[:] = self.correction_matrix
def auto_decay(self):
    """
    Apply one step of automatic decay to the radius and the learning
    rate; should be called each iteration when autodecay is desired.
    """
    if self.end_radius < self.radius:
        self.radius = self.radius + self.auto_decay_radius
    if self.end_rate < self.learning_rate:
        self.learning_rate = self.learning_rate + self.auto_decay_rate
    self.neighborhood.radius = self.radius
def copy_input_pattern(self, matrix, output_neuron, input):
    """
    Copy *input* into the given row of the weight matrix so the output
    neuron learns this pattern "exactly" (used when forcing a winner).
    :param matrix: The matrix that is the target of the copy.
    :param output_neuron: The row (output neuron) to overwrite.
    :param input: The input pattern to copy.
    """
    matrix[output_neuron] = input
def decay(self, decay_rate, decay_radius):
    """
    Shrink the radius and the learning rate by the given fractions.
    :param decay_rate: The percent to decay the learning rate by.
    :param decay_radius: The percent to decay the radius by.
    """
    self.radius = self.radius * (1.0 - decay_radius)
    self.learning_rate = self.learning_rate * (1.0 - decay_rate)
    self.neighborhood.radius = self.radius
def _determine_new_weight(self, weight, input, currentNeuron, bmu):
    """
    Compute the adjusted weight for a single neuron during a training
    iteration.
    :param weight: The starting weight.
    :param input: The input to this neuron.
    :param currentNeuron: The neuron whose weight is being updated.
    :param bmu: The neuron that "won", the best matching unit.
    :return: The new weight value.
    """
    influence = self.neighborhood.fn(currentNeuron, bmu)
    return weight + influence * self.learning_rate * (input - weight)
def _force_winners(self, matrix, won, least_represented):
"""
Force any neurons that did not win to off-load patterns | |
self.assertEquals('bad_url',
dos.get_url_domain('192.168.0.1'))
def testCaching(self):
    """Tests that cache eviction works properly."""
    dos._DOMAIN_CACHE.clear()
    saved_size = dos.DOMAIN_CACHE_SIZE
    try:
        # Shrink the cache limit, then overfill it directly.
        dos.DOMAIN_CACHE_SIZE = 2
        dos._DOMAIN_CACHE['http://a.example.com/stuff'] = 'a.example.com'
        dos._DOMAIN_CACHE['http://b.example.com/stuff'] = 'b.example.com'
        dos._DOMAIN_CACHE['http://c.example.com/stuff'] = 'c.example.com'
        self.assertEquals(3, len(dos._DOMAIN_CACHE))
        # Hitting an existing entry does not trigger eviction.
        self.assertEquals('c.example.com',
                          dos.get_url_domain('http://c.example.com/stuff'))
        self.assertEquals(3, len(dos._DOMAIN_CACHE))
        # Inserting a new entry over the limit clears the contents.
        self.assertEquals('d.example.com',
                          dos.get_url_domain('http://d.example.com/stuff'))
        self.assertEquals(1, len(dos._DOMAIN_CACHE))
    finally:
        dos.DOMAIN_CACHE_SIZE = saved_size
################################################################################
class OffsetOrAddTest(unittest.TestCase):
    """Tests for the offset_or_add function."""

    def setUp(self):
        """Sets up the test harness.

        self.offsets / self.adds are generators that yield, in order, the
        fake offset_multi / add_multi implementations to use for each
        successive call made by the code under test.
        """
        self.offsets = None
        # Use the builtin next() instead of the Python-2-only generator
        # method .next(), so this fixture also runs under Python 3.
        self.offset_multi = lambda *a, **k: next(self.offsets)(*a, **k)
        self.adds = None
        self.add_multi = lambda *a, **k: next(self.adds)(*a, **k)

    def testAlreadyExist(self):
        """Tests when the keys already exist and can just be added to."""
        def offset_multi():
            yield lambda *a, **k: {'one': 2, 'three': 4}
        self.offsets = offset_multi()
        self.assertEquals(
            {'one': 2, 'three': 4},
            dos.offset_or_add({'blue': 15, 'red': 10}, 5,
                              offset_multi=self.offset_multi,
                              add_multi=self.add_multi))

    def testKeysAdded(self):
        """Tests when some keys need to be re-added."""
        def offset_multi():
            yield lambda *a, **k: {'one': None, 'three': 4, 'five': None}
        self.offsets = offset_multi()
        def add_multi():
            def run(adds, **kwargs):
                self.assertEquals({'one': 5, 'five': 10}, adds)
                return []
            yield run
        self.adds = add_multi()
        self.assertEquals(
            {'one': 5, 'three': 4, 'five': 10},
            dos.offset_or_add({'one': 5, 'three': 0, 'five': 10}, 5,
                              offset_multi=self.offset_multi,
                              add_multi=self.add_multi))

    def testAddsRace(self):
        """Tests when re-adding keys is a race that is lost."""
        def offset_multi():
            yield lambda *a, **k: {'one': None, 'three': 4, 'five': None}
            yield lambda *a, **k: {'one': 5, 'five': 10}
        self.offsets = offset_multi()
        def add_multi():
            def run(adds, **kwargs):
                self.assertEquals({'one': 5, 'five': 10}, adds)
                return ['one', 'five']
            yield run
        self.adds = add_multi()
        self.assertEquals(
            {'one': 5, 'three': 4, 'five': 10},
            dos.offset_or_add({'one': 5, 'three': 0, 'five': 10}, 5,
                              offset_multi=self.offset_multi,
                              add_multi=self.add_multi))

    def testOffsetsFailAfterRace(self):
        """Tests when the last offset call fails."""
        def offset_multi():
            yield lambda *a, **k: {'one': None, 'three': 4, 'five': None}
            yield lambda *a, **k: {'one': None, 'five': None}
        self.offsets = offset_multi()
        def add_multi():
            def run(adds, **kwargs):
                self.assertEquals({'one': 5, 'five': 10}, adds)
                return ['one', 'five']
            yield run
        self.adds = add_multi()
        self.assertEquals(
            {'one': None, 'three': 4, 'five': None},
            dos.offset_or_add({'one': 5, 'three': 0, 'five': 10}, 5,
                              offset_multi=self.offset_multi,
                              add_multi=self.add_multi))
################################################################################
class SamplerTest(unittest.TestCase):
"""Tests for the MultiSampler class."""
def setUp(self):
    """Sets up the test harness."""
    testutil.setup_for_testing()
    # Domains and URLs shared by the sampling tests below; url2 and url3
    # both belong to domainB (example.com).
    self.domainA = 'mydomain.com'
    self.domainB = 'example.com'
    self.domainC = 'other.com'
    self.domainD = 'meep.com'
    self.url1 = 'http://mydomain.com/stuff/meep'
    self.url2 = 'http://example.com/some-path?a=b'
    self.url3 = 'http://example.com'
    self.url4 = 'http://other.com/relative'
    self.url5 = 'http://meep.com/another-one'
    self.all_urls = [self.url1, self.url2, self.url3, self.url4, self.url5]
    # Deterministic stand-ins for randrange/random/time: each call pops
    # the next scripted value, so a test fails loudly if the code under
    # test consumes more values than the test scripted.
    self.randrange_results = []
    self.fake_randrange = lambda value: self.randrange_results.pop(0)
    self.random_results = []
    self.fake_random = lambda: self.random_results.pop(0)
    self.gettime_results = []
    self.fake_gettime = lambda: self.gettime_results.pop(0)
def verify_sample(self,
                  results,
                  key,
                  expected_count,
                  expected_frequency,
                  expected_average=1,
                  expected_min=1,
                  expected_max=1):
    """Verifies a sample key is present in the results.

    Args:
      results: SampleResult object.
      key: String key of the sample to test.
      expected_count: How many samples should be present in the results.
      expected_frequency: The frequency of this single key.
      expected_average: Expected average value across samples of this key.
      expected_min: Expected minimum value across samples of this key.
      expected_max: Expected maximum value across samples of this key.

    Raises:
      AssertionError if any of the expectations are not met.
    """
    self.assertEquals(expected_count, results.get_count(key))
    # Frequency and average are floats; compare within a small epsilon.
    self.assertTrue(
        -0.001 < (expected_frequency - results.get_frequency(key)) < 0.001,
        'Difference %f - %f = %f' % (
            expected_frequency, results.get_frequency(key),
            expected_frequency - results.get_frequency(key)))
    self.assertTrue(
        -0.001 < (expected_average - results.get_average(key)) < 0.001,
        # BUG FIX: this failure message was missing the "=" present in
        # the parallel frequency assertion above.
        'Difference %f - %f = %f' % (
            expected_average, results.get_average(key),
            expected_average - results.get_average(key)))
    self.assertEquals(expected_min, results.get_min(key))
    self.assertEquals(expected_max, results.get_max(key))
def verify_no_sample(self, results, key):
    """Verifies a sample key is not present in the results.

    Args:
      results: SampleResult object.
      key: String key of the sample to test.

    Raises:
      AssertionError if the key is present.
    """
    samples = results.get_samples(key)
    self.assertEquals(0, len(samples))
def testSingleAlways(self):
    """Tests single-config sampling when the sampling rate is 100%."""
    config = dos.ReservoirConfig(
        'always',
        period=300,
        rate=1,
        samples=10000,
        by_domain=True)
    sampler = dos.MultiSampler([config], gettime=self.fake_gettime)

    # First pass: all five URLs reported once each.
    reporter = dos.Reporter()
    reporter.set(self.url1, config)
    reporter.set(self.url2, config)
    reporter.set(self.url3, config)
    reporter.set(self.url4, config)
    reporter.set(self.url5, config)
    self.gettime_results.extend([0, 10])
    sampler.sample(reporter)

    results = sampler.get(config)
    self.assertEquals(5, results.total_samples)
    self.assertEquals(5, results.unique_samples)
    # url2 and url3 share example.com, hence count 2 for domainB.
    self.verify_sample(results, self.domainA, 1, 0.1)
    self.verify_sample(results, self.domainB, 2, 0.2)
    self.verify_sample(results, self.domainC, 1, 0.1)
    self.verify_sample(results, self.domainD, 1, 0.1)

    # Second pass with the same reporter doubles every count.
    self.gettime_results.extend([0, 10])
    sampler.sample(reporter)
    results = sampler.get(config)
    self.assertEquals(10, results.total_samples)
    self.assertEquals(10, results.unique_samples)
    self.verify_sample(results, self.domainA, 2, 0.2)
    self.verify_sample(results, self.domainB, 4, 0.4)
    self.verify_sample(results, self.domainC, 2, 0.2)
    self.verify_sample(results, self.domainD, 2, 0.2)

    # Third pass reports only url1, bumping just domainA.
    reporter = dos.Reporter()
    reporter.set(self.url1, config)
    self.gettime_results.extend([0, 10])
    sampler.sample(reporter)
    results = sampler.get(config)
    self.assertEquals(11, results.total_samples)
    self.assertEquals(11, results.unique_samples)
    self.verify_sample(results, self.domainA, 3, 0.3)
    self.verify_sample(results, self.domainB, 4, 0.4)
    self.verify_sample(results, self.domainC, 2, 0.2)
    self.verify_sample(results, self.domainD, 2, 0.2)
def testSingleOverwrite(self):
    """Tests when the number of slots is lower than the sample count."""
    config = dos.ReservoirConfig(
        'always',
        period=300,
        rate=1,
        samples=2,
        by_domain=True)
    # Reservoir of only two slots; the scripted randrange values below
    # choose which slot each overflowing sample replaces.
    sampler = dos.MultiSampler([config], gettime=self.fake_gettime)

    # Writes samples index 0 and 1, then overwrites index 1 again with
    # a URL in the same domain.
    reporter = dos.Reporter()
    reporter.set(self.url1, config)
    reporter.set(self.url2, config)
    reporter.set(self.url3, config)
    self.gettime_results.extend([0, 1])
    self.randrange_results.extend([1])
    sampler.sample(reporter, randrange=self.fake_randrange)
    results = sampler.get(config)
    self.assertEquals(3, results.total_samples)
    self.assertEquals(2, results.unique_samples)
    self.verify_sample(results, self.domainA, 1, 1.5)
    self.verify_sample(results, self.domainB, 1, 1.5)

    # Overwrites the sample at index 0, skewing all results towards the
    # domain from index 1.
    reporter = dos.Reporter()
    reporter.set(self.url3, config)
    self.gettime_results.extend([0, 1])
    self.randrange_results.extend([0])
    sampler.sample(reporter, randrange=self.fake_randrange)
    results = sampler.get(config)
    self.assertEquals(4, results.total_samples)
    self.assertEquals(2, results.unique_samples)
    self.verify_sample(results, self.domainB, 2, 4.0)
    self.verify_no_sample(results, self.domainA)

    # Now a sample outside the range won't replace anything.
    self.gettime_results.extend([0, 1])
    self.randrange_results.extend([3])
    sampler.sample(reporter, randrange=self.fake_randrange)
    results = sampler.get(config)
    self.assertEquals(5, results.total_samples)
    self.assertEquals(2, results.unique_samples)
    self.verify_sample(results, self.domainB, 2, 5.0)
    self.verify_no_sample(results, self.domainA)
def testSingleSampleRate(self):
    """Tests when the sampling rate is less than 1."""
    config = dos.ReservoirConfig(
        'always',
        period=300,
        rate=0.2,
        samples=10000,
        by_domain=True)
    sampler = dos.MultiSampler([config], gettime=self.fake_gettime)
    reporter = dos.Reporter()
    reporter.set(self.url1, config)
    reporter.set(self.url2, config)
    reporter.set(self.url3, config)
    reporter.set(self.url4, config)
    reporter.set(self.url5, config)
    self.gettime_results.extend([0, 10])
    # Only draws below the 0.2 rate threshold are kept (urls 2, 3, 4).
    self.random_results.extend([0.25, 0.199, 0.1, 0, 0.201])
    sampler.sample(reporter, getrandom=self.fake_random)
    results = sampler.get(config)
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(3, results.total_samples)
    self.assertEqual(3, results.unique_samples)
    self.verify_no_sample(results, self.domainA)
    self.verify_no_sample(results, self.domainD)
    self.verify_sample(results, self.domainB, 2,
                       (1.0/0.2) * (2.0/3.0) * (3.0/10.0))
    self.verify_sample(results, self.domainC, 1,
                       (1.0/0.2) * (1.0/3.0) * (3.0/10.0))
def testSingleDoubleSampleRemoved(self):
    """Tests when the same sample key is set twice and one is skipped.

    Setting the value twice should just overwrite the previous value for a key,
    but we store the keys in full order (with dupes) for simpler tests. This
    ensures that incorrectly using the sampler with multiple sets won't barf.
    """
    config = dos.ReservoirConfig(
        'always',
        period=300,
        rate=0.2,
        samples=4,
        by_domain=True)
    sampler = dos.MultiSampler([config], gettime=self.fake_gettime)
    reporter = dos.Reporter()
    reporter.set(self.url1, config)
    reporter.set(self.url1, config)
    reporter.set(self.url2, config)
    reporter.set(self.url3, config)
    reporter.set(self.url4, config)
    reporter.set(self.url5, config)
    self.gettime_results.extend([0, 10])
    self.randrange_results.extend([0])
    self.random_results.extend([0.25, 0.199, 0.1, 0, 0.3, 0.3])
    sampler.sample(reporter, getrandom=self.fake_random)
    results = sampler.get(config)
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(3, results.total_samples)
    self.assertEqual(2, results.unique_samples)
    self.verify_no_sample(results, self.domainA)
    self.verify_no_sample(results, self.domainC)
    self.verify_no_sample(results, self.domainD)
    self.verify_sample(results, self.domainB, 2,
                       (1.0/0.2) * (2.0/2.0) * (3.0/10.0))
def testSingleSampleRateReplacement(self):
    """Tests when the sample rate is < 1 and slots are overwritten."""
    config = dos.ReservoirConfig(
        'always',
        period=300,
        rate=0.2,
        samples=2,
        by_domain=True)
    sampler = dos.MultiSampler([config], gettime=self.fake_gettime)
    reporter = dos.Reporter()
    reporter.set(self.url1, config)
    reporter.set(self.url2, config)
    reporter.set(self.url3, config)
    reporter.set(self.url4, config)
    self.gettime_results.extend([0, 10])
    self.randrange_results.extend([1])
    self.random_results.extend([0.25, 0.199, 0.1, 0])
    sampler.sample(reporter, getrandom=self.fake_random)
    results = sampler.get(config)
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(3, results.total_samples)
    self.assertEqual(2, results.unique_samples)
    self.verify_no_sample(results, self.domainA)
    self.verify_no_sample(results, self.domainD)
    self.verify_sample(results, self.domainB, 1,
                       (1.0/0.2) * (1.0/2.0) * (3.0/10.0))
    self.verify_sample(results, self.domainC, 1,
                       (1.0/0.2) * (1.0/2.0) * (3.0/10.0))
def testSingleSampleValues(self):
    """Tests various samples with expected values."""
    config = dos.ReservoirConfig(
        'always',
        period=300,
        rate=0.2,
        samples=4,
        by_domain=True)
    sampler = dos.MultiSampler([config], gettime=self.fake_gettime)
    reporter = dos.Reporter()
    reporter.set(self.url1, config, 5)
    reporter.set(self.url1, config, 20)  # in
    reporter.set(self.url2, config, 10)  # in
    reporter.set(self.url2 + '&more=true', config, 25)  # in
    reporter.set(self.url3, config, 20)  # in
    reporter.set(self.url4, config, 40)  # in
    reporter.set(self.url5, config, 60)
    self.gettime_results.extend([0, 10])
    self.randrange_results.extend([0])
    self.random_results.extend([0.25, 0.199, 0.1, 0, 0, 0.1, 0.3])
    sampler.sample(reporter,
                   randrange=self.fake_randrange,
                   getrandom=self.fake_random)
    results = sampler.get(config)
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(5, results.total_samples)
    self.assertEqual(4, results.unique_samples)
    self.verify_no_sample(results, self.domainA)
    self.verify_no_sample(results, self.domainD)
    self.verify_sample(results, self.domainB, 3,
                       (1.0/0.2) * (3.0/4.0) * (5.0/10.0),
                       expected_average=18.333,
                       expected_min=10,
                       expected_max=25)
    self.verify_sample(results, self.domainC, 1,
                       (1.0/0.2) * (1.0/4.0) * (5.0/10.0),
                       expected_average=40,
                       expected_min=40,
                       expected_max=40)
def testResetTimestamp(self):
    """Tests resetting the timestamp after the period elapses."""
    config = dos.ReservoirConfig(
        'always',
        period=10,
        samples=10000,
        by_domain=True)
    sampler = dos.MultiSampler([config], gettime=self.fake_gettime)
    reporter = dos.Reporter()
    reporter.set(self.url1, config)
    self.gettime_results.extend([0, 5])
    sampler.sample(reporter)
    results = sampler.get(config)
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(1, results.total_samples)
    self.assertEqual(1, results.unique_samples)
    self.verify_sample(results, self.domainA, 1, 1.0 / 5)
    self.verify_no_sample(results, self.domainB)
    self.verify_no_sample(results, self.domainC)
    self.verify_no_sample(results, self.domainD)
    # After the 10-second period elapses the reservoir starts over,
    # so only the new sample should be visible.
    reporter = dos.Reporter()
    reporter.set(self.url2, config)
    self.gettime_results.extend([15, 16])
    sampler.sample(reporter)
    results = sampler.get(config)
    self.assertEqual(1, results.total_samples)
    self.assertEqual(1, results.unique_samples)
    self.verify_sample(results, self.domainB, 1, 1.0)
    self.verify_no_sample(results, self.domainA)
    self.verify_no_sample(results, self.domainC)
    self.verify_no_sample(results, self.domainD)
def testSingleUnicodeKey(self):
"""Tests when a sampling key is unicode.
Keys must be UTF-8 encoded because the memcache API will do this for us
(and break) if we don't.
| |
# (C) <NAME> and Carnegie Mellon University, 2017
import logging
import time
from tensorlog import comline
from tensorlog import config
from tensorlog import declare
from tensorlog import funs
from tensorlog import matrixdb
from tensorlog import ops
from tensorlog import util
# Module-level configuration knobs shared by the cross-compilers.
conf = config.Config()
conf.reparameterizeMatrices = True; conf.help.reparameterizeMatrices = 'pass parameter matrices through a softplus to make keep them positive'
conf.ignoreTypeCheck= False; conf.help.ignoreTypeCheck = 'allow unknown types in a database with types'
# Name used for the target-output placeholder variable during training.
TRAINING_TARGET_VARNAME = '_target_y'
class AbstractCrossCompiler(object):
""" Base class for tensorlog -> [theano|tensorflow|....] cross-compiler """
def __init__(self,prog):
    """Set up shared cross-compiler state for one tensorlog program.

    Args:
      prog: the tensorlog program to cross-compile; its database is
        also kept, as self.db.
    """
    # We need to create variables in different namespaces for
    # different instances of an OpSeqFunction, so that the variables
    # used to represent OpSeqFunction intermediate values don't clash.
    # namespaces are defined by integer ids, and we allocate a new one
    # for every distinct OpSeqFunction that gets compiled.
    self._nextNamespaceId = 0
    # holds outputs of compilation, indexed by mode
    self._wsDict = {}
    # holds output of current compilation process
    self.ws = None
    # pointers back to the program and matrixdb
    self.prog = prog
    self.db = prog.db
    # when a db is 'typeless', ie all entities are of type
    # matrixdb.THING, then _onlyType is set to THING
    self._onlyType = None
    # maps typeName to the vector used to introduce NULL entities,
    # with low weight, into a vector of type typeName
    self._nullSmoother = {}
    # set after vectors are allocated for the nullSmoother's
    self._globalsSet = None
    # Cache 'handle expressions' for some of the objects in the
    # tensorlog database. The handle expressions are indexed by a
    # (functor,arity) pair. Handle expressions must be inserted by
    # calling insertHandleExpr().
    self._handleExpr = {}
    # For each handle expression, there is some underlying variable
    # with a gradient that is optimized. Often this is the same as the
    # handle expression, but not always. These are indexed by
    # functor,arity key.
    self._handleExprVar = {}
    logging.debug('AbstractCrossCompiler initialized %.3f Gb' % util.memusage())
#
# external UI
#
def close(self):
    """ Release any resources held by the compiler.

    The default implementation is a no-op; subclasses that allocate
    external resources may override it.
    """
    pass
def inference(self,mode,inputs=None):
""" Returns (args,inferenceExpr) """
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode].inferenceArgs, self._wsDict[mode].inferenceExpr
def inferenceFunction(self,mode,wrapInputs=True,unwrapOutputs=True):
"""Returns a python function which performs inference for the function
defined by that mode. The function takes a length-one tuple
containing one argument X, which can be a row vector or a
minibatch, and outputs a matrix with the same number of rows as X,
and the number of columns appropriate for the output type of the
mode.
"""
args,expr = self.inference(mode)
assert len(args)==1
return self._asOneInputFunction(args[0],expr,wrapInputs,unwrapOutputs)
def inferenceOutputType(self,mode,inputs=None):
""" The type associated with the output of a tensorlog function.
"""
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode].tensorlogFun.outputType
def proofCount(self,mode,inputs=None):
""" Returns (args,proofCountExpr) """
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode].proofCountArgs, self._wsDict[mode].proofCountExpr
def proofCountFunction(self,mode,wrapInputs=True,unwrapOutputs=True,inputs=None):
"""Returns a python function which performs counts proofs for the
queries defined by that mode. The function takes a length-one
tuple containing one argument X, which can be a row vector or a
minibatch, and outputs a matrix with the same number of rows as X,
and the number of columns appropriate for the output type of the
mode.
"""
args,expr = self.proofCount(mode,inputs=inputs)
assert len(args)==1
return self._asOneInputFunction(args[0],expr,wrapInputs,unwrapOutputs)
def proofCountOutputType(self,mode,inputs=None):
""" The type associated with the output of a tensorlog function.
"""
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode].tensorlogFun.outputType
def dataLoss(self,mode,inputs=None):
""" Returns (args,dataLossExpr) """
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode].dataLossArgs, self._wsDict[mode].dataLossExpr
def dataLossFunction(self,mode,wrapInputs=True,unwrapOutputs=True):
"""Returns a python function which compute the unregularized loss for
the function defined by that mode, relative to target outputs Y.
The function takes a single argument which is a list of (X,Y).
"""
args,expr = self.dataLoss(mode)
assert len(args)==2
return self._asTwoInputFunction(args[0],args[1],expr,wrapInputs,unwrapOutputs)
def dataLossGrad(self,mode,inputs=None):
"""Returns (args,[dataLossGrad1,....,dataLossGradn]), where each grad
is the gradient of one of the parameters.The order of the grads
is the same as the parameters.
"""
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode].dataLossArgs, self._wsDict[mode].dataLossGradExprs
def dataLossGradFunction(self,mode,wrapInputs=True,unwrapOutputs=True,inputs=None):
"""Returns a python function which performs inference for the function
defined by that mode. The function takes a single argument which
is a list of (X,Y).
"""
args,exprList = self.dataLossGrad(mode,inputs=inputs)
assert len(args)==2
return self._exprListAsUpdateFunction(args[0],args[1],exprList,wrapInputs,unwrapOutputs)
#
# forwarded to the underlying database, or appropriate subclass
# routine
#
def asSymbol(self,symbolId,typeName=None):
""" Convert a typed integer id to a symbol
"""
return self.db.asSymbol(symbolId,typeName=typeName)
def asSymbolId(self,symbol,typeName=None):
""" Convert a typed symbol to an integer id
"""
return self.db.asSymbolId(symbol,typeName=typeName)
def wrapInput(self,x):
""" Convert scipy matrix to required input format
"""
return self._wrapMsg(x)
def unwrapInput(self,x):
"""Inverts wrapInput. Override this only if inputs and outputs are
in a different format.
"""
return self._unwrapOutput(x)
def unwrapOutput(self,y):
""" Convert output to scipy matrix
"""
return self._unwrapOutput(y)
def unwrapParam(self,y):
""" Convert output to scipy matrix
"""
return self._unwrapOutput(x)
def possibleOps(self,subExpr,typeName=None):
    """If a typeName is specified, return (expr,type) pairs, where each
    expression performs one primitive tensorlog operation on the subExpr
    given as input, and type is the name of the type of the resulting
    subExpr.  If typeName is None, the database must be typeless, and
    plain expressions (not pairs) are returned.
    """
    # TODO add multiple-input and zero-input operations
    if typeName is None:
        typeName = matrixdb.THING
        assert self.db.isTypeless(),'if database has types declared, you must specify the type of the input to possibleOps'
    result = []
    # Each binary relation can contribute up to two ops: a forward
    # multiplication (when its domain matches typeName) and a transposed
    # multiplication (when its range matches typeName).
    for (functor,arity) in self.db.matEncoding:
        if arity==2:
            mode = declare.asMode("%s(i,o)" % functor)
            if self.db.schema.getDomain(functor,arity)==typeName:
                op = self._vecMatMulExpr(subExpr, self._matrix(mode,transpose=False))
                if self.db.isTypeless():
                    result.append(op)
                else:
                    result.append((op,self.db.schema.getRange(functor,arity)))
            if self.db.schema.getRange(functor,arity)==typeName:
                op = self._vecMatMulExpr(subExpr, self._matrix(mode,transpose=True))
                if self.db.isTypeless():
                    result.append(op)
                else:
                    result.append((op,self.db.schema.getDomain(functor,arity)))
    return result
#
# used in inferenceFunction, dataLossFunction, etc
#
def _asOneInputFunction(self,arg1,expr,wrapInputs,unwrapOutputs):
    """Return a python function which implements the expression,
    optionally 'wrapping' the input and outputs.  If inputs are
    wrapped, passing in scipy sparse matrices is ok.  If outputs are
    unwrapped, then the output will be scipy sparse matrices.

    Abstract: must be implemented by a target-language subclass.
    """
    assert False,'abstract method called'
def _asTwoInputFunction(self,arg1,arg2,expr,wrapInputs,unwrapOutputs):
    """Analogous to _asOneInputFunction but takes two inputs.

    Abstract: must be implemented by a target-language subclass.
    """
    assert False,'abstract method called'
def _exprListAsUpdateFunction(self,args,exprList,params,wrapInputs,unwrapOutputs):
    """Analogous to _asTwoInputFunction, but returns a python function
    which returns a list of pairs (key,update), mapping parameter
    'keys' -- i.e., functor,arity pairs -- to updates of those
    parameters.  (The original docstring referenced this method itself.)

    Abstract: must be implemented by a target-language subclass.
    """
    assert False,'abstract method called'
def getParamVariables(self,mode,inputs=None):
"""Find target-language variables that are optimized to set the DB
parameters. These are the variables that will be optimized in
learning. Eg, if a weight vector V is reparameterized by passing
it through an softplus, this will be the underlying variable V0
such that softplus(V0)=V.
"""
mode = self.ensureCompiled(mode,inputs=inputs)
return [self._handleExprVar[key] for key in self.prog.getParamList()]
def getParamHandles(self,mode,inputs=None):
"""Find target-language variables corresponding to DB parameters.
These are the variables that store or compute the values that
correspond most closely to the parameters. Eg, if a weight vector
V is reparameterized by passing it through an softplus, this will
be the variable V such that V=softplus(V0), where V0 is optimized
in learning.
"""
mode = self.ensureCompiled(mode,inputs=inputs)
return [self._handleExpr[key] for key in self.prog.getParamList()]
def parameterFromDBToExpr(self,functor,arity):
return self._handleExpr.get((functor,arity))
def parameterFromDBToVariable(self,functor,arity):
return self._handleExprVar.get((functor,arity))
def pprint(self,mode,inputs=None):
"""Return list of lines in a pretty-print of the underlying, pre-compilation function.
To actual display these, use something like
print "\n".join(xcompiler.pprint("predict/io"))
"""
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode].tensorlogFun.pprint()
def getWorkspace(self,mode,inputs=None):
""" Return the workspace associated with a mode
"""
mode = self.ensureCompiled(mode,inputs=inputs)
return self._wsDict[mode]
def _getParamVariables(self):
    """ Convenience method to find variables corresponding to parameters """
    # NOTE(review): this reads self.xcomp.prog, but this class stores the
    # program directly on self.prog (see __init__); self.xcomp looks like
    # it was copied from a wrapper class and may be a latent bug --
    # confirm against subclasses/callers before relying on this method.
    return [self._handleExprVar[key] for key in self.xcomp.prog.getParamList()]
#
# these all define the interface to the database. instead of
# returning a constant matrix M, they will return a 'handle
# expression', i.e., a target-language expression that evaluates to
# that matrix at learning time. In the simple cases, this is just
# the name for a shared variable, but it could be an expression
# based on that variable (eg its transpose)
#
def _vector(self, matMode):
""" Wraps a call to db.vector()
"""
assert matMode.arity==1
key = (matMode.getFunctor(),1)
if not key in self._handleExpr:
assert (matMode.functor,1) in self.db.matEncoding, 'DB does not contain a value for %s' % str(matMode)
variable_name = "v__" + matMode.getFunctor()
val = self._wrapDBVector(self.db.vector(matMode)) #ignores all but functor for arity 1
self._insertHandleExpr(key, variable_name, val, broadcast=True)
return self._handleExpr[key]
def _constantVector(self, variable_name, val):
""" Wrap a call to db.onehot(), db.zeros(), etc.
"""
key = (variable_name,0)
if not key in self._handleExpr:
wrapped_val = | |
<filename>Arty-Z7-10/components/ext_sources/u-boot-ectf/test/py/tests/test_mmc.py
#
# Copyright (c) 2016 <NAME>
#
# SPDX-License-Identifier: GPL-2.0
import pytest
import random
import re
import u_boot_utils
# Set to True once test_mmc_list() has successfully enumerated controllers;
# every other test in this file skips while it is still False.
mmc_set_up = False
# Number of MMC controllers reported by 'mmc list'.
controllers = 0
# Per-controller state gathered by the tests (name, detected, partition
# lists, and per-partition addr/size/crc bookkeeping), keyed by index.
devices = {}
@pytest.mark.buildconfigspec("cmd_mmc")
def test_mmc_list(u_boot_console):
    """Enumerate MMC controllers via 'mmc list'.

    Populates the module-level `devices`/`controllers`/`mmc_set_up`
    globals that every other MMC test depends on.
    """
    output = u_boot_console.run_command("mmc list")
    if "No MMC device available" in output:
        pytest.skip('No SD/MMC/eMMC controller available')
    array = output.split()
    global devices
    global controllers
    # Each controller is reported as a "<name> <index>" token pair.  Use
    # floor division so the count stays an int under Python 3 as well
    # (plain '/' would yield a float and break range() below).
    controllers = len(array) // 2
    for x in range(0, controllers):
        y = x * 2
        devices[x] = {}
        devices[x]["name"] = array[y]
    global mmc_set_up
    mmc_set_up = True
@pytest.mark.buildconfigspec("cmd_mmc")
def test_mmc_dev(u_boot_console):
    """Switch to each controller and record whether a card responded."""
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    card_missing = False
    for ctrl in range(0, controllers):
        output = u_boot_console.run_command('mmc dev %d' % ctrl)
        # Some sort of switch here
        if "Card did not respond to voltage select" in output:
            card_missing = True
            devices[ctrl]["detected"] = "no"
        else:
            devices[ctrl]["detected"] = "yes"
    if card_missing:
        pytest.fail("Card not present")
@pytest.mark.buildconfigspec("cmd_mmc")
def test_mmcinfo(u_boot_console):
    """Run 'mmcinfo' on every detected device and record its capacity (GiB)."""
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            u_boot_console.run_command('mmc dev %d' % x)
            output = u_boot_console.run_command("mmcinfo")
            if "busy timeout" in output:
                pytest.skip('No SD/MMC/eMMC device present')
            obj = re.search(r'Capacity: (\d+|\d+[\.]?\d)', output)
            # Guard against a missing match: calling .groups() on None
            # raised AttributeError, which the original code did not catch.
            if not obj:
                pytest.fail("MMC capacity not recognized")
            try:
                capacity = float(obj.groups()[0])
                devices[x]["capacity"] = capacity
                print ("Capacity of dev %d is: %g GiB" % (x, capacity))
            except ValueError:
                pytest.fail("MMC capacity not recognized")
@pytest.mark.buildconfigspec("cmd_mmc")
def test_mmc_info(u_boot_console):
    """Check that 'mmc info' reports the same capacity 'mmcinfo' recorded."""
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            u_boot_console.run_command('mmc dev %d' % x)
            output = u_boot_console.run_command("mmc info")
            obj = re.search(r'Capacity: (\d+|\d+[\.]?\d)', output)
            # Guard against a missing match: calling .groups() on None
            # raised AttributeError, which the original code did not catch.
            if not obj:
                pytest.fail("MMC capacity not recognized")
            try:
                capacity = float(obj.groups()[0])
                if devices[x]["capacity"] != capacity:
                    pytest.fail("MMC capacity doesn't match mmcinfo")
            except ValueError:
                pytest.fail("MMC capacity not recognized")
@pytest.mark.buildconfigspec("cmd_mmc")
def test_mmc_rescan(u_boot_console):
    """'mmc rescan' should produce no output on success."""
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    if not devices:
        pytest.skip('No devices detected')
    for ctrl in range(0, controllers):
        if devices[ctrl]["detected"] != "yes":
            continue
        u_boot_console.run_command('mmc dev %d' % ctrl)
        output = u_boot_console.run_command("mmc rescan")
        # Not sure if this can be any error
        if output:
            pytest.fail("mmc rescan has something to check")
@pytest.mark.buildconfigspec("cmd_mmc")
def test_mmc_part(u_boot_console):
    # Parse 'mmc part' on every detected device and record which
    # partitions carry a FAT or ext filesystem for the later tests.
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    if not devices:
        pytest.skip('No devices detected')
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            u_boot_console.run_command('mmc dev %d' % x)
            output = u_boot_console.run_command("mmc part")
            lines = output.split("\n")
            part_fat = []
            part_ext = []
            for line in lines:
                # Matches "<id> <start> <size> <uuid-ish> <type>" rows.
                obj = re.search(r'(\d)\s+\d+\s+\d+\s+\w+\d+\w+-\d+\s+(\d+\w+)', line)
                if obj:
                    part_id = int(obj.groups()[0])
                    part_type = obj.groups()[1]
                    print ("part_id:%d, part_type:%s" % (part_id, part_type))
                    # 0b/0c/0e are the MBR partition type ids for FAT variants.
                    if part_type == '0c' or part_type == '0b' or part_type == '0e':
                        print "Fat detected"
                        part_fat.append(part_id)
                    elif part_type == '83':
                        # 0x83 is "Linux"; treated as ext2/ext4 below.
                        print "ext detected"
                        part_ext.append(part_id)
                    else:
                        pytest.fail("Unsupported Filesystem on device %d" % x)
            # The same id list is recorded for ext4 and ext2: both drivers
            # read a type-0x83 partition.
            devices[x]["ext4"] = part_ext
            devices[x]["ext2"] = part_ext
            devices[x]["fat"] = part_fat
            if not part_ext and not part_fat:
                pytest.fail("No partition detected on device %d" %x)
@pytest.mark.buildconfigspec("cmd_mmc")
@pytest.mark.buildconfigspec("cmd_fat")
def test_mmc_fatls_fatinfo(u_boot_console):
    """List each FAT partition and check fatinfo reports a FAT filesystem."""
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    if not devices:
        pytest.skip('No devices detected')
    part_detect = 0
    fs = "fat"
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            u_boot_console.run_command('mmc dev %d' % x)
            try:
                partitions = devices[x][fs]
            except:
                print ("No %s table on this device" % fs.upper());
                continue
            for part in partitions:
                output = u_boot_console.run_command("fatls mmc %d:%s" % (x, part))
                if "Unrecognized filesystem type" in output:
                    partitions.remove(part)
                    pytest.fail("Unrecognized filesystem")
                # Bug fix: fs.upper (missing call parens) formatted the
                # bound-method object into the failure message.
                if not re.search("\d file\(s\), \d dir\(s\)", output):
                    pytest.fail("%s read failed on device %d" % (fs.upper(), x))
                # Maybe separate this usecase
                # Also maybe detect not readable chars
                output = u_boot_console.run_command("fatinfo mmc %d:%s" % (x, part))
                # Bug fix: the string used fs.upper (never matched) and the
                # check was inverted -- it failed exactly when the expected
                # "Filesystem: FAT..." line WAS present.  Fail when fatinfo
                # does not report the expected filesystem.
                string = "Filesystem: %s" % fs.upper()
                if not re.search(string, output):
                    pytest.fail("%s FS failed on device %d" % (fs.upper(), x))
                part_detect = 1
    if not part_detect:
        pytest.skip("No %s partition detected" % fs.upper())
@pytest.mark.buildconfigspec("cmd_mmc")
@pytest.mark.buildconfigspec("cmd_fat")
@pytest.mark.buildconfigspec('cmd_memory')
def test_mmc_fatload_fatwrite(u_boot_console):
    """Write a random-sized file to each FAT partition, read it back at an
    unaligned offset, and verify the CRC32 matches what was written."""
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    if not devices:
        pytest.skip('No devices detected')
    part_detect = 0
    fs = "fat"
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            u_boot_console.run_command('mmc dev %d' % x)
            try:
                partitions = devices[x][fs]
            except:
                print ("No %s table on this device" % fs.upper());
                continue
            for part in partitions:
                part_detect = 1
                addr = u_boot_utils.find_ram_base(u_boot_console)
                devices[x]["addr_%d" % part] = addr
                size = random.randint(4, 1 * 1024 * 1024)
                devices[x]["size_%d" % part] = size
                # count CRC32
                output = u_boot_console.run_command('crc32 %x %x' % (addr, size))
                # Bug fix: the original pattern '==> (.+?)' captured only a
                # single character, making the final "in output" comparison
                # nearly vacuous.  Capture the whole hex checksum instead.
                m = re.search('==> ([0-9a-fA-F]+)', output)
                if not m:
                    pytest.fail("CRC32 failed")
                expected_crc32 = m.group(1)
                devices[x]["expected_crc32_%d" % part] = expected_crc32
                # do write
                file = '%s_%d' % ("uboot_test", size)
                devices[x]["file_%d" % part] = file
                output = u_boot_console.run_command("%swrite mmc %d:%s %x %s %x" % (fs, x, part, addr, file, size))
                assert "Unable to write" not in output
                assert "Error" not in output
                assert "overflow" not in output
                expected_text = "%d bytes written" % size
                assert expected_text in output
                # Read back at a cacheline-aligned, nonzero offset.
                alignment = int(u_boot_console.config.buildconfig.get('config_sys_cacheline_size', 128))
                offset = random.randrange(alignment, 1024, alignment)
                output = u_boot_console.run_command("%sload mmc %d:%s %x %s" % (fs, x, part, addr + offset, file))
                assert "Invalid FAT entry" not in output
                assert "Unable to read file" not in output
                assert "Misaligned buffer address" not in output
                expected_text = "%d bytes read" % size
                assert expected_text in output
                output = u_boot_console.run_command('crc32 %x $filesize' % (addr + offset))
                assert expected_crc32 in output
    if not part_detect:
        pytest.skip("No %s partition detected" % fs.upper())
@pytest.mark.buildconfigspec("cmd_mmc")
@pytest.mark.buildconfigspec("cmd_ext4")
def test_mmc_ext4ls(u_boot_console):
    # List each recorded ext4 partition; an unrecognized filesystem fails.
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    if not devices:
        pytest.skip('No devices detected')
    part_detect = 0
    fs = "ext4"
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            try:
                partitions = devices[x][fs]
            except:
                print ("No %s table on this device" % fs.upper());
                continue
            u_boot_console.run_command('mmc dev %d' % x)
            for part in partitions:
                output = u_boot_console.run_command("%sls mmc %d:%s" % (fs, x, part))
                if "Unrecognized filesystem type" in output:
                    partitions.remove(part)
                    pytest.fail("Unrecognized filesystem")
                part_detect = 1
    if not part_detect:
        pytest.skip("No %s partition detected" % fs.upper())
@pytest.mark.buildconfigspec("cmd_mmc")
@pytest.mark.buildconfigspec("cmd_ext4")
@pytest.mark.buildconfigspec("ext4_write")
@pytest.mark.buildconfigspec('cmd_memory')
def test_mmc_ext4load_ext4write(u_boot_console):
    """Write a random-sized file to each ext4 partition, read it back at an
    offset, and verify the CRC32 matches what was written."""
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    if not devices:
        pytest.skip('No devices detected')
    part_detect = 0
    fs = "ext4"
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            u_boot_console.run_command('mmc dev %d' % x)
            try:
                partitions = devices[x][fs]
            except:
                print ("No %s table on this device" % fs.upper());
                continue
            for part in partitions:
                part_detect = 1
                addr = u_boot_utils.find_ram_base(u_boot_console)
                devices[x]["addr_%d" % part] = addr
                size = random.randint(4, 1 * 1024 * 1024)
                devices[x]["size_%d" % part] = size
                # count CRC32
                output = u_boot_console.run_command('crc32 %x %x' % (addr, size))
                # Bug fix: the original pattern '==> (.+?)' captured only a
                # single character, making the final "in output" comparison
                # nearly vacuous.  Capture the whole hex checksum instead.
                m = re.search('==> ([0-9a-fA-F]+)', output)
                if not m:
                    pytest.fail("CRC32 failed")
                expected_crc32 = m.group(1)
                devices[x]["expected_crc32_%d" % part] = expected_crc32
                # do write
                file = '%s_%d' % ("uboot_test", size)
                devices[x]["file_%d" % part] = file
                output = u_boot_console.run_command("%swrite mmc %d:%s %x /%s %x" % (fs, x, part, addr, file, size))
                assert "Unable to write" not in output
                assert "Error" not in output
                # maybe not valid for ext4
                assert "overflow" not in output
                expected_text = "%d bytes written" % size
                assert expected_text in output
                # Read it back with 64 aligned offset
                # Todo read ALIGN
                offset = random.randrange(128, 1024, 128)
                output = u_boot_console.run_command("%sload mmc %d:%s %x /%s" % (fs, x, part, addr + offset, file))
                expected_text = "%d bytes read" % size
                assert expected_text in output
                output = u_boot_console.run_command('crc32 %x $filesize' % (addr + offset))
                assert expected_crc32 in output
    if not part_detect:
        pytest.skip("No %s partition detected" % fs.upper())
@pytest.mark.buildconfigspec("cmd_mmc")
@pytest.mark.buildconfigspec("cmd_ext2")
def test_mmc_ext2ls(u_boot_console):
    # List each recorded ext2 partition; an unrecognized filesystem fails.
    if not mmc_set_up:
        pytest.skip('No SD/MMC/eMMC controller available')
    if not devices:
        pytest.skip('No devices detected')
    part_detect = 0
    fs = "ext2"
    for x in range(0, controllers):
        if devices[x]["detected"] == "yes":
            u_boot_console.run_command('mmc dev %d' % x)
            try:
                partitions = devices[x][fs]
            except:
                print ("No %s table on this device" % fs.upper());
                continue
            for part in partitions:
                # NOTE(review): part_detect is set both before and after the
                # listing; the second assignment is redundant.
                part_detect = 1
                output = u_boot_console.run_command("%sls mmc %d:%s" % (fs, x, part))
                if "Unrecognized filesystem type" in output:
                    partitions.remove(part)
                    pytest.fail("Unrecognized filesystem")
                part_detect = 1
    if not part_detect:
        pytest.skip("No %s partition detected" % fs.upper())
@pytest.mark.buildconfigspec("cmd_mmc")
@pytest.mark.buildconfigspec("cmd_ext2")
@pytest.mark.buildconfigspec("cmd_ext4")
@pytest.mark.buildconfigspec("ext4_write")
@pytest.mark.buildconfigspec('cmd_memory')
def test_mmc_ext2load(u_boot_console):
if not mmc_set_up:
pytest.skip('No SD/MMC/eMMC controller available')
if not devices:
pytest.skip('No devices detected')
part_detect = 0
fs = "ext2"
for x in range(0, controllers):
if devices[x]["detected"] == "yes":
u_boot_console.run_command('mmc dev %d' % x)
try:
partitions = devices[x][fs]
except:
print ("No %s table on this device" % fs.upper());
continue
for part in partitions:
part_detect = 1
# Have to share information about ext4 file which was written
# Maybe just choose whatever available file
addr = devices[x]["addr_%d" % part]
size = devices[x]["size_%d" % part]
expected_crc32 = devices[x]["expected_crc32_%d" % part]
file = devices[x]["file_%d" % part]
# | |
"""
Copyright (c) <NAME>
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import copy
import json
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
from lib import config, data, ddc, test, metric, nets, train, utils
def add_parameter_noise(cfg, noise_ratio):
    """
    Perturb the tuned hyperparameters of the Dynamic DropConnect model.

    Each tuned value x is replaced by a uniform sample drawn from
    [x - noise_ratio*|x|, x + noise_ratio*|x|].  The dictionary is
    modified in-place.

    Args:
        cfg: Dictionary containing the configuration with all hyperparameters
        noise_ratio: Ratio of noise relative to magnitude of parameter base value

    Returns:
        The perturbed hyperparameter dictionary (same object as `cfg`)
    """
    def _jitter(value):
        # Uniform noise scaled relative to the magnitude of the base value.
        spread = noise_ratio * abs(value)
        return random.uniform(value - spread, value + spread)

    # Noise only applies to the hyperparameters of the model that we tune.
    for name in ('prob_drift_down', 'prob_drift_up', 'prob_freeze', 'grad_threshold'):
        cfg[name] = _jitter(cfg[name])
    return cfg
def generate_random_hyperparameter(task):
    """
    Randomly sample task-specific hyperparameters.

    Args:
        task: Task name

    Returns:
        Dictionary with random hyperparameters

    Raises:
        ValueError: If the task name is not recognized
    """
    if task == "mnist_energy":
        # prob_up and prob_down are tied for this task.
        shared_drift = np.random.uniform(0.01, 0.1)
        return {
            "prob_drift_down": shared_drift,
            "prob_drift_up": shared_drift,
            "weight_decay": np.random.choice([0.001, 0.005, 0.01, 0.05]),
        }
    if task in ("perm_mnist", "perm_fmnist", "split_mnist", "split_fmnist"):
        # The permuted and split variants share one search space.
        return {
            "prob_drift_down": np.random.uniform(0.01, 0.1),
            "prob_drift_up": np.random.uniform(0.01, 0.1),
            "prob_freeze": np.random.uniform(0.9, 0.96),
        }
    raise ValueError("Task \"{}\" not defined.".format(task))
def load_default_config(task):
    """
    Load the default parameter configuration for a task from its JSON file.

    Args:
        task: String with the task name

    Returns:
        Dictionary of default parameters for the given task

    Raises:
        ValueError: If the task name is not recognized
    """
    config_paths = {
        "mnist": "etc/dyn_mnist.json",
        "mnist_energy": "etc/dyn_mnist_energy.json",
        "mnist_fisher": "etc/dyn_mnist_fisher.json",
        "perm_fmnist": "etc/dyn_perm_fmnist.json",
        "perm_mnist": "etc/dyn_perm_mnist.json",
        "split_fmnist": "etc/dyn_split_fmnist.json",
        "split_mnist": "etc/dyn_split_mnist.json",
        "perm_mnist_cont": "etc/dyn_perm_mnist_cont.json",
    }
    try:
        default_config = config_paths[task]
    except KeyError:
        raise ValueError("Task \"{}\" not defined.".format(task))
    with open(default_config) as config_json_file:
        return json.load(config_json_file)
def parse_shell_args(args):
    """
    Parse shell arguments for this script.

    Defaults use ``argparse.SUPPRESS`` for most options so that flags the
    user did not pass are absent from the result and cannot accidentally
    override values loaded from the JSON config.

    Args:
        args: Command line arguments passed through sys.argv[1:]

    Returns:
        Dictionary with configuration
    """
    parser = argparse.ArgumentParser(description="Run experiments with the Dynamic DropConnect model.")
    parser.add_argument("--batch_size", type=int, default=argparse.SUPPRESS,
                        help="Size of mini batches during training.")
    parser.add_argument("--dimensions", type=int, nargs="+",
                        default=argparse.SUPPRESS, help="Dimensions of the neural network.")
    parser.add_argument("--epochs", type=int, default=argparse.SUPPRESS,
                        help="Number of epochs to train.")
    parser.add_argument("--experiment_clamp", action='store_true', default=False,
                        help="Flag to conduct the probability clamp experiment.")
    parser.add_argument("--experiment_fisher", action='store_true', default=False,
                        help="Flag to conduct the Fisher Information experiment.")
    parser.add_argument("--experiment_lesion", action='store_true', default=False,
                        help="Flag to conduct the importance lesion experiment.")
    parser.add_argument("--fixed_probs", type=float, default=argparse.SUPPRESS,
                        help="Fixed forward transmission probabilities decoupled from importance parameter.")
    # Paired enable/disable flags share a destination so either one may be
    # given; both default to SUPPRESS so the config file wins when unset.
    parser.add_argument("--grad_hard_reset", action='store_true', default=argparse.SUPPRESS,
                        help="Flag to enable resetting gradient cache after every training step (effectively turning it into a simple threshold).")
    parser.add_argument("--no_grad_hard_reset", dest="grad_hard_reset", action='store_false', default=argparse.SUPPRESS,
                        help="Flag to disable resetting gradient cache after every training step (effectively turning it into a simple threshold).")
    parser.add_argument("--hard_freeze", action='store_true', default=argparse.SUPPRESS,
                        help="Flag to enable complete freezing of weights when transmission probability is frozen.")
    parser.add_argument("--no_hard_freeze", dest="hard_freeze", action='store_false', default=argparse.SUPPRESS,
                        help="Flag to disable complete freezing of weights when transmission probability is frozen.")
    parser.add_argument("--learning_rate", type=float, default=argparse.SUPPRESS,
                        help="Learning rate for the parameters.")
    parser.add_argument("--log_dir", type=str, default="",
                        help="Subdirectory within ./log/ where to store logs.")
    parser.add_argument("--lr_modulation", action='store_true', default=argparse.SUPPRESS,
                        help="Flag to enable the learning rate modulation.")
    parser.add_argument("--no_lr_modulation", dest="lr_modulation", action='store_false', default=argparse.SUPPRESS,
                        help="Flag to disable the learning rate modulation.")
    parser.add_argument("--nonlinearity", choices=["leaky_relu", "relu", "sigmoid", "tanh"],
                        default=argparse.SUPPRESS, help="Nonlinearity between network layers.")
    parser.add_argument("--normalise", action='store_true', default=argparse.SUPPRESS,
                        help="Flag to enable normalisation of sampled weights by their transmission probability.")
    parser.add_argument("--no_normalise", dest="normalise", action='store_false', default=argparse.SUPPRESS,
                        help="Flag to disable normalisation of sampled weights by their transmission probability.")
    parser.add_argument("--prob_drift_down", type=float, default=argparse.SUPPRESS,
                        help="Weight probability decrease parameter.")
    parser.add_argument("--prob_drift_up", type=float, default=argparse.SUPPRESS,
                        help="Weight probability increase parameter.")
    # Fixed typo in the help text ("proabilities" -> "probabilities").
    parser.add_argument("--prob_freeze", type=float, default=argparse.SUPPRESS,
                        help="Weight probability at which probabilities are frozen.")
    parser.add_argument("--random_hyperparameter", action='store_true', default=False,
                        help="Use random hyperparameters.")
    parser.add_argument("--relative_parameter_noise", type=float, default=None,
                        help="Relative noise to be added to the parameters.")
    parser.add_argument("--seed", type=int, default=argparse.SUPPRESS, help="Random seed for pytorch")
    parser.add_argument("--task", choices=["mnist", "mnist_energy", "mnist_fisher", "perm_fmnist", "perm_mnist", "perm_mnist_cont", "split_fmnist", "split_mnist"],
                        default="mnist_energy", help="Continual task to be solved.")
    parser.add_argument("--weight_decay", type=float, default=argparse.SUPPRESS,
                        help="Weight decay (~l2-regularization strength).")
    return vars(parser.parse_args(args))
def run_dyn_continual(cfg, verbose=True):
"""
Main routine of this script.
Args:
cfg: Dictionary containing the configuration with all hyperparameters
verbose: Boolean controlling the verbosity of the logger
Returns:
Results dictionary, trained dyn_mlp model, test_loaders
"""
# Initialize seed if specified (might slow down the model)
if cfg['seed'] is not None:
torch.manual_seed(cfg['seed'])
# Load specified dataset
train_loaders, test_loaders = data.create_dataloader(cfg["dataset"]["name"], **cfg["dataset"]["kwargs"])
# Initialize nonlinearity used as activation function
nonlinearity = utils.create_nonlinearity(cfg['nonlinearity'])
mlp = nets.MultilayerPerceptron(cfg['dimensions'], nonlinearity, cfg['fixed_bias'])
dyn_mlp = ddc.DynamicDropConnect(mlp, cfg['learning_rate'], cfg['grad_threshold'],
cfg['prob_drift_down'], cfg['prob_drift_up'], cfg['prob_freeze'],
cfg['prob_max'], cfg['prob_min'], cfg['fixed_bias'], cfg['fixed_probs'],
cfg['hard_freeze'], cfg['manual_freeze'], cfg['lr_modulation'],
cfg['weight_decay'], cfg['normalise']).to(config.device)
logging.info("Start training with parametrization:\n{}".format(
json.dumps(cfg, indent=4, sort_keys=True)))
# Store results in a dicitonary of tensors
results = {
"energy_neuron": torch.zeros(len(train_loaders), cfg['epochs']),
"energy_weight": torch.zeros(len(train_loaders), cfg['epochs']),
"frozen_prob": torch.zeros(len(train_loaders), cfg['epochs']),
"mean_prob": torch.zeros(len(train_loaders), cfg['epochs']),
"mutual_inf": torch.zeros(len(train_loaders), cfg['epochs']),
"prob_angles": torch.zeros(len(train_loaders), cfg['epochs'], len(dyn_mlp.weight_probs)),
"perm_angles": torch.zeros(len(train_loaders), cfg['epochs'], len(dyn_mlp.weight_probs)),
"task_acc": torch.zeros(len(train_loaders), len(train_loaders)),
"task_mutual_inf": torch.zeros(len(train_loaders), len(train_loaders)),
"test_acc": torch.zeros(len(train_loaders), cfg['epochs']),
"train_acc": torch.zeros(len(train_loaders), cfg['epochs']),
"train_loss": torch.zeros(len(train_loaders), cfg['epochs']),
}
# Train each task in a continual fashion
for task, (trainer, tester) in enumerate(zip(train_loaders, test_loaders)):
logging.info("Starting training of task {}".format(task + 1))
for epoch in range(cfg['epochs']):
# Store weights probs to quantify the change of important weights in the end
weight_probs_prev = copy.deepcopy(dyn_mlp.weight_probs)
# Train the model for a single epoch
train_acc, train_loss = train.train(dyn_mlp, trainer, method="ddc", verbose=verbose)
# Test the model on the current task
test_acc = test.accuracy(dyn_mlp, tester, cfg['test_samples'])
# Compute the mutual information between true and learned labels and the energy
mutual_inf = metric.mutual_information(dyn_mlp, tester)
energy_weight = dyn_mlp.energy
energy_neuron = test.activation(dyn_mlp, tester, cfg['test_samples'])
# Compute the mean transmission probabilities of all weights and ratio of frozen weights
mean_prob = utils.compute_mean_probs(dyn_mlp.weight_probs)
frozen_prob = utils.compute_frozen_probs(dyn_mlp.frozen_weight_mask)
# Compute the layerwise angles between weight_probs now and beginning of the epoch
angles = torch.stack([
utils.vector_angle(p_prev.view(-1), p_curr.view(-1))
for p_prev, p_curr in zip(weight_probs_prev, dyn_mlp.weight_probs)
])
# Compute permuted angles as a control
perm_angles = torch.stack([
utils.vector_angle(p_prev.view(-1), p_curr.view(-1)[torch.randperm(p_curr.numel())])
for p_prev, p_curr in zip(weight_probs_prev, dyn_mlp.weight_probs)
])
# Store results
results['energy_weight'][task, epoch] = energy_weight
results['energy_neuron'][task, epoch] = energy_neuron
results['frozen_prob'][task, epoch] = frozen_prob
results['mean_prob'][task, epoch] = mean_prob
results['mutual_inf'][task, epoch] = mutual_inf
results['prob_angles'][task, epoch] = angles
results['perm_angles'][task, epoch] = perm_angles
results['test_acc'][task, epoch] = train_acc
results['train_acc'][task, epoch] = train_acc
results['train_loss'][task, epoch] = train_loss
# Logging
if verbose:
logging.info("epoch {}/{}: train_acc {:.4f} \t test_acc {:.4f} \t mean_p: {:.4f} \t frozen_p: {:.6f} \t angle_p: {:.4f} \t mutual_inf {:.4f} \t energy: {:.4f} \t bit/energy: {:.4f}".format(epoch + 1, cfg['epochs'], train_acc, test_acc, mean_prob, frozen_prob, angles.mean(), mutual_inf, energy_weight, mutual_inf / energy_weight))
config.writer.add_scalars('task{}/accuracy'.format(task + 1), {'train': train_acc, 'test': test_acc}, epoch)
config.writer.add_scalar('task{}/information'.format(task + 1), mutual_inf, epoch)
config.writer.add_scalar('task{}/info_per_energy'.format(task + 1), mutual_inf / energy_weight, epoch)
config.writer.add_scalar('task{}/energy_weight'.format(task + 1), energy_weight, epoch)
config.writer.add_scalar('task{}/energy_neuron'.format(task + 1), energy_neuron, epoch)
config.writer.add_scalar('task{}/train_loss'.format(task + 1), train_loss, epoch)
config.writer.add_scalars('task{}/probability'.format(task + 1), {'mean': mean_prob, 'frozen': frozen_prob}, epoch)
for l, p in enumerate(dyn_mlp.weight_probs):
config.writer.add_histogram('task{}/probabilities/layer{}'.format(task + 1, l), p.view(-1), epoch)
for l, w in enumerate(dyn_mlp.weights):
config.writer.add_histogram('task{}/weights/layer{}'.format(task + 1, l), w.view(-1), epoch)
for l, (p, w) in enumerate(zip(dyn_mlp.weight_probs, dyn_mlp.weights)):
config.writer.add_histogram('task{}/weightxprob/layer{}'.format(task + 1, l), (p * w).view(-1), epoch)
# If manual freezing is enabled, use task boundaries to freeze weights
| |
E501
# verify the required parameter 'parameter_name' is set
if ('parameter_name' not in params or
params['parameter_name'] is None):
raise ValueError("Missing the required parameter `parameter_name` when calling `get_step_parameter`") # noqa: E501
collection_formats = {}
path_params = {}
if 'bt_locator' in params:
if isinstance(params['bt_locator'], TeamCityObject):
path_params['btLocator'] = params['bt_locator'].locator_id
else:
path_params['btLocator'] = params['bt_locator'] # noqa: E501
if 'step_id' in params:
if isinstance(params['step_id'], TeamCityObject):
path_params['stepId'] = params['step_id'].locator_id
else:
path_params['stepId'] = params['step_id'] # noqa: E501
if 'parameter_name' in params:
if isinstance(params['parameter_name'], TeamCityObject):
path_params['parameterName'] = params['parameter_name'].locator_id
else:
path_params['parameterName'] = params['parameter_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/buildTypes/{btLocator}/steps/{stepId}/parameters/{parameterName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_step_parameters_with_http_info(self, bt_locator, step_id, **kwargs):  # noqa: E501
    """get_step_parameters  # noqa: E501

    Synchronous by default; pass async_req=True to receive the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str bt_locator: (required)
    :param str step_id: (required)
    :param str fields:
    :return: Properties
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['bt_locator', 'step_id', 'fields']  # noqa: E501
    all_params += ['async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout']

    params = locals()
    extra_kwargs = params.pop('kwargs')
    for key, val in six.iteritems(extra_kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_step_parameters" % key
            )
        params[key] = val

    # verify the required parameter 'bt_locator' is set
    if params.get('bt_locator') is None:
        raise ValueError("Missing the required parameter `bt_locator` when calling `get_step_parameters`")  # noqa: E501
    # verify the required parameter 'step_id' is set
    if params.get('step_id') is None:
        raise ValueError("Missing the required parameter `step_id` when calling `get_step_parameters`")  # noqa: E501

    collection_formats = {}

    # Resolve TeamCityObject arguments to their locator ids for the path.
    path_params = {}
    for arg_name, placeholder in (('bt_locator', 'btLocator'),
                                  ('step_id', 'stepId')):
        if arg_name in params:
            value = params[arg_name]
            if isinstance(value, TeamCityObject):
                value = value.locator_id
            path_params[placeholder] = value  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/app/rest/buildTypes/{btLocator}/steps/{stepId}/parameters', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Properties',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def __get_step_setting_with_http_info(self, bt_locator, step_id, field_name, **kwargs):  # noqa: E501
    """get_step_setting  # noqa: E501

    Synchronous by default; pass async_req=True to receive the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str bt_locator: (required)
    :param str step_id: (required)
    :param str field_name: (required)
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['bt_locator', 'step_id', 'field_name']  # noqa: E501
    all_params += ['async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout']

    params = locals()
    extra_kwargs = params.pop('kwargs')
    for key, val in six.iteritems(extra_kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_step_setting" % key
            )
        params[key] = val

    # verify the required parameter 'bt_locator' is set
    if params.get('bt_locator') is None:
        raise ValueError("Missing the required parameter `bt_locator` when calling `get_step_setting`")  # noqa: E501
    # verify the required parameter 'step_id' is set
    if params.get('step_id') is None:
        raise ValueError("Missing the required parameter `step_id` when calling `get_step_setting`")  # noqa: E501
    # verify the required parameter 'field_name' is set
    if params.get('field_name') is None:
        raise ValueError("Missing the required parameter `field_name` when calling `get_step_setting`")  # noqa: E501

    collection_formats = {}

    # Resolve TeamCityObject arguments to their locator ids for the path.
    path_params = {}
    for arg_name, placeholder in (('bt_locator', 'btLocator'),
                                  ('step_id', 'stepId'),
                                  ('field_name', 'fieldName')):
        if arg_name in params:
            value = params[arg_name]
            if isinstance(value, TeamCityObject):
                value = value.locator_id
            path_params[placeholder] = value  # noqa: E501

    query_params = []

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/app/rest/buildTypes/{btLocator}/steps/{stepId}/{fieldName}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def __get_steps_with_http_info(self, bt_locator, **kwargs):  # noqa: E501
    """get_steps  # noqa: E501

    Synchronous by default; pass async_req=True to receive the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str bt_locator: (required)
    :param str fields:
    :return: Steps
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['bt_locator', 'fields']  # noqa: E501
    all_params += ['async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout']

    params = locals()
    extra_kwargs = params.pop('kwargs')
    for key, val in six.iteritems(extra_kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_steps" % key
            )
        params[key] = val

    # verify the required parameter 'bt_locator' is set
    if params.get('bt_locator') is None:
        raise ValueError("Missing the required parameter `bt_locator` when calling `get_steps`")  # noqa: E501

    collection_formats = {}

    # Resolve TeamCityObject arguments to their locator ids for the path.
    path_params = {}
    if 'bt_locator' in params:
        value = params['bt_locator']
        if isinstance(value, TeamCityObject):
            value = value.locator_id
        path_params['btLocator'] = value  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/app/rest/buildTypes/{btLocator}/steps', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Steps',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def __get_template_with_http_info(self, bt_locator, template_locator, **kwargs):  # noqa: E501
    """get_template  # noqa: E501

    Synchronous by default; pass async_req=True to receive the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str bt_locator: (required)
    :param str template_locator: (required)
    :param str fields:
    :return: BuildType
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['bt_locator', 'template_locator', 'fields']  # noqa: E501
    all_params += ['async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout']

    params = locals()
    extra_kwargs = params.pop('kwargs')
    for key, val in six.iteritems(extra_kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_template" % key
            )
        params[key] = val

    # verify the required parameter 'bt_locator' is set
    if params.get('bt_locator') is None:
        raise ValueError("Missing the required parameter `bt_locator` when calling `get_template`")  # noqa: E501
    # verify the required parameter 'template_locator' is set
    if params.get('template_locator') is None:
        raise ValueError("Missing the required parameter `template_locator` when calling `get_template`")  # noqa: E501

    collection_formats = {}

    # Resolve TeamCityObject arguments to their locator ids for the path.
    path_params = {}
    for arg_name, placeholder in (('bt_locator', 'btLocator'),
                                  ('template_locator', 'templateLocator')):
        if arg_name in params:
            value = params[arg_name]
            if isinstance(value, TeamCityObject):
                value = value.locator_id
            path_params[placeholder] = value  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/app/rest/buildTypes/{btLocator}/templates/{templateLocator}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BuildType',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def __get_templates_with_http_info(self, bt_locator, **kwargs): # noqa: E501
"""get_templates # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_templates_with_http_info(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str bt_locator: (required)
:param str fields:
:return: BuildTypes
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bt_locator', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_templates" % key
)
params[key] = val
del params['kwargs']
| |
<filename>sdk/resources/azure-mgmt-resourcegraph/azure/mgmt/resourcegraph/models/_models_py3.py<gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._resource_graph_client_enums import *
class Column(msrest.serialization.Model):
    """Descriptor for a single column of a query result.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Column name.
    :type name: str
    :param type: Required. Column data type. Possible values include:
     "string", "integer", "number", "boolean", "object".
    :type type: str or ~resource_graph_client.models.ColumnDataType
    """

    _validation = {'name': {'required': True}, 'type': {'required': True}}

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, name: str, type: Union[str, "ColumnDataType"], **kwargs):
        super(Column, self).__init__(**kwargs)
        self.name = name
        self.type = type
class DateTimeInterval(msrest.serialization.Model):
    """A half-open time interval ``[start, end)``: inclusive start, exclusive end.

    All required parameters must be populated in order to send to Azure.

    :param start: Required. Inclusive/closed start of the interval. Specifying
     a ``start`` that occurs chronologically after ``end`` results in an error.
    :type start: ~datetime.datetime
    :param end: Required. Exclusive/open end of the interval. Specifying an
     ``end`` that occurs chronologically before ``start`` results in an error.
    :type end: ~datetime.datetime
    """

    _validation = {'start': {'required': True}, 'end': {'required': True}}

    _attribute_map = {
        'start': {'key': 'start', 'type': 'iso-8601'},
        'end': {'key': 'end', 'type': 'iso-8601'},
    }

    def __init__(self, *, start: datetime.datetime, end: datetime.datetime, **kwargs):
        super(DateTimeInterval, self).__init__(**kwargs)
        self.start = start
        self.end = end
class Error(msrest.serialization.Model):
    """Top-level error details.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code identifying the specific error.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    :param details: Error details.
    :type details: list[~resource_graph_client.models.ErrorDetails]
    """

    _validation = {'code': {'required': True}, 'message': {'required': True}}

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetails]'},
    }

    def __init__(self, *, code: str, message: str,
                 details: Optional[List["ErrorDetails"]] = None, **kwargs):
        super(Error, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = details
class ErrorDetails(msrest.serialization.Model):
    """Detailed error information, including any unmatched payload properties.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param code: Required. Error code identifying the specific error.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    """

    _validation = {'code': {'required': True}, 'message': {'required': True}}

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, *, code: str, message: str,
                 additional_properties: Optional[Dict[str, object]] = None, **kwargs):
        super(ErrorDetails, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.code = code
        self.message = message
class ErrorResponse(msrest.serialization.Model):
    """An error response returned by the API.

    All required parameters must be populated in order to send to Azure.

    :param error: Required. Error information.
    :type error: ~resource_graph_client.models.Error
    """

    _validation = {'error': {'required': True}}

    _attribute_map = {'error': {'key': 'error', 'type': 'Error'}}

    def __init__(self, *, error: "Error", **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = error
class Facet(msrest.serialization.Model):
    """Additional statistics on the response of a query.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: FacetError, FacetResult.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. Facet expression, same as in the corresponding
     facet request.
    :type expression: str
    :param result_type: Required. Result type. Constant filled by server.
    :type result_type: str
    """

    _validation = {'expression': {'required': True}, 'result_type': {'required': True}}

    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'result_type': {'key': 'resultType', 'type': 'str'},
    }

    # Polymorphic dispatch: the server fills resultType with the subclass name.
    _subtype_map = {'result_type': {'FacetError': 'FacetError', 'FacetResult': 'FacetResult'}}

    def __init__(self, *, expression: str, **kwargs):
        super(Facet, self).__init__(**kwargs)
        self.expression = expression
        self.result_type = None  # type: Optional[str]
class FacetError(Facet):
    """A facet whose execution resulted in an error.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. Facet expression, same as in the corresponding
     facet request.
    :type expression: str
    :param result_type: Required. Result type. Constant filled by server.
    :type result_type: str
    :param errors: Required. An array containing detected facet errors with
     details.
    :type errors: list[~resource_graph_client.models.ErrorDetails]
    """

    _validation = {
        'expression': {'required': True},
        'result_type': {'required': True},
        'errors': {'required': True},
    }

    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'result_type': {'key': 'resultType', 'type': 'str'},
        'errors': {'key': 'errors', 'type': '[ErrorDetails]'},
    }

    def __init__(self, *, expression: str, errors: List["ErrorDetails"], **kwargs):
        super(FacetError, self).__init__(expression=expression, **kwargs)
        self.result_type = 'FacetError'  # type: str
        self.errors = errors
class FacetRequest(msrest.serialization.Model):
    """A request to compute additional statistics (facets) over the query results.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. The column or list of columns to summarize by.
    :type expression: str
    :param options: The options for facet evaluation.
    :type options: ~resource_graph_client.models.FacetRequestOptions
    """

    _validation = {'expression': {'required': True}}

    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'options': {'key': 'options', 'type': 'FacetRequestOptions'},
    }

    def __init__(self, *, expression: str,
                 options: Optional["FacetRequestOptions"] = None, **kwargs):
        super(FacetRequest, self).__init__(**kwargs)
        self.expression = expression
        self.options = options
class FacetRequestOptions(msrest.serialization.Model):
    """The options for facet evaluation.

    :param sort_by: The column name or query expression to sort on. Defaults to
     count if not present.
    :type sort_by: str
    :param sort_order: The sorting order by the selected column (count by
     default). Possible values include: "asc", "desc". Default value: "desc".
    :type sort_order: str or ~resource_graph_client.models.FacetSortOrder
    :param filter: Specifies the filter condition for the 'where' clause which
     will be run on main query's result, just before the actual faceting.
    :type filter: str
    :param top: The maximum number of facet rows that should be returned.
    :type top: int
    """

    # Server-enforced bound on the number of returned facet rows.
    _validation = {'top': {'maximum': 1000, 'minimum': 1}}

    _attribute_map = {
        'sort_by': {'key': 'sortBy', 'type': 'str'},
        'sort_order': {'key': 'sortOrder', 'type': 'str'},
        'filter': {'key': 'filter', 'type': 'str'},
        'top': {'key': '$top', 'type': 'int'},
    }

    def __init__(self, *, sort_by: Optional[str] = None,
                 sort_order: Optional[Union[str, "FacetSortOrder"]] = "desc",
                 filter: Optional[str] = None,
                 top: Optional[int] = None, **kwargs):
        super(FacetRequestOptions, self).__init__(**kwargs)
        self.sort_by = sort_by
        self.sort_order = sort_order
        self.filter = filter
        self.top = top
class FacetResult(Facet):
    """A successfully evaluated facet carrying the extra statistics computed over the query results.

    All required parameters must be populated in order to send to Azure.

    :param expression: Required. Facet expression, same as in the corresponding facet request.
    :type expression: str
    :param result_type: Required. Result type.Constant filled by server.
    :type result_type: str
    :param total_records: Required. Number of total records in the facet results.
    :type total_records: long
    :param count: Required. Number of records returned in the facet response.
    :type count: int
    :param data: Required. A table containing the desired facets. Only present if the facet is
     valid.
    :type data: object
    """

    # Every field is mandatory on this (server-populated) model.
    _validation = {
        'expression': {'required': True},
        'result_type': {'required': True},
        'total_records': {'required': True},
        'count': {'required': True},
        'data': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'expression': {'key': 'expression', 'type': 'str'},
        'result_type': {'key': 'resultType', 'type': 'str'},
        'total_records': {'key': 'totalRecords', 'type': 'long'},
        'count': {'key': 'count', 'type': 'int'},
        'data': {'key': 'data', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        expression: str,
        total_records: int,
        count: int,
        data: object,
        **kwargs
    ):
        super().__init__(expression=expression, **kwargs)
        # Polymorphic discriminator: always the literal 'FacetResult'.
        self.result_type = 'FacetResult'  # type: str
        self.total_records = total_records
        self.count = count
        self.data = data
class Operation(msrest.serialization.Model):
"""Resource Graph REST API operation definition.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: Display metadata associated with the operation.
:type display: ~resource_graph_client.models.OperationDisplay
:param origin: The origin of operations.
:type origin: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': | |
# -*- coding: utf-8 -*-
from src.browser import Browser
from src.constants import *
from src.exceptions import RetryException
from src.secret import USERNAME, PASSWORD, TRANS_USERNAME, TRANS_PASSWORD
from datetime import datetime
from functools import wraps, partial
from hashlib import md5
import json
import os
from pyquery import PyQuery as pq
import re
import requests
import sys
from time import sleep
from tqdm import tqdm
import traceback
"""
Usage:
python instagran_web_crawler.py [path] [options]
Args:
path: Input path (a file including one user_id per line).
options:
--saved: Download saved posts.
--transfer: Transfer following users from USERNAME to TRANS_USERNAME.
Notice:
Put chromedriver.exe in folder /bin.
Copy secret.py.dist as secret.py in the same folder.
Input file format:
username [start_date] [end_date]
If end_date is specific and no specific start_date, use '-'.
If start_date is specific and no specific end_date,
no other input is needed.
(Default: Posts of all time.)
options:
-f:
Get following users.
-p:
Download posts (including images and videos).
        (If -f is not given, there is no need to input options, since -p is the default.
The order of options is meaningless.)
(Ignore letter case.)
e.g.
a123456789 2019-01-01 2019-06-01 -fp
b987654321 2018-01-01 2019-01-01 -Pf
c111111111 - 2019-02-01 -F
d222222222 2019-03-01 -fp
e333333333
"""
# Default crawl window ends today; overridden per input line when dates given.
end_date = datetime.now().strftime("%Y-%m-%d")
# All downloaded media / username lists are written under ./results.
SAVE_PATH = os.path.join('.', 'results')
# Single shared Selenium browser session (HAS_SCREEN comes from src.constants).
browser = Browser(HAS_SCREEN)
# Run-mode flags — presumably toggled by command-line option parsing
# elsewhere in this script (parser not visible in this chunk).
download_saved = False
download_from_file = True
download_posts = True
get_following = False
transfer_following = False
# Identity of the logged-in account; set by login() / set_headers().
logged_in_username = ''
logged_in_user_id = ''
# NOTE(review): name is misspelled ("trasnfer") but used consistently below;
# renaming would require touching every reference at once.
trasnfer_from_username = ''
# Target account currently being crawled.
username = ''
user_id = ''
# Counter of failed JSON fetches, capped by MAX_GET_JSON_COUNT in get_json().
get_json_count = 0
# TODO: Download media which are in the specified period.
def output_log(msg, traceback_option=True):
    """Append *msg* to the log file at LOG_PATH.

    :param msg: Text to append (caller supplies any leading newline).
    :param traceback_option: When True, also write the currently active
        exception's traceback into the same file.
    """
    # Use one managed handle for both writes: the original opened the file a
    # second time for the traceback and never closed that handle (leak).
    with open(LOG_PATH, 'a', encoding='utf8') as output_file:
        output_file.write(msg)
        if traceback_option:
            traceback.print_exc(file=output_file)
def retry(attempt=10, wait=0.3):
    """Decorator factory: retry the wrapped function on RetryException.

    Retries up to *attempt* total calls, sleeping *wait* seconds between
    tries. When attempts are exhausted, logs a failure message and raises a
    fresh RetryException with its cause suppressed.

    :param attempt: Total number of attempts (including the first call).
    :param wait: Seconds to sleep between attempts.
    """
    def wrap(func):
        @wraps(func)
        def wrapped_f(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except RetryException:
                if attempt > 1:
                    sleep(wait)
                    # Recurse with a decremented budget (equivalent to a
                    # countdown loop; keeps the original structure).
                    return retry(attempt - 1, wait)(func)(*args, **kwargs)
                # Out of attempts: build a human-readable failure summary.
                if func.__name__ == 'check_login':
                    # NOTE(review): this writes the account password to the
                    # log in plain text — consider masking it.
                    tmp = 'log in. (username: {}, password: {})'.format(
                        USERNAME, PASSWORD)
                elif func.__name__ == 'check_log_out':
                    tmp = 'log out.'
                else:
                    # Fix: any other wrapped function previously left ``tmp``
                    # unbound here, raising UnboundLocalError instead of the
                    # intended RetryException.
                    tmp = '{}.'.format(func.__name__)
                msg = '{} - Error: Failed to {}\n'.format(
                    datetime.now().strftime("%Y-%m-%d %H:%M:%S"), tmp)
                output_log('\n' + msg, True)
                exc = RetryException(msg)
                exc.__cause__ = None  # suppress chained context in the trace
                raise exc
        return wrapped_f
    return wrap
def log_out():
    """Log the current account out of Instagram via the profile-page UI.

    Uses the module-level ``browser`` and ``logged_in_username``. Relies on
    Instagram's obfuscated, version-specific CSS class names.
    """
    url = URL + '/{}/'.format(logged_in_username)
    browser.get(url)
    # Open the options dialog on the profile page.
    option_btn = browser.find_one('.dCJp8.afkep')
    option_btn.click()
    # The 8th button (index 7) in the dialog is assumed to be "Log Out" —
    # fragile; depends on Instagram's current dialog layout. TODO confirm.
    log_out_btn = browser.find('.aOOlW.HoLwm', waittime=10)[7]
    log_out_btn.click()
    @retry()
    def check_log_out():
        # Element ._9nyy2 is taken as the "logged out" marker; retry until
        # it appears.
        if not browser.find('._9nyy2'):
            raise RetryException()
    check_log_out()
def set_headers():
    """Build the requests headers (user-agent + cookie string) from the
    cookies of the live Selenium session.

    Side effect: stores the session's ``ds_user_id`` cookie into the
    module-level ``logged_in_user_id``.
    """
    global logged_in_user_id
    # Hard-coded shbid/shbts values, apparently captured from a real session.
    shbid = '3317'
    shbts = '1571731121.0776558'
    # Flatten Selenium's cookie dicts into a name -> value mapping.
    cookies_dict = {c['name']: c['value'] for c in browser.driver.get_cookies()}
    logged_in_user_id = cookies_dict["ds_user_id"]
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
        'cookie': COOKIE.format(
            cookies_dict["mid"], shbid, shbts, cookies_dict["ds_user_id"],
            cookies_dict["csrftoken"], cookies_dict["sessionid"],
            cookies_dict["rur"], cookies_dict["urlgen"]),
    }
    return headers
def login(trans_login=False):
    """Log in to Instagram through the web login form.

    :param trans_login: When True, log in with the transfer account
        (TRANS_USERNAME / TRANS_PASSWORD) and remember the previous account
        name in ``trasnfer_from_username``; otherwise use USERNAME/PASSWORD.
    :return: requests-compatible headers built from the session cookies
        (see ``set_headers``).
    """
    global logged_in_username
    url = "{}/accounts/login/".format(URL)
    browser.get(url)
    if trans_login:
        username = TRANS_USERNAME
        # Fix: this line previously contained the literal placeholder
        # ``<PASSWORD>`` (a SyntaxError); restore the imported credential.
        password = TRANS_PASSWORD
        global trasnfer_from_username
        trasnfer_from_username = logged_in_username
    else:
        username = USERNAME
        password = PASSWORD
    u_input = browser.find_one('input[name="username"]')
    u_input.send_keys(username)
    p_input = browser.find_one('input[name="password"]')
    p_input.send_keys(password)
    login_btn = browser.find_one(".L3NKy")
    login_btn.click()
    @retry()
    def check_login():
        # Still seeing the username field means the login form is still up.
        if browser.find_one('input[name="username"]'):
            raise RetryException()
    check_login()
    # Third anchor in the nav bar is assumed to be the profile link — fragile;
    # depends on Instagram's current markup.
    logged_in_user_url = browser.find(
        "div[class='XrOey'] a")[2].get_attribute("href")
    # Extract the trailing path component: .../<username>/
    logged_in_username = logged_in_user_url[
        logged_in_user_url[: -1].rfind('/') + 1: -1]
    print('\n* Logged in as {}.\n'.format(logged_in_username))
    headers = set_headers()
    return headers
def get_html(url, headers):
    """GET *url* and return the page source text.

    :raises Exception: after logging, on any network error or non-200 status.
    """
    status = None
    try:
        response = requests.get(url, headers=headers)
        status = response.status_code
        if status == requests.codes['ok']:
            return response.text
    except Exception:
        # Fix: the old handler formatted ``response.status_code`` even when
        # requests.get itself raised, masking the real error with an
        # UnboundLocalError. It also logged twice on a non-200 status.
        pass
    msg = '{} - Error: Failed to get page source (status_code: {}).\n'.format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), status)
    output_log('\n' + msg, True)
    raise Exception(msg)
def get_json(url, headers):
    """GET *url* and return the decoded JSON body, retrying on failure.

    Failed fetches increment the module-level ``get_json_count``; once it
    reaches MAX_GET_JSON_COUNT a RetryException is raised instead of
    retrying further.
    """
    global get_json_count

    def _retry_later(status, tag):
        # Shared failure path (duplicated verbatim in the original): log,
        # wait, bump the attempt counter, then give up or recurse.
        global get_json_count
        msg = '{} - Warning: Failed to get json file (status_code: {}).\n'.format(
            datetime.now().strftime("%Y-%m-%d %H:%M:%S"), status)
        output_log('\n' + msg, False)
        print('{} - {} Warning: Retry to get json file.\n'.format(
            datetime.now().strftime("%Y-%m-%d %H:%M:%S"), tag))
        sleep(WAIT_TIME)
        get_json_count += 1
        # Fix: use >= rather than == so an already-exceeded counter still
        # terminates the retry chain.
        if get_json_count >= MAX_GET_JSON_COUNT:
            raise RetryException()
        return get_json(url, headers)

    try:
        response = requests.get(url, headers=headers)
    except Exception:
        # Fix: the old handler read ``response.status_code`` even when the
        # request itself raised (UnboundLocalError masked the real error).
        return _retry_later('request failed', '2')
    if response.status_code != requests.codes['ok']:
        return _retry_later(response.status_code, '1')
    try:
        return response.json()
    except Exception:
        # Body was not valid JSON; treat like the original's generic
        # exception path and retry.
        return _retry_later(response.status_code, '2')
def get_content(url, headers):
    """GET *url* and return the raw response body (bytes).

    On any error the failure is logged and None is returned — downloads are
    best-effort, matching the original's behavior.
    """
    status = None
    try:
        response = requests.get(url, headers=headers)
        status = response.status_code
        if status == requests.codes['ok']:
            return response.content
    except Exception:
        # Fix: the old handler read ``response.status_code`` even when the
        # request itself raised, producing an UnboundLocalError instead of
        # the intended log entry.
        pass
    msg = '{} - Error: Failed to get image content (status_code: {}, url: {}).\n'.format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), status, url)
    output_log('\n' + msg, True)
def get_video_url(shortcode, headers):
    """Resolve the direct video URL for the post identified by *shortcode*.

    Queries the GraphQL shortcode-media endpoint and returns its
    ``video_url`` field.
    """
    # Fixed comment-pagination parameters expected by the query hash.
    query_url = URL_QUERY_SAVED_VIDEOS.format(
        HASH_SAVED_VIDEOS,
        shortcode,
        '3',     # child_comment_count
        '40',    # fetch_comment_count
        '24',    # parent_comment_count
        'true',  # has_threaded_comments
    )
    js_data = get_json(query_url, headers)
    return js_data['data']['shortcode_media']['video_url']
def get_sidecar_urls(shortcode):
    """Collect media URLs from a multi-media ("sidecar") post.

    Drives the shared ``browser`` through the post's carousel, gathering
    image ``src`` URLs, substituting the direct video URL for video slides,
    and dropping each video's preview image from the result.
    """
    url = URL_SHORTCODE.format(shortcode)
    browser.get(url)
    urls = set()
    is_start = True
    while True:
        ele_imgs = browser.find("._97aPb img", waittime=10)
        if isinstance(ele_imgs, list):
            for ele_img in ele_imgs:
                urls.add(ele_img.get_attribute("src"))
        else:
            # No image list found: treat the carousel as exhausted.
            break
        play_btn = browser.find_one('.B2xwy._3G0Ji.PTIMp.videoSpritePlayButton')
        if play_btn:
            # Current slide is a video: record the video source instead of
            # its preview thumbnail.
            urls.add(browser.find_one('.tWeCl').get_attribute("src"))
            if is_start:
                urls.remove(ele_imgs[0].get_attribute("src"))
                is_start = False
            else:
                # Exclude the preview image of a video.
                urls.remove(ele_imgs[1].get_attribute("src"))
        next_photo_btn = browser.find_one("._6CZji .coreSpriteRightChevron")
        if next_photo_btn:
            next_photo_btn.click()
            sleep(WAIT_TIME)
        else:
            # No right-chevron: last slide reached.
            break
    return list(urls)
def transfer_following_users(username_list):
    """Follow every account in *username_list* from the logged-in account.

    Used when migrating the "following" list between accounts. Accounts
    already followed are counted and logged rather than re-followed.
    """
    # CSS class Instagram gives the "Follow" button when NOT yet following —
    # version-specific; the trailing space is intentional.
    button_not_follow_class = '_5f5mN jIbKX _6VtSN yZn4P '
    @retry()
    def check_follow():
        # Retry until the button's class changes, i.e. the follow took effect.
        follow_btn = browser.find_one('button')
        if follow_btn.get_attribute("class") == button_not_follow_class:
            raise RetryException()
    has_followed = 0
    for username in tqdm(username_list, desc='Progress'):
        url = '{}/{}/'.format(URL, username)
        browser.get(url)
        follow_btn = browser.find_one('button')
        if follow_btn.get_attribute("class") == button_not_follow_class:
            follow_btn.click()
            check_follow()
        else:
            has_followed += 1
            msg = '{} - Info: This account has followed this user (username: {}).\n'.format(
                datetime.now().strftime("%Y-%m-%d %H:%M:%S"), username)
            print(msg)
            output_log('\n' + msg, False)
    msg = '\n{} - Info: Finish following users. (Successful: {}, Already followed: {}, from_username: {}, to_username: {}).\n'.format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), len(username_list) - has_followed, has_followed,
        trasnfer_from_username, logged_in_username)
    print(msg)
def get_following_username_list(user_id, username, headers):
    """Return the list of usernames that *user_id* is following.

    Pages through the GraphQL ``edge_follow`` connection, showing a tqdm
    progress bar sized by the account's following count.

    :param user_id: Numeric Instagram id of the account to inspect.
    :param username: Display name used only in the final log message.
    :param headers: Session headers from ``set_headers``.
    """
    include_reel = 'true'
    fetch_mutual = 'false'
    first_first = '24'

    def _page_cursor(page_info):
        # Shared cursor extraction (was duplicated in the original).
        # The last two characters are stripped — presumably a suffix the
        # query string must not include; TODO confirm against the endpoint.
        if page_info['end_cursor']:
            return page_info['end_cursor'][: -2]
        return None

    url = URL_QUERY_FOLLOWING_USERS.format(HASH_FOLLOWING_USERS,
        user_id, include_reel, fetch_mutual, first_first, '')
    js_data = get_json(url, headers)
    edge_follow = js_data['data']['user']['edge_follow']
    following_count = edge_follow['count']
    cursor = _page_cursor(edge_follow['page_info'])
    has_next_page = edge_follow['page_info']['has_next_page']
    username_list = list()
    pbar = tqdm(total=following_count)
    pbar.set_description("Progress")
    for edge in edge_follow['edges']:
        username_list.append(edge['node']['username'])
        pbar.update(1)
    while has_next_page:
        url = URL_QUERY_FOLLOWING_USERS.format(HASH_FOLLOWING_USERS,
            user_id, include_reel, fetch_mutual, FIRST,
            FOLLOWING_USERS_SUFFIX.format(cursor))
        js_data = get_json(url, headers)
        edge_follow = js_data['data']['user']['edge_follow']
        cursor = _page_cursor(edge_follow['page_info'])
        has_next_page = edge_follow['page_info']['has_next_page']
        for edge in edge_follow['edges']:
            username_list.append(edge['node']['username'])
            pbar.update(1)
    # Fix: the progress bar was never closed, which garbles later output.
    pbar.close()
    msg = '\n\n{} - Info: Finish exploring following users. {} following users are found (username: {}).\n'.format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), following_count, username)
    print(msg)
    return username_list
def get_saved_urls(headers):
    """Collect media URLs for every post the logged-in user has saved.

    Walks the ``edge_saved_media`` GraphQL connection page by page. Sidecar
    posts are expanded via the browser, video posts resolved to their direct
    URLs, and plain images taken from ``display_url``.
    """
    url = URL_QUERY_SAVED_POSTS.format(logged_in_username)
    js_data = get_json(url, headers)
    urls = list()
    # "logging_page_id" appears to be 'profilePage_<id>' (cf. the regex in
    # get_urls); slicing off the 12-char prefix leaves the numeric id.
    user_id = js_data["logging_page_id"][12: ]
    edges = js_data["graphql"]["user"]["edge_saved_media"]["edges"]
    post_count = js_data["graphql"]["user"]["edge_saved_media"]["count"]
    page_info = js_data["graphql"]["user"]["edge_saved_media"]["page_info"]
    cursor = page_info['end_cursor']
    has_next_page = page_info['has_next_page']
    pbar = tqdm(total=post_count)
    pbar.set_description("Progress")
    for edge in edges:
        if edge['node']['__typename'] == 'GraphSidecar':
            # Multi-media post: expand through the browser carousel.
            shortcode = edge['node']['shortcode']
            for url in get_sidecar_urls(shortcode):
                urls.append(url)
        else:
            if edge['node']['is_video']:
                shortcode = edge['node']['shortcode']
                video_url = get_video_url(shortcode, headers)
                urls.append(video_url)
            else:
                display_url = edge['node']['display_url']
                urls.append(display_url)
        pbar.update(1)
    # Follow the pagination cursor until every saved post has been seen.
    while has_next_page:
        url = URL_QUERY_POSTS.format(HASH_SAVED_POSTS, user_id, FIRST, cursor)
        js_data = get_json(url, headers)
        edges = js_data['data']['user']['edge_saved_media']['edges']
        page_info = js_data['data']['user']['edge_saved_media']['page_info']
        cursor = page_info['end_cursor']
        has_next_page = page_info['has_next_page']
        for edge in edges:
            if edge['node']['__typename'] == 'GraphSidecar':
                shortcode = edge['node']['shortcode']
                for url in get_sidecar_urls(shortcode):
                    urls.append(url)
            else:
                if edge['node']['is_video']:
                    shortcode = edge['node']['shortcode']
                    video_url = get_video_url(shortcode, headers)
                    urls.append(video_url)
                else:
                    display_url = edge['node']['display_url']
                    urls.append(display_url)
            pbar.update(1)
    msg = '\n\n{} - Info: Finish exploring saved posts. {} saved posts are found (logged_in_username: {}).\n'.format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), post_count, logged_in_username)
    print(msg)
    return urls
def get_urls(html, headers):
global user_id
user_id = re.findall('"profilePage_([0-9]+)"', html, re.S)[0]
doc = pq(html)
items = doc('script[type="text/javascript"]').items()
urls = list()
for item in items:
if item.text().strip().startswith('window._sharedData'):
js_data = json.loads(item.text()[21: -1], encoding='utf-8')
edges = js_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["edges"]
post_count = js_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["count"]
page_info = js_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]['page_info']
cursor = page_info['end_cursor']
has_next_page = page_info['has_next_page']
pbar = tqdm(total=post_count)
pbar.set_description("Progress")
for edge in edges:
if edge['node']['__typename'] == 'GraphSidecar':
shortcode = edge['node']['shortcode']
for url in get_sidecar_urls(shortcode):
urls.append(url)
else:
if edge['node']['is_video']:
shortcode = edge['node']['shortcode']
video_url = get_video_url(shortcode, headers)
urls.append(video_url)
else:
display_url = edge['node']['display_url']
urls.append(display_url)
pbar.update(1)
while has_next_page:
url = URL_QUERY_POSTS.format(HASH_NORMAL_POSTS, user_id, FIRST, cursor)
js_data = get_json(url, headers)
edges = js_data['data']['user']['edge_owner_to_timeline_media']['edges']
page_info = js_data['data']['user']['edge_owner_to_timeline_media']['page_info']
cursor = page_info['end_cursor']
has_next_page = page_info['has_next_page']
for edge in edges:
if edge['node']['__typename'] == 'GraphSidecar':
if edge['node']['__typename'] == 'GraphSidecar':
shortcode = edge['node']['shortcode']
for url in get_sidecar_urls(shortcode):
urls.append(url)
else:
if | |
'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only)
External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (View for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only)
User is allowed View on (View for "category": ('id', 'name') only)
User is allowed View on (View for "user": ('nickname', 'status', 'username') only)
User is allowed View on (View for "user_status": ('name',) only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only)
User is allowed to access area (View for "area" only)
User is allowed to access doc_issue_status (View for "doc_issue_status" only)
User is allowed to access ext_tracker (View for "ext_tracker" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to access keyword (View for "keyword" only)
User is allowed to access kind (View for "kind" only)
User is allowed to access msg_keyword (View for "msg_keyword" only)
User is allowed to access safety_level (View for "safety_level" only)
User is allowed to access severity (View for "severity" only)
User is allowed to access status (View for "status" only)
User is allowed to access status_transition (View for "status_transition" only)
User is allowed to access test_level (View for "test_level" only)
User is allowed to create file (Create for "file" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create msg (Create for "msg" only)
User is allowed to create query (Create for "query" only)
User is allowed to edit their queries (Edit for "query" only)
User is allowed to retire their queries (Retire for "query" only)
User is allowed to search for their own files (Search for "file" only)
User is allowed to search for their own messages (Search for "msg" only)
User is allowed to search for their queries (Search for "query" only)
User is allowed to search issue (Search for "issue" only)
User is allowed to view their own files (View for "file" only)
User may access the web interface (Web Access)
User may use the email interface (Email Access)
Users are allowed to edit some of their details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'password', 'timezone') only)
Users are allowed to view some of their details (View for "user": ('activity', 'actor', 'creation', 'creator', 'firstname', 'lastname', 'realname', 'username') only)
Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only)
Role "facility":
(Restore for "room" only)
(Retire for "room" only)
User is allowed to create room (Create for "room" only)
User is allowed to edit room (Edit for "room" only)
Role "functional-role":
(Restore for "user_functional_role" only)
(Retire for "user_functional_role" only)
User is allowed Edit on (Edit for "user": ('business_responsible', 'scale_seniority') only)
User is allowed View on (View for "user": ('business_responsible', 'planning_role', 'scale_seniority') only)
User is allowed to access user_functional_role (View for "user_functional_role" only)
User is allowed to create user_functional_role (Create for "user_functional_role" only)
User is allowed to edit user_functional_role (Edit for "user_functional_role" only)
Role "hr":
(Edit for "overtime_period": ('name', 'order') only)
(Restore for "room" only)
(Retire for "room" only)
User is allowed Edit on (Edit for "daily_record": ('required_overtime', 'weekend_allowed') only)
User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only)
User is allowed Edit on (Edit for "time_project": ('approval_hr', 'approval_required', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'no_overtime', 'no_overtime_day', 'only_hours', 'overtime_reduction') only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed to access auto_wp (View for "auto_wp" only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access daily_record (View for "daily_record" only)
User is allowed to access daily_record_freeze (View for "daily_record_freeze" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access overtime_correction (View for "overtime_correction" only)
User is allowed to access time_record (View for "time_record" only)
User is allowed to access user_contact (View for "user_contact" only)
User is allowed to access user_dynamic (View for "user_dynamic" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
User is allowed to create auto_wp (Create for "auto_wp" only)
User is allowed to create daily_record_freeze (Create for "daily_record_freeze" only)
User is allowed to create location (Create for "location" only)
User is allowed to create org_location (Create for "org_location" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to create overtime_correction (Create for "overtime_correction" only)
User is allowed to create overtime_period (Create for "overtime_period" only)
User is allowed to create product_family (Create for "product_family" only)
User is allowed to create public_holiday (Create for "public_holiday" only)
User is allowed to create reporting_group (Create for "reporting_group" only)
User is allowed to create room (Create for "room" only)
User is allowed to create sap_cc (Create for "sap_cc" only)
User is allowed to create time_record (Create for "time_record" only)
User is allowed to create uc_type (Create for "uc_type" only)
User is allowed to create user (Create for "user" only)
User is allowed to create user_dynamic (Create for "user_dynamic" only)
User is allowed to edit auto_wp (Edit for "auto_wp" only)
User is allowed to edit dynamic user data if not frozen in validity span of dynamic user record (Edit for "user_dynamic" only)
User is allowed to edit freeze record if not frozen at the given date (Edit for "daily_record_freeze": ('frozen',) only)
User is allowed to edit location (Edit for "location" only)
User is allowed to edit org_location (Edit for "org_location" only)
User is allowed to edit organisation (Edit for "organisation" only)
User is allowed to edit overtime correction if the overtime correction is not frozen (Edit for "overtime_correction" only)
User is allowed to edit product_family (Edit for "product_family" only)
User is allowed to edit public_holiday (Edit for "public_holiday" only)
User is allowed to edit reporting_group (Edit for "reporting_group" only)
User is allowed to edit room (Edit for "room" only)
User is allowed to edit sap_cc (Edit for "sap_cc" only)
User is allowed to edit time_record (Edit for "time_record" only)
User is allowed to edit uc_type (Edit for "uc_type" only)
User may manipulate user Roles through the web (Web Roles)
Role "hr-leave-approval":
User is allowed Edit on (Edit for "leave_submission": ('status',) only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
Role "hr-org-location":
(Search for "daily_record_freeze" only)
(Search for "overtime_correction" only)
(Search for "time_activity_perm" only)
(Search for "time_record" only)
(Search for "user_dynamic" only)
User is allowed to view dynamic user data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "user_dynamic" only)
User is allowed to view freeze information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "daily_record_freeze" only)
User is allowed to view overtime information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "overtime_correction" only)
User is allowed to view time record data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "time_record" only)
Role "hr-vacation":
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access vacation_correction (View for | |
SINGLE': 121217,
'SIGNWRITING TRAVEL-FLOORPLANE ROTATION-WALLPLANE ALTERNATING': 121222,
'SIGNWRITING TRAVEL-FLOORPLANE ROTATION-WALLPLANE DOUBLE': 121221,
'SIGNWRITING TRAVEL-FLOORPLANE ROTATION-WALLPLANE SINGLE': 121220,
'SIGNWRITING TRAVEL-FLOORPLANE SHAKING': 121223,
'SIGNWRITING TRAVEL-WALLPLANE ARM SPIRAL DOUBLE': 121171,
'SIGNWRITING TRAVEL-WALLPLANE ARM SPIRAL SINGLE': 121170,
'SIGNWRITING TRAVEL-WALLPLANE ARM SPIRAL TRIPLE': 121172,
'SIGNWRITING TRAVEL-WALLPLANE ROTATION-FLOORPLANE ALTERNATING': 121168,
'SIGNWRITING TRAVEL-WALLPLANE ROTATION-FLOORPLANE DOUBLE': 121167,
'SIGNWRITING TRAVEL-WALLPLANE ROTATION-FLOORPLANE SINGLE': 121166,
'SIGNWRITING TRAVEL-WALLPLANE ROTATION-WALLPLANE ALTERNATING': 121165,
'SIGNWRITING TRAVEL-WALLPLANE ROTATION-WALLPLANE DOUBLE': 121164,
'SIGNWRITING TRAVEL-WALLPLANE ROTATION-WALLPLANE SINGLE': 121163,
'SIGNWRITING TRAVEL-WALLPLANE SHAKING': 121169,
'SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS': 121461,
'SIGNWRITING WALLPLANE SHOULDER HIP MOVE': 121455,
'SILHOUETTE OF JAPAN': 128510,
'SINGLE CHARACTER INTRODUCER': 983184,
'SINGLE GRAPHIC CHARACTER INTRODUCER': 983182,
'SINGLE SHIFT THREE': 983157,
'SINGLE SHIFT TWO': 983154,
'SINGLE-SHIFT-2': 983155,
'SINGLE-SHIFT-3': 983158,
'SINHALA ARCHAIC DIGIT EIGHT': 70120,
'SINHALA ARCHAIC DIGIT FIVE': 70117,
'SINHALA ARCHAIC DIGIT FOUR': 70116,
'SINHALA ARCHAIC DIGIT NINE': 70121,
'SINHALA ARCHAIC DIGIT ONE': 70113,
'SINHALA ARCHAIC DIGIT SEVEN': 70119,
'SINHALA ARCHAIC DIGIT SIX': 70118,
'SINHALA ARCHAIC DIGIT THREE': 70115,
'SINHALA ARCHAIC DIGIT TWO': 70114,
'SINHALA ARCHAIC NUMBER EIGHTY': 70129,
'SINHALA ARCHAIC NUMBER FIFTY': 70126,
'SINHALA ARCHAIC NUMBER FORTY': 70125,
'SINHALA ARCHAIC NUMBER NINETY': 70130,
'SINHALA ARCHAIC NUMBER ONE HUNDRED': 70131,
'SINHALA ARCHAIC NUMBER ONE THOUSAND': 70132,
'SINHALA ARCHAIC NUMBER SEVENTY': 70128,
'SINHALA ARCHAIC NUMBER SIXTY': 70127,
'SINHALA ARCHAIC NUMBER TEN': 70122,
'SINHALA ARCHAIC NUMBER THIRTY': 70124,
'SINHALA ARCHAIC NUMBER TWENTY': 70123,
'SINHALA CONSONANT SIGN RAKAARAANSAYA': 983922,
'SINHALA CONSONANT SIGN REPAYA': 983923,
'SINHALA CONSONANT SIGN YANSAYA': 983921,
'SINHALA LITH DIGIT EIGHT': 3566,
'SINHALA LITH DIGIT FIVE': 3563,
'SINHALA LITH DIGIT FOUR': 3562,
'SINHALA LITH DIGIT NINE': 3567,
'SINHALA LITH DIGIT ONE': 3559,
'SINHALA LITH DIGIT SEVEN': 3565,
'SINHALA LITH DIGIT SIX': 3564,
'SINHALA LITH DIGIT THREE': 3561,
'SINHALA LITH DIGIT TWO': 3560,
'SINHALA LITH DIGIT ZERO': 3558,
'SIX POINTED PINWHEEL STAR': 128973,
'SIX POINTED STAR WITH MIDDLE DOT': 128303,
'SKI AND SKI BOOT': 127935,
'SKULL': 128128,
'SLANTED NORTH ARROW WITH HOOKED HEAD': 11098,
'SLANTED NORTH ARROW WITH HORIZONTAL TAIL': 11100,
'SLEEPING ACCOMMODATION': 128716,
'SLEEPING FACE': 128564,
'SLEEPING SYMBOL': 128164,
'SLEEPY FACE': 128554,
'SLEUTH OR SPY': 128373,
'SLICE OF PIZZA': 127829,
'SLIGHTLY FROWNING FACE': 128577,
'SLIGHTLY SMILING FACE': 128578,
'SLOT MACHINE': 127920,
'SMALL AIRPLANE': 128745,
'SMALL BLUE DIAMOND': 128313,
'SMALL ORANGE DIAMOND': 128312,
'SMILING CAT FACE WITH HEART-SHAPED EYES': 128571,
'SMILING CAT FACE WITH OPEN MOUTH': 128570,
'SMILING FACE WITH HALO': 128519,
'SMILING FACE WITH HEART-SHAPED EYES': 128525,
'SMILING FACE WITH HORNS': 128520,
'SMILING FACE WITH OPEN MOUTH': 128515,
'SMILING FACE WITH OPEN MOUTH AND COLD SWEAT': 128517,
'SMILING FACE WITH OPEN MOUTH AND SMILING EYES': 128516,
'SMILING FACE WITH OPEN MOUTH AND TIGHTLY-CLOSED EYES': 128518,
'SMILING FACE WITH SMILING EYES': 128522,
'SMILING FACE WITH SUNGLASSES': 128526,
'SMIRKING FACE': 128527,
'SMOKING SYMBOL': 128684,
'SNAIL': 128012,
'SNAKE': 128013,
'SNOW CAPPED MOUNTAIN': 127956,
'SNOWBOARDER': 127938,
'SO': 983077,
'SOFT ICE CREAM': 127846,
'SOFT SHELL FLOPPY DISK': 128428,
'SOH': 983043,
'SOLID QUILT SQUARE ORNAMENT': 128618,
'SOLID QUILT SQUARE ORNAMENT IN BLACK SQUARE': 128619,
'SOON WITH RIGHTWARDS ARROW ABOVE': 128284,
'SORA SOMPENG DIGIT EIGHT': 69880,
'SORA SOMPENG DIGIT FIVE': 69877,
'SORA SOMPENG DIGIT FOUR': 69876,
'SORA SOMPENG DIGIT NINE': 69881,
'SORA SOMPENG DIGIT ONE': 69873,
'SORA SOMPENG DIGIT SEVEN': 69879,
'SORA SOMPENG DIGIT SIX': 69878,
'SORA SOMPENG DIGIT THREE': 69875,
'SORA SOMPENG DIGIT TWO': 69874,
'SORA SOMPENG DIGIT ZERO': 69872,
'SORA SOMPENG LETTER AH': 69858,
'SORA SOMPENG LETTER BAH': 69842,
'SORA SOMPENG LETTER CAH': 69843,
'SORA SOMPENG LETTER DAH': 69844,
'SORA SOMPENG LETTER EEH': 69859,
'SORA SOMPENG LETTER EH': 69863,
'SORA SOMPENG LETTER GAH': 69845,
'SORA SOMPENG LETTER HAH': 69854,
'SORA SOMPENG LETTER IH': 69860,
'SORA SOMPENG LETTER JAH': 69856,
'SORA SOMPENG LETTER KAH': 69855,
'SORA SOMPENG LETTER LAH': 69848,
'SORA SOMPENG LETTER MAE': 69864,
'SORA SOMPENG LETTER MAH': 69846,
'SORA SOMPENG LETTER NAH': 69849,
'SORA SOMPENG LETTER NGAH': 69847,
'SORA SOMPENG LETTER NYAH': 69857,
'SORA SOMPENG LETTER OH': 69862,
'SORA SOMPENG LETTER PAH': 69851,
'SORA SOMPENG LETTER RAH': 69853,
'SORA SOMPENG LETTER SAH': 69840,
'SORA SOMPENG LETTER TAH': 69841,
'SORA SOMPENG LETTER UH': 69861,
'SORA SOMPENG LETTER VAH': 69850,
'SORA SOMPENG LETTER YAH': 69852,
'SOS': 983181,
'SOUTH EAST POINTING BUD': 128611,
'SOUTH EAST POINTING LEAF': 128595,
'SOUTH EAST POINTING VINE LEAF': 128603,
'SOUTH EAST SANS-SERIF ARROW': 129110,
'SOUTH EAST TRIANGLE-HEADED ARROW': 11112,
'SOUTH EAST TRIANGLE-HEADED ARROW TO BAR': 11128,
'SOUTH WEST POINTING BUD': 128609,
'SOUTH WEST POINTING LEAF': 128593,
'SOUTH WEST POINTING VINE LEAF': 128601,
'SOUTH WEST SANS-SERIF ARROW': 129111,
'SOUTH WEST TRIANGLE-HEADED ARROW': 11113,
'SOUTH WEST TRIANGLE-HEADED ARROW TO BAR': 11129,
'SP': 983117,
'SPA': 983176,
'SPAGHETTI': 127837,
'SPARKLES': 10024,
'SPARKLING HEART': 128150,
'SPEAK-NO-EVIL MONKEY': 128586,
'SPEAKER': 128264,
'SPEAKER WITH CANCELLATION STROKE': 128263,
'SPEAKER WITH ONE SOUND WAVE': 128265,
'SPEAKER WITH THREE SOUND WAVES': 128266,
'SPEAKING HEAD IN SILHOUETTE': 128483,
'SPEECH BALLOON': 128172,
'SPEEDBOAT': 128676,
'SPIDER': 128375,
'SPIDER WEB': 128376,
'SPIRAL CALENDAR PAD': 128467,
'SPIRAL NOTE PAD': 128466,
'SPIRAL SHELL': 128026,
'SPLASHING SWEAT SYMBOL': 128166,
'SPORTS MEDAL': 127941,
'SPOUTING WHALE': 128051,
'SQUARE POSITION INDICATOR': 11216,
'SQUARE TARGET': 128918,
'SQUARED CJK UNIFIED IDEOGRAPH-5272': 127545,
'SQUARED CJK UNIFIED IDEOGRAPH-5408': 127540,
'SQUARED CJK UNIFIED IDEOGRAPH-55B6': 127546,
'SQUARED CJK UNIFIED IDEOGRAPH-6708': 127543,
'SQUARED CJK UNIFIED IDEOGRAPH-6709': 127542,
'SQUARED CJK UNIFIED IDEOGRAPH-6E80': 127541,
'SQUARED CJK UNIFIED IDEOGRAPH-7533': 127544,
'SQUARED CJK UNIFIED IDEOGRAPH-7981': 127538,
'SQUARED CJK UNIFIED IDEOGRAPH-7A7A': 127539,
'SQUARED CL': 127377,
'SQUARED COOL': 127378,
'SQUARED FREE': 127379,
'SQUARED ID': 127380,
'SQUARED KATAKANA KOKO': 127489,
'SQUARED KATAKANA SA': 127490,
'SQUARED LATIN CAPITAL LETTER A': 127280,
'SQUARED LATIN CAPITAL LETTER C': 127282,
'SQUARED LATIN CAPITAL LETTER D': 127283,
'SQUARED LATIN CAPITAL LETTER E': 127284,
'SQUARED LATIN CAPITAL LETTER F': 127285,
'SQUARED LATIN CAPITAL LETTER G': 127286,
'SQUARED LATIN CAPITAL LETTER H': 127287,
'SQUARED LATIN CAPITAL LETTER I': 127288,
'SQUARED LATIN CAPITAL LETTER J': 127289,
'SQUARED LATIN CAPITAL LETTER K': 127290,
'SQUARED LATIN CAPITAL LETTER L': 127291,
'SQUARED LATIN CAPITAL LETTER M': 127292,
'SQUARED LATIN CAPITAL LETTER O': 127294,
'SQUARED LATIN CAPITAL LETTER Q': 127296,
'SQUARED LATIN CAPITAL LETTER R': 127297,
'SQUARED LATIN CAPITAL LETTER T': 127299,
'SQUARED LATIN CAPITAL LETTER U': 127300,
'SQUARED LATIN CAPITAL LETTER V': 127301,
'SQUARED LATIN CAPITAL LETTER X': 127303,
'SQUARED LATIN CAPITAL LETTER Y': 127304,
'SQUARED LATIN CAPITAL LETTER Z': 127305,
'SQUARED LOGICAL AND': 10190,
'SQUARED LOGICAL OR': 10191,
'SQUARED NEW': 127381,
'SQUARED NG': 127382,
'SQUARED OK': 127383,
'SQUARED SOS': 127384,
'SQUARED UP WITH EXCLAMATION MARK': 127385,
'SQUARED VS': 127386,
'SQUARED WC': 127311,
'SS2': 983156,
'SS3': 983159,
'SSA': 983133,
'ST': 983189,
'STADIUM': 127967,
'STAMPED ENVELOPE': 128387,
'START OF GUARDED AREA': 983174,
'START OF HEADING': 983042,
'START OF PROTECTED AREA': 983175,
'START OF SELECTED AREA': 983132,
'START OF STRING': 983180,
'START OF TEXT': 983044,
'STATION': 128649,
'STATUE OF LIBERTY': 128509,
'STEAM LOCOMOTIVE': 128642,
'STEAMING BOWL': 127836,
'STENOGRAPHIC FULL STOP': 11836,
'STOCK CHART': 128480,
'STOPWATCH': 9201,
'STRAIGHT RULER': 128207,
'STRAWBERRY': 127827,
'STRING TERMINATOR': 983188,
'STS': 983169,
'STUDIO MICROPHONE': 127897,
'STX': 983045,
'SUB': 983102,
'SUBSTITUTE': 983101,
'SUN WITH FACE': 127774,
'SUNDANESE AVAGRAHA': 7098,
'SUNDANESE CONSONANT SIGN PASANGAN MA': 7084,
'SUNDANESE CONSONANT SIGN PASANGAN WA': 7085,
'SUNDANESE LETTER BHA': 7101,
'SUNDANESE LETTER FINAL K': 7102,
'SUNDANESE LETTER FINAL M': 7103,
'SUNDANESE LETTER LEU': 7100,
'SUNDANESE LETTER REU': 7099,
'SUNDANESE PUNCTUATION BINDU BA SATANGA': 7367,
'SUNDANESE PUNCTUATION BINDU CAKRA': 7363,
'SUNDANESE PUNCTUATION BINDU DA SATANGA': 7366,
'SUNDANESE PUNCTUATION BINDU KA SATANGA': 7365,
'SUNDANESE PUNCTUATION BINDU LEU SATANGA': 7364,
'SUNDANESE PUNCTUATION BINDU PANGLONG': 7361,
'SUNDANESE PUNCTUATION BINDU PURNAMA': 7362,
'SUNDANESE PUNCTUATION BINDU SURYA': 7360,
'SUNDANESE SIGN VIRAMA': 7083,
'SUNFLOWER': 127803,
'SUNRISE': 127749,
'SUNRISE OVER MOUNTAINS': 127748,
'SUNSET OVER BUILDINGS': 127751,
'SURFER': 127940,
'SUSHI': 127843,
'SUSPENSION RAILWAY': 128671,
'SWASH AMPERSAND ORNAMENT': 128629,
'SWIMMER': 127946,
'SYMBOL FOR MARKS CHAPTER': 128325,
'SYN': 983094,
'SYNAGOGUE': 128333,
'SYNCHRONOUS IDLE': 983093,
'SYRIAC SUBLINEAR COLON SKEWED LEFT': 983202,
'SYRINGE': 128137,
'T-SHIRT': 128085,
'TAB': 983061,
'TABLE TENNIS PADDLE AND BALL': 127955,
'TACO': 127790,
'TAKRI DIGIT EIGHT': 71368,
'TAKRI DIGIT FIVE': 71365,
'TAKRI DIGIT FOUR': 71364,
'TAKRI DIGIT NINE': 71369,
'TAKRI DIGIT ONE': 71361,
'TAKRI DIGIT SEVEN': 71367,
'TAKRI DIGIT SIX': 71366,
'TAKRI DIGIT THREE': 71363,
'TAKRI DIGIT TWO': 71362,
'TAKRI DIGIT ZERO': 71360,
'TAKRI LETTER A': 71296,
'TAKRI LETTER AA': 71297,
'TAKRI LETTER AI': 71303,
'TAKRI LETTER AU': 71305,
'TAKRI LETTER BA': 71328,
'TAKRI LETTER BHA': 71329,
'TAKRI LETTER CA': 71311,
'TAKRI LETTER CHA': 71312,
'TAKRI LETTER DA': 71323,
'TAKRI LETTER DDA': 71318,
'TAKRI LETTER DDHA': 71319,
'TAKRI LETTER DHA': 71324,
'TAKRI LETTER E': 71302,
'TAKRI LETTER GA': 71308,
'TAKRI LETTER GHA': 71309,
'TAKRI LETTER HA': 71337,
'TAKRI LETTER I': 71298,
'TAKRI LETTER II': 71299,
'TAKRI LETTER JA': 71313,
'TAKRI LETTER JHA': 71314,
'TAKRI LETTER KA': 71306,
'TAKRI LETTER KHA': 71307,
'TAKRI LETTER LA': 71333,
'TAKRI LETTER MA': 71330,
'TAKRI LETTER NA': 71325,
'TAKRI LETTER NGA': 71310,
'TAKRI LETTER NNA': 71320,
'TAKRI LETTER NYA': 71315,
'TAKRI LETTER O': 71304,
'TAKRI LETTER PA': 71326,
'TAKRI LETTER PHA': 71327,
'TAKRI LETTER RA': 71332,
'TAKRI LETTER RRA': 71338,
'TAKRI LETTER SA': 71336,
'TAKRI LETTER SHA': 71335,
'TAKRI LETTER TA': 71321,
'TAKRI LETTER THA': 71322,
'TAKRI LETTER TTA': 71316,
'TAKRI LETTER TTHA': 71317,
'TAKRI LETTER U': 71300,
'TAKRI LETTER UU': 71301,
'TAKRI LETTER VA': 71334,
'TAKRI LETTER YA': 71331,
'TAKRI SIGN ANUSVARA': 71339,
'TAKRI SIGN NUKTA': 71351,
'TAKRI SIGN VIRAMA': 71350,
'TAKRI SIGN VISARGA': 71340,
'TAKRI VOWEL SIGN AA': 71341,
'TAKRI VOWEL SIGN AI': 71347,
'TAKRI VOWEL SIGN AU': 71349,
'TAKRI VOWEL SIGN E': 71346,
'TAKRI VOWEL SIGN I': 71342,
'TAKRI VOWEL SIGN II': 71343,
'TAKRI VOWEL SIGN O': 71348,
'TAKRI VOWEL SIGN U': 71344,
'TAKRI VOWEL SIGN UU': 71345,
'TAMIL CONSONANT C': 983633,
'TAMIL CONSONANT H': 983653,
'TAMIL CONSONANT J': 983649,
'TAMIL CONSONANT K': 983631,
'TAMIL CONSONANT KSS': 983654,
'TAMIL CONSONANT L': 983643,
'TAMIL CONSONANT LL': 983646,
'TAMIL CONSONANT LLL': 983645,
'TAMIL CONSONANT M': 983640,
'TAMIL CONSONANT N': 983638,
'TAMIL CONSONANT NG': 983632,
'TAMIL CONSONANT NN': 983636,
'TAMIL CONSONANT NNN': 983648,
'TAMIL CONSONANT NY': 983634,
'TAMIL CONSONANT P': 983639,
'TAMIL CONSONANT R': 983642,
'TAMIL CONSONANT RR': 983647,
'TAMIL CONSONANT S': 983652,
'TAMIL CONSONANT SH': 983650,
'TAMIL CONSONANT SS': 983651,
'TAMIL CONSONANT T': 983637,
'TAMIL CONSONANT TT': 983635,
'TAMIL CONSONANT V': 983644,
'TAMIL CONSONANT Y': 983641,
'TAMIL SYLLABLE CAA': 983677,
'TAMIL SYLLABLE CAI': 983684,
'TAMIL SYLLABLE CAU': 983687,
'TAMIL SYLLABLE CE': 983682,
'TAMIL SYLLABLE CEE': 983683,
'TAMIL SYLLABLE CI': 983678,
'TAMIL SYLLABLE CII': 983679,
'TAMIL SYLLABLE CO': 983685,
'TAMIL SYLLABLE COO': 983686,
'TAMIL SYLLABLE CU': 983680,
'TAMIL SYLLABLE CUU': 983681,
'TAMIL SYLLABLE HAA': | |
<reponame>MishaRubanov/SchulmanLab<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 15:12:23 2020
@author: sscha
"""
import numpy as np
import copy
import scipy.integrate as spi
from sympy import symbols, Matrix, Transpose, matrix_multiply_elementwise as MME
import networkx as nx
import matplotlib.pyplot as plt
import sys
import xlrd
'''
###############################################################################
general genelet model functions (helper functions used within GeneletNetwork)
###############################################################################
'''
# function to assemble connectivity matrices
def con_vec2mat(con_vec,ortho_nodes,ind_nodes):
    '''
    Convert a connectivity vector into an ortho_nodes x ind_nodes 0/1 matrix.

    A non-zero entry v in column n of the vector sets matrix[v-1, n] = 1;
    zero entries leave the column empty.
    '''
    matrix = np.zeros([ortho_nodes, ind_nodes], dtype=int)
    for col, node in enumerate(con_vec):
        if node != 0:
            matrix[node - 1, col] = 1
    return matrix
# function to assemble production matrices
def prod_vec2mat(prod_vec,ortho_nodes,ind_nodes):
    '''
    Split a production vector into coactivator / repressor production matrices.

    A positive entry p in column n sets Cprod_mat[p-1, n] = 1 (coactivator
    production); a negative entry -p sets Rprod_mat[p-1, n] = 1 (repressor
    production); zero entries produce nothing.
    '''
    Cprod_mat = np.zeros([ortho_nodes, ind_nodes], dtype=int)
    Rprod_mat = np.zeros([ortho_nodes, ind_nodes], dtype=int)
    for col, target in enumerate(prod_vec):
        if target > 0:
            Cprod_mat[target - 1, col] = 1
        elif target < 0:
            Rprod_mat[-target - 1, col] = 1
    return Cprod_mat, Rprod_mat
# function to create a topology matrix for topology plotting
def to_topology_mat(act_vec,prod_vec):
    '''
    Build a square topology matrix from activation and production vectors.

    Entry [target][activator] is 1 when a genelet activated by node
    `activator` produces the coactivator of node `target`, -1 when it
    produces the repressor, and 0 when there is no connection.
    '''
    ortho_nodes = max(act_vec)
    topology_mat = [[0] * ortho_nodes for _ in range(ortho_nodes)]
    for source, production in enumerate(prod_vec):
        if production == 0:
            continue
        sign = 1 if production > 0 else -1
        topology_mat[abs(production) - 1][act_vec[source] - 1] = sign
    return topology_mat
# function to create act_vec and prod_vec (of indc_vec) from a topology mat
def from_topology_mat(topology_mat,ind_n):
    '''
    Reconstruct act_vec and prod_vec (or indc_vec) from a topology matrix.

    topology_mat[j][i] == 1 means node i produces the coactivator of node j;
    topology_mat[j][i] == -1 means node i produces its repressor (this is
    the encoding written by to_topology_mat). ind_n is the total number of
    independent genelet nodes, i.e. the length of the output vectors.

    Returns (act_vec, prod_vec), each of length ind_n.
    '''
    # k indexes the next free slot in the output vectors
    k = 0
    # slots never written keep the default value len(topology_mat[0]);
    # prod_vec defaults to 0 (no production)
    act_vec =[len(topology_mat[0])]*ind_n
    prod_vec = [0]*ind_n
    # walk column by column (i = producing node), row by row (j = target node)
    for i in range(len(topology_mat[0])):
        for j in range(len(topology_mat[0])):
            tmv = topology_mat[j][i]
            if tmv == 1:
                # node i produces the coactivator of node j
                prod_vec[k]=(j+1)
                act_vec[k]=(i+1)
                k += 1
            if tmv == -1:
                # node i produces the repressor of node j (negative encoding)
                prod_vec[k]=-(j+1)
                act_vec[k]=(i+1)
                k += 1
            # a column with no connections at all still consumes one output
            # slot; this triggers once, on the last row of an all-zero column
            if (j+1) == len(topology_mat[0]) and sum(abs(np.array(topology_mat)[:,i]))==0:
                k += 1
    return act_vec, prod_vec
# function to create genelet initial conditions
def int_genelet_states(G_int_vec,G_tot,act_vec,dA_tot):
    '''
    Build the initial genelet state arrays from the requested initial states.

    G_int_vec - per-genelet initial state: 1 = ON (bound to its activator),
                -1 = blocked (bound to its blocker), anything else = OFF
    G_tot     - total concentration of each genelet (numpy array)
    act_vec   - 1-based activator index for each genelet
    dA_tot    - total concentration of each DNA activator (numpy array)

    Returns (GdAin, GdBin, dAin):
    GdAin - initial genelet:activator complex concentrations
    GdBin - initial genelet:blocker complex concentrations
    dAin  - free activator remaining after the ON genelets took their share

    Fix: GdAin/GdBin are now float arrays; the previous dtype=int silently
    truncated fractional concentrations copied from G_tot (dAin was already
    float via the deepcopy of dA_tot).
    '''
    # float dtype so fractional concentrations are not truncated
    GdAin = np.zeros(len(G_int_vec), dtype=float)
    GdBin = np.zeros(len(G_int_vec), dtype=float)
    # work on a copy so the caller's dA_tot is left untouched
    dAin = copy.deepcopy(dA_tot)
    act_vec = np.array(act_vec)
    for n in range(len(G_int_vec)):
        # ON genelet, and enough activator for every genelet sharing it
        if G_int_vec[n] == 1 and dAin[act_vec[n]-1] >= sum(G_tot[act_vec==act_vec[n]]):
            GdAin[n] = G_tot[n]
            dAin[act_vec[n]-1] = dAin[act_vec[n]-1] - G_tot[n]
        # ON requested but not enough activator to satisfy all its genelets
        elif G_int_vec[n] == 1 and dAin[act_vec[n]-1] < sum(G_tot[act_vec==act_vec[n]]):
            # currently not sure how to handle this so an error will be printed
            print('Warning: Less activator than genelets!!!')
            '''GdAin[n] = dAin[act_vec[n]-1]/sum(act_vec==act_vec[n])
            dAin[act_vec[n]-1] = 0'''
        # blocked genelets
        # (excess blocker concentrations are handled in the simulation script)
        elif G_int_vec[n] == -1:
            GdBin[n] = G_tot[n]
    return GdAin, GdBin, dAin
# function to determine reverse complement (rc), reverse (r), or complement (c) of a nucleotide sequence
def rev_comp_seq(seq,spec_out='rc'):
    '''
    Return the reverse complement ('rc'), complement ('c'), or reverse ('r')
    of a DNA sequence, annotated with 3/5 end labels.

    seq      - nucleotide sequence (case-insensitive; G/C/A/T are complemented
               to upper-case partners, any other character passes through)
    spec_out - 'rc' (default), 'c', or 'r'

    Raises ValueError for any other spec_out value (previously an
    unrecognized spec_out crashed with UnboundLocalError).
    '''
    # build the complement, keeping the 3/5 labels on the ends
    new_seq = list('3' + seq + '5')
    pairs = {'g': 'C', 'c': 'G', 't': 'A', 'a': 'T'}
    for i, base in enumerate(seq):
        # unknown characters are left unchanged, as before
        new_seq[i + 1] = pairs.get(base.lower(), new_seq[i + 1])
    mode = spec_out.lower()
    if mode == 'rc':
        return ''.join(new_seq[::-1])
    if mode == 'c':
        return ''.join(new_seq)
    if mode == 'r':
        return '3' + seq[::-1] + '5'
    raise ValueError("spec_out must be 'rc', 'c', or 'r', got %r" % spec_out)
def xlsheet_to_dict(input_file_loc,sheet_name,ncol):
    '''
    Read an Excel sheet into a dict.

    For each row, column 0 supplies the key and the following ncol cells
    form the value list. Later rows with a duplicate key overwrite earlier
    ones.
    '''
    workbook = xlrd.open_workbook(input_file_loc)
    sheet = workbook.sheet_by_name(sheet_name)
    sheet_dict = {}
    for row in range(sheet.nrows):
        key = sheet.cell_value(row, 0)
        sheet_dict[key] = [sheet.cell_value(row, col + 1) for col in range(ncol)]
    return sheet_dict
# function containing the ODEs for the Kim and Winfree genelets (KWG)
def general_genelet_eqs(t,x,ortho_nodes,ind_nodes,dA_tot,dB_tot,G_tot, \
    kpr,kpc,kpi,kd_H,kd_A,kga,kgar,kar,kgb,kgab,kgbc,kbc,kir, \
    act_mat,rep_mat,blk_mat,ca_mat,Cprod_mat,Rprod_mat, \
    Cindc_mat,Rindc_mat,RnH,RnA,leak):
    '''
    Right-hand side of the ODE system for Kim/Winfree-style genelets (KWG),
    in solver form f(t, x) -> dx/dt.

    State vector layout (k = ortho_nodes, g = ind_nodes):
      rR   x[0:k]                   RNA repressors
      dA   x[k:2k]                  DNA activators
      rC   x[2k:3k]                 RNA coactivators
      dB   x[3k:4k]                 DNA blockers
      GdA  x[4k:4k+g]               genelet:activator (ON) complexes
      GdB  x[4k+g:4k+2g]            genelet:blocker complexes
      rIr  x[4k+2g:4k+2g+k]         repressor inducers
      rIc  x[4k+2g+k:4k+2g+2k]      coactivator inducers
      dR   x[4k+2g+2k:4k+2g+3k]     DNA repressors
      dAdR x[4k+2g+3k:4k+2g+4k]     DNA repressor:activator complexes

    The k* arguments are rate constants, the *_mat arguments are the
    connectivity/production matrices built by the vec2mat helpers, RnH/RnA
    scale the two enzymatic degradation terms, and `leak` scales leak
    transcription from non-ON genelets.

    Returns the concatenated derivatives in the same ordering as x.
    '''
    k = ortho_nodes
    g = ind_nodes
    # defining species for convenience
    rR = x[0:k] # repressors
    dA = x[k:2*k] # activators
    rC = x[2*k:3*k] # coactivators
    dB = x[3*k:4*k] # blockers
    GdA = x[4*k:4*k+g] # genelet:activator complexes
    GdB = x[4*k+g:4*k+2*g] # genelet:blocker complexes
    rIr = x[4*k+2*g:4*k+2*g+k] # repressor inducers
    rIc = x[4*k+2*g+k:4*k+2*g+2*k] # coactivator inducers
    dR = x[4*k+2*g+2*k:4*k+2*g+3*k] # DNA repressors
    dAdR = x[4*k+2*g+3*k:4*k+2*g+4*k] # DNA repressor:DNA activator complexes which cannot be degraded
    # mass balances to find other concentrations
    dArR = dA_tot - dA - dAdR - np.matmul(act_mat,GdA) # activator:repressor complexes
    dBrC = dB_tot - dB - np.matmul(blk_mat,GdB) # blocker:coactivator complexes
    G = G_tot - GdA - GdB # OFF genelets
    # Rate equations:
    # repressors
    dRdt = -kgar*(rep_mat@GdA)*rR - kar*dA*rR + Rprod_mat@(kpr*GdA) + Rprod_mat@(leak*kpr*GdB) - kir*rR*rIr - RnA*kd_A*rR
    # activators
    dAdt = RnH*kd_H*dArR+ RnA*kd_A*dArR - kga*(act_mat@G)*dA - kar*dA*rR + kgab*(blk_mat@GdA)*dB - kar*dA*dR
    # coactivators
    dCdt = -kgbc*(ca_mat@GdB)*rC - kbc*dB*rC + Cprod_mat@(kpc*GdA) + Cprod_mat@(leak*kpc*GdB) - kir*rC*rIc - RnA*kd_A*rC
    # blockers
    dBdt = RnH*kd_H*dBrC + RnA*kd_A*dBrC - kgb*(blk_mat@G)*dB - kbc*dB*rC - kgab*(blk_mat@GdA)*dB
    # G (ON)
    dGondt = (act_mat.T@(kga*dA))*G - (rep_mat.T@(kgar*rR))*GdA - (blk_mat.T@(kgab*dB))*GdA - (rep_mat.T@(kgar*dR))*GdA
    #G (BLK)
    dGblkdt = (blk_mat.T@(kgb*dB))*G + (blk_mat.T@(kgab*dB))*GdA - (ca_mat.T@(kgbc*rC))*GdB
    # repressor inducers
    dIrdt = Rindc_mat@(kpi*GdA) + Rindc_mat@(leak*kpi*GdB) - kir*rR*rIr - RnA*kd_A*rIr
    # coactivator inducers
    dIcdt = Cindc_mat@(kpi*GdA) + Cindc_mat@(leak*kpi*GdB) - kir*rC*rIc - RnA*kd_A*rIc
    # DNA repressor
    # NOTE(review): with +kgar here and +kgar in ddAdRdt below, the total
    # dR + dAdR grows over time rather than being conserved; the second
    # term here looks like it should be negative - confirm against the model
    ddRdt = - kar*dA*dR + kgar*(rep_mat@GdA)*dR
    # DNA repressor:DNA activator complexes
    ddAdRdt = kar*dA*dR + kgar*(rep_mat@GdA)*dR
    return np.concatenate([dRdt,dAdt,dCdt,dBdt,dGondt,dGblkdt,dIrdt,dIcdt,ddRdt,ddAdRdt])
# function containing the ODEs for spatiotemporal genelets (STG)
def spatial_genelet_eqs(t,x,ortho_nodes,ind_nodes,dA_tot,G_tot, \
    kpr,kpi,kd_H,kd_A,kd_Hg,kd_Ag,kgar,kar,kir, \
    act_mat,rep_mat,Rprod_mat,Rindc_mat,RnH,RnA,leak):
    '''
    Right-hand side of the reduced ODE system for spatiotemporal genelets
    (STG), in solver form f(t, x) -> dx/dt.

    State vector layout (k = ortho_nodes, g = ind_nodes):
      rR  x[0:k]          RNA repressors
      dA  x[k:2k]         DNA activators
      Gon x[2k:2k+g]      active (ON) genelets
      rIr x[2k+g:2k+g+k]  repressor inducers

    Compared to general_genelet_eqs, this model has no coactivator/blocker
    species; OFF genelets (GrR) are recovered by mass balance and are turned
    back ON by the RnH/RnA-scaled degradation terms (kd_Hg, kd_Ag). `leak`
    scales transcription from OFF genelets.

    Returns the concatenated derivatives in the same ordering as x.
    '''
    k = ortho_nodes
    g = ind_nodes
    # defining species for convenience
    rR = x[0:k] # repressors
    dA = x[k:2*k] # activators
    Gon = x[2*k:2*k+g] # active genelets complexes
    rIr = x[2*k+g:2*k+g+k] # repressor inducers
    # mass balances to find other concentrations
    dArR = dA_tot - dA # free activator:repressor complexes
    GrR = G_tot - Gon # OFF genelets
    # Rate equations:
    # repressors
    dRdt = -kgar*(rep_mat@Gon)*rR - kar*dA*rR + Rprod_mat@(kpr*Gon) + Rprod_mat@(leak*kpr*GrR) - kir*rR*rIr - RnA*kd_A*rR
    # activators
    dAdt = RnH*kd_H*dArR + RnA*kd_A*dArR - kar*dA*rR
    # G (ON)
    dGondt = RnH*kd_Hg*GrR + RnA*kd_Ag*GrR - (rep_mat.T@(kgar*rR))*Gon
    # repressor inducers
    dIrdt = Rindc_mat@(kpi*Gon) + Rindc_mat@(leak*kpi*GrR) - kir*rR*rIr - RnA*kd_A*rIr
    return np.concatenate([dRdt,dAdt,dGondt,dIrdt])
'''
###############################################################################
GeneletNetwork CLASS DEFINITION
###############################################################################
'''
class GeneletNetwork:
def __init__(self,act_vec='req1',prod_vec='req1',indc_vec=[],blk_vec=[],top_mat='req2',Itop_mat=[],genelet_type='KWG'):
self.genelet_type = genelet_type
self.exit = 0
# if you don't use topology matrix notation you have to input at least
# act_vec and prod_vec
if top_mat == 'req2':
if act_vec == 'req1' or prod_vec == 'req1':
print(" Need inputs: 'act_vec' and 'prod_vec' or 'top_mat' ")
else:
self.topology_mat = to_topology_mat(act_vec,prod_vec)
ortho_nodes = max(act_vec)
ind_nodes = len(act_vec)
if indc_vec == []:
indc_vec=np.zeros(ind_nodes, dtype = int)
if Itop_mat == []:
self.I_topology_mat = to_topology_mat(act_vec,indc_vec)
# if you use topology matrix notation you have to input topology matrix
# if the inducer topology matrix is not supplied it is assumed to be all 0s
elif top_mat != 'req2':
self.topology_mat = top_mat
ortho_nodes = len(top_mat[0])
if Itop_mat == []:
Itop_mat = [[0]*ortho_nodes for a in range(ortho_nodes)]
self.I_topology_mat = Itop_mat
else:
self.I_topology_mat = Itop_mat
# counting total number of unique RNA's produced
np_top_mat = np.array(top_mat)
np_Itop_mat = np.array(Itop_mat)
ind_nodes = ortho_nodes # initialization
# finding the total ind nodes, each column that has more than a single
# 1 in it produces more than a single RNA
for i in range(ortho_nodes):
if sum(abs(np_top_mat[:,i]))+sum(abs(np_Itop_mat[:,i])) > 1:
ind_nodes += sum(abs(np_top_mat[:,i]))+sum(abs(np_Itop_mat[:,i]))-1
# create act_vec and prod_vec from top_mat
act_vec,prod_vec = from_topology_mat(top_mat,ind_nodes)
# create indc_vec from Itop_mat
_,indc_vec = from_topology_mat(Itop_mat,ind_nodes)
# computing everything else now that we know the topology
self.act_vec = act_vec
self.ortho_nodes = ortho_nodes
self.ind_nodes = ind_nodes
if blk_vec == []:
# if the blk vector is not included as an input then this vector will
# be populated assuming that any nodes that have coactivators coming to them
# have blockers and those without coactivators do not
c = 0
np_top_mat = np.array(self.topology_mat)
blk_vec=np.zeros(self.ind_nodes, dtype = int)
for i in range(self.ortho_nodes):
np_mat = np.array(self.topology_mat[i])
if sum(abs(np_mat))==0:
                    # a node with no connections to it will not have a blocker
if sum(abs(np_top_mat[:,i])) == 0:
c = c + 1
for j in range(sum(abs(np_top_mat[:,i]))):
c = c + 1
elif any(np_mat==1):
# If a coactivator is produced for a node then it will be blocked initially
if sum(abs(np_top_mat[:,i])) == 0:
blk_vec[c] = i+1
c = c + 1
for j in range(sum(abs(np_top_mat[:,i]))):
blk_vec[c] = i+1
c = c + 1
elif sum(np_mat)<0 and all(np_mat!=1):
                    # nodes that are only repressed will not have blockers
if sum(abs(np_top_mat[:,i])) == 0:
| |
element
Number of such [0,1,2,3] elements is equal to the number of defined
conductor cells in geometry
'''
# Let's check the size of the array
elementsInY = inputArray.shape[0]
elementsInX = inputArray.shape[1]
# lets define the empty vectorArray
vectorArray = []
# lets go for each input array position and check if is set
# and if yes then put it into putput vectorArray
for Row in range(elementsInY):
for Col in range(elementsInX):
if inputArray[Row][Col] == phaseNumber:
# Let's calculate the X and Y coordinates
coordinateY = (0.5 + Row) * dYmm
coordinateX = (0.5 + Col) * dXmm
vectorArray.append([Row, Col, coordinateX, coordinateY])
return np.array(vectorArray)
# Functions that calculate the master impedance array for given geometry
def n_getImpedanceArray(distanceArray, freq, dXmm, dYmm, lenght=1000, temperature=20, sigma20C=58e6, temCoRe=3.9e-3):
    '''
    Calculate the square array of complex impedances between all elements.

    Input:
    distanceArray - square array of distances between the elements in [mm]
    freq - frequency in [Hz]
    dXmm - size of element in X [mm]
    dYmm - size of element in Y [mm]
    lenght - analyzed lenght in [mm] / default = 1000mm
    temperature - temperature of the conductors in degC / default = 20degC
    sigma20C - conductivity of conductor material at 20degC / default = 58MS (copper)
    temCoRe - temperature resistance coefficient / default is copper

    Diagonal entries hold the element self impedance (R + jwL_self);
    off-diagonal entries hold the mutual inductive coupling (jwM).
    '''
    omega = 2 * np.pi * freq
    # Every element has identical geometry, so the self impedance is the
    # same for all diagonal entries - compute it once instead of per element.
    selfImpedance = n_getResistance(sizeX=dXmm, sizeY=dYmm, lenght=lenght,
                                    temp=temperature, sigma20C=sigma20C,
                                    temCoRe=temCoRe) \
        + 1j * omega * n_getSelfInductance(sizeX=dXmm, sizeY=dYmm, lenght=lenght)
    # builtin `complex` dtype: the np.complex_ alias was removed in NumPy 2.0
    impedanceArray = np.zeros(distanceArray.shape, dtype=complex)
    for X in range(distanceArray.shape[0]):
        for Y in range(distanceArray.shape[0]):
            if X == Y:
                impedanceArray[Y, X] = selfImpedance
            else:
                impedanceArray[Y, X] = 1j * omega * n_getMutualInductance(
                    sizeX=dXmm, sizeY=dYmm, lenght=lenght,
                    distance=distanceArray[Y, X])
    return impedanceArray
# Function for calculating resistance array
def n_getResistanceArray(elementsVector, dXmm, dYmm, lenght=1000, temperature=20, sigma20C=58e6, temCoRe=3.9e-3):
    '''
    Calculate the vector of resistance values for each element.

    Input:
    elementsVector - the elements vector as delivered by arrayVectorize
    dXmm - size of element in X [mm]
    dYmm - size of element in Y [mm]
    lenght - analyzed lenght in [mm] / default = 1000mm
    temperature - temperature of the conductors in degC / default = 20degC
    sigma20C - conductivity of conductor material at 20degC / default = 58MS (copper)
    temCoRe - temperature resistance coefficient / default is copper

    All elements share the same geometry and temperature, so the resistance
    is identical for every element: it is computed once and broadcast,
    instead of being recalculated inside the loop as before.
    '''
    resistance = n_getResistance(sizeX=dXmm, sizeY=dYmm, lenght=lenght,
                                 temp=temperature, sigma20C=sigma20C,
                                 temCoRe=temCoRe)
    return np.full(elementsVector.shape[0], resistance, dtype=float)
# Function that increase the resolution of the main geometry array
def n_arraySlicer(inputArray, subDivisions=2):
    '''
    Increase the resolution of the cross-section array.

    inputArray - original geometry matrix
    subDivisions - number of subdivisions per cell (resolution factor,
                   default = 2)
    '''
    refined = inputArray.repeat(subDivisions, axis=0)
    return refined.repeat(subDivisions, axis=1)
# Functions that calculate module of complex number
def n_getComplexModule(x):
    '''
    Return the modulus of a complex number.

    Any non-complex input is returned unchanged.
    '''
    if not isinstance(x, complex):
        return x
    return np.sqrt(x.real ** 2 + x.imag ** 2)
# Canvas preparation procedure
def n_checkered(canvas, cutsX, cutsY, mode=0):
    '''
    Clean the board and/or draw the grid.

    Inputs:
    canvas - tkinter canvas object
    cutsX - elements in X (left-right) direction
    cutsY - elements in Y (top-down) direction
    mode - 0: clear and draw grid, 1: clear only, 2: grid only
    '''
    # current size of the canvas element
    boardHeight = canvas.winfo_height()
    boardWidth = canvas.winfo_width()
    stepX = boardWidth / cutsX
    stepY = boardHeight / cutsY
    # wipe the whole canvas with a white rectangle
    if mode in (0, 1):
        canvas.create_rectangle(
            0, 0, boardWidth, boardHeight, fill="white", outline="gray")
    # draw grid lines only while they stay readable - with too many
    # divisions the grid would just be a mess
    if mode in (0, 2) and max(cutsX, cutsY) <= 100:
        # vertical lines, one per column
        for ix in range(0, cutsX):
            canvas.create_line(ix * stepX, 0, ix * stepX,
                               boardHeight, fill="gray")
        # horizontal lines, one per row
        for iy in range(0, cutsY):
            canvas.create_line(0, iy * stepY, boardWidth,
                               iy * stepY, fill="gray")
# Procedure that plot the array to canvas
def n_printTheArray(dataArray, canvas):
    '''
    Redraw the cross-section array on the graphical board.

    Useful for redraw or to draw loaded data. Rectangles drawn on previous
    calls (tracked in the module-level canvasElements list) are deleted
    first, then one rectangle is drawn per non-empty cell.

    Inputs:
    dataArray - the array to display on canvas (0 = empty, 1/2/3 = phase)
    canvas - tkinter canvas object
    '''
    global canvasElements
    elementsInY = dataArray.shape[0]
    elementsInX = dataArray.shape[1]
    # cell size in canvas pixels for this array
    canvasHeight = canvas.winfo_height()
    canvasWidth = canvas.winfo_width()
    dX = canvasWidth / elementsInX
    dY = canvasHeight / elementsInY
    # remove previously drawn items; the except is narrowed from the old
    # bare `except:` so KeyboardInterrupt/SystemExit are not swallowed
    for graphElement in canvasElements:
        try:
            canvas.delete(graphElement)
        except Exception:
            print("Error in removing stuff")
    canvasElements = []
    colorList = ["red", "green", "blue"]
    for Row in range(elementsInY):
        for Col in range(elementsInX):
            theNumber = int(dataArray[Row][Col])
            if theNumber in (1, 2, 3):
                canvasElements.append(canvas.create_rectangle(
                    Col * dX, Row * dY, Col * dX + dX, Row * dY + dY,
                    fill=colorList[theNumber - 1], outline=""))
# Procedure that plot the array to canvas
def n_printTheArrayOld(dataArray, canvas):
    '''
    Legacy redraw procedure: paints the cross-section array onto the board,
    clearing the field first and drawing the grid on top afterwards.

    Inputs:
    dataArray - the array to display on canvas
    canvas - tkinter canvas object
    '''
    rowCount = dataArray.shape[0]
    colCount = dataArray.shape[1]
    # cell size derived from the current canvas dimensions
    canvasHeight = canvas.winfo_height()
    canvasWidth = canvas.winfo_width()
    cellW = canvasWidth / colCount
    cellH = canvasHeight / rowCount
    # clean up the field before drawing
    n_checkered(canvas, colCount, rowCount, mode=1)
    phaseColors = {1: "red", 2: "green", 3: "blue"}
    for row in range(rowCount):
        for col in range(colCount):
            color = phaseColors.get(dataArray[row][col])
            if color is not None:
                canvas.create_rectangle(
                    col * cellW, row * cellH, col * cellW + cellW,
                    row * cellH + cellH, fill=color, outline="")
    # grid goes on top of the filled cells
    n_checkered(canvas, colCount, rowCount, mode=2)
# Procedure to set up point in the array and display it on canvas
def n_setUpPoint(event, Set, dataArray, canvas):
    '''
    Track the mouse position from a tkinter event and set (or reset) the
    matching cell of the cross-section array, drawing it on the canvas.

    Inputs
    event - the tkinter event that carries the click position
    Set - phase to set: 2 or 3 for those phases, any other non-zero value
          for phase 1, or 0 to reset the cell
    dataArray - the array that keeps the cross-section design data
    canvas - tkinter canvas object
    '''
    rows = dataArray.shape[0]
    cols = dataArray.shape[1]
    canvasHeight = canvas.winfo_height()
    canvasWidth = canvas.winfo_width()
    dX = canvasWidth / cols
    dY = canvasHeight / rows
    Col = int(event.x / dX)
    Row = int(event.y / dY)
    # ignore clicks outside the drawable area
    if not (0 < event.x < canvasWidth and 0 < event.y < canvasHeight):
        return
    if Set == 0:
        phase, color = 0, "white"
    elif Set == 3:
        phase, color = 3, "blue"
    elif Set == 2:
        phase, color = 2, "green"
    else:
        # any other non-zero Set value is treated as phase 1
        phase, color = 1, "red"
    canvas.create_rectangle(Col * dX, Row * dY, Col * dX + dX, Row * dY + dY,
                            fill=color, outline="gray")
    dataArray[Row][Col] = phase
# Function that put back together the solution vectr back to represent the crss section shape array
def n_recreateresultsArray(elementsVector, resultsVector, initialGeometryArray):
'''
Functions returns recreate cross section array with mapperd solution results
Inputs:
elementsVector - vector of crossection elements as created by the n_arrayVectorize
resultsVector - vectr with results values calculated base on the elementsVector
initialGeometryArray - the array that contains the cross section geometry model
'''
localResultsArray = np.zeros((initialGeometryArray.shape), dtype=float)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: this community-supported module is in
# 'preview' status, i.e. its interface is not yet guaranteed to be stable.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: keycloak_client
short_description: Allows administration of Keycloak clients via Keycloak API
version_added: "2.5"
description:
- This module allows the administration of Keycloak clients via the Keycloak REST API. It
requires access to the REST API via OpenID Connect; the user connecting and the client being
used must have the requisite access rights. In a default Keycloak installation, admin-cli
and an admin user would work, as would a separate client definition with the scope tailored
to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the
Keycloak API and its documentation at U(http://www.keycloak.org/docs-api/3.3/rest-api/).
Aliases are provided so camelCased versions can be used as well.
- The Keycloak API does not always sanity check inputs e.g. you can set
SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
If you do not specify a setting, usually a sensible default is chosen.
options:
state:
description:
- State of the client
- On C(present), the client will be created (or updated if it exists already).
- On C(absent), the client will be removed if it exists
choices: ['present', 'absent']
default: 'present'
realm:
description:
- The realm to create the client in.
client_id:
description:
- Client id of client to be worked on. This is usually an alphanumeric name chosen by
you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
This is 'clientId' in the Keycloak REST API.
aliases:
- clientId
id:
description:
- Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
is required. If you specify both, this takes precedence.
name:
description:
- Name of the client (this is not the same as I(client_id))
description:
description:
- Description of the client in Keycloak
root_url:
description:
- Root URL appended to relative URLs for this client
This is 'rootUrl' in the Keycloak REST API.
aliases:
- rootUrl
admin_url:
description:
- URL to the admin interface of the client
This is 'adminUrl' in the Keycloak REST API.
aliases:
- adminUrl
base_url:
description:
- Default URL to use when the auth server needs to redirect or link back to the client
This is 'baseUrl' in the Keycloak REST API.
aliases:
- baseUrl
enabled:
description:
- Is this client enabled or not?
type: bool
client_authenticator_type:
description:
- How do clients authenticate with the auth server? Either C(client-secret) or
C(client-jwt) can be chosen. When using C(client-secret), the module parameter
I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
to configure its behavior.
This is 'clientAuthenticatorType' in the Keycloak REST API.
choices: ['client-secret', 'client-jwt']
aliases:
- clientAuthenticatorType
secret:
description:
- When using I(client_authenticator_type) C(client-secret) (the default), you can
            specify a secret here (otherwise one will be generated if it does not exist). If
changing this secret, the module will not register a change currently (but the
changed secret will be saved).
registration_access_token:
description:
- The registration access token provides access for clients to the client registration
service.
This is 'registrationAccessToken' in the Keycloak REST API.
aliases:
- registrationAccessToken
default_roles:
description:
- list of default roles for this client. If the client roles referenced do not exist
yet, they will be created.
This is 'defaultRoles' in the Keycloak REST API.
aliases:
- defaultRoles
redirect_uris:
description:
- Acceptable redirect URIs for this client.
This is 'redirectUris' in the Keycloak REST API.
aliases:
- redirectUris
web_origins:
description:
- List of allowed CORS origins.
This is 'webOrigins' in the Keycloak REST API.
aliases:
- webOrigins
not_before:
description:
- Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
This is 'notBefore' in the Keycloak REST API.
aliases:
- notBefore
bearer_only:
description:
- The access type of this client is bearer-only.
This is 'bearerOnly' in the Keycloak REST API.
aliases:
- bearerOnly
type: bool
consent_required:
description:
- If enabled, users have to consent to client access.
This is 'consentRequired' in the Keycloak REST API.
aliases:
- consentRequired
type: bool
standard_flow_enabled:
description:
- Enable standard flow for this client or not (OpenID connect).
This is 'standardFlowEnabled' in the Keycloak REST API.
aliases:
- standardFlowEnabled
type: bool
implicit_flow_enabled:
description:
- Enable implicit flow for this client or not (OpenID connect).
This is 'implicitFlowEnabled' in the Keycloak REST API.
aliases:
- implicitFlowEnabled
type: bool
direct_access_grants_enabled:
description:
- Are direct access grants enabled for this client or not (OpenID connect).
This is 'directAccessGrantsEnabled' in the Keycloak REST API.
aliases:
- directAccessGrantsEnabled
type: bool
service_accounts_enabled:
description:
- Are service accounts enabled for this client or not (OpenID connect).
This is 'serviceAccountsEnabled' in the Keycloak REST API.
aliases:
- serviceAccountsEnabled
type: bool
authorization_services_enabled:
description:
- Are authorization services enabled for this client or not (OpenID connect).
This is 'authorizationServicesEnabled' in the Keycloak REST API.
aliases:
- authorizationServicesEnabled
type: bool
public_client:
description:
- Is the access type for this client public or not.
This is 'publicClient' in the Keycloak REST API.
aliases:
- publicClient
type: bool
frontchannel_logout:
description:
- Is frontchannel logout enabled for this client or not.
This is 'frontchannelLogout' in the Keycloak REST API.
aliases:
- frontchannelLogout
type: bool
protocol:
description:
            - Type of client (either C(openid-connect) or C(saml)).
choices: ['openid-connect', 'saml']
full_scope_allowed:
description:
- Is the "Full Scope Allowed" feature set for this client or not.
This is 'fullScopeAllowed' in the Keycloak REST API.
aliases:
- fullScopeAllowed
type: bool
node_re_registration_timeout:
description:
- Cluster node re-registration timeout for this client.
This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
aliases:
- nodeReRegistrationTimeout
registered_nodes:
description:
- dict of registered cluster nodes (with C(nodename) as the key and last registration
time as the value).
This is 'registeredNodes' in the Keycloak REST API.
aliases:
- registeredNodes
client_template:
description:
- Client template to use for this client. If it does not exist this field will silently
be dropped.
This is 'clientTemplate' in the Keycloak REST API.
aliases:
- clientTemplate
use_template_config:
description:
- Whether or not to use configuration from the I(client_template).
This is 'useTemplateConfig' in the Keycloak REST API.
aliases:
- useTemplateConfig
type: bool
use_template_scope:
description:
- Whether or not to use scope configuration from the I(client_template).
This is 'useTemplateScope' in the Keycloak REST API.
aliases:
- useTemplateScope
type: bool
use_template_mappers:
description:
- Whether or not to use mapper configuration from the I(client_template).
This is 'useTemplateMappers' in the Keycloak REST API.
aliases:
- useTemplateMappers
type: bool
surrogate_auth_required:
description:
- Whether or not surrogate auth is required.
This is 'surrogateAuthRequired' in the Keycloak REST API.
aliases:
- surrogateAuthRequired
type: bool
authorization_settings:
description:
- a data structure defining the authorization settings for this client. For reference,
please see the Keycloak API docs at U(http://www.keycloak.org/docs-api/3.3/rest-api/index.html#_resourceserverrepresentation).
This is 'authorizationSettings' in the Keycloak REST API.
aliases:
- authorizationSettings
protocol_mappers:
description:
- a list of dicts defining protocol mappers for this client.
This is 'protocolMappers' in the Keycloak REST API.
aliases:
- protocolMappers
suboptions:
consentRequired:
description:
- Specifies whether a user needs to provide consent to a client for this mapper to be active.
consentText:
description:
- The human-readable name of the consent the user is presented to accept.
id:
description:
- Usually a UUID specifying the internal ID of this protocol mapper instance.
name:
description:
- The name of this protocol mapper.
protocol:
description:
- This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper
is active.
choices: ['openid-connect', 'saml']
protocolMapper:
description:
- The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
impossible to provide since this may be extended | |
source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
# x = self.embed_scale * self.embed_tokens(src_tokens)
# if self.embed_positions is not None:
# x += self.embed_positions(src_tokens)
if self.bert_backprop or self.no_bert_precompute:
# extract roberta on the fly
last_layer = self.roberta.extract_features(src_wordpieces)
# remove sentence start
bsize, max_len, emb_size = last_layer.shape
mask = (src_wordpieces != 0).unsqueeze(2).expand(last_layer.shape)
last_layer = last_layer[mask].view((bsize, max_len - 1, emb_size))
# remove sentence end
last_layer = last_layer[:, :-1, :]
# apply scatter, src_wp2w was inverted in pre-processing to use
# scatter's left side padding . We need to flip the result.
source_fix_emb2 = scatter_mean(
last_layer,
src_wp2w.unsqueeze(2),
dim=1
)
source_fix_emb2 = source_fix_emb2.flip(1)
# Remove extra padding
source_fix_emb2 = source_fix_emb2[:, -src_tokens.shape[1]:, :]
# do not backprop for on-the-fly computing
if self.no_bert_precompute:
bert_embeddings = source_fix_emb2.detach()
else:
bert_embeddings = source_fix_emb2
# DEBUG: check precomputed and on the fly sufficiently close
# abs(source_fix_emb2 - source_fix_emb).max()
else:
# use pre-extracted roberta
bert_embeddings = source_fix_emb
x = self.subspace(bert_embeddings)
if self.args.apply_tgt_input_src and self.args.tgt_input_src_emb == 'bot':
src_embs = x # size (B, T, C)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if self.layer_norm:
x = self.layer_norm(x)
if self.args.apply_tgt_input_src:
if self.args.tgt_input_src_emb == 'top':
src_embs = x.transpose(0, 1)
elif self.args.tgt_input_src_emb == 'raw':
src_embs = bert_embeddings
elif self.args.tgt_input_src_emb == 'bot':
pass # already dealt with above
else:
raise NotImplementedError
else:
src_embs = None
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
'src_embs': src_embs, # B x T x C
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(1, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
if encoder_out['src_embs'] is not None:
encoder_out['src_embs'] = encoder_out['src_embs'].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        """Build the decoder: embeddings, decoder layers and output projection.

        Args:
            args (argparse.Namespace): parsed command-line arguments
            dictionary (~fairseq.data.Dictionary): decoding dictionary
            embed_tokens (torch.nn.Embedding): output embedding
            no_encoder_attn (bool, optional): whether to omit encoder-side
                attention in each decoder layer (default: False).
        """
        super().__init__(dictionary)
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        # Scale token embeddings by sqrt(dim), as in the original Transformer.
        self.embed_scale = math.sqrt(embed_dim)  # todo: try with input_embed_dim
        # controls the use of stack transformer
        self.encode_state_machine = args.encode_state_machine
        if self.encode_state_machine:
            # positions of buffer and stack for each time step
            self.embed_stack_positions = PositionalEmbedding(
                args.max_target_positions, args.decoder_embed_dim,
                padding_idx, learned=args.decoder_learned_pos,
            )
        # Projection from the token-embedding width to the decoder width, only
        # needed when the two differ.
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerDecoderLayer(args, no_encoder_attn)
            for _ in range(args.decoder_layers)
        ])
        self.adaptive_softmax = None
        # Output-side projection, skipped when tying adaptive weights (the
        # adaptive softmax then owns the projection).
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            # Separate output embedding matrix when not sharing with the input.
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
        # copying the arguments for the separate model in decoding to use
        self.args = args
        # target input to include source token embeddings
        if self.args.apply_tgt_input_src:
            assert self.args.tgt_input_src_emb != 'raw', 'Not implemented yet'
            if self.args.tgt_input_src_combine == 'cat':
                # Concatenation needs a projection back down to the token
                # embedding width before entering the decoder stack.
                self.combine_src_embs = Linear(input_embed_dim + args.encoder_embed_dim, input_embed_dim, bias=False)
def forward(self, prev_output_tokens, encoder_out, memory=None, memory_pos=None,
incremental_state=None, logits_mask=None, logits_indices=None,
tgt_vocab_masks=None, tgt_actnode_masks=None, tgt_src_cursors=None,
tgt_actedge_masks=None, tgt_actedge_cur_nodes=None, tgt_actedge_pre_nodes=None,
tgt_actedge_directions=None, tgt_actnode_masks_shift=None,
**unused):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
# breakpoint()
graph_self_attn_mask = get_graph_self_attn_mask(tgt_actedge_masks=tgt_actedge_masks,
tgt_actedge_cur_nodes=tgt_actedge_cur_nodes,
tgt_actedge_pre_nodes=tgt_actedge_pre_nodes,
tgt_actedge_directions=tgt_actedge_directions,
tgt_actnode_masks_shift=tgt_actnode_masks_shift,
mask_num_heads=self.args.tgt_graph_heads,
num_heads=self.layers[0].self_attn.num_heads,
tgt_graph_mask=self.args.tgt_graph_mask)
x, extra = self.extract_features(
prev_output_tokens,
memory,
memory_pos,
encoder_out,
incremental_state,
tgt_src_cursors=tgt_src_cursors,
tgt_actnode_masks=tgt_actnode_masks,
# graph structure: decoder self-attention mask
graph_self_attn_mask=graph_self_attn_mask
)
x = self.output_layer(
x,
logits_mask=logits_mask,
logits_indices=logits_indices,
tgt_vocab_masks=tgt_vocab_masks,
)
# DEBUG: (consumes time)
# if (x != x).any():
# import pdb; pdb.set_trace()
# print()
return x, extra
def extract_features(self, prev_output_tokens, memory, memory_pos,
encoder_out=None, incremental_state=None,
tgt_src_cursors=None, tgt_actnode_masks=None,
graph_self_attn_mask=None,
**unused):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
# It needs only the last auto-regressive element. Rest is cached.
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# TODO this is a hacky way of ignoring these two
memory = memory[:, :, -1:] if memory is not None else None
memory_pos = memory_pos[:, :, -1:] if memory_pos is not None else None
# graph structure mask on the decoder self-attention: take out the last row of (tgt_len, tgt_len)
# for only one step
if graph_self_attn_mask is not None:
graph_self_attn_mask = (graph_self_attn_mask[0][:, -1, :].unsqueeze(1),
graph_self_attn_mask[1][:, -1].unsqueeze(1))
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
# ========== combine the corresponding source token embeddings with the action embeddings as input ==========
if self.args.apply_tgt_input_src:
# 1) take out the source embeddings
src_embs = encoder_out['src_embs'] # size (batch_size, src_max_len, encoder_emb_dim)
if not self.args.tgt_input_src_backprop:
src_embs = src_embs.detach()
# 2) align the source embeddings to the tgt input actions
assert tgt_src_cursors is not None
tgt_src_index = tgt_src_cursors.clone() # size (bsz, tgt_max_len)
if encoder_out['encoder_padding_mask'] is not None:
src_num_pads = encoder_out['encoder_padding_mask'].sum(dim=1, keepdim=True)
tgt_src_index = tgt_src_index + src_num_pads # NOTE this is key to left padding!
tgt_src_index = tgt_src_index.unsqueeze(-1).repeat(1, 1, src_embs.size(-1))
# or
# tgt_src_index = tgt_src_index.unsqueeze(-1).expand(-1, -1, src_embs.size(-1))
src_embs = torch.gather(src_embs, 1, tgt_src_index)
# size (bsz, tgt_max_len, src_embs.size(-1))
# 3) combine the action embeddings with the aligned source token embeddings
if self.args.tgt_input_src_combine == 'cat':
x = self.combine_src_embs(torch.cat([src_embs, x], dim=-1))
elif self.args.tgt_input_src_combine == 'add':
x = src_embs + x
else:
raise NotImplementedError
# ===========================================================================================================
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
attn_all = []
inner_states = [x]
# ========== alignment guidance in the cross-attention: get the mask ==========
if self.args.apply_tgt_src_align:
assert tgt_src_cursors is not None
cross_attention_mask = get_cross_attention_mask_heads(tgt_src_cursors,
encoder_out['encoder_out'].size(0),
encoder_out['encoder_padding_mask'],
self.args.tgt_src_align_focus,
self.args.tgt_src_align_heads,
self.layers[0].encoder_attn.num_heads)
else:
cross_attention_mask = None
# ==============================================================================
# TODO there are some problems with the pointer mask
# attention mask dimension is (bsz * num_heads, target_size, source_size)
# where for each batch, head dimension comes first
# BUT here, the pointer mask assumes the batch dimension comes first, since it only generates
# the number of heads needed for pointer and stacks them
| |
file_name)
return None
self.add_cached_array_to_imported_list(import_array,
file_name,
keyword,
discrete = discrete,
uom = uom,
time_index = time_index,
null_value = null_value,
property_kind = property_kind,
local_property_kind_uuid = local_property_kind_uuid,
facet_type = facet_type,
facet = facet,
realization = realization)
return import_array
def decoarsen_imported_list(self, decoarsen_array = None, reactivate = True):
"""Decoarsen imported Nexus properties if needed.
arguments:
decoarsen_array (int array, optional): if present, the naturalised cell index of the coarsened host cell, for each fine cell;
if None, the ICOARS keyword is searched for in the imported list and if not found KID data is used to derive the mapping
reactivate (boolean, default True): if True, the parent grid will have decoarsened cells' inactive flag set to that of the
host cell
returns:
a copy of the array used for decoarsening, if established, or None if no decoarsening array was identified
notes:
a return value of None indicates that no decoarsening occurred;
coarsened values are redistributed quite naively, with coarse volumes being split equally between fine cells, similarly for
length and area based properties; default used for most properties is simply to replicate the coarse value;
the ICOARS array itself is left unchanged, which means the method should only be called once for an imported list;
if no array is passed and no ICOARS array found, the KID values are inspected and the decoarsen array reverse engineered;
the method must be called before the imported arrays are written to hdf5;
reactivation only modifies the grid object attribute and does not write to hdf5, so the method should be called prior to
writing the grid in this situation
"""
# imported_list is list pf:
# (0: uuid, 1: file_name, 2: keyword, 3: cached_name, 4: discrete, 5: uom, 6: time_index, 7: null_value, 8: min_value, 9: max_value,
# 10: property_kind, 11: facet_type, 12: facet, 13: realization, 14: indexable_element, 15: count, 16: local_property_kind_uuid,
# 17: const_value)
skip_keywords = ['UID', 'ICOARS', 'KID', 'DAD'] # TODO: complete this list
decoarsen_length_kinds = ['length', 'cell length', 'thickness', 'permeability thickness', 'permeability length']
decoarsen_area_kinds = ['transmissibility']
decoarsen_volume_kinds = ['volume', 'rock volume', 'pore volume', 'fluid volume']
assert self.grid is not None
kid_attr_name = None
k_share = j_share = i_share = None
if decoarsen_array is None:
for import_item in self.imported_list:
if (import_item[14] is None or import_item[14] == 'cells') and import_item[4] and hasattr(
self, import_item[3]):
if import_item[2] == 'ICOARS':
decoarsen_array = self.__dict__[import_item[3]] - 1 # ICOARS values are one based
break
if import_item[2] == 'KID':
kid_attr_name = import_item[3]
if decoarsen_array is None and kid_attr_name is not None:
kid = self.__dict__[kid_attr_name]
kid_mask = (kid == -3) # -3 indicates cell inactive due to coarsening
assert kid_mask.shape == tuple(self.grid.extent_kji)
if np.any(kid_mask):
log.debug(f'{np.count_nonzero(kid_mask)} cells marked as requiring decoarsening in KID data')
decoarsen_array = np.full(self.grid.extent_kji, -1, dtype = int)
k_share = np.zeros(self.grid.extent_kji, dtype = int)
j_share = np.zeros(self.grid.extent_kji, dtype = int)
i_share = np.zeros(self.grid.extent_kji, dtype = int)
natural = 0
for k0 in range(self.grid.nk):
for j0 in range(self.grid.nj):
for i0 in range(self.grid.ni):
# if decoarsen_array[k0, j0, i0] < 0:
if kid[k0, j0, i0] == 0:
# assert not kid_mask[k0, j0, i0]
ke = k0 + 1
while ke < self.grid.nk and kid_mask[ke, j0, i0]:
ke += 1
je = j0 + 1
while je < self.grid.nj and kid_mask[k0, je, i0]:
je += 1
ie = i0 + 1
while ie < self.grid.ni and kid_mask[k0, j0, ie]:
ie += 1
# todo: check for conflict and resolve
decoarsen_array[k0:ke, j0:je, i0:ie] = natural
k_share[k0:ke, j0:je, i0:ie] = ke - k0
j_share[k0:ke, j0:je, i0:ie] = je - j0
i_share[k0:ke, j0:je, i0:ie] = ie - i0
elif not kid_mask[k0, j0, i0]: # inactive for reasons other than coarsening
decoarsen_array[k0, j0, i0] = natural
k_share[k0, j0, i0] = 1
j_share[k0, j0, i0] = 1
i_share[k0, j0, i0] = 1
natural += 1
assert np.all(decoarsen_array >= 0)
if decoarsen_array is None:
return None
cell_count = decoarsen_array.size
host_count = len(np.unique(decoarsen_array))
log.debug(f'{host_count} of {cell_count} are hosts; difference is {cell_count - host_count}')
assert cell_count == self.grid.cell_count()
if np.all(decoarsen_array.flatten() == np.arange(cell_count, dtype = int)):
return None # identity array
if k_share is None:
sharing_needed = False
for import_item in self.imported_list:
kind = import_item[10]
if kind in decoarsen_volume_kinds or kind in decoarsen_area_kinds or kind in decoarsen_length_kinds:
sharing_needed = True
break
if sharing_needed:
k_share = np.zeros(self.grid.extent_kji, dtype = int)
j_share = np.zeros(self.grid.extent_kji, dtype = int)
i_share = np.zeros(self.grid.extent_kji, dtype = int)
natural = 0
for k0 in range(self.grid.nk):
for j0 in range(self.grid.nj):
for i0 in range(self.grid.ni):
if k_share[k0, j0, i0] == 0:
ke = k0 + 1
while ke < self.grid.nk and decoarsen_array[ke, j0, i0] == natural:
ke += 1
je = j0 + 1
while je < self.grid.nj and decoarsen_array[k0, je, i0] == natural:
je += 1
ie = i0 + 1
while ie < self.grid.ni and decoarsen_array[k0, j0, ie] == natural:
ie += 1
k_share[k0:ke, j0:je, i0:ie] = ke - k0
j_share[k0:ke, j0:je, i0:ie] = je - j0
i_share[k0:ke, j0:je, i0:ie] = ie - i0
natural += 1
if k_share is not None:
assert np.all(k_share > 0) and np.all(j_share > 0) and np.all(i_share > 0)
volume_share = (k_share * j_share * i_share).astype(float)
k_share = k_share.astype(float)
j_share = j_share.astype(float)
i_share = i_share.astype(float)
property_count = 0
for import_item in self.imported_list:
if import_item[3] is None or not hasattr(self, import_item[3]):
continue # todo: handle decoarsening of const arrays?
if import_item[14] is not None and import_item[14] != 'cells':
continue
coarsened = self.__dict__[import_item[3]].flatten()
assert coarsened.size == cell_count
keyword = import_item[2]
if keyword.upper() in skip_keywords:
continue
kind = import_item[10]
if kind in decoarsen_volume_kinds:
redistributed = coarsened[decoarsen_array] / volume_share
elif kind in decoarsen_area_kinds:
# only transmissibilty currently in this set of supported property kinds
log.warning(
f'decoarsening of transmissibility {keyword} skipped due to simple methods not yielding correct values'
)
elif kind in decoarsen_length_kinds:
facet_dir = import_item[12] if import_item[11] == 'direction' else None
if kind in ['thickness', 'permeability thickness'] or (facet_dir == 'K'):
redistributed = coarsened[decoarsen_array] / k_share
elif facet_dir == 'J':
redistributed = coarsened[decoarsen_array] / j_share
elif facet_dir == 'I':
redistributed = coarsened[decoarsen_array] / i_share
else:
log.warning(f'decoarsening of length property {keyword} skipped as direction not established')
else:
redistributed = coarsened[decoarsen_array]
self.__dict__[import_item[3]] = redistributed.reshape(self.grid.extent_kji)
property_count += 1
if property_count:
log.debug(f'{property_count} properties decoarsened')
if reactivate and hasattr(self.grid, 'inactive'):
log.debug('reactivating cells inactive due to coarsening')
pre_count = np.count_nonzero(self.grid.inactive)
self.grid.inactive = self.grid.inactive.flatten()[decoarsen_array].reshape(self.grid.extent_kji)
post_count = np.count_nonzero(self.grid.inactive)
log.debug(f'{pre_count - post_count} cells reactivated')
return decoarsen_array
def write_nexus_property(
self,
part,
file_name,
keyword = None,
headers = True,
append = False,
columns = 20,
decimals = 3, # note: decimals only applicable to real numbers
blank_line_after_i_block = True,
blank_line_after_j_block = False,
space_separated = False, # default is tab separated
use_binary = False,
binary_only = False,
nan_substitute_value = None):
"""Writes the property array to a file in a format suitable for including as nexus input.
arguments:
part (string): the part name for which the array is to be exported
file_name (string): the path of the file to be created (any existing file will be overwritten)
keyword (string, optional, default None): if not None, the Nexus keyword to be included in the
ascii export file (otherwise data only is written, without a keyword)
headers (boolean, optional, default True): if True, some header comments are included in the
ascii export file, using a Nexus comment character
append (boolean, optional, default False): if True, any existing file is appended to rather than
overwritten
columns (integer, optional, default 20): the maximum number of data items to be written per line
decimals (integer, optional, default 3): the number of decimal places included in the values
written to the ascii export file (ignored for integer data)
blank_line_after_i_block (boolean, optional, default True): if True, a blank line is inserted
after each I-block of data (ie. when the J index changes)
blank_line_after_j_block (boolean, optional, default False): if True, a blank line is inserted
after each J-block of data (ie. when the K index changes)
space_separated (boolean, optional, default False): if | |
Sorry about that.')
return redirect("leaders_interfaces:courses")
except LessonNotFoundException:
messages.error(request,
'An error has occurred (^・x・^). We cannot find the lesson you asked for! Sorry about that.')
return redirect("leaders_interfaces:courses_lessons", course_id)
# Get all associated learning_resources objects with the lessons
learning_resources = lesson.lessonslearningstylesresources_set.all()
# If there are no resources pass a message.
if not learning_resources:
messages.error(request,
'It looks like there are no learning resources for us to display (^・x・^). Please '
'populate '
'some resources so students have something to see!')
# Render the template of course resources passing the lesson, course, and resources as context.
return render(request, lessons_resources_template,
{'lesson': lesson, 'course': course,
'learning_resources': learning_resources})
class LessonsResourceCreateView(View):
    """
    The Lessons Resources Create View is responsible for providing an interface which allows users to create existing
    resources for lessons. This is facilitated by rendering the lessons_resources_create_template specified.
    """
    def get(self, request, course_id, lesson_id):
        """
        The get method of the leaders lessons resources create view is responsible for providing the interface to
        append a new resource to a lesson. This is achieved by validating that the lesson belongs to the
        course and the course belongs to the user and handling HTTP requests as appropriate.
        """
        # Validation
        # Get user details
        leader = request.user
        # Get the lesson and course we want to work with; each lookup raises if
        # the object does not exist or does not belong to this leader.
        try:
            course = get_leaders_course(course_id, leader)
            lesson = get_leaders_lesson(lesson_id, course, leader)
        # Forward error handle back where the user came from with an error message.
        except CourseNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the course you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses")
        except LessonNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the lesson you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses_lessons", course_id)
        # Get the LessonLearningStylesResources form construct with initial values.
        create_form = LessonsLearningStylesResourcesCreateForm(course=course, lesson=lesson)
        return render(request, lessons_resources_create_template,
                      {'resources_create_form': create_form, 'lesson': lesson,
                       'course': course,
                       })
    def post(self, request, course_id, lesson_id):
        """
        The post method of the leaders lessons resources create view is responsible for handling information passed
        by the interface creating lesson resources. This is achieved by getting the resources sent by the HTTP post
        request, performing validation that the lesson belongs to the course and the course belongs to the user,
        then handling resources as appropriate.
        """
        # Validation
        # Get user details
        leader = request.user
        # Get the lesson and course we want to work with.
        try:
            course = get_leaders_course(course_id, leader)
            lesson = get_leaders_lesson(lesson_id, course, leader)
        # Forward error handle back where the user came from with an error message.
        except CourseNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the course you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses")
        except LessonNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the lesson you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses_lessons", course_id)
        # Bind the submitted data (including uploaded files) to the create form.
        create_form = LessonsLearningStylesResourcesCreateForm(request.POST, request.FILES, lesson=lesson,
                                                               course=course)
        if create_form.is_valid():
            create_form.save(commit=True)
            # Get all associated learning_resources objects with the lessons
            learning_resources = lesson.lessonslearningstylesresources_set.all()
            # If there are no resources pass a message.
            # NOTE(review): this branch looks unreachable after a successful save
            # (the set will contain at least the new resource) — confirm.
            if not learning_resources:
                messages.error(request,
                               'It looks like there are no learning resources for us to display (^・x・^). Please '
                               'populate '
                               'some resources so students have something to see!')
            # NOTE(review): rendering here (instead of redirecting) breaks the
            # POST-redirect-GET pattern; a browser refresh re-submits the form.
            return render(request, lessons_resources_template,
                          {'lesson': lesson, 'course': course,
                           'learning_resources': learning_resources})
        # Re-interface the page with validation errors passed by the form
        else:
            # Get all the resources objects associated with the lesson objects.
            learning_resources = lesson.lessonslearningstylesresources_set.all()
            # If there are no resources pass a message.
            if not learning_resources:
                messages.error(request,
                               'It looks like there are no learning resources for us to display (^・x・^). Please '
                               'populate '
                               'some resources so students have something to see!')
            return render(request, lessons_resources_create_template,
                          {'resources_create_form': create_form,
                           'course': course, 'lesson': lesson, 'learning_resources': learning_resources
                           })
class LessonsResourceDeleteView(View):
    """
    Interface for deleting existing learning resources from a lesson.
    GET renders the deletion page; POST removes the selected resources.
    Both verbs render the resources_delete_template.
    """
    def get(self, request, course_id, lesson_id):
        """
        Render the resource-deletion page for a lesson, after checking that the
        lesson belongs to the course and the course belongs to the requesting
        leader. Only resources belonging to the lesson are listed.
        """
        user = request.user
        # Ownership validation: both lookups raise if the object is missing or
        # not owned by this leader.
        try:
            course = get_leaders_course(course_id, user)
            lesson = get_leaders_lesson(lesson_id, course, user)
        except CourseNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the course you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses")
        except LessonNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the lesson you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses_lessons", course_id)
        # Resources attached to this lesson; warn when there is nothing to show.
        resources = lesson.lessonslearningstylesresources_set.all()
        if not resources:
            messages.error(request,
                           'It looks like there are no learning resources for us to display (^・x・^). Please '
                           'populate '
                           'some resources so students have something to see!')
        context = {'course': course, 'lesson': lesson, 'learning_resources': resources}
        return render(request, resources_delete_template, context)
    def post(self, request, course_id, lesson_id):
        """
        Delete the resources selected on the deletion page. Validates that the
        lesson belongs to the course, the course belongs to the requesting
        leader, and removes only the chosen resources before re-rendering.
        """
        user = request.user
        # Ownership validation, mirroring get().
        try:
            course = get_leaders_course(course_id, user)
            lesson = get_leaders_lesson(lesson_id, course, user)
        except CourseNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the course you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses")
        except LessonNotFoundException:
            messages.error(request,
                           'An error has occurred (^・x・^). We cannot find the lesson you asked for! Sorry about that.')
            return redirect("leaders_interfaces:courses_lessons", course_id)
        # Remove every resource whose checkbox was ticked on the form.
        selected_ids = request.POST.getlist('checks')
        remove_learning_resources_list(lesson, selected_ids)
        # Re-query what is left and warn if the lesson is now empty.
        resources = lesson.lessonslearningstylesresources_set.all()
        if not resources:
            messages.error(request,
                           'It looks like there are no learning resources for us to display (^・x・^). Please '
                           'populate '
                           'some resources so students have something to see!')
        context = {'course': course, 'lesson': lesson, 'learning_resources': resources}
        return render(request, resources_delete_template, context)
class LessonsResourceEditView(View):
"""
The Lessons Resources Edit View is responsible for providing an interface which allows users to edit existing
resources for lessons. This is facilitated by rendering the lessons_resources_edit_template specified.
"""
def get(self, request, course_id, lesson_id, learning_resource_id):
"""
The get method of the leaders lessons resources edit view is responsible for providing the interface to
edit resources of lessons. This is achieved by validating that the lesson belongs to the
course and the course belongs to the user and the resource belongs to the lesson and handling HTTP requests as appropriate.
"""
# Validation
# Get user details
leader = request.user
# Get the lesson and course we want to work with.
try:
course = get_leaders_course(course_id, leader)
lesson = get_leaders_lesson(lesson_id, course, leader)
learning_resource = get_leaders_learning_resource(learning_resource_id, lesson, course, leader)
# Forward error handle back where the user came from with an error message.
except CourseNotFoundException:
messages.error(request,
'An error has occurred (^・x・^). We cannot find the course you asked for! Sorry about that.')
return redirect("leaders_interfaces:courses")
except LessonNotFoundException:
messages.error(request,
'An error has occurred (^・x・^). We cannot find the lesson you asked for! Sorry about that.')
return redirect("leaders_interfaces:courses_lessons", course_id)
except LearningResourceNotFoundException:
messages.error(request,
'An error has occurred (^・x・^). We cannot find the learning resource you asked for! Sorry '
'about that.')
return redirect("leaders_interfaces:lessons_resources", course_id, lesson_id)
edit_form = LessonsLearningStylesResourcesEditForm(
initial={'title': learning_resource.title, 'description': learning_resource.description,
'learning_style': learning_resource.learning_style}, course=course, lesson=lesson,
lessonlearningstyleresource=learning_resource)
| |
old =time.time()
global callback_queue
tid = threading.currentThread()
callback_queue[tid] = False
# while (compare_and_swap(0, 1) == 1):
# debug_probe_string = "CALLBACK_PROBE : " + kernel.name + " " + str(device) + " " + str(
# event_type) + " event"
# logging.debug(debug_probe_string)
#boltLock.acquire()
debug_trigger_string = "CALLBACK_TRIGGERED : " + str(kernel.id) + " " + str(
event_type) + " execution finished for device " + str(device)
logging.debug(debug_trigger_string)
if event_type == 'WRITE':
host_event_info.write_end = time.time()
elif event_type == 'READ':
#host_event_info.read_end = time.time()
kernel__data1[kernel.id] = time.time()
global device_history
logging.debug("CALLBACK : " +str(host_event_info))
logging.debug("CALLBACK : Pushing info onto " + str(device) + str(dev_no))
device_history[device][dev_no].append(host_event_info)
if kernel.multiple_break:
if device == 'cpu':
kernel.chunks_cpu -= 1
else:
kernel.chunks_gpu -= 1
kernel.chunks_left -= 1
logging.info("Chunks Left: "+ str(kernel.chunks_left))
if kernel.chunks_left == 0:
#kernel__data[kernel.id] = time.time() -kernel__data[kernel.id]
kernel_chunkleft[kernel.id] = time.time()
#Step 1 - add kernel to finished kernels list of task object
task = kernel.task_object
dag = task.task_dag_object
info_trigger_string = "EVENT: " + str(kernel.id) + " finished on device " + str(device)
logging.info(info_trigger_string)
#Step 2 - update succesors
succesors = dag.get_kernel_children_ids(kernel.id)
logging.info("Successors being added" + str(succesors))
child_tasks_to_be_dispatched = []
for child in succesors:
child_kernel = dag.kernels[child]
child_task = child_kernel.task_object
if child_task.id != task.id:
dag.kernel_flags[child].finished_parents +=1
if dag.kernel_flags[child].is_ready():
child_task.resource_lock.acquire()
child_task.add_free_kernels([child_kernel])
child_task.resource_lock.notifyAll()
child_task.resource_lock.release()
#Case 1 - child is part of a task that has
# already dispatched - this is enough
#Case 2 - > else new task needs to be added to the FQ
frontier_Q_lock.acquire()
if not child_task.has_enqueued:
logging.info("FQ: Adding to frontier queue task containing kernel " + str(child))
frontier_Q.append(child_task)
child_task.has_enqueued = True
else:
logging.info("TDQ: Adding to child_tasks_to_be_dispatched task containing kernel" + str(child))
child_tasks_to_be_dispatched.append(child_task)
# child_task.dispatch_all()
frontier_Q_lock.notifyAll()
frontier_Q_lock.release()
#Step 3 :- check if task has finished
for task in child_tasks_to_be_dispatched:
task.dispatch_all()
elif event_type == 'KERNEL':
host_event_info.ndrange_end = time.time()
callback_queue[tid] = True
#bolt[0] = 0
logging.debug("CALLBACK_RESET : " + str(kernel.id) + " Resetting bolt value by " + str(device) + " " + str(
event_type) + " event")
extime = time.time() - old
global ex_callback_queue
ex_callback_queue[event_type][tid] = extime
#boltLock.release()
except TypeError:
pass
return cb
#########################################################################################################################
def generate_unique_id():
    """
    Build and return a fresh, globally-unique identifier string.

    :return: Unique ID
    :rtype: String
    """
    import uuid
    new_uuid = uuid.uuid1()
    return str(new_uuid)
class Kernel(object):
    """
    Class to handle all operations performed on an OpenCL kernel.

    :ivar dataset: An integer representing size of the data on which kernel will be dispatched.
    :ivar id: An id that is used to identify a kernel uniquely.
    :ivar eco: A dictionary mapping between size of dataset and Estimated Computation Overhead
    :ivar name: Name of the Kernel
    :ivar src: Path to the Kernel source file.
    :ivar partition: An integer denoting the partition class of the kernel.
    :ivar work_dimension: Work Dimension of the Kernel.
    :ivar global_work_size: A list denoting global work dimensions along different axes.
    :ivar local_work_size: A list denoting local work dimensions along different axes.
    :ivar buffer_info: Properties of Buffers
    :ivar input_buffers: Dictionaries containing actual cl.Buffer objects.
    :ivar output_buffers: Dictionaries containing actual cl.Buffer objects.
    :ivar io_buffers: Dictionaries containing actual cl.Buffer objects.
    :ivar data: Numpy Arrays maintaining the input and output data of the kernels.
    :ivar buffer_deps: Dictionary mapping containing buffer dependencies.
    :ivar variable_args: Data corresponding to Variable arguments of the kernel.
    :ivar local_args: Information regarding Local Arguments of the kernel.
    :ivar kernel_objects: Dictionary mapping between devices and compiled and built pyopencl.Kernel objects.
    :ivar events: Dictionary containing pyschedcl.KEvents.
    :ivar source: String containing contents of kernel file.
    :ivar clevents: Dictionary containing pyopencl.Events.
    """
def __init__(self, src, task_dag_object=None, dataset=1024, partition=None, identifier=None):
"""
Initialise attributes of Kernel event.
"""
self.rank = 0
self.task_object = None
self.exec_time={'cpu':0.0,'gpu':0.0}
self.dataset = dataset
self.symbolic_variables = src["symbolicVariables"]
if 'id' in src:
#print "Source ID", src['id']
self.id = int(src['id'])
else:
#print "Random ID"
self.id = generate_unique_id()
self.optm_device = 0
#if identifier is not None:
# print "Identifier ID"
# self.id = identifieri
################ TODO: Remove global version of task dag object##############################
if task_dag_object != None:
self.task_dag_object = task_dag_object
else:
self.task_dag_object = 0
self.release_times = 2
###############################################
if 'ecos' in src and str(dataset) in src['ecos']:
self.eco = src['ecos'][str(dataset)]
elif 'eco' in src:
self.eco = src['eco']
else:
self.eco = 1
self.name = src['name']
self.src = src['src']
self.src_cpu = src['src']
self.src_gpu = src['src']
#self.partition = src['partition']
if partition is not None:
self.partition = partition
else:
self.partition = src['partition']
self.work_dimension = src['workDimension']
self.global_work_size = src['globalWorkSize']
if type(self.global_work_size) in [str]: #, unicode]:
self.global_work_size = eval(self.global_work_size)
if type(self.global_work_size) is int:
self.global_work_size = [self.global_work_size]
if 'localWorkSize' in src:
self.local_work_size = src['localWorkSize']
else:
self.local_work_size = []
if 'localChunkFactor' in src:
self.local_chunk=src['localChunkFactor']
else:
self.local_chunk=[]
self.global_work_offset = []
if 'globalChunkFactor' in src:
self.global_chunk=src['localChunkFactor']
else:
self.global_chunk=[]
if type(self.local_work_size) in [str]: #, unicode]:
self.local_work_size = eval(self.local_work_size)
elif type(self.local_work_size) is int:
self.local_work_size = [self.local_work_size]
self.buffer_info = dict()
self.macros = dict()
if 'macros_values' in src:
self.macros=src['macros_values']
if 'inputBuffers' in src:
self.buffer_info['input'] = src['inputBuffers']
else:
self.buffer_info['input'] = []
if 'outputBuffers' in src:
self.buffer_info['output'] = src['outputBuffers']
else:
self.buffer_info['output'] = []
if 'ioBuffers' in src:
self.buffer_info['io'] = src['ioBuffers']
else:
self.buffer_info['io'] = []
##changed to support chunking
self.input_buffers = {'gpu': dict(), 'cpu': dict()}
self.output_buffers = {'gpu': dict(), 'cpu': dict()}
self.io_buffers = {'gpu': dict(), 'cpu': dict()}
self.data = {}
self.buffer_deps = {}
if 'varArguments' in src:
self.variable_args = deepcopy(src['varArguments'])
self.vargs = src['varArguments']
else:
self.variable_args = []
self.vargs = []
if 'cpuArguments' in src:
self.cpu_args = src['cpuArguments']
print("Ignoring CPU Arguments")
if 'gpuArguments' in src:
self.gpu_args = src['gpuArguments']
print("Ignoring GPU Arguments")
if 'localArguments' in src:
self.local_args = src['localArguments']
for i in range(len(self.local_args)):
self.local_args[i]['size'] = eval(self.local_args[i]['size'])
else:
self.local_args = []
# self.buffer_info['local'] = deepcopy(self.local_args)
self.kernel_objects = dict()
for btype in ['input', 'output', 'io']:
for i in range(len(self.buffer_info[btype])):
if type(self.buffer_info[btype][i]['size']) in [str]: #, unicode]:
self.buffer_info[btype][i]['size'] = eval(self.buffer_info[btype][i]['size'])
if 'chunk' in self.buffer_info[btype][i] and type(self.buffer_info[btype][i]['chunk']) in [str]: #, unicode]:
self.buffer_info[btype][i]['chunk'] = eval(self.buffer_info[btype][i]['chunk'])
self.buffer_info[btype][i]['create'] = True
self.buffer_info[btype][i]['enq_write'] = True
self.buffer_info[btype][i]['enq_read'] = True
if 'from' in self.buffer_info[btype][i]:
self.buffer_deps[self.buffer_info[btype][i]['pos']] = (self.buffer_info[btype][i]['from']['kernel'],
self.buffer_info[btype][i]['from']['pos'])
self.partition_multiples = self.get_partition_multiples()
self.events = {'gpu': dict(), 'cpu': dict()}
self.source = None
# self.clevents = {'gpu': dict(), 'cpu': dict()}
self.chunks_left = 1
self.multiple_break = False
self.chunks_cpu = 0
self.chunks_gpu = 0
self.write_events = []
self.read_events = []
self.nd_range_event = []
def get_device_requirement(self):
req = {'gpu': 0, 'cpu': 0, 'all': 0}
if self.partition > 0:
req['gpu'] += 1
req['all'] += 1
if self.partition < 10:
req['cpu'] += 1
req['all'] += 1
return req
def dump_json(self):
import json
dump_js = dict()
dump_js['src'] = self.src
dump_js['dataset'] = self.dataset
dump_js['id'] = self.id
dump_js['name'] = self.name
dump_js['ecos'] = dict()
dump_js['ecos'][str(self.dataset)] = self.eco
dump_js['macros_values'] = self.macros
dump_js['partition'] = self.partition
dump_js['workDimension'] = self.work_dimension
dump_js['globalWorkSize'] = self.global_work_size
dump_js['localWorkSize'] = self.local_work_size
dump_js['inputBuffers'] = deepcopy(self.buffer_info['input'])
for i in dump_js['inputBuffers']:
i['size'] = str(i['size'])
dump_js['outputBuffers'] = deepcopy(self.buffer_info['output'])
for i in dump_js['outputBuffers']:
i['size'] = str(i['size'])
dump_js['ioBuffers'] = self.buffer_info['io']
if hasattr(self, 'variable_args'):
dump_js['varArguments'] = self.variable_args
if hasattr(self, 'vargs'):
dump_js['varArguments'] = self.vargs
if hasattr(self, 'cpu_args'):
dump_js['cpuArguments'] = deepcopy(self.cpu_args)
for i in dump_js['cpuArguments']:
i['size'] = str(i['size'])
if hasattr(self, 'gpu_args'):
dump_js['gpuArguments'] = deepcopy(self.gpu_args)
for i in dump_js['gpuArguments']:
i['size'] = str(i['size'])
if hasattr(self, 'local_args'):
dump_js['localArguments'] = deepcopy(self.local_args)
for i in dump_js['localArguments']:
i['size'] = str(i['size'])
return deepcopy(dump_js)
def get_num_global_work_items(self):
"""
Returns the total number of global work items based on global work size.
:return: Total number of global work items considering all dimensions
:rtype: Integer
"""
i = 1
for j in self.global_work_size:
i *= j
return i
# TODO: Modify to handle dependent buffers.
def release_host_arrays(self):
"""
Forcefully releases all host array data after completion of a kernel task
"""
# for array_type in self.data.keys():
# for array in self.data[array_type]:
# del array
# logging.debug("Releasing host arrays")
del self.data
gc.collect()
def release_buffers(self, obj):
"""
Releases all buffers of a Kernel Object for a particular device given in obj
:param obj: Specifies Kernel object
:type obj: String
"""
debug_str = "Releasing buffers of " + self.name + " on " + obj
logging.debug(debug_str)
for i, buff in self.input_buffers[obj].iteritems():
if buff is not None:
buff.release()
for i, buff in self.output_buffers[obj].iteritems():
if buff is not None:
buff.release()
for i, buff in self.io_buffers[obj].iteritems():
if | |
if kind == 'batchmodify':
return self._render_batched_timeline_event(context, field, event)
ticket, verb, info, summary, status, resolution, type, \
description, component, comment, cid = event[3]
if field == 'url':
href = context.href.ticket(ticket.id)
if cid:
href += '#comment:' + cid
return href
elif field == 'title':
title = TicketSystem(self.env).format_summary(summary, status,
resolution, type)
message = {
'created': N_("Ticket %(ticketref)s (%(summary)s) created"),
'reopened': N_("Ticket %(ticketref)s (%(summary)s) reopened"),
'closed': N_("Ticket %(ticketref)s (%(summary)s) closed"),
'updated': N_("Ticket %(ticketref)s (%(summary)s) updated"),
}[verb]
# Add component as prefix to summary if enabled and available
component_prefix = ''
if self.timeline_component and component:
component_prefix = component + ' - '
summary_complete = component_prefix + shorten_line(summary)
return tag_(message,
ticketref=tag.em('#', ticket.id, title=title),
summary=summary_complete)
elif field == 'description':
descr = message = ''
if status == 'new':
message = description
else:
descr = info
message = comment
t_context = context.child(resource=ticket)
t_context.set_hints(preserve_newlines=self.must_preserve_newlines)
if status == 'new' and \
context.get_hint('wiki_flavor') == 'oneliner':
flavor = self.timeline_newticket_formatter
t_context.set_hints(wiki_flavor=flavor,
shorten_lines=flavor == 'oneliner')
return descr + format_to(self.env, None, t_context, message)
def _render_batched_timeline_event(self, context, field, event):
tickets, verb, info, summary, status, resolution, type, \
description, component, comment, cid = event[3]
if field == 'url':
return context.href.query(id=','.join(str(t) for t in tickets))
elif field == 'title':
ticketids = u',\u200b'.join(str(t) for t in tickets)
title = _("Tickets %(ticketids)s", ticketids=ticketids)
return tag_("Tickets %(ticketlist)s batch updated",
ticketlist=tag.em('#', ticketids, title=title))
elif field == 'description':
t_context = context()
t_context.set_hints(preserve_newlines=self.must_preserve_newlines)
return info + format_to(self.env, None, t_context, comment)
# Internal methods
def _get_action_controls(self, req, ticket):
# action_controls is an ordered list of "renders" tuples, where
# renders is a list of (action_key, label, widgets, hints)
# representing the user interface for each action
action_controls = []
sorted_actions = TicketSystem(self.env).get_available_actions(req,
ticket)
for action in sorted_actions:
labels = []
hints = []
widgets = []
for controller in self._get_action_controllers(req, ticket,
action):
label, widget, hint = controller.render_ticket_action_control(
req, ticket, action)
if label is None:
continue
labels.append(label)
widgets.append(widget)
hints.append(hint)
if labels:
action_controls.append((action, labels[0], tag(widgets), hints))
# The default action is the first in the action_controls list.
selected_action = req.args.get('action')
if not selected_action and action_controls:
selected_action = action_controls[0][0]
if not action_controls:
add_warning(req, tag_("There are no workflow actions defined for "
"this ticket's status."))
return action_controls, selected_action
def _get_action_controllers(self, req, ticket, action):
"""Generator yielding the controllers handling the given `action`"""
for controller in TicketSystem(self.env).action_controllers:
actions = [a for w, a in
controller.get_ticket_actions(req, ticket) or []]
if action in actions:
yield controller
    def _process_newticket_request(self, req):
        """Handle GET/POST requests for the new-ticket page.

        Populates a fresh ticket from the request, applies the selected
        workflow action and validation, and either creates the ticket
        (valid, non-preview POST — `_do_create` redirects on success) or
        renders a preview of the submitted data.

        :param req: the current request object
        :return: a ``(template_name, data)`` tuple for rendering
        """
        req.perm(self.realm).require('TICKET_CREATE')
        ticket = model.Ticket(self.env)
        plain_fields = True # support for /newticket?version=0.11 GETs
        field_reporter = 'reporter'
        if req.method == 'POST':
            plain_fields = False
            field_reporter = 'field_reporter'
        self._populate(req, ticket, plain_fields)
        # Fall back to the session/authenticated author when the form did
        # not carry an explicit reporter field.
        reporter_id = req.args.get(field_reporter) or \
                      get_reporter_id(req, 'author')
        ticket['reporter'] = reporter_id
        preview = 'preview' in req.args
        if preview or req.method == 'POST':
            actions = TicketSystem(self.env) \
                .get_available_actions(req, ticket)
            default_action = actions[0] if len(actions) > 0 else None
            action = req.args.get('action', default_action)
            valid = True
            # Do any action on the ticket?
            if action and action not in actions:
                valid = False
                add_warning(req, _('The action "%(name)s" is not available.',
                                   name=action))
            field_changes, problems = self.get_ticket_changes(req, ticket,
                                                              action)
            if problems:
                valid = False
                for problem in problems:
                    add_warning(req, problem)
                add_warning(req,
                            tag_("Please review your configuration, "
                                 "probably starting with %(section)s "
                                 "in your %(tracini)s.",
                                 section=tag.pre('[ticket]', tag.br(),
                                                 'workflow = ...'),
                                 tracini=tag.code('trac.ini')))
            # Apply changes made by the workflow
            self._apply_ticket_changes(ticket, field_changes)
            valid &= self._validate_ticket(req, ticket)
            if not preview and req.method == 'POST':
                if valid:
                    # redirects on success
                    self._do_create(req, ticket, action)
                # else fall through in a preview
            req.args['preview'] = True
        action_controls, selected_action = \
            self._get_action_controls(req, ticket)
        # No controls means no workflow actions: disable form submission.
        disable_submit = not action_controls
        # Preview a new ticket
        data = self._prepare_data(req, ticket)
        data.update({
            'author_id': reporter_id,
            'actions': [],
            'version': None,
            'description_change': None,
            'action_controls': action_controls,
            'action': selected_action,
            'disable_submit': disable_submit,
        })
        fields = self._prepare_fields(req, ticket)
        data['fields'] = fields
        # fields_map is deprecated and removed in 1.5.1
        data['fields_map'] = {field['name']: i
                              for i, field in enumerate(fields)}
        # XHR requests only need the ticket box fragment, not the full page.
        if req.is_xhr:
            data['preview_mode'] = True
            data['chrome_info_script'] = chrome_info_script
            return 'ticket_box.html', data
        add_stylesheet(req, 'common/css/ticket.css')
        chrome = Chrome(self.env)
        chrome.add_wiki_toolbars(req)
        if not disable_submit:
            chrome.add_auto_preview(req)
        chrome.add_jquery_ui(req)
        return 'ticket.html', data
def _process_ticket_request(self, req):
id = req.args.getint('id')
version = req.args.getint('version', None)
if req.is_xhr and 'preview_comment' in req.args:
context = web_context(req, self.realm, id, version)
escape_newlines = self.must_preserve_newlines
rendered = format_to_html(self.env, context,
req.args.get('edited_comment', ''),
escape_newlines=escape_newlines) + \
chrome_info_script(req)
req.send(rendered.encode('utf-8'))
req.perm(self.realm, id, version).require('TICKET_VIEW')
ticket = model.Ticket(self.env, id, version=version)
action = req.args.get('action', ('history' in req.args and 'history' or
'view'))
data = self._prepare_data(req, ticket)
if action in ('history', 'diff'):
field = req.args.get('field')
if field:
text_fields = [field]
else:
text_fields = [field['name'] for field in ticket.fields if
field['type'] == 'textarea']
if action == 'history':
return self._render_history(req, ticket, data, text_fields)
elif action == 'diff':
return self._render_diff(req, ticket, data, text_fields)
elif action == 'comment-history':
req.args.require('cnum')
cnum = req.args.getint('cnum')
return self._render_comment_history(req, ticket, data, cnum)
elif action == 'comment-diff':
req.args.require('cnum')
cnum = req.args.getint('cnum')
return self._render_comment_diff(req, ticket, data, cnum)
elif 'preview_comment' in req.args:
field_changes = {}
data.update({'action': None,
'reassign_owner': req.authname,
'resolve_resolution': None,
'start_time': ticket['changetime']})
self._validate_ticket(req, ticket)
elif 'cancel_comment' in req.args:
req.redirect(req.href.ticket(ticket.id))
elif 'edit_comment' in req.args:
comment = req.args.get('edited_comment', '')
cnum = req.args.getint('cnum_edit')
change = ticket.get_change(cnum)
if not change:
raise TracError(_("Comment %(num)s not found", num=cnum))
comment_resource = \
Resource('comment', cnum, parent=ticket.resource)
req.perm(comment_resource).require('TICKET_EDIT_COMMENT')
if self._validate_ticket(req, ticket):
ticket.modify_comment(change['date'], req.authname, comment)
req.redirect(req.href.ticket(ticket.id) +
'#comment:%d' % cnum)
else:
field_changes = {}
data.update({'action': None,
'reassign_owner': req.authname,
'resolve_resolution': None,
'start_time': ticket['changetime']})
elif req.method == 'POST':
valid = True
# Do any action on the ticket?
actions = TicketSystem(self.env).get_available_actions(req, ticket)
if action not in actions:
valid = False
add_warning(req, _('The action "%(name)s" is not available.',
name=action))
# We have a bit of a problem. There are two sources of changes to
# the ticket: the user, and the workflow. We need to show all the
# changes that are proposed, but we need to be able to drop the
# workflow changes if the user changes the action they want to do
# from one preview to the next.
#
# the _populate() call pulls all the changes from the webpage; but
# the webpage includes both changes by the user and changes by the
# workflow... so we aren't able to differentiate them clearly.
self._populate(req, ticket) # Apply changes made by the user
field_changes, problems = self.get_ticket_changes(req, ticket,
action)
if problems:
valid = False
for problem in problems:
add_warning(req, problem)
add_warning(req,
tag_("Please review your configuration, "
"probably starting with %(section)s "
"in your %(tracini)s.",
section=tag.pre('[ticket]', tag.br(),
'workflow = ...'),
tracini=tag.code('trac.ini')))
# Apply changes made by the workflow
self._apply_ticket_changes(ticket, field_changes)
# Unconditionally run the validation so that the user gets
# information any and all problems. But it's only valid if it
# validates and there were no problems with the workflow side of
# things.
valid &= self._validate_ticket(req, ticket, not valid)
if 'submit' in req.args:
if valid:
# redirected if successful
self._do_save(req, ticket, action)
# else fall through in a preview
req.args['preview'] = True
# Preview an existing ticket (after a Preview or a failed Save)
start_time = from_utimestamp(int(req.args.get('start_time', 0)))
data.update({
'action': action, 'start_time': start_time,
'reassign_owner': (req.args.get('reassign_choice')
or req.authname),
'resolve_resolution': req.args.get('resolve_choice'),
})
else: # simply 'View'ing the ticket
field_changes = {}
data.update({'action': None,
'reassign_owner': req.authname,
'resolve_resolution': None,
# Store a timestamp for detecting "mid air collisions"
'start_time': ticket['changetime']})
data.update({'comment': req.args.get('comment'),
'cnum_edit': req.args.get('cnum_edit'),
'edited_comment': req.args.get('edited_comment'),
'cnum_hist': req.args.get('cnum_hist'),
'cversion': req.args.get('cversion')})
self._insert_ticket_data(req, ticket, data,
get_reporter_id(req, 'author'), field_changes)
if req.is_xhr:
data['preview_mode'] = bool(data['change_preview']['fields'])
data['chrome_info_script'] = chrome_info_script
return 'ticket_preview.html', data
mime = Mimeview(self.env)
format = req.args.get('format')
if format:
# FIXME: mime.send_converted(context, ticket, 'ticket_x') (#3332)
filename = 't%d' % ticket.id if format != 'rss' else None
mime.send_converted(req, 'trac.ticket.Ticket', ticket,
format, filename=filename)
def add_ticket_link(css_class, id):
t = ticket.resource(id=id, version=None)
if t:
add_link(req, css_class, req.href.ticket(id),
_("Ticket #%(id)s", id=id))
global_sequence = True
# If the ticket is being shown in the context of a query, add
# links to help navigate in the query result set
if 'query_tickets' in req.session:
tickets = req.session['query_tickets'].split()
if str(ticket.id) in tickets:
idx = tickets.index(str(ticket.id))
if idx > 0:
add_ticket_link('first', tickets[0])
add_ticket_link('prev', tickets[idx - 1])
if idx < len(tickets) - 1:
| |
"""
:title: HLSPFile.py
:author: <NAME>
:contact: <EMAIL>
The class defined here can be used to construct a file to track information
about a High-Level Science Product that will be needed for various stages of
HLSP ingestion to MAST and CAOM. Methods are also provided to add new
information, update existing parameters, and file input and output.
..class:: HLSPFile
..synopsis:: This class constructs an object to organize information needed
for HLSP data file ingestion to MAST and CAOM. The class
provides methods to access and modify that information, as well
as read or write that information from/to YAML-formatted files.
"""
import bin.check_paths as cp
from bin.read_yaml import read_yaml
from lib.FileType import FileType
from lib.FitsKeyword import FitsKeyword, FitsKeywordList
from lxml import etree
import os
import re
import yaml
# --------------------
class HLSPFile(object):
    """
    This file constructs an object to organize information needed for HLSP data
    file ingestion to MAST and CAOM. The class provides methods to access and
    modify that information, as well as read or write that information from/to
    YAML-formatted files.

    ..module:: _add_fits_keyword
    ..synopsis:: Add a new FitsKeyword object to the private _fits_keywords
                 list. Skip if the keyword is already present.
    ..module:: _add_xml_value_pairs
    ..synopsis:: Add a dictionary of keyword / value pairs to an lxml etree.
    ..module:: _format_caller
    ..synopsis:: Convert a filename from a calling function to a format that
                 will match a class attribute.
    ..module:: _get_filename
    ..synopsis:: Construct an output filename, with an optional prefix based
                 on an ingestion step.
    ..module:: _get_keyword_updates
    ..synopsis:: Compare the _fits_keywords list to the _standard_keywords
                 list to find any differences and add these keywords to the
                 keyword_updates list.
    ..module:: _get_standard_fits_keywords
    ..synopsis:: Read a FITS template file based on the file types present,
                 create FitsKeyword objects based on the template and try to
                 add them to _fits_keywords.
    ..module:: _implement_keyword_updates
    ..synopsis:: Add FITS keyword updates defined in the keyword_updates list
                 to _fits_keywords.
    ..module:: _make_value_xml_dict
    ..synopsis:: Format a provided value into a dictionary for lxml ingest.
    ..module:: _match_caller
    ..synopsis:: Match a calling function file to an appropriate filepath.
    ..module:: _split_name_from_params
    ..synopsis:: Given a single key dictionary, return the lone key and
                 corresponding dictionary.
    ..module:: _update_stage_paths
    ..synopsis:: Construct file paths for resulting files from various stages
                 of HLSP ingestion.
    ..module:: add_filetype
    ..synopsis:: Add a FileType object to the file_types list.
    ..module:: add_fits_keyword
    ..synopsis:: Add a new FitsKeyword object to the private _fits_keywords
                 list. Skip if the keyword is already present.
    ..module:: add_keyword_update
    ..synopsis:: Add an updated FitsKeyword object to the keyword_updates
                 list.
    ..module:: add_unique_parameter
    ..synopsis:: Add a new entry in the unique_parameters list.
    ..module:: as_dict
    ..synopsis:: Return the current contents of self as a formatted
                 dictionary. This is useful for entering the contents into
                 an XML tree or writing to YAML.
    ..module:: check_ingest_step
    ..synopsis:: Check the HLSP ingestion status of a particular ingest step.
    ..module:: find_file_type
    ..synopsis:: Find a given file ending in the file_types list.
    ..module:: find_log_file
    ..synopsis:: Look for an existing log file for a given HLSP ingestion
                 step.
    ..module:: fits_keywords
    ..synopsis:: Combine the contents of the designated FITS template and
                 any keyword updates and return a list of all FitsKeyword
                 objects used by this HLSPFile.
    ..module:: get_output_filepath
    ..synopsis:: Get an output file path for saving an HLSPFile to disk.
                 Either base the file path on the calling file or return the
                 default path.
    ..module:: in_hlsp_format
    ..synopsis:: Checks for a number of HLSPFile attributes, returns False if
                 not found. Have not put this to any use yet.
    ..module:: load_dict
    ..synopsis:: Load the contents of a formatted dictionary into self.
    ..module:: load_hlsp
    ..synopsis:: Read information from a YAML-formatted .hlsp file and load
                 those contents into self.
    ..module:: member_fits_standards
    ..synopsis:: Get a list of all FITS standards being used by file types
                 present in the file_types list.
    ..module:: remove_filetype
    ..synopsis:: Remove a FileType object from the file_types list.
    ..module:: reset_fits_keywords
    ..synopsis:: Reset the FITS keyword lists maintained by this object.
                 (NOTE(review): the previous synopsis here duplicated
                 toggle_ingest's text, apparently a copy/paste error —
                 confirm against the implementation.)
    ..module:: save
    ..synopsis:: Write the current contents of self to a YAML-formatted .hlsp
                 file.
    ..module:: toggle_ingest
    ..synopsis:: Update a value in the self.ingest dictionary to indicate a
                 completed ingestion step. Toggles the boolean value by
                 default or can be given a state to enforce.
    ..module:: update_filepaths
    ..synopsis:: Update the file_paths dictionary with provided input / output
                 settings.
    ..module:: write_xml_template
    ..synopsis:: Write the current contents of self into an XML-formatted
                 template file for ingestion into CAOM.
    """
    # Create class attributes to define file and routine naming and structure.
    _check_file_names_out = "check_file_names"
    _check_metadata_format_out = "check_metadata_format"
    _precheck_metadata_format_out = "precheck_data_format"
    # Output directories are the upper-cased routine names.
    _check_file_names_dir = _check_file_names_out.upper()
    _check_metadata_format_dir = _check_metadata_format_out.upper()
    _fits_templates_dir = os.path.join(_check_metadata_format_dir, "TEMPLATES")
    _file_ext = ".hlsp"
    _root_dir = "MAST_HLSP"
    _static_values_yaml = "PREP_CAOM/resources/hlsp_caom_staticvalues.yaml"
def __init__(self, from_dict=None, name=None, path=None):
"""
Initialize a new HLSPFile object.
:param from_dict: Use a dictionary (likely pre-loaded from a .yaml
file) to populate HLSPFile attributes.
:type from_dict: dict
:param name: Pre-seed an HLSPFile object with an optional name.
:type name: str
:param path: Provided a path to a valid .hlsp-formatted file,
initialization will immediately load the contents into
the new object.
:type path: str
"""
super().__init__()
# Set the list of ingestion steps.
steps = ["00_filenames_checked",
"01_metadata_prechecked",
"02_metadata_checked",
"03_fits_keywords_set",
"04_value_parameters_added"
]
# Set private internal-use attributes.
self._fits_keywords = FitsKeywordList.empty_list()
self._standard_keywords = FitsKeywordList.empty_list()
self._updated = False
# Initialize primary attributes.
self.file_paths = {"InputDir": "", "Output": ""}
self.file_types = []
self.hlsp_name = "blank"
self.ingest = {s: False for s in steps}
self.keyword_updates = FitsKeywordList.empty_list()
self.unique_parameters = {}
# Load HLSP parameters from a provided dictionary.
if from_dict:
self.load_dict(from_dict)
# If a name is provided, update the related stage filepaths.
if name:
self.hlsp_name = name
self._update_stage_paths()
# If path is provided, try to load the file contents into self.
if path:
self.load_hlsp(path)
def _add_fits_keyword(self, keyword_obj):
"""
Add a new FitsKeyword object to the private _fits_keywords list. Skip
if the keyword is already present.
:param keyword_obj: The potentially new fits keyword to add.
:type keyword_obj: FitsKeyword
"""
# Check keyword_obj for a FitsKeyword attribute.
try:
caom = keyword_obj.caom_keyword
except AttributeError:
err = "HLSPFile expected a <FitsKeyword> type object"
raise TypeError(err)
# Assume the given FitsKeyword is already in self._fits_keywords.
found = False
updated = False
# Search for the given FitsKeyword in self._fits_keywords.
for kw in self._fits_keywords:
# If found, try updating the existing FitsKeyword object with
# values from the target object.
if kw.fits_keyword == keyword_obj.fits_keyword:
found = True
new_vals = keyword_obj.as_dict()
updated = kw.update(new_vals[keyword_obj.fits_keyword])
# If keyword_obj updates an existing FitsKeyword or is a new
# FitsKeyword, add it to self._fits_keywords.
if not found:
self._fits_keywords.append(keyword_obj)
def _add_xml_value_pairs(self, parent, parameters):
"""
Add a dictionary of keyword / value pairs to an lxml etree.
:param parameters: A dictionary of keyword / value pairs to be added.
:type parameters: dict
"""
# Iterate through all key / val pairs in parameters.
for key, val in parameters.items():
new_entry = etree.SubElement(parent, key)
# Format the dictionary to go in a CAOM template XML file.
value_dict = self._make_value_xml_dict(val)
# Add the formatted dictionary to the lxml etree.
for line, txt in value_dict.items():
new_line = etree.SubElement(new_entry, line)
new_line.text = txt
return parent
def _bulk_add_static_values(self, static_values):
for parent, values in static_values.items():
for key, val in values.items():
self.add_unique_parameter(key, parent, val)
@staticmethod
def _format_caller(call_file):
"""
Convert a filename from a calling function to a format that will match
a class attribute.
:param call_file: The filename of the calling function.
:type call_file: str
"""
if isinstance(call_file, str):
# If a full filepath is provided, we only want the final filename.
caller = call_file.split("/")[-1]
# Remove the file extension.
caller = caller.split(".")[0]
else:
caller = None
return caller
def _get_filename(self, prefix=None):
"""
Construct an output filename, with an optional prefix based on an
ingestion step.
:param prefix: Optional prefix to add to the filename.
:type prefix: str
"""
# Combine the HLSP name with the file extension attribute.
fname = "".join([self.hlsp_name.lower(), self._file_ext])
# Add the prefix to the filename, if provided.
if prefix:
fname = "_".join([prefix, fname])
return | |
self.AssetIpAll = params.get("AssetIpAll")
self.AssetType = params.get("AssetType")
self.PublicIpAddresses = params.get("PublicIpAddresses")
self.PrivateIpAddresses = params.get("PrivateIpAddresses")
self.SoarResponseStatus = params.get("SoarResponseStatus")
self.SoarResponseTime = params.get("SoarResponseTime")
self.SoarSuggestStatus = params.get("SoarSuggestStatus")
self.SoarPlaybookType = params.get("SoarPlaybookType")
self.SoarRunId = params.get("SoarRunId")
self.SsaEventId = params.get("SsaEventId")
self.IsNewCfwEvent = params.get("IsNewCfwEvent")
self.Direction = params.get("Direction")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAssetDetailRequest(AbstractModel):
    """DescribeAssetDetail request structure."""

    def __init__(self):
        r"""
        :param Params: Query filter parameters.
        :type Params: str
        """
        self.Params = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Params = params.get("Params")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeAssetDetailResponse(AbstractModel):
    """DescribeAssetDetail response structure."""

    def __init__(self):
        r"""
        :param Data: Asset detail.
        Note: this field may return null, indicating that no valid value was obtained.
        :type Data: :class:`tencentcloud.ssa.v20180608.models.AssetDetail`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Data = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_detail = params.get("Data")
        if raw_detail is not None:
            detail = AssetDetail()
            detail._deserialize(raw_detail)
            self.Data = detail
        self.RequestId = params.get("RequestId")
class DescribeAssetListRequest(AbstractModel):
    """DescribeAssetList request structure."""

    def __init__(self):
        r"""
        :param Params: Query filter parameters.
        :type Params: str
        """
        self.Params = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Params = params.get("Params")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeAssetListResponse(AbstractModel):
    """DescribeAssetList response structure."""

    def __init__(self):
        r"""
        :param AssetList: Asset list.
        :type AssetList: :class:`tencentcloud.ssa.v20180608.models.AssetList`
        :param AggregationData: Aggregated data.
        :type AggregationData: list of AggregationObj
        :param NamespaceData: Namespace data.
        :type NamespaceData: list of str
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.AssetList = None
        self.AggregationData = None
        self.NamespaceData = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_assets = params.get("AssetList")
        if raw_assets is not None:
            assets = AssetList()
            assets._deserialize(raw_assets)
            self.AssetList = assets
        raw_aggregations = params.get("AggregationData")
        if raw_aggregations is not None:
            self.AggregationData = []
            for element in raw_aggregations:
                parsed = AggregationObj()
                parsed._deserialize(element)
                self.AggregationData.append(parsed)
        self.NamespaceData = params.get("NamespaceData")
        self.RequestId = params.get("RequestId")
class DescribeAssetsMappingListRequest(AbstractModel):
    """DescribeAssetsMappingList request structure."""

    def __init__(self):
        r"""
        :param Params: Request parameters.
        :type Params: str
        """
        self.Params = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Params = params.get("Params")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeAssetsMappingListResponse(AbstractModel):
    """DescribeAssetsMappingList response structure."""

    def __init__(self):
        r"""
        :param Data: Asset-mapping list.
        :type Data: list of DataAssetMapping
        :param TotalCount: Total number of asset mappings.
        :type TotalCount: int
        :param CountByType: Per-type aggregate counts.
        Note: this field may return null, indicating that no valid value was obtained.
        :type CountByType: str
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Data = None
        self.TotalCount = None
        self.CountByType = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_mappings = params.get("Data")
        if raw_mappings is not None:
            self.Data = []
            for element in raw_mappings:
                parsed = DataAssetMapping()
                parsed._deserialize(element)
                self.Data.append(parsed)
        self.TotalCount = params.get("TotalCount")
        self.CountByType = params.get("CountByType")
        self.RequestId = params.get("RequestId")
class DescribeCheckConfigAssetListRequest(AbstractModel):
    """DescribeCheckConfigAssetList request structure."""

    def __init__(self):
        r"""
        :param Id: Check item UUID.
        :type Id: str
        :param Offset: Page number.
        :type Offset: int
        :param Limit: Entries per page.
        :type Limit: int
        :param Search: DB search conditions.
        :type Search: list of Filter
        :param Filter: ES filter conditions.
        :type Filter: list of Filter
        """
        self.Id = None
        self.Offset = None
        self.Limit = None
        self.Search = None
        self.Filter = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Id = params.get("Id")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        raw_search = params.get("Search")
        if raw_search is not None:
            self.Search = []
            for element in raw_search:
                parsed = Filter()
                parsed._deserialize(element)
                self.Search.append(parsed)
        raw_filter = params.get("Filter")
        if raw_filter is not None:
            self.Filter = []
            for element in raw_filter:
                parsed = Filter()
                parsed._deserialize(element)
                self.Filter.append(parsed)
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeCheckConfigAssetListResponse(AbstractModel):
    """DescribeCheckConfigAssetList response structure."""

    def __init__(self):
        r"""
        :param Total: Total number of assets in the list.
        :type Total: int
        :param CheckAssetsList: Asset list entries.
        Note: this field may return null, indicating that no valid value was obtained.
        :type CheckAssetsList: list of CheckAssetItem
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Total = None
        self.CheckAssetsList = None
        self.RequestId = None


    def _deserialize(self, params):
        self.Total = params.get("Total")
        raw_assets = params.get("CheckAssetsList")
        if raw_assets is not None:
            self.CheckAssetsList = []
            for element in raw_assets:
                parsed = CheckAssetItem()
                parsed._deserialize(element)
                self.CheckAssetsList.append(parsed)
        self.RequestId = params.get("RequestId")
class DescribeCheckConfigDetailRequest(AbstractModel):
    """DescribeCheckConfigDetail request structure."""

    def __init__(self):
        r"""
        :param Id: Check item ID.
        :type Id: str
        """
        self.Id = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Id = params.get("Id")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeCheckConfigDetailResponse(AbstractModel):
    """DescribeCheckConfigDetail response structure."""

    def __init__(self):
        r"""
        :param CheckConfigDetail: Check item detail.
        :type CheckConfigDetail: :class:`tencentcloud.ssa.v20180608.models.CheckConfigDetail`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.CheckConfigDetail = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_detail = params.get("CheckConfigDetail")
        if raw_detail is not None:
            detail = CheckConfigDetail()
            detail._deserialize(raw_detail)
            self.CheckConfigDetail = detail
        self.RequestId = params.get("RequestId")
class DescribeComplianceAssetListRequest(AbstractModel):
    """DescribeComplianceAssetList request structure."""

    def __init__(self):
        r"""
        :param Offset: Page number.
        :type Offset: int
        :param Limit: Entries per page.
        :type Limit: int
        :param Id: Check item UUID.
        :type Id: str
        :param Filter: Filter conditions.
        :type Filter: list of Filter
        :param Search: Search conditions.
        :type Search: list of Filter
        """
        self.Offset = None
        self.Limit = None
        self.Id = None
        self.Filter = None
        self.Search = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.Id = params.get("Id")
        raw_filter = params.get("Filter")
        if raw_filter is not None:
            self.Filter = []
            for element in raw_filter:
                parsed = Filter()
                parsed._deserialize(element)
                self.Filter.append(parsed)
        raw_search = params.get("Search")
        if raw_search is not None:
            self.Search = []
            for element in raw_search:
                parsed = Filter()
                parsed._deserialize(element)
                self.Search.append(parsed)
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeComplianceAssetListResponse(AbstractModel):
    """DescribeComplianceAssetList response structure."""

    def __init__(self):
        r"""
        :param CheckAssetsList: Asset group list.
        :type CheckAssetsList: list of CheckAssetItem
        :param Total: Total number of entries in the asset group list.
        :type Total: int
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.CheckAssetsList = None
        self.Total = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_assets = params.get("CheckAssetsList")
        if raw_assets is not None:
            self.CheckAssetsList = []
            for element in raw_assets:
                parsed = CheckAssetItem()
                parsed._deserialize(element)
                self.CheckAssetsList.append(parsed)
        self.Total = params.get("Total")
        self.RequestId = params.get("RequestId")
class DescribeComplianceDetailRequest(AbstractModel):
    """DescribeComplianceDetail request structure."""

    def __init__(self):
        r"""
        :param Id: Check item UUID.
        :type Id: str
        """
        self.Id = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Id = params.get("Id")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeComplianceDetailResponse(AbstractModel):
    """DescribeComplianceDetail response structure."""

    def __init__(self):
        r"""
        :param CheckConfigDetail: Compliance check item detail.
        :type CheckConfigDetail: :class:`tencentcloud.ssa.v20180608.models.ComplianceCheckDetail`
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.CheckConfigDetail = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_detail = params.get("CheckConfigDetail")
        if raw_detail is not None:
            detail = ComplianceCheckDetail()
            detail._deserialize(raw_detail)
            self.CheckConfigDetail = detail
        self.RequestId = params.get("RequestId")
class DescribeComplianceListRequest(AbstractModel):
    """DescribeComplianceList request structure."""

    def __init__(self):
        r"""
        :param Filter: Search filter conditions.
        :type Filter: str
        """
        self.Filter = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Filter = params.get("Filter")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeComplianceListResponse(AbstractModel):
    """DescribeComplianceList response structure."""

    def __init__(self):
        r"""
        :param Data: Check item list.
        :type Data: list of DataCompliance
        :param AssetTotalNum: Total number of checked assets.
        Note: this field may return null, indicating that no valid value was obtained.
        :type AssetTotalNum: int
        :param ConfigTotalNum: Total number of check items.
        Note: this field may return null, indicating that no valid value was obtained.
        :type ConfigTotalNum: int
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Data = None
        self.AssetTotalNum = None
        self.ConfigTotalNum = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_entries = params.get("Data")
        if raw_entries is not None:
            self.Data = []
            for element in raw_entries:
                parsed = DataCompliance()
                parsed._deserialize(element)
                self.Data.append(parsed)
        self.AssetTotalNum = params.get("AssetTotalNum")
        self.ConfigTotalNum = params.get("ConfigTotalNum")
        self.RequestId = params.get("RequestId")
class DescribeConfigListRequest(AbstractModel):
    """DescribeConfigList request structure."""

    def __init__(self):
        r"""
        :param Filter: Search filter conditions.
        :type Filter: str
        """
        self.Filter = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Filter = params.get("Filter")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeConfigListResponse(AbstractModel):
    """DescribeConfigList response structure."""

    def __init__(self):
        r"""
        :param Data: Check item list.
        :type Data: list of DataCheck
        :param RequestId: Unique request ID returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Data = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_entries = params.get("Data")
        if raw_entries is not None:
            self.Data = []
            for element in raw_entries:
                parsed = DataCheck()
                parsed._deserialize(element)
                self.Data.append(parsed)
        self.RequestId = params.get("RequestId")
class DescribeEventDetailRequest(AbstractModel):
    """DescribeEventDetail request structure."""

    def __init__(self):
        r"""
        :param Index: Event index name.
        :type Index: str
        :param Id: Event ID.
        :type Id: str
        :param Source: Event source.
        :type Source: str
        :param SubEventType: Event sub-type.
        :type SubEventType: int
        :param Name: Event name.
        :type Name: str
        """
        self.Index = None
        self.Id = None
        self.Source = None
        self.SubEventType = None
        self.Name = None


    def _deserialize(self, params):
        """Populate this request from a raw parameter dictionary and warn
        about any keys that do not map to a declared attribute."""
        self.Index = params.get("Index")
        self.Id = params.get("Id")
        self.Source = params.get("Source")
        self.SubEventType = params.get("SubEventType")
        self.Name = params.get("Name")
        # Fixed message typo ("fileds" -> "fields").
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeEventDetailResponse(AbstractModel):
"""DescribeEventDetail返回参数结构体
"""
def __init__(self):
| |
# <gh_stars>10-100  (scraper metadata residue; kept as a comment so the module parses)
# Lexicons used for sentiment / emphasis detection in informal text.
strong_affirmatives = ["yes", "yeah", "always", "all", "any", "every", "everybody", "everywhere", "ever"]
# BUG FIX: the original wrote '"none" "n't"' (missing comma), which Python
# implicitly concatenated into the single bogus token "nonen't".
strong_negations = ["no", "not", "never", "none", "n't", "nothing", "neither", "nobody", "nowhere"]
punctuation = ["?", "!", "..."]
# NOTE(review): a few interjections appear more than once ("oh", "hey",
# "wow"); harmless for membership tests, kept for compatibility.
interjections = ["oh", "hey", "wow", "aha", "aham", "aw", "bam", "blah", "bingo", "boo", "bravo",
                 "cheers", "congratulations", "congrats", "duh", "eh", "gee", "gosh", "hey", "hmm",
                 "huh", "hurray", "oh", "oh dear", "oh my", "oh well", "oops", "ouch", "ow", "phew",
                 "shh", "uh", "uh-huh", "mhm", "ugh", "well", "wow", "woah", "yeah", "yep", "yikes", "yo"]
intensifiers = ["amazingly", "astoundingly", "awful", "bare", "bloody", "crazy", "dreadfully",
                "colossally", "especially", "exceptionally", "excessively", "extremely",
                "extraordinarily", "fantastically", "frightfully", "fucking", "fully", "hella",
                "holy", "incredibly", "insanely", "literally", "mightily", "moderately", "most",
                "outrageously", "phenomenally", "precious", "quite", "radically", "rather",
                "really", "remarkably", "right", "sick", "strikingly", "super", "supremely",
                "surprisingly", "terribly", "terrifically", "too", "totally", "uncommonly",
                "unusually", "veritable", "very", "wicked"]
# Based on wikipedia
# Maps English contractions (and bare clitics such as "ll", "re") to their
# expanded forms.  BUG FIX: "what'll" and "who'll" had a stray leading space
# in their expansions.
contractions = {
    "ain't": "is not",
    "aren't": "are not",
    "can't": "cannot",
    "ve": "have",
    "cause": "because",
    "could've": "could have",
    "couldn't": "could not",
    "couldn't've": "could not have",
    "didn't": "did not",
    "doesn't": "does not",
    "don't": "do not",
    "hadn't": "had not",
    "hadn't've": "had not have",
    "hasn't": "has not",
    "haven't": "have not",
    "ll": "will",
    "n": "and",
    "s": "is",  # or has
    "d": "would",  # or had
    "m": "am",
    "isn't": "is not",
    "it'd": "it would",
    "it'd've": "it would have",
    "let's": "let us",
    "all's": "all",
    "ma'am": "madam",
    "b'day": "birthday",
    "might've": "might have",
    "must've": "must have",
    "mustn't": "must not",
    "mustn't've": "must not have",
    "needn't": "need not",
    "needn't've": "need not have",
    "o'clock": "of the clock",
    "oughtn't": "ought not",
    "oughtn't've": "ought not have",
    "she'll": "she will",
    "she'd": "she would",
    "he'd": "he would",
    "i'd": "I would",
    "i'm": "I am",
    "shan't": "shall not",
    "sha'n't": "shall not",
    "shan't've": "shall not have",
    "should've": "should have",
    "shouldn't": "should not",
    "shouldn't've": "should not have",
    "that'd": "that would",  # or that had
    "that's": "that is",
    "there'd": "there would",  # or there had
    "there'd've": "there would have",
    "there's": "there is",
    "to've": "to have",
    "wasn't": "was not",
    "re": "are",
    "weren't": "were not",
    "what'll": "what will",
    "what'll've": "what will have",
    "what're": "what are",
    "they're": "they are",
    "what's": "what is",
    "what've": "what have",
    "when's": "when is",
    "when've": "when have",
    "where'd": "where did",
    "where's": "where is",
    "where've": "where have",
    "who'll": "who will",
    "who'll've": "who will have",
    "who's": "who is",
    "who've": "who have",
    "why's": "why is",
    "why've": "why have",
    "will've": "will have",
    "won't": "will not",
    "won't've": "will not have",
    "would've": "would have",
    "wouldn't": "would not",
    "wouldn't've": "would not have",
    "y'all": "you all",
    "y'll": "you all",
    "ya'll": "you all",
    "y'all'd": "you all would",
    "y'all'd've": "you all would have",
    "y'all're": "you all are",
    "y'all've": "you all have",
    "c'mon": "come on",
    "ma": "am going to"
}
# Maps internet slang / abbreviations to their expansions.
# BUG FIXES: removed stray leading space in "ftl"; "what teh hell" ->
# "what the hell"; "goog game" -> "good game".
slang = {
    "4ward": "forward",
    "brb": "be right back",
    "b4": "before",
    "bfn": "bye for now",
    "bgd": "background",
    "btw": "by the way",
    "br": "best regards",
    "clk": "click",
    "da": "the",
    "deet": "detail",
    "deets": "details",
    "dm": "direct message",
    "f2f": "face to face",
    "ftl": "for the loss",
    "ftw": "for the win",
    "kk": "cool cool",
    "kewl": "cool",
    "rt": "retweet",
    # NOTE(review): "smh" is more commonly "shaking my head" -- confirm the
    # intended mapping before changing it.
    "smh": "so much hate",
    "yaass": "yes",
    "a$$": "ass",
    "bby": "baby",
    "bc": "because",
    "coz": "because",
    "cuz": "because",
    "cause": "because",
    "cmon": "come on",
    "cmonn": "come on",
    "dafuq": "what the fuck",
    "dafuk": "what the fuck",
    "dis": "this",
    "diss": "this",
    "ma": "my",
    "dono": "do not know",
    "donno": "do not know",
    "dunno": "do not know",
    "fb": "facebook",
    "couldnt": "could not",
    "n": "and",
    "gtg": "got to go",
    "yep": "yes",
    "yw": "you are welcome",
    "im": "i am",
    "youre": "you are",
    "hes": "he is",
    "shes": "she is",
    "theyre": "they are",
    "af": "as fuck",
    "fam": "family",
    "fwd": "forward",
    "ffs": "for fuck sake",
    "fml": "fuck my life",
    "lol": "laugh out loud",
    "lel": "laugh out loud",
    "lool": "laugh out loud",
    "lmao": "laugh my ass off",
    "lmaoo": "laugh my ass off",
    "omg": "oh my god",
    "oomg": "oh my god",
    "omgg": "oh my god",
    "omfg": "oh my fucking god",
    "stfu": "shut the fuck up",
    "awsome": "awesome",
    "imo": "in my opinion",
    "imho": "in my humble opinion",
    "ily": "i love you",
    "ilyy": "i love you",
    "ikr": "i know right",
    "ikrr": "i know right",
    "idk": "i do not know",
    "jk": "joking",
    "lmk": "let me know",
    "nsfw": "not safe for work",
    "hehe": "haha",
    "tmrw": "tomorrow",
    "yt": "youtube",
    "hahaha": "haha",
    "hihi": "haha",
    "pls": "please",
    "ppl": "people",
    "wtf": "what the fuck",
    "wth": "what the hell",
    "obv": "obviously",
    "nomore": "no more",
    "u": "you",
    "ur": "your",
    "wanna": "want to",
    "luv": "love",
    "imma": "i am",
    "&": "and",
    "thanx": "thanks",
    "til": "until",
    "till": "until",
    "thx": "thanks",
    "pic": "picture",
    "pics": "pictures",
    "gp": "doctor",
    "xmas": "christmas",
    "rlly": "really",
    "boi": "boy",
    "boii": "boy",
    "rly": "really",
    "whch": "which",
    "awee": "awe",  # or maybe awesome is better
    "sux": "sucks",
    "nd": "and",
    "fav": "favourite",
    "frnds": "friends",
    "info": "information",
    "loml": "love of my life",
    "bffl": "best friend for life",
    "gg": "good game",
    "xx": "love",
    "xoxo": "love",
    "thats": "that is",
    "homie": "best friend",
    "homies": "best friends"
}
# Maps ASCII emoticons to textual descriptions.
# BUG FIXES: "sarcastic dace" -> "sarcastic face"; "\m/" used an invalid
# escape sequence (SyntaxWarning on modern Python) -- "\\m/" is the same
# three-character string with the backslash written explicitly.
implicit_emoticons = {
    ":)": "smiling face with open mouth",
    "=)": "smiling face with open mouth",
    ":-)": "smiling face with open mouth",
    ";-)": "winking face",
    "(:": "smiling face with open mouth",
    "(-:": "smiling face with open mouth",
    "(':": "smiling face with open mouth",
    "='d": "happy face",
    ":d": "grinning face",
    ";d": "grinning face",
    "xd": "grinning face",
    "dx": "grinning face",
    ":))": "face with tears of joy",
    ":-))": "face with tears of joy",
    "=))": "face with tears of joy",
    ";)": "winking face",
    ":x": "smiling face with open mouth with heart-shaped eyes",
    "p": "face with stuck-out tongue",
    ":p": "face with stuck-out tongue",
    ";p": "face with stuck-out tongue",
    ":-p": "face with stuck-out tongue",
    ":(": "disappointed face",
    ":-(": "disappointed face",
    ";(": "disappointed face",
    ";;": "confused face",
    "::": "confused face",
    ":'(": "crying face",
    ":((": "crying face",
    ":/": "sarcastic face",
    ":|": "neutral face",
    ":3": "cute face",
    "x": "love",
    "xx": "love",
    "xoxo": "hugs and kisses",
    "xo": "hugs and kisses",
    ":o": "face with open mouth",
    ":-o": "face with open mouth",
    "\\m/": "metal music"
}
# Processed from https://en.wikipedia.org/wiki/List_of_emoticons
wikipedia_emoticons = {
':-)': 'smiling face with open mouth',
'8-)': 'smiling face with open mouth',
':]': 'smiling face with open mouth',
':)': 'smiling face with open mouth',
':-3': 'smiling face with open mouth',
':->': 'smiling face with open mouth',
':-}': 'smiling face with open mouth',
'(-:': 'smiling face with open mouth',
"(:": "smiling face with open mouth",
':-]': 'smiling face with open mouth',
'=]': 'smiling face with open mouth',
'=)': 'smiling face with open mouth',
':3': 'smiling face with open mouth',
':c)': 'smiling face with open mouth',
':^)': 'smiling face with open mouth',
':}': 'smiling face with open mouth',
':>': 'smiling face with open mouth',
'8)': 'smiling face with open mouth',
'=d': 'grinning face',
":d": "grinning face",
'xd': 'grinning face',
'8-d': 'grinning face',
'8d': 'grinning face',
':-d': 'grinning face',
'=3': 'grinning face',
'x-d': 'grinning face',
':-))': 'face with tears of joy',
':))': 'face with tears of joy',
'))': 'face with tears of joy',
']]': 'face with tears of joy',
'=))': 'face with tears of joy',
':<': 'disappointed face',
':(': 'disappointed face',
':@': 'disappointed face',
':-<': 'disappointed face',
'>:[': 'disappointed face',
':[': 'disappointed face',
':{': 'disappointed face',
':c': 'disappointed face',
'>:(': 'disappointed face',
':-c': 'disappointed face',
':-(': 'disappointed face',
':-||': 'disappointed face',
':-[': 'disappointed face',
":-(": "disappointed face",
";(": "disappointed face",
";;": "confused face",
"::": "confused face",
":'-(": 'crying face',
":'(": 'crying face',
":'((": 'crying face',
":((": 'crying face',
"((": 'crying face',
":'-)": 'face with tears of joy',
":')": 'face with tears of joy',
'd=': 'anguished face',
'd:<': 'anguished face',
'd8': 'anguished face',
'd;': 'anguished face',
'dx': 'anguished face',
"d-':": 'anguished face',
':-o': 'astonished face',
':o)': 'astonished face',
'8-0': 'astonished face',
':-O': 'astonished face',
':O': 'astonished face',
':-0': 'astonished face',
':o': 'astonished face',
'>:o': 'astonished face',
':x': 'kissing face',
':*': 'kissing face',
':-*': 'kissing face',
'xx': 'black heart suit',
'x': 'black heart suit',
'xoxo': 'kiss mark',
'xo': 'kiss mark',
':-,': 'winking face',
';^)': 'winking face',
';d': 'winking face',
';-]': 'winking face',
';]': 'winking face',
'*)': 'winking face',
';-)': 'winking face',
'*-)': 'winking face',
';)': 'winking face',
'x-p': 'face with stuck-out tongue',
'=p': 'face with stuck-out tongue',
'd:': 'face with stuck-out tongue',
':p': 'face with stuck-out tongue',
':b': 'face with stuck-out | |
import torch
import torch.nn as nn
import torch.nn.functional as functional
import numpy as np
class STLoss(nn.Module):
def __init__(self, ignore_index=-1, num_class=19, deque_capacity_factor=2.0, feat_channel=2048, device='cuda'):
super(STLoss, self).__init__()
if num_class == 19: # GTA5
self.BG_LABEL = [0, 1, 2, 3, 4, 8, 9, 10]
self.FG_LABEL = [5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 18]
# GTA5 标签数量比例
# self.source_weight = [6305, 617, 2227, 244, 85, 165, 23, 13, 1083, 317, 2110, 25, 6, 493, 224, 66, 14, 6, 1]
# 特征图大小(2048 161 91)(下采样8倍),根据特征图大小和标签数量比例计算出的默认容量大小
self.deque_capacities = [2328, 330, 193, 364, 87, 6949, 3159, 935, 197, 89, 14]
else: # SYNTHIA
self.BG_LABEL = [0, 1, 2, 3, 4, 8, 9]
self.FG_LABEL = [5, 6, 7, 10, 11, 12, 13, 14, 15]
# 特征图大小(2048 160 95)
# array([ 85.59606754, 472.81451195, 494.77850212, 750.52920229,
# 6.89837241, 6.88019907, 26.59592289, 1. ,
# 2.5971885 , 263.51557193, 175.39588055, 108.65349356,
# 11.99949387, 103.79542911, 38.99894613, 5.28002645])
self.deque_capacities = [218, 845, 32, 5570, 3451, 381, 3296, 1239, 168]
# 像素点数量和权重
self.source_num_pixel_BG, self.target_num_pixel_BG = [0] * len(self.BG_LABEL), [0] * len(self.BG_LABEL)
self.source_num_pixel_FG, self.target_num_pixel_FG = [0] * len(self.FG_LABEL), [0] * len(self.FG_LABEL)
self.source_weight_BG, self.target_weight_BG = [1.] * len(self.BG_LABEL), [1.] * len(self.BG_LABEL)
self.source_weight_FG, self.target_weight_FG = [1.] * len(self.FG_LABEL), [1.] * len(self.FG_LABEL)
self.ignore_index = ignore_index
self.device = device
self.num_class = num_class
self.dist_func = None # L0、L1、L2...
# 维度
self.B = None # batch size
self.F = feat_channel # 特征图的通道数
self.Hs, self.Ws, self.hs, self.ws = None, None, None, None # 源域预测图和特征图大小
self.Ht, self.Wt, self.ht, self.wt = None, None, None, None # 目标域预测图和特征图大小
self.C_bg, self.C_fg = len(self.BG_LABEL), len(self.FG_LABEL) # 背景和前景的类别数量
self.source_feat = None
self.source_label = None
self.target_feat = None
self.target_label = None
# 背景类:计算源域和目标域的类中心,执行对比学习
self.centroids_source_BG = None # 源域背景类中心
self.centroids_target_BG = None # 目标域背景类中心
self.centroids_source_BG_avg = None
self.centroids_target_BG_avg = None
# 前景类:计算源域特征向量队列和当前批次目标域特征向量,执行余弦相识度最大化
self.deque_capacities = [int(capacity * deque_capacity_factor) for capacity in self.deque_capacities] # 源域特征向量队列容量, 一共有C_fg个队列,每个队列都有自己的容量
self.remaining_capacities = self.deque_capacities.copy() # 队列的剩余容量
self.source_feat_deques = [torch.full(dtype=torch.float32, fill_value=float('Inf'), size=(self.deque_capacities[i], self.F)).to('cuda') for i in range(self.C_fg)]
self.target_feat_cur = [torch.tensor([float("Inf")])] * self.C_fg # 当前批次目标域特征向量
def dist(self, tensor, dim=None):
if isinstance(self.dist_func, int): # dist_func == 1, L1距离;ist_func == 2, L2距离.....
return torch.norm(tensor, p=self.dist_func, dim=dim) / tensor.numel() # sum(abs(tensor)**p)**(1./p) , default output is scalar
else:
return self.dist_func(tensor) # dist_func是距离函数,例如,L1loss(),MSEloss()
    def feature_processing(self, feat, softmax, label, domain, argmax_dws_type='bilinear'):
        """Flatten a feature map and build a per-pixel label vector aligned
        with it, storing the results on self for the given domain.

        :param feat: Feature map, indexed as B x F x h x w (4-D; F must equal
            self.F).
        :param softmax: Class-probability map, indexed as B x C x H x W.
        :param label: Ground-truth (source) or pseudo (target) label map,
            B x H x W.
        :param domain: 'source' or anything else (treated as target).
        :param argmax_dws_type: 'nearest' downsamples the argmax map itself;
            otherwise the softmax is bilinearly downsampled before argmax.
        """
        self.B = softmax.size(0)
        assert self.F == feat.size(1)
        # Record prediction-map and feature-map sizes for the given domain.
        if domain == 'source':
            self.Hs, self.Ws = softmax.size(2), softmax.size(3)
            self.hs, self.ws = feat.size(2), feat.size(3)
        else:
            self.Ht, self.Wt = softmax.size(2), softmax.size(3)
            self.ht, self.wt = feat.size(2), feat.size(3)
        # Flatten the feature map to one row per pixel.
        h, w = feat.size(2), feat.size(3)
        feat = feat.permute(0, 2, 3, 1).contiguous()  # size B x h x w x F
        feat = feat.view(-1, feat.size()[-1])  # size N x F (N = B x h x w)
        # Downsample the prediction to the feature-map resolution.
        if argmax_dws_type == 'nearest':
            # Argmax first, then nearest-neighbour downsampling of the labels.
            peak_values, argmax = torch.max(softmax, dim=1)  # size B x H x W
            argmax_dws = torch.squeeze(functional.interpolate(torch.unsqueeze(argmax.float(), dim=1), size=(h, w), mode='nearest'), dim=1)  # size B x h x w
        else:
            # Bilinear downsampling of the softmax first, then argmax.
            softmax_dws = functional.interpolate(softmax, size=(h, w), mode='bilinear', align_corners=True)  # size B x C x h x w
            peak_values_dws, argmax_dws = torch.max(softmax_dws, dim=1)  # size B x h x w
        # Downsample the label map (nearest neighbour) to match the feature map.
        label_dws = torch.squeeze(functional.interpolate(torch.unsqueeze(label.float(), dim=1), size=(h, w), mode='nearest'), dim=1)  # size B x h x w
        # Detach from the computation graph: labels must not propagate gradients.
        argmax_dws = argmax_dws.detach()
        label_dws = label_dws.detach()
        # Label handling.
        if domain == 'source':
            # Source: keep only correctly predicted pixels; mask the rest.
            label_dws[label_dws != argmax_dws] = self.ignore_index
        else:
            # Target: fill pseudo-label gaps with the model's own predictions.
            label_dws_ignore_mask = torch.eq(label_dws, self.ignore_index)
            label_dws[label_dws_ignore_mask] = argmax_dws.to(torch.float32)[label_dws_ignore_mask]
        label_dws = label_dws.view(-1)  # size N (N = B x h x w)
        # Store flattened features and labels for the given domain.
        if domain == 'source':
            self.source_feat = feat  # N x F, per-pixel feature vectors
            self.source_label = label_dws  # N, correctly-predicted source labels
        else:
            self.target_feat = feat  # N x F, per-pixel feature vectors
            self.target_label = label_dws  # N, completed target pseudo-labels
    def computer_stuff(self, centroids_smoothing=-1):
        """Compute per-class centroids for background ("stuff") classes in both domains.

        Updates ``self.centroids_{source,target}_BG`` (C_BG x F), the per-class
        pixel counts and the class-balance weights; optionally applies an
        exponential moving average when ``centroids_smoothing >= 0``.

        Args:
            centroids_smoothing: EMA coefficient in [0, 1]; negative (default)
                disables smoothing.
        """
        # Background classes: compute source/target class centroids for contrastive learning.
        centroid_list_source_BG, centroid_list_target_BG = [], [] # per-class mean feature vectors, list length C_BG (number of background classes)
        # Compute the source and target centroids of the background (stuff) classes.
        for i, label_i in enumerate(self.BG_LABEL):
            # boolean tensor, True where features belong to class label_i
            source_mask = torch.eq(self.source_label.detach(), label_i) # size N
            target_mask = torch.eq(self.target_label.detach(), label_i) # size N
            # select only features of class label_i
            source_feat_i = self.source_feat[source_mask, :] # size Ns_i x F
            target_feat_i = self.target_feat[target_mask, :] # size Nt_i x F
            # Update the background pixel counts.
            self.source_num_pixel_BG[i] += source_feat_i.size(0)
            self.target_num_pixel_BG[i] += target_feat_i.size(0)
            # compute the source centroid of class label_i
            if source_feat_i.size(0) > 0: # class label_i has at least one pixel
                centroid = torch.mean(source_feat_i, dim=0, keepdim=True) # size 1 x F, mean feature vector
                centroid_list_source_BG.append(centroid)
            else: # class label_i absent: placeholder of Inf, filtered out later
                centroid = torch.tensor([[float("Inf")] * self.F], dtype=torch.float).to(self.device) # size 1 x F
                centroid_list_source_BG.append(centroid)
            # compute the target centroid of class label_i
            if target_feat_i.size(0) > 0:
                centroid = torch.mean(target_feat_i, dim=0, keepdim=True) # size 1 x F
                centroid_list_target_BG.append(centroid)
            else:
                centroid = torch.tensor([[float("Inf")] * self.F], dtype=torch.float).to(self.device) # size 1 x F
                centroid_list_target_BG.append(centroid)
        self.centroids_source_BG = torch.squeeze(torch.stack(centroid_list_source_BG, dim=0)) # size C_BG x 1 x F -> C_BG x F
        self.centroids_target_BG = torch.squeeze(torch.stack(centroid_list_target_BG, dim=0)) # size C_BG x 1 x F -> C_BG x F
        # Class-balance coefficients:
        # oc = Nc/N, every oc below 1/5 is replaced by 1/5
        # Wc = min(N/Nc, mu) / SUM(min(N/Nc, mu))
        # 1/oc = min(N/Nc, mu)
        source_N = sum(self.source_num_pixel_BG)
        target_N = sum(self.target_num_pixel_BG)
        source_Oc_inverse = [min(source_N / float(Nc), 10) if Nc != 0 else 10 for Nc in self.source_num_pixel_BG]
        target_Oc_inverse = [min(target_N / float(Nc), 10) if Nc != 0 else 10 for Nc in self.target_num_pixel_BG]
        source_Oc_inverse_sum = sum(source_Oc_inverse)
        target_Oc_inverse_sum = sum(target_Oc_inverse)
        for i in range(self.C_bg):
            self.source_weight_BG[i] = source_Oc_inverse[i] / float(source_Oc_inverse_sum)
            self.target_weight_BG[i] = target_Oc_inverse[i] / float(target_Oc_inverse_sum)
        # Exponential moving average of the centroids.
        if centroids_smoothing >= 0.:
            if self.centroids_source_BG_avg is None:
                self.centroids_source_BG_avg = self.centroids_source_BG # size C_BG x F
            # In early steps there may be no centroids for small classes, so avoid averaging with Inf values by replacing them with values of current step
            self.centroids_source_BG_avg = torch.where(self.centroids_source_BG_avg != float('inf'), self.centroids_source_BG_avg, self.centroids_source_BG)
            # In some steps there may be no centroids for some classes, so avoid averaging with Inf values by replacing them with avg values
            self.centroids_source_BG = torch.where(self.centroids_source_BG == float('inf'), self.centroids_source_BG_avg.detach(), self.centroids_source_BG)
            # Exponential Moving Average
            self.centroids_source_BG = centroids_smoothing * self.centroids_source_BG + (1 - centroids_smoothing) * self.centroids_source_BG_avg.detach()
            self.centroids_source_BG_avg = self.centroids_source_BG.detach().clone()
            if self.centroids_target_BG_avg is None:
                self.centroids_target_BG_avg = self.centroids_target_BG # size C_BG x F
            self.centroids_target_BG_avg = torch.where(self.centroids_target_BG_avg != float('inf'), self.centroids_target_BG_avg, self.centroids_target_BG)
            self.centroids_target_BG = torch.where(self.centroids_target_BG == float('inf'), self.centroids_target_BG_avg.detach(), self.centroids_target_BG)
            self.centroids_target_BG = centroids_smoothing * self.centroids_target_BG + (1 - centroids_smoothing) * self.centroids_target_BG_avg.detach()
            self.centroids_target_BG_avg = self.centroids_target_BG.detach().clone()
    def computer_things(self):
        """Collect per-class features for foreground ("things") classes.

        For each foreground class, pushes the source features into a fixed-size
        FIFO queue (``self.source_feat_deques``), stores the current target
        features (``self.target_feat_cur``) and refreshes the class-balance
        weights.
        """
        for i, label_i in enumerate(self.FG_LABEL):
            source_mask = torch.eq(self.source_label.detach(), label_i) # size N, mask of class label_i
            target_mask = torch.eq(self.target_label.detach(), label_i) # size N
            # select only features of class label_i
            source_feat_i = self.source_feat[source_mask, :] # size Ns_i x F
            target_feat_i = self.target_feat[target_mask, :] # size Nt_i x F
            # Update the foreground pixel counts.
            self.source_num_pixel_FG[i] += source_feat_i.size(0)
            self.target_num_pixel_FG[i] += target_feat_i.size(0)
            num_source_feat_i = source_feat_i.size(0) # number of source feature vectors of class label_i
            if num_source_feat_i > 0:
                if num_source_feat_i <= self.deque_capacities[i]:
                    # The queue can hold the new vectors: FIFO via torch.cat (drop oldest, append newest).
                    self.source_feat_deques[i] = torch.cat((self.source_feat_deques[i][num_source_feat_i:], source_feat_i))
                else:
                    # Too many new vectors for the queue: keep only a capacity-sized subset.
                    self.source_feat_deques[i] = source_feat_i[:self.deque_capacities[i]]
                self.remaining_capacities[i] = max(0, self.remaining_capacities[i] - num_source_feat_i) # update remaining capacity
            self.target_feat_cur[i] = target_feat_i
        # Class-balance coefficients:
        # Wc = min(N/Nc, mu) / SUM(min(N/Nc, mu))
        # 1/oc = min(N/Nc, mu)
        source_N = sum(self.source_num_pixel_FG)
        target_N = sum(self.target_num_pixel_FG)
        source_Oc_inverse = [min(source_N / float(Nc), 10) if Nc != 0 else 10 for Nc in self.source_num_pixel_FG] # guards against very small Nc and Nc == 0
        target_Oc_inverse = [min(target_N / float(Nc), 10) if Nc != 0 else 10 for Nc in self.target_num_pixel_FG]
        source_Oc_inverse_sum = sum(source_Oc_inverse)
        target_Oc_inverse_sum = sum(target_Oc_inverse)
        for i in range(self.C_fg):
            self.source_weight_FG[i] = source_Oc_inverse[i] / float(source_Oc_inverse_sum)
            self.target_weight_FG[i] = target_Oc_inverse[i] / float(target_Oc_inverse_sum)
def stuff_alignment(self, T=1):
# 源域和目标域背景类中心
centroids_source = self.centroids_source_BG # size:C_bg * F
centroids_target = self.centroids_target_BG # size:C_bg * F
# 计算有效类中心索引
seen_source_indices = [i for i in range(self.C_bg) if not torch.isnan(centroids_source[i, 0]) and not centroids_source[i, 0] == float('Inf')] # list of C_bg elems, True for seen classes, False elsewhere
seen_target_indices = [i for i in range(self.C_bg) if not torch.isnan(centroids_target[i, 0]) and not centroids_target[i, 0] == float('Inf')] # list of C_bg elems, True for seen classes, False elsewhere
# 计算源域和目标域共同的有效类中心索引
seen_source_target_indices = [i for i in seen_source_indices if i in seen_target_indices]
# 源域和目标域的背景类中心归一化
centroids_source_nor = functional.normalize(centroids_source[seen_source_indices], dim=1) # size:C_seen_s * F
centroids_target_nor = functional.normalize(centroids_target[seen_target_indices], dim=1) # size:C_seen_t * F
# 计算目标域和源域类中心相似度矩阵
sim_matrix = torch.mm(centroids_target_nor, centroids_source_nor.t()) # C_seen_t * C_seen_s
sim_matrix = sim_matrix / T
CL_loss = torch.tensor([0.]).cuda()
for i in | |
or self.options.private_members:
ret.append((membername, member, isattr))
return ret
def format_name(self) -> str:
"""Format the name of *self.object*.
This normally should be something that can be parsed by the generated
directive, but doesn't need to be (Sphinx will display it unparsed
then).
For things like functions and others this will include the return type.
"""
return self.object.format_name()
def format_args(self, **kwargs: Any) -> str:
"""
Creates the parenthesis version of the function signature. i.e. this
will be the `(int hello, int what)` portion of the header.
"""
return self.object.format_args(**kwargs)
class CModuleDocumenter(CObjectDocumenter):
    """
    Auto documenter registered as the directive named `autocmodule`. There may
    be a way to override the python `automodule` directly; that has not been
    worked out yet.
    """

    objtype = "cmodule"
    directivetype = "module"

    @classmethod
    def can_document_member(
        cls, member: Any, membername: str, isattr: bool, parent: Any
    ) -> bool:
        """
        Modules are top level constructs, so they are never documented as the
        child of another c object.

        Parameters:
            member (object): The member item to document. This type is specific
                to the item being processed by autodoc. These instances will
                only attempt to process
                :class:`sphinx_c_autodoc.loader.CObjectDocumenter`.
            membername (str): The name of the item to document. For example if
                this is a function then this will be the name of the function,
                no return types, no arguments.
            isattr (bool): Is the member an attribute. This is unused for c
                documentation.
            parent (object): The parent item of this `member`.

        Returns:
            bool: Always False; a module cannot be a member.
        """
        return False
class CTypeDocumenter(CObjectDocumenter):
    """
    The documenter for the autoctype directive.
    """

    objtype = "ctype"
    directivetype = "type"

    def __init__(
        self, directive: DocumenterBridge, name: str, indent: str = ""
    ) -> None:
        """
        Override the :attr:`directive` so that some post processing can be
        performed in :meth:`generate`
        """
        super().__init__(directive, name, indent)
        # Keep the caller's bridge; `generate` appends the consolidated output
        # to it once the scratch bridge below has been post-processed.
        self._original_directive = self.directive
        self.directive = DocumenterBridge(
            self.directive.env,
            self.directive.reporter,
            self.directive.genopt,
            self.directive.lineno,
            self.directive.state,
        )

    def generate(
        self,
        more_content: Optional[StringList] = None,
        real_modname: Optional[str] = None,
        check_module: bool = False,
        all_members: bool = False,
    ) -> None:
        """
        Generate the documentation into the scratch bridge, then merge any
        duplicate member directives and emit the result into the original
        (caller-visible) bridge.
        """
        super().generate(
            more_content=more_content,
            real_modname=real_modname,
            check_module=check_module,
            all_members=all_members,
        )
        self._original_directive.result.append(self.consolidate_members())

    def _find_member_directives(self, name: str) -> List[Tuple[str, str, int]]:
        """
        Find all directive lines which start with ``.. c:<name>::``.
        Creates a sequence of:
            - The short name of the item documented by the directive.
            - The full signature of the item documented.
            - The line number in :attr:`directive.results`.
        For instance a directive of ``..c:some_directive word1 word2 word3``
        would result in ``word3`` being the short name and
        ``word1 word2 word3`` being the full signature.
        Args:
            name (str): The name of the directive(s) to search for.
        Returns:
            list(tuple(str, str, int)): The short name, the full signature,
                and the line in :attr:`directive.results` where the
                directive occurred.
        """
        members = []
        directive_string = f".. c:{name}::"
        for line_no, line in enumerate(self.directive.result):
            # Only consider lines at this documenter's own indentation level.
            if not line.startswith(self.indent):
                continue
            if line.lstrip().startswith(directive_string):
                _, signature = line.split(directive_string)
                # members may document array types so break on the brace
                # `int member_name [some_size][maybe_2nd_dimension]`
                type_and_name, *(_) = signature.strip().partition("[")
                # The short name is the last whitespace-separated token.
                sig_parts = type_and_name.strip().split()
                members.append((sig_parts[-1], signature, line_no))
        return members

    def _remove_directive(self, line: int) -> StringList:
        """
        Remove the directive which starts at `line_no` from
        :attr:`directive.results`. The locations in :attr:`directive.results`
        will be replaced with empty lines so that the total line count of
        :attr:`directive.results` is unaffected.
        Args:
            line (int): The starting line to remove the directive from.
        Returns:
            :class:`StringList`: The removed directive which started at `line_no`
        """
        # Just need to do at least one more indentation than the actual
        # directive to not end up grabbing the next directive.
        directive_line = self.directive.result[line]
        block_indent = (len(directive_line) - len(directive_line.lstrip())) + 1
        directive, _, _ = self.directive.result.get_indented(
            line, first_indent=0, block_indent=block_indent, strip_indent=False
        )
        directive.disconnect()
        # Setting slices need viewlists/stringlists so just iterate through and
        # set indices which can take strings
        directive_length = len(directive)
        for line_no in range(line, line + directive_length):
            self.directive.result[line_no] = self.indent
        return directive

    @staticmethod
    def _merge_directives(directives: List[StringList]) -> StringList:
        """
        The last directive heading will be used to represent the heading for the entire
        group of directives.
        Args:
            directives (list(StringList)): The list of directives to merge.
        Returns:
            StringList: One directive
        """
        merged_heading = StringList()
        merged_directive = StringList()
        merged_options = StringList()
        for directive in directives:
            # Options immediately follow the heading, up to the first blank line.
            options, _, _ = directive.get_indented(
                1, until_blank=True, strip_indent=False
            )
            if options:
                merged_options.extend(options)
                del directive[1 : 1 + len(options)]
            # The heading of the last directive processed wins.
            directive_heading = directive[0]
            del directive[0]
            merged_directive.extend(directive)
            merged_heading = directive_heading
        # Re-assemble as: heading, merged options, merged bodies.
        merged_directive.insert(0, merged_options)
        merged_directive.insert(0, merged_heading, source=merged_directive.source(0))
        return merged_directive

    def consolidate_members(self) -> StringList:
        """
        Take any duplicate autodoc member directives and consolidate them into
        one directive. The subsequent contents of duplicate directives will be
        added as additional paragraphs on the first occurrence of the directive.
        Returns:
            StringList: The entire rst contents for this directive instance.
        """
        # Grab any constructs that could be declared inside of a struct, union or enum.
        members = []
        for sub_type in ("member", "struct", "union", "enumerator"):
            members += self._find_member_directives(sub_type)
        # Group all the items by their name. This sort logic here leverages the order
        # preservation that python sort has, in that napoleon documented constructs are
        # always "member" however the actual c constructs will come after as "struct"
        # or similar.
        members.sort(key=lambda m: m[0])
        data_blocks = []
        for _, member_group in groupby(members, lambda m: m[0]):
            # Track the earliest line the group occurred on; the merged
            # directive will be written back there.
            start_line = len(self.directive.result)
            directives = []
            for _, _, line in member_group:
                directives.append(self._remove_directive(line))
                if line < start_line:
                    start_line = line
            original_length = len(directives[-1])
            merged_directive = self._merge_directives(directives)
            data_blocks.append((start_line, original_length, merged_directive))
        # Re-insert in ascending line order, shifting subsequent insert points
        # by the net growth/shrinkage caused by earlier merges.
        data_blocks.sort()
        delta_length = 0
        for line, original_length, directive in data_blocks:
            start = line + delta_length
            end = start + original_length
            self.directive.result[start:end] = directive
            delta_length += len(directive) - original_length
        return self.directive.result

    def format_name(self) -> str:
        """Format the name of *self.object*.
        Sphinx doesn't like the typedef keyword being in typedef signatures so strip
        them off here.
        """
        raw_name = self.object.format_name()
        cleaned_name = raw_name.replace("typedef ", "")
        return cleaned_name
class CStructDocumenter(CTypeDocumenter):
    """
    The documenter for the autocstruct directive.
    """

    objtype = "cstruct"
    directivetype = "struct"

    def filter_members(  # type: ignore[override]
        self, members: List[Tuple[str, Any]], want_all: bool
    ) -> List[Tuple[str, Any, bool]]:
        """Filter the given member list.

        A documented structure implies that every one of its members is
        documented, so nothing is filtered out here; each member is simply
        tagged as not being an attribute.
        """
        return [(membername, member, False) for membername, member in members]
class CEnumDocumenter(CTypeDocumenter):
    """
    Documenter backing the autocenum directive (enum types).
    """

    objtype = "cenum"
    directivetype = "enum"
class CUnionDocumenter(CStructDocumenter):
    """
    Documenter backing the autocunion directive; unions share the struct
    member-filtering behavior.
    """

    objtype = "cunion"
    directivetype = "union"
class CMemberDocumenter(CObjectDocumenter):
    """
    Documenter backing the autocmember directive.

    Handles the fields of structures and unions.
    """

    objtype = "cmember"
    directivetype = "member"
class CFunctionDocumenter(CObjectDocumenter):
    """
    Documenter backing the autocfunction directive.
    """

    objtype = "cfunction"
    directivetype = "function"
class CMacroDocumenter(CObjectDocumenter):
    """
    Documenter backing the autocmacro directive.
    """

    objtype = "cmacro"
    directivetype = "macro"
class CEnumeratorDocumenter(CObjectDocumenter):
    """
    Documenter backing the autocenumerator directive.

    Covers the enumerator constants, as opposed to the enum type itself.
    """

    objtype = "cenumerator"
    directivetype = "enumerator"
class CDataDocumenter(CObjectDocumenter):
    """
    The documenter for the autocdata directive.
    """

    objtype = "cdata"
    directivetype = "var"

    @classmethod
    def can_document_member(
        cls, member: Any, membername: str, isattr: bool, parent: Any
    ) -> bool:
        """
        Decide whether `member` is a C variable this documenter can handle.

        Parameters:
            member (object): The member item to document. This type is specific
                to the item being processed by autodoc. These classes will
                only attempt to process
                :class:`sphinx_c_autodoc.loader.CObjectDocumenter` members.
            membername (str): The name of the item to document. For example if
                this is a function then this will be the name of the function,
                no return types, no arguments.
            isattr (bool): Is the member an attribute. This is unused for c
                documentation.
            parent (object): The parent item of this `member`.

        Returns:
            bool: True if this class can document the `member`.
        """
        # Handle the mapping of c land `variable` to sphinx land `data`. The c
        # domain in sphinx seems inconsistent: the directive is called
        # ``.. c:var::``, yet the role is ``:c:data:``.
        if not isinstance(parent, CObjectDocumenter):
            return False
        return member.type == "variable"
class CModule(CObject):
"""
Module directive for C files
"""
has_content = True
required_arguments = 1
object_type = "module"
def run(self) -> | |
par : ndarray
Kernel parameters.
args[0] : ndarray
Function observations.
Returns
-------
: float
Expected model variance.
"""
fcn_obs = np.squeeze(args[0])
if self.estimate_par:
# re-compute EMV if kernel parameters are being estimated
iK = self.kernel.exp_x_kxkx(par, par, self.points)
scale = (self.nu - 2 + fcn_obs.dot(iK).dot(fcn_obs.T)) / (self.nu - 2 + self.num_pts)
gp_emv = super(StudentTProcessModel, self).exp_model_variance(par)
else:
# otherwise use pre-computed values (based on unit sigma-points and kernel parameters given at init)
scale = (self.nu - 2 + fcn_obs.dot(self.iK).dot(fcn_obs.T)) / (self.nu - 2 + self.num_pts)
gp_emv = self.model_var
return scale * gp_emv
    def integral_variance(self, par, *args):
        """
        Variance of the integral under the Student's t-process model.

        Parameters
        ----------
        par : ndarray
            Kernel parameters.
        args[0] : ndarray
            Function evaluations.

        Returns
        -------
        : float
            Integral variance, scaled by the Student's t correction factor.
        """
        fcn_obs = np.squeeze(args[0])
        par = self.kernel.get_parameters(par)
        if self.estimate_par:
            # Re-compute when kernel parameters are being estimated.
            iK = self.kernel.eval_inv_dot(par, self.points, scaling=False)
            scale = (self.nu - 2 + fcn_obs.dot(iK).dot(fcn_obs.T)) / (self.nu - 2 + self.num_pts)
            # NOTE(review): the parent is queried via `integral_var`, while this
            # method is named `integral_variance`; confirm the parent class really
            # exposes a callable `integral_var`, otherwise this branch raises.
            gp_ivar = super(StudentTProcessModel, self).integral_var(par)
        else:
            # Otherwise use pre-computed values (based on unit sigma-points and
            # the kernel parameters given at init).
            scale = (self.nu - 2 + fcn_obs.dot(self.iK).dot(fcn_obs.T)) / (self.nu - 2 + self.num_pts)
            gp_ivar = self.integral_var
        return scale * gp_ivar
def neg_log_marginal_likelihood(self, log_par, fcn_obs, x_obs, jitter):
"""
Negative marginal log-likelihood of Student's t-process regression model.
Parameters
----------
log_par : (num_par, ) ndarray
Kernel log-parameters.
fcn_obs : (num_pts, dim_out) ndarray
Function values.
x_obs : ndarray
Function inputs.
jitter : ndarray
Regularization term for kernel matrix inversion.
Returns
-------
: float
Negative log-likelihood and gradient for given parameter.
Notes
-----
Used as an objective function by the `Model.optimize()` to find an estimate of the kernel parameters.
"""
# convert from log-par to par
par = np.exp(log_par)
num_data, num_out = fcn_obs.shape
nu = self.nu
K = self.kernel.eval(par, x_obs) + jitter # (N, N)
L = la.cho_factor(K) # jitter included from eval
a = la.cho_solve(L, fcn_obs) # (N, E)
y_dot_a = np.einsum('ij, ij -> j', fcn_obs, a) # sum of diagonal of A.T.dot(A)
# negative marginal log-likelihood
from scipy.special import gamma
half_logdet_K = np.sum(np.log(np.diag(L[0])))
const = (num_data/2) * np.log((nu-2)*np.pi) - np.log(gamma((nu+num_data)/2)) + np.log(gamma(nu/2))
log_sum = 0.5*(self.nu + num_data) * np.log(1 + y_dot_a/(self.nu - 2)).sum()
nlml = log_sum + num_out*(half_logdet_K + const)
# negative marginal log-likelihood derivatives w.r.t. hyper-parameters
dK_dTheta = self.kernel.der_par(par, x_obs) # (N, N, num_par)
# gradient
iKdK = la.cho_solve(L, dK_dTheta)
scale = (self.nu + num_data) / (self.nu + y_dot_a - 2)
a_out_a = np.einsum('j, i...j, ...jn', scale, a, a.T) # (N, N) weighted sum of outer products of columns of A
dnlml_dtheta = 0.5 * np.trace((num_out * iKdK - a_out_a.dot(dK_dTheta))) # (num_par, )
return nlml, dnlml_dtheta
class MultiOutputModel(Model):  # TODO: Multiple inheritance could be used here
    """
    Base class for multi-output process models, where each of the `dim_out`
    outputs gets its own set of kernel parameters (one row of the parameter
    matrix per output).
    """

    def __init__(self, dim_in, dim_out, kern_par, kern_str, point_str, point_par=None, estimate_par=False):
        # dim_out: number of output dimensions; everything else is forwarded
        # unchanged to the single-output Model base class.
        super(MultiOutputModel, self).__init__(dim_in, kern_par, kern_str, point_str, point_par, estimate_par)
        self.dim_out = dim_out

    def bq_weights(self, par):
        """
        Weights of the Bayesian quadrature with multi-output process model.

        Parameters
        ----------
        par : (dim_out, num_par) ndarray
            Kernel parameters in a matrix, where e-th row contains parameters for e-th output.

        Returns
        -------
        wm : (num_pts, dim_out) ndarray
            Multi-output GP quadrature weights for the mean.
        wc : (num_pts, num_pts, dim_out, dim_out) ndarray
            Multi-output GP quadrature weights for the covariance.
        wcc : (dim_in, num_pts, dim_out) ndarray
            Multi-output GP quadrature weights for the cross-covariance.
        """
        # if kern_par=None return parameters stored in Kernel
        par = self.kernel.get_parameters(par)
        # retrieve sigma-points from Model
        x = self.points
        d, e, n = self.dim_in, self.dim_out, self.num_pts
        # Kernel expectations
        q = np.zeros((n, e))
        Q = np.zeros((n, n, e, e))
        R = np.zeros((d, n, e))
        iK = np.zeros((n, n, e))
        w_c = np.zeros((n, n, e, e))
        for i in range(e):
            q[:, i] = self.kernel.exp_x_kx(par[i, :], x)
            R[..., i] = self.kernel.exp_x_xkx(par[i, :], x)
            iK[..., i] = self.kernel.eval_inv_dot(par[i, :], x, scaling=False)
            # Q and w_c are symmetric in the output indices; fill both halves.
            for j in range(i + 1):
                Q[..., i, j] = self.kernel.exp_x_kxkx(par[i, :], par[j, :], x)
                Q[..., j, i] = Q[..., i, j]
                w_c[..., i, j] = iK[..., i].dot(Q[..., i, j]).dot(iK[..., j])
                w_c[..., j, i] = w_c[..., i, j]
        # DEBUG, la.cond(Q) is high
        self.q, self.Q, self.R, self.iK = q, Q, R, iK
        # weights
        # w_m = q(\theta_e) * iK(\theta_e) for all e = 1, ..., dim_out
        w_m = np.einsum('ne, nme -> me', q, iK)
        # w_c = iK(\theta_e) * Q(\theta_e, \theta_f) * iK(\theta_f) for all e,f = 1, ..., dim_out
        # NOTE: einsum gives slightly different results than dot, or I don't know how to use it
        # w_c = np.einsum('nie, ijed, jmd -> nmed', iK, Q, iK)
        # w_cc = R(\theta_e) * iK(\theta_e) for all e = 1, ..., dim_out
        w_cc = np.einsum('die, ine -> dne', R, iK)
        # covariance weights should be symmetric
        w_c = 0.5 * (w_c + w_c.swapaxes(0, 1).swapaxes(2, 3))
        return w_m, w_c, w_cc

    def optimize(self, log_par_0, fcn_obs, x_obs, method='BFGS', **kwargs):
        """
        Find optimal values of kernel parameters by minimizing negative marginal log-likelihood.

        Each output dimension is optimized independently with its own row of
        initial log-parameters and its own column of observations.

        Parameters
        ----------
        log_par_0 : ndarray
            Initial guess of the kernel log-parameters.
        fcn_obs : ndarray
            Observed function values at the point-set locations.
        x_obs : ndarray
            Function inputs.
        method : str
            Optimization method for `scipy.optimize.minimize`, default method='BFGS'.
        **kwargs
            Keyword arguments for the `scipy.optimize.minimize`.

        Returns
        -------
        : scipy.optimize.OptimizeResult
            Results of the optimization in a dict-like structure returned by `scipy.optimize.minimize`.

        Notes
        -----
        The criteria using expected model variance and integral variance as regularizers ('nlml+emv', 'nlml+ivar')
        are somewhat experimental. I did not operate under any sound theoretical justification when implementing
        those. Just curious to see what happens, thus might be removed in the future.

        See Also
        --------
        scipy.optimize.minimize
        """
        obj_func = self.neg_log_marginal_likelihood
        # NOTE(review): jitter is sized by x_obs.shape[1] — assumes points are
        # stored column-wise (dim, num_pts); confirm against the kernel's eval().
        jitter = 1e-8 * np.eye(x_obs.shape[1])
        results = list()
        for d in range(self.dim_out):
            r = minimize(obj_func, log_par_0[d, :], args=(fcn_obs[d, :, None], x_obs, jitter),
                         method=method, jac=True, **kwargs)
            results.append(r)
        # extract optimized parameters and arrange in 2D array
        par = np.vstack([r.x for r in results])
        return par, results

    @abstractmethod
    def predict(self, test_data, fcn_obs, par=None):
        """
        Model predictions based on test points and the kernel parameters.

        Notes
        -----
        This is an abstract method. Implementation needs to be provided by the subclass.

        Parameters
        ----------
        test_data : numpy.ndarray
            Test points where to generate data.
        fcn_obs : numpy.ndarray
            Observed function values at the point-set locations.
        par : numpy.ndarray
            Kernel parameters, default `par=None`.

        Returns
        -------
        (mean, var)
            Model predictive mean and variance at the test point locations.
        """
        pass

    @abstractmethod
    def exp_model_variance(self, fcn_obs):
        """
        Expected model variance given the function observations and the kernel parameters.

        Notes
        -----
        This is an abstract method. Implementation needs to be provided by the subclass and should be easily
        accomplished using the kernel expectation method from the `Kernel` class.

        Parameters
        ----------
        fcn_obs : numpy.ndarray
            Observed function values at the point-set locations.

        Returns
        -------
        float
            Expected model variance.
        """
        pass

    @abstractmethod
    def integral_variance(self, fcn_obs, par=None):
        """
        Integral variance given the function value observations and the kernel parameters.

        Notes
        -----
        This is an abstract method. Implementation needs to be provided by the subclass and should be easily
        accomplished using the kernel expectation method from the `Kernel` class.

        Parameters
        ----------
        fcn_obs : numpy.ndarray
            Observed function values at the point-set locations.
        par : numpy.ndarray
            Kernel parameters, default `par=None`.

        Returns
        -------
        float
            Variance of the integral.
        """
        pass

    @abstractmethod
    def neg_log_marginal_likelihood(self, log_par, fcn_obs, x_obs, jitter):
        """
        Negative logarithm of marginal likelihood of the model given the kernel parameters and the function
        observations.

        Parameters
        ----------
        log_par : numpy.ndarray
            Logarithm of the kernel parameters.
        fcn_obs : numpy.ndarray
            Observed function values at the inputs supplied in `x_obs`.
        x_obs : numpy.ndarray
            Function inputs.
        jitter : numpy.ndarray
            Regularization term for kernel matrix inversion.

        Returns
        -------
        float
            Negative log marginal likelihood.

        Notes
        -----
        Intends to be used as an objective function passed into the optimizer, thus it needs to subscribe to certain
        implementation conventions.
        """
        pass
class GaussianProcessMO(MultiOutputModel): # TODO: Multiple inheritance could be used here
"""
Multi-output Gaussian process regression model of the integrand in the Bayesian quadrature.
Parameters
----------
| |
# encoding:utf-8
'''
解析数据,生成训练数据
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import inspect
from collections import defaultdict
import warnings
import sklearn.utils
from copy import deepcopy
from PIL import Image
import cv2
import csv
import os
import sys
import h5py
from bs4 import BeautifulSoup
import pickle
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter
class DegenerateBatchError(Exception):
    '''
    Raised when a generated batch turns out to be degenerate,
    for example when it contains no samples at all.
    '''
    pass
class DatasetError(Exception):
    '''
    Raised when anything is wrong with the dataset, in particular when an
    attempt is made to generate batches before any dataset has been loaded.
    '''
    pass
class DataGenerator:
'''
生成样本和对应标签
每次传递数据之后可以打乱数据的顺序
Can shuffle the dataset consistently after each complete pass.
Can perform image transformations for data conversion and data augmentation,
'''
def __init__(self,
load_images_into_memory=False,
hdf5_dataset_path=None,
filenames=None,
filenames_type='text',
images_dir=None,
labels=None,
image_ids=None,
eval_neutral=None,
labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'),
verbose=True):
'''
初始化数据生成器,可以直接从这里的构造器中直接读取数据,比如一个HDF5数据集,或者使用这里的parser methods
读取数据集
参数:
load_images_into_memory (bool, optional): 如果是True,所有的数据集都会加载到内存中(请确保你有足够的内存)
这样比一批一批加载数据到内存中要快
hdf5_dataset_path (str, optional): 包含数据集的HDF5文件的路径 ,create_hdf5_dataset()模型制作。
如果你加载了一个这样的HDF5数据集, 不需要再使用任何的parser methods,HDF5数据集已经包含了所有相关数据。
filenames (string or list, optional): 图像路径
filenames_type (string, optional): pickled文件或text文件.
images_dir (string, optional):
如果为`filenames`传递文本文件,则图像的完整路径将由`images_dir`和文本文件中的名称组成,
即这应该是包含文本文件所引用的图像的目录。
如果`filenames_type`不是'text',那么这个参数是无关紧要的。
labels (string or list, optional): 数据集标签
image_ids (string or list, optional): 数据集中图像的ID
eval_neutral (string or list, optional): 是否应该在评估期间将该对象视为中立。
labels_output_format (list, optional): 下面几项数据的顺序,期望的数据顺序是 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'.
verbose (bool, optional): 如果True,打印出耗时较长的进程
'''
self.labels_output_format = labels_output_format
self.labels_format={'class_id': labels_output_format.index('class_id'),
'xmin': labels_output_format.index('xmin'),
'ymin': labels_output_format.index('ymin'),
'xmax': labels_output_format.index('xmax'),
'ymax': labels_output_format.index('ymax')}
self.dataset_size = 0 #现在还没有加载数据
self.load_images_into_memory = load_images_into_memory
self.images = None # 除非load_images_into_memory == True,这个值才不会是None
# `self.filenames`是一个包含图像样本的所有文件名的列表(完整路径)。
# 请注意,它不包含实际的图像文件本身。
# 此列表是解析器方法的输出之一
# 如果要加载HDF5数据集,此列表将为“None”。
if not filenames is None:
if isinstance(filenames, (list, tuple)):
self.filenames = filenames
elif isinstance(filenames, str):
with open(filenames, 'rb') as f:
if filenames_type == 'pickle':
self.filenames = pickle.load(f)
elif filenames_type == 'text':
self.filenames = [os.path.join(images_dir, line.strip()) for line in f]
else:
raise ValueError("`filenames_type` can be either 'text' or 'pickle'.")
else:
raise ValueError("`filenames` must be either a Python list/tuple or a string representing a filepath (to a pickled or text file). The value you passed is neither of the two.")
self.dataset_size = len(self.filenames)
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)
if load_images_into_memory:
self.images = []
it = self.filenames
for filename in it:
with Image.open(filename) as image:
self.images.append(np.array(image, dtype=np.uint8))
else:
self.filenames = None
# 如果ground truth是可用的,
#`self.labels` 是一个包括每张图像ground truth bounding boxes的列表
if not labels is None:
if isinstance(labels, str):
with open(labels, 'rb') as f:
self.labels = pickle.load(f)
elif isinstance(labels, (list, tuple)):
self.labels = labels
else:
raise ValueError("`labels` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.")
else:
self.labels = None
if not image_ids is None:
if isinstance(image_ids, str):
with open(image_ids, 'rb') as f:
self.image_ids = pickle.load(f)
elif isinstance(image_ids, (list, tuple)):
self.image_ids = image_ids
else:
raise ValueError("`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.")
else:
self.image_ids = None
if not eval_neutral is None:
if isinstance(eval_neutral, str):
with open(eval_neutral, 'rb') as f:
self.eval_neutral = pickle.load(f)
elif isinstance(eval_neutral, (list, tuple)):
self.eval_neutral = eval_neutral
else:
raise ValueError("`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.")
else:
self.eval_neutral = None
if not hdf5_dataset_path is None:
self.hdf5_dataset_path = hdf5_dataset_path
self.load_hdf5_dataset()
else:
self.hdf5_dataset = None
def load_hdf5_dataset(self, ):
'''
加载create_hdf5_dataset()制作的HDF5数据集
Returns:
None.
'''
self.hdf5_dataset = h5py.File(self.hdf5_dataset_path, 'r')
self.dataset_size = len(self.hdf5_dataset['images'])
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset or images in memory, we will shuffle this index list.
if self.load_images_into_memory:
self.images = []
tr = range(self.dataset_size)
for i in tr:
self.images.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))
if self.hdf5_dataset.attrs['has_labels']:
self.labels = []
labels = self.hdf5_dataset['labels']
label_shapes = self.hdf5_dataset['label_shapes']
tr = range(self.dataset_size)
for i in tr:
self.labels.append(labels[i].reshape(label_shapes[i]))
if self.hdf5_dataset.attrs['has_image_ids']:
self.image_ids = []
image_ids = self.hdf5_dataset['image_ids']
tr = range(self.dataset_size)
for i in tr:
self.image_ids.append(image_ids[i])
if self.hdf5_dataset.attrs['has_eval_neutral']:
self.eval_neutral = []
eval_neutral = self.hdf5_dataset['eval_neutral']
tr = range(self.dataset_size)
for i in tr:
self.eval_neutral.append(eval_neutral[i])
def parse_xml(self,
images_dirs,
image_set_filenames,
annotations_dirs=[],
classes=['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor'],
include_classes = 'all',
exclude_truncated=False,
exclude_difficult=False,
ret=False,
verbose=True):
'''
这是Pascal VOC数据集的XML解析器, 对代码进行微小更改后可能适用于其它数据集。
但在当前代码,适用于Pascal VOC数据集的数据格式和XML标记。
参数:
images_dirs (list):Pascal VOC 2007和2012的图像路径
image_set_filenames (list):文本路径,文本包含训练集与测试集图像ID
annotations_dirs (list, optional):xml文件名称为对应的图像ID,文件包含每张图像的标签,包括目标类别以及坐标
classes (list, optional): 目标类别,列表第一项为背景类,类别的顺序确定了类别的ID
include_classes (list, optional): 'all' 或者是训练集中包括的类别ID的列表.
If 'all', all ground truth boxes will be included in the dataset.
exclude_truncated (bool, optional): 如果是 `True`, 不包括标记为只包含物体一部分的框.
exclude_difficult (bool, optional): 如果是 `True`, 不包括标记为难以判断的框.
ret (bool, optional): 是否返回parser的输出。
verbose (bool, optional): 如果是`True`, 打印出可能需要较长时间操作的进度.
Returns:
默认没有返回值,可选images, image filenames, labels, image IDs,以及标记为"difficult"的boxes的列表
'''
# Set class members.
self.images_dirs = images_dirs
self.annotations_dirs = annotations_dirs
self.image_set_filenames = image_set_filenames
self.classes = classes
self.include_classes = include_classes
self.filenames = []
self.image_ids = []
self.labels = []
self.eval_neutral = []
if not annotations_dirs:
self.labels = None
self.eval_neutral = None
annotations_dirs = [None] * len(images_dirs)
for images_dir, image_set_filename, annotations_dir in zip(images_dirs, image_set_filenames, annotations_dirs):
# 遍历文件得到训练集或测试集所有图像ID
with open(image_set_filename) as f:
image_ids = [line.strip() for line in f] # Note: 这些是字符串,不是整数
self.image_ids += image_ids
# 遍历数据集中所有图像
for image_id in image_ids:
filename = '{}'.format(image_id) + '.jpg' #图像名称
self.filenames.append(os.path.join(images_dir, filename)) #图像路径
# 解析标签文件
if not annotations_dir is None:
#打开当前图像的标签文件并解析
with open(os.path.join(annotations_dir, image_id + '.xml')) as f:
soup = BeautifulSoup(f, 'xml')
folder = soup.folder.text
boxes = []
eval_neutr = []
objects = soup.find_all('object') # 得到图像包含的目标
# 解析每一个目标的数据
for obj in objects:
#类别
class_name = obj.find('name', recursive=False).text
#类别ID
class_id = self.classes.index(class_name)
# 检查我们是否计划训练此类别,若不训练,解析下一个目标
if (not self.include_classes == 'all') and (not class_id in self.include_classes): continue
#pose = obj.find('pose', recursive=False).text
truncated = int(obj.find('truncated', recursive=False).text)
if exclude_truncated and (truncated == 1): continue
difficult = int(obj.find('difficult', recursive=False).text)
if exclude_difficult and (difficult == 1): continue
# 得到bounding box的坐标.
bndbox = obj.find('bndbox', recursive=False)
xmin = int(bndbox.xmin.text)
ymin = int(bndbox.ymin.text)
xmax = int(bndbox.xmax.text)
ymax = int(bndbox.ymax.text)
item_dict = {'folder': folder,
'image_name': filename,
'image_id': image_id,
'class_name': class_name,
'class_id': class_id,
#'pose': pose,
#'truncated': truncated,
#'difficult': difficult,
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax}
box = []
for item in self.labels_output_format:
box.append(item_dict[item])
boxes.append(box)
if difficult: eval_neutr.append(True)
else: eval_neutr.append(False)
self.labels.append(boxes)
self.eval_neutral.append(eval_neutr)
self.dataset_size = len(self.filenames)
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)
if self.load_images_into_memory:
self.images = []
it = self.filenames
for filename in it:
with Image.open(filename) as image:
self.images.append(np.array(image, dtype=np.uint8))
if ret:
return self.images, self.filenames, self.labels, self.image_ids, self.eval_neutral
def parse_csv(self,
images_dir,
labels_filename,
input_format,
include_classes='all',
random_sample=False,
ret=False,
verbose=True):
'''
Arguments:
images_dir (str): 图像目录
labels_filename (str): csv路径,文件每一行为image file name, class ID, xmin, xmax, ymin, ymax.
input_format (list): 输入标签的顺序'image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id',
include_classes (list, optional):'all' 或者数据集中包含类别的ID
random_sample (float, optional): 随机采样,False生成器默认使用全部数据集,或者[0,1],随即采样数据集中的一部分
ret (bool, optional): 是否返回parser的输出。
verbose (bool, optional): 如果是`True`, 打印出可能需要较长时间操作的进度.
Returns:
默认返回空, 可选 images, image filenames, labels, and image IDs.
'''
self.images_dir = images_dir
self.labels_filename = labels_filename
self.input_format = input_format
self.include_classes = include_classes
if self.labels_filename is None or self.input_format is None:
raise ValueError("`labels_filename` and/or `input_format` have not been set yet. You need to pass them as arguments.")
# 清楚之前可能解析的数据
self.filenames = []
self.image_ids = []
self.labels = []
# 1.读取csv每行并且排序
data = []
with open(self.labels_filename, 'r') as csvfile:
#with open(self.labels_filename, newline='') as csvfile:
csvread = csv.reader(csvfile, delimiter=',')
#next(csvread) # Skip the header row.
for row in csvread: # 每行一个bbox
if self.include_classes == 'all' or int(row[self.input_format.index('class_id')].strip()) in self.include_classes: # If the class_id is among the classes that are to be included in the dataset...
box = [] # 类别和坐标
box.append(row[self.input_format.index('image_name')].strip()) # Select the image name column in the input format and append its content to `box`
for element in self.labels_output_format: #('class_id', 'xmin', 'ymin', 'xmax', 'ymax')
| |
from sklearn.datasets import fetch_mldata
from sklearn.datasets import fetch_openml
import pickle
import numpy as np
from PIL import Image
import os
import sys
import h5py
import time
import json
import threading as th
from params import path_cifar10, path_cifar100, path_fmow_rgb, path_catch_files, fmow_category_names, fmow_image_size, \
str_mnist, str_fashion_mnist, str_cifar10, str_cifar100, str_fmow
Image.MAX_IMAGE_PIXELS = None
# Mnist class envelope mnist dataset
class Mnist:
def __init__(self):
self.data = None
self.labels = None
self.str_name = str_mnist
def __str__(self):
return self.str_name
# load function saves loaded data and labels into Mnist object
def load(self):
# mnist = fetch_mldata("MNIST original")
mnist = fetch_openml("mnist_784", version=1)
print("MNIST dataset loaded!")
self.data = mnist.data.astype("uint8")
self.labels = mnist.target.astype("int32")
# np.array(list(map(int, fashion_mnist.target)))
# get function returns data and labels saved in Mnist object
def get(self):
return self.data, self.labels
# FashionMnist class envelope fashion-mnist dataset
class FashionMnist:
def __init__(self):
self.data = None
self.labels = None
self.str_name = str_fashion_mnist
def __str__(self):
return self.str_name
# load function saves loaded data and labels into FashionMnist object
def load(self):
# fashion_mnist = fetch_mldata("Fashion-MNIST")
fashion_mnist = fetch_openml("Fashion-MNIST")
print("Fashion-MNIST dataset loaded!")
self.data = fashion_mnist.data.astype("uint8")
self.labels = np.array(list(map(int, fashion_mnist.target)))
# get function returns data and labels saved in FashionMnist object
def get(self):
return self.data, self.labels
# Cifar10 class envelope cifar-10 dataset
class Cifar10:
def __init__(self):
self.data = None
self.labels = None
self.path = path_cifar10
self.str_name = str_cifar10
def __str__(self):
return self.str_name
# __load_file function load file from folder_path
def __load_file(self, folder_path):
with open(folder_path, mode="rb") as file:
batch = pickle.load(file, encoding="latin1")
file_data = batch["data"]
file_labels = batch["labels"]
return file_data, file_labels
# load function saves loaded data and labels into Cifar10 object
def load(self):
data, labels = self.__load_file(self.path + "/data_batch_" + str(1))
print("CIFAR-10 batch number 1/5 loaded!")
for i in range(2, 6):
temp_data, temp_labels = self.__load_file(self.path + "/data_batch_" + str(i))
data = np.concatenate((data, temp_data), axis=0)
labels = np.concatenate((labels, temp_labels), axis=0)
print("CIFAR-10 batch number " + str(i) + "/5 loaded!")
test_data, test_labels = self.__load_file(self.path + "/test_batch")
data = np.concatenate((data, test_data), axis=0)
labels = np.concatenate((labels, test_labels), axis=0)
print("CIFAR-10 test batch loaded!")
print("CIFAR-10 dataset loaded!")
self.data = data
self.labels = labels
# get function returns data and labels saved in Cifar100 object
def get(self):
return self.data, self.labels
# Cifar100 class envelope cifar-100 dataset
class Cifar100:
def __init__(self):
self.data = None
self.labels = None
self.path = path_cifar100
self.str_name = str_cifar100
def __str__(self):
return self.str_name
# __load_file function load file from folder_path
def __load_file(self, folder_path):
with open(folder_path, mode="rb") as file:
batch = pickle.load(file, encoding="latin1")
file_data = batch["data"]
file_labels = batch["fine_labels"]
return file_data, file_labels
# load function saves loaded data and labels into Cifar100 object
def load(self):
data, labels = self.__load_file(self.path + "/train")
print("CIFAR-100 train batch loaded!")
test_data, test_labels = self.__load_file(self.path + "/test")
data = np.concatenate((data, test_data), axis=0)
labels = np.concatenate((labels, test_labels), axis=0)
print("CIFAR-100 test batch loaded!")
print("CIFAR-100 dataset loaded!")
self.data = data
self.labels = labels
# get function returns data and labels saved in Cifar100 object
def get(self):
return self.data, self.labels
# Fmow class envelope fmow dataset
class Fmow:
def __init__(self, str_files_group="train_rgb"):
self.data = None
self.labels = None
self.path = path_fmow_rgb
self.path_catch_files = path_catch_files
self.category_names = fmow_category_names
self.image_size = fmow_image_size
self.file_name = "fmow-img_size=" + str(self.image_size)
self.thread_couple = []
self.str_name = str_fmow
self.files_group = str_files_group
def __str__(self):
return self.str_name + ""
# __scan_files function scan fmow dataset files structure
def __scan_files(self, root_file):
print("Scanning fmow dataset files structure")
category_index = 0
rgb_files = []
msrgb_files = []
while category_index < len(self.category_names):
cat = self.category_names[category_index]
path = self.path + "/" + root_file + "/" + cat
dir_index = 0
dir_found = 0
number_of_dirs = len(next(os.walk(path))[1])
while dir_found < number_of_dirs:
if os.path.isdir(path + "/" + cat + "_" + str(dir_index)):
# print(str(category_index) + ": " + path + "/" + cat + "_" + str(dir_index))
dir_found += 1
file_index = 0
file_found = 0
number_of_files = len(next(os.walk(path + "/" + cat + "_" + str(dir_index)))[2])
for l in os.listdir(path + "/" + cat + "_" + str(dir_index)):
if "jpg." in l or "json." in l or ".jpg_tmp" in l or ".json_tmp" in l:
file_found += 1
while file_found < number_of_files:
meta_path = path + "/" + cat + "_" + str(dir_index) + "/" + cat + "" + "_" + str(
dir_index) + "_" + str(
file_index)
if os.path.isfile(meta_path + "_rgb" + ".jpg"):
rgb_files.append(meta_path + "_rgb")
file_found += 2
if os.path.isfile(meta_path + "_msrgb" + ".jpg"):
msrgb_files.append(meta_path + "_msrgb")
file_found += 2
file_index += 1
dir_index += 1
category_index += 1
return rgb_files, msrgb_files
# __load_file_list function open fmow dataset files structure file if it exist, or creates it
def __load_file_list(self):
if os.path.exists(self.path_catch_files + "fmow_file_list.hdf5"):
print("Load file fmow_file_list.hdf5 exist!")
hdf5_store = h5py.File(self.path_catch_files + "fmow_file_list.hdf5", "r")
files = hdf5_store[self.files_group][:].astype(str)
hdf5_store.close()
return files
else:
print("Creating file fmow_file_list.hdf5")
val_rgb_files, val_msrgb_files = self.__scan_files("val")
train_rgb_files, train_msrgb_files = self.__scan_files("train")
hdf5_store = h5py.File(self.path_catch_files + "fmow_file_list.hdf5", "a")
hdf5_store.create_dataset("val_rgb", data=np.array(val_rgb_files, dtype="S"))
hdf5_store.create_dataset("val_msrgb", data=np.array(val_msrgb_files, dtype="S"))
hdf5_store.create_dataset("train_rgb", data=np.array(train_rgb_files, dtype="S"))
hdf5_store.create_dataset("train_msrgb", data=np.array(train_msrgb_files, dtype="S"))
hdf5_store.close()
if self.files_group == "train_rgb":
return train_rgb_files
elif self.files_group == "train_msrgb":
return train_msrgb_files
elif self.files_group == "val_rgb":
return val_rgb_files
elif self.files_group == "val_msrgb":
return val_msrgb_files
return
# __transform_thread function load and returns img and label from file_path
def __transform_thread(self, file_path):
img = Image.open(
file_path + ".jpg")
im_resized = img.resize((self.image_size, self.image_size), Image.ANTIALIAS)
img_pixels = self.image_size * self.image_size
rgb_arr = np.reshape(np.reshape(np.array(im_resized), (img_pixels, 3)), (img_pixels * 3), order="F")
with open(file_path + ".json") as json_file:
json_temp = json.load(json_file)
self.thread_couple.append([rgb_arr, self.category_names.index(json_temp["bounding_boxes"][0]["category"])])
# load function creates img data and labels memory maps
# and fill them with data from file list through paralel threads using __transform_thread function
def load(self):
files = self.__load_file_list()
num_of_files = len(files)
if not os.path.exists(self.path_catch_files + self.file_name + "-data(" + self.files_group + ").mymemmap"):
data = np.memmap(self.path_catch_files + self.file_name + "-data(" + self.files_group + ").mymemmap",
dtype="uint8", mode="w+",
shape=(num_of_files, self.image_size * self.image_size * 3))
labels = np.memmap(self.path_catch_files + self.file_name + "-labels(" + self.files_group + ").mymemmap",
dtype="int32", mode="w+",
shape=(num_of_files))
else:
data = np.memmap(self.path_catch_files + self.file_name + "-data(" + self.files_group + ").mymemmap",
dtype="uint8", mode="r+",
shape=(num_of_files, self.image_size * self.image_size * 3))
labels = np.memmap(self.path_catch_files + self.file_name + "-labels(" + self.files_group + ").mymemmap",
dtype="int32", mode="r+",
shape=(num_of_files))
downloaded_files = 0
num_threads = th.active_count()
while downloaded_files < num_of_files:
if downloaded_files + num_threads > num_of_files:
num_threads = num_of_files - downloaded_files
threads = []
self.thread_couple = []
for i in range(num_threads):
t = th.Thread(target=self.__transform_thread, args=(files[downloaded_files + i],))
threads.append(t)
t.start()
for t in threads:
t.join()
del threads
for i in range(len(self.thread_couple)):
data[downloaded_files + i] = self.thread_couple[i][0]
labels[downloaded_files + i] = self.thread_couple[i][1]
del self.thread_couple
print(str(downloaded_files + num_threads) + "/" + str(num_of_files))
downloaded_files += num_threads
return
# get function return img data and labels memory maps if they exist, or return None, None
def get(self):
if os.path.exists(self.path_catch_files + self.file_name + "-data(" + self.files_group + ").mymemmap"):
num_of_files = len(self.__load_file_list())
data = np.memmap(self.path_catch_files + self.file_name + "-data(" + self.files_group + ").mymemmap",
dtype="uint8", mode="r",
shape=(num_of_files, self.image_size * self.image_size * 3))
labels = np.memmap(self.path_catch_files + self.file_name + "-labels(" + self.files_group + ").mymemmap",
dtype="int32", mode="r",
shape=(num_of_files))
return data, labels
else:
print(
"Fmow mymemmap file with name: " + self.file_name + "-data(" + self.files_group + ").mymemmap" + " dont exist!")
return None, None
# get function return normalized img data and labels memory maps if they exist, or return None, None
def get_normalized(self):
if os.path.exists(
self.path_catch_files + self.file_name + "-data_normalized(" + self.files_group + ").mymemmap"):
num_of_files = len(self.__load_file_list())
category_len = len(fmow_category_names)
data = np.memmap(
self.path_catch_files + self.file_name + "-data_normalized(" + self.files_group + ").mymemmap",
dtype="float32", mode="r",
shape=(num_of_files, self.image_size, self.image_size, 3))
labels = np.memmap(
self.path_catch_files + self.file_name + "-labels_normalized(" + self.files_group + ").mymemmap",
dtype="float32", mode="r",
shape=(num_of_files, category_len))
return data, labels
else:
print(
"Fmow mymemmap file with name: " + self.file_name + "-data_normalized(" + self.files_group + ").mymemmap" + " dont exist!")
return None, None
# __normalized_thread function reshape img data and labels memory maps into normalized form
def __normalized_thread(self, data_normalized, data, labels_normalized, labels, i):
data_normalized[i] = np.reshape(data[i] / 255, (self.image_size, self.image_size, 3), order="F")
labels_normalized[i][labels[i]] = 1
# load_normalize creates normalized img data and labels memory maps
# and fill them with reshaped data from non-normalized memory maps
# through paralel threads using __normalized_thread function
def load_normalize(self):
files = self.__load_file_list()
num_of_files = len(files)
| |
"""
Module for managing configuration data from `config.json`
"""
import copy
import json
import numbers
import os
from typing import Any, Dict, Iterable, List, Optional, Union
ConfigDict = Dict[str, Union[str, int, float]]
class SHConfig: # pylint: disable=too-many-instance-attributes
"""A sentinelhub-py package configuration class.
The class reads during its first initialization the configurable settings from ``./config.json`` file:
- `instance_id`: An instance ID for Sentinel Hub service used for OGC requests.
- `sh_client_id`: User's OAuth client ID for Sentinel Hub service
- `sh_client_secret`: User's OAuth client secret for Sentinel Hub service
- `sh_base_url`: There exist multiple deployed instances of Sentinel Hub service, this parameter defines the
location of a specific service instance.
- `sh_auth_base_url`: Base url for Sentinel Hub Authentication service. Authentication is typically sent to the
main service deployment even if `sh_base_url` points to another deployment.
- `geopedia_wms_url`: Base url for Geopedia WMS services.
- `geopedia_rest_url`: Base url for Geopedia REST services.
- `aws_access_key_id`: Access key for AWS Requester Pays buckets.
- `aws_secret_access_key`: Secret access key for AWS Requester Pays buckets.
- `aws_session_token`: A session token for your AWS account. It is only needed when you are using temporary
credentials.
- `aws_metadata_url`: Base url for publicly available metadata files
- `aws_s3_l1c_bucket`: Name of Sentinel-2 L1C bucket at AWS s3 service.
- `aws_s3_l2a_bucket`: Name of Sentinel-2 L2A bucket at AWS s3 service.
- `opensearch_url`: Base url for Sentinelhub Opensearch service.
- `max_wfs_records_per_query`: Maximum number of records returned for each WFS query.
- `max_opensearch_records_per_query`: Maximum number of records returned for each Opensearch query.
- `max_download_attempts`: Maximum number of download attempts from a single URL until an error will be raised.
- `download_sleep_time`: Number of seconds to sleep between the first failed attempt and the next. Every next
attempt this number exponentially increases with factor `3`.
- `download_timeout_seconds`: Maximum number of seconds before download attempt is canceled.
- `number_of_download_processes`: Number of download processes, used to calculate rate-limit sleep time.
Usage in the code:
* ``SHConfig().sh_base_url``
* ``SHConfig().instance_id``
"""
CREDENTIALS = {
"instance_id",
"sh_client_id",
"sh_client_secret",
"aws_access_key_id",
"aws_secret_access_key",
"aws_session_token",
}
CONFIG_PARAMS = [
"instance_id",
"sh_client_id",
"sh_client_secret",
"sh_base_url",
"sh_auth_base_url",
"geopedia_wms_url",
"geopedia_rest_url",
"aws_access_key_id",
"aws_secret_access_key",
"aws_session_token",
"aws_metadata_url",
"aws_s3_l1c_bucket",
"aws_s3_l2a_bucket",
"opensearch_url",
"max_wfs_records_per_query",
"max_opensearch_records_per_query",
"max_download_attempts",
"download_sleep_time",
"download_timeout_seconds",
"number_of_download_processes",
]
_cache: Optional[Dict[str, Any]] = None
def __init__(self, hide_credentials: bool = False, use_defaults: bool = False):
"""
:param hide_credentials: If `True` then methods that provide the entire content of the config object will mask
out all credentials. But credentials could still be accessed directly from config object attributes. The
default is `False`.
:param use_defaults: Does not load the configuration file, returns config object with defaults only.
"""
self.instance_id: str = ""
self.sh_client_id: str = ""
self.sh_client_secret: str = ""
self.sh_base_url: str = "https://services.sentinel-hub.com"
self.sh_auth_base_url: str = "https://services.sentinel-hub.com"
self.geopedia_wms_url: str = "https://service.geopedia.world"
self.geopedia_rest_url: str = "https://www.geopedia.world/rest"
self.aws_access_key_id: str = ""
self.aws_secret_access_key: str = ""
self.aws_session_token: str = ""
self.aws_metadata_url: str = "https://roda.sentinel-hub.com"
self.aws_s3_l1c_bucket: str = "sentinel-s2-l1c"
self.aws_s3_l2a_bucket: str = "sentinel-s2-l2a"
self.opensearch_url: str = "http://opensearch.sentinel-hub.com/resto/api/collections/Sentinel2"
self.max_wfs_records_per_query: int = 100
self.max_opensearch_records_per_query: int = 500 # pylint: disable=invalid-name
self.max_download_attempts: int = 4
self.download_sleep_time: float = 5.0
self.download_timeout_seconds: float = 120.0
self.number_of_download_processes: int = 1
self._hide_credentials = hide_credentials
if not use_defaults:
for param, value in self._global_cache.items():
setattr(self, param, value)
def _validate_values(self) -> None:
"""Ensures that the values are aligned with expectations."""
default = SHConfig(use_defaults=True)
for param in self.CONFIG_PARAMS:
value = getattr(self, param)
default_value = getattr(default, param)
param_type = type(default_value)
if isinstance(value, str) and value.startswith("http"):
value = value.rstrip("/")
if (param_type is float) and isinstance(value, numbers.Number):
continue
if not isinstance(value, param_type):
raise ValueError(f"Value of parameter '{param}' must be of type {param_type.__name__}")
if self.max_wfs_records_per_query > 100:
raise ValueError("Value of config parameter 'max_wfs_records_per_query' must be at most 100")
if self.max_opensearch_records_per_query > 500:
raise ValueError("Value of config parameter 'max_opensearch_records_per_query' must be at most 500")
def __getitem__(self, name: str) -> Union[str, int, float]:
"""Config parameters can also be accessed as items."""
if name in self.CONFIG_PARAMS:
return getattr(self, name)
raise KeyError(f"'{name}' is not a supported config parameter")
def __str__(self) -> str:
"""Content of SHConfig in json schema. If `hide_credentials` is set to `True` then credentials will be
masked.
"""
return json.dumps(self.get_config_dict(), indent=2)
def __repr__(self) -> str:
"""Representation of SHConfig parameters. If `hide_credentials` is set to `True` then credentials will be
masked.
"""
repr_list = [f"{self.__class__.__name__}("]
for key, value in self.get_config_dict().items():
repr_list.append(f"{key}={repr(value)},")
return "\n ".join(repr_list).strip(",") + "\n)"
@property
def _global_cache(self) -> Dict[str, Any]:
"""Uses a class attribute to store a global instance of a class with config parameters."""
if SHConfig._cache is None:
loaded_instance = SHConfig.load(self.get_config_location())
SHConfig._cache = {param: getattr(loaded_instance, param) for param in SHConfig.CONFIG_PARAMS}
return SHConfig._cache
@classmethod
def load(cls, filename: str) -> "SHConfig":
"""Method that loads configuration parameters from a file. Does not affect global settings.
:param filename: Path to file from which to read configuration.
"""
with open(filename, "r") as cfg_file:
config_dict = json.load(cfg_file)
config = cls(use_defaults=True)
for param, value in config_dict.items():
if param in cls.CONFIG_PARAMS:
setattr(config, param, value)
config._validate_values()
return config
def save(self, filename: Optional[str] = None) -> None:
"""Method that saves configuration parameter changes from instance of SHConfig class to global config class and
to `config.json` file.
:param filename: Optional name of file to which to save configuration. If not specified saves to global default.
:Example:
``my_config = SHConfig()`` \n
``my_config.instance_id = '<new instance id>'`` \n
``my_config.save()``
"""
self._validate_values()
is_changed = False
for param in self.CONFIG_PARAMS:
if getattr(self, param) != self._global_cache[param]:
is_changed = True
self._global_cache[param] = getattr(self, param) # pylint: disable=unsupported-assignment-operation
if is_changed:
config_dict = {param: getattr(self, param) for param in self.CONFIG_PARAMS}
with open(filename or self.get_config_location(), "w") as cfg_file:
json.dump(config_dict, cfg_file, indent=2)
def copy(self) -> "SHConfig":
"""Makes a copy of an instance of `SHConfig`"""
return copy.copy(self)
def reset(self, params: Union[str, Iterable[str], object] = ...) -> None:
"""Resets configuration class to initial values. Use `SHConfig.save()` method in order to save this change.
:param params: Parameters which will be reset. Parameters can be specified with a list of names, e.g.
``['instance_id', 'aws_access_key_id', 'aws_secret_access_key']``, or as a single name, e.g.
``'sh_base_url'``. By default, all parameters will be reset and default value is ``Ellipsis``.
"""
default = SHConfig(use_defaults=True)
if params is ...:
params = self.get_params()
if isinstance(params, str):
self._reset_param(params, default)
elif isinstance(params, Iterable):
for param in params:
self._reset_param(param, default)
else:
raise ValueError(
f"Parameters must be specified in form of a list of strings or as a single string, instead got {params}"
)
def _reset_param(self, param: str, default: "SHConfig") -> None:
"""Resets a single parameter
:param param: A configuration parameter
"""
if param not in self.get_params():
raise ValueError(f"Cannot reset unknown parameter '{param}'")
setattr(self, param, getattr(default, param))
def get_params(self) -> List[str]:
"""Returns a list of parameter names
:return: List of parameter names
"""
return list(self.CONFIG_PARAMS)
def get_config_dict(self) -> ConfigDict:
"""Get a dictionary representation of `SHConfig` class. If `hide_credentials` is set to `True` then
credentials will be masked.
:return: A dictionary with configuration parameters
"""
config_params = {param: getattr(self, param) for param in self.CONFIG_PARAMS}
if self._hide_credentials:
config_params = {param: self._mask_credentials(param, value) for param, value in config_params.items()}
return config_params
@classmethod
def get_config_location(cls) -> str:
"""Returns location of configuration file on disk
:return: File path of `config.json` file
"""
config_file = os.path.join(os.path.dirname(__file__), "config.json")
if not os.path.isfile(config_file):
with open(config_file, "w") as cfg_file:
default_dict = cls(use_defaults=True).get_config_dict()
json.dump(default_dict, cfg_file, indent=2)
return config_file
def _mask_credentials(self, param: str, value: object) -> object:
"""In case a parameter that holds credentials is given it will mask its value"""
if not (param in self.CREDENTIALS and value):
return value
if not isinstance(value, str):
raise ValueError(f"Parameter '{param}' should be a string but {value} found")
hide_size = min(max(len(value) - 4, 10), len(value))
return "*" * hide_size + value[hide_size:]
def get_sh_oauth_url(self) -> str:
"""Provides URL for Sentinel Hub authentication endpoint
:return: A URL endpoint
"""
return f"{self.sh_auth_base_url}/oauth/token"
def get_sh_process_api_url(self) -> str:
"""Provides URL for Sentinel Hub Process API endpoint
:return: A URL endpoint
"""
return f"{self.sh_base_url}/api/v1/process"
def get_sh_ogc_url(self) -> str:
"""Provides URL for Sentinel Hub OGC endpoint
:return: A URL endpoint
"""
return f"{self.sh_base_url}/ogc"
def get_sh_rate_limit_url(self) -> str:
"""Provides URL for Sentinel Hub rate | |
{
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
}
},
'@type:security_all': {
'maxConcurrAttacks': {
},
'attackRetries': {
},
'maxPacketsPerSecond': {
},
'attackPlan': {
},
'randomSeed': {
},
'delayStart': {
},
'attackProfile': {
},
'attackPlanIterations': {
},
'attackPlanIterationDelay': {
},
'maxAttacksPerSecond': {
}
},
'@type:security_np': {
'attackRetries': {
},
'sessions': {
'max': {
},
'maxPerSecond': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'attackPlan': {
},
'randomSeed': {
},
'delayStart': {
},
'attackProfile': {
},
'attackPlanIterations': {
},
'attackPlanIterationDelay': {
}
},
'@type:layer3': {
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'rate': {
},
'increment': {
},
'type': {
},
'ramptype': {
}
},
'bidirectional': {
},
'randomizeIP': {
},
'enableTCP': {
},
'slowStart': {
},
'Templates': {
'TemplateType': {
}
},
'srcPort': {
},
'slowStartFps': {
},
'duration': {
'disable_nd_probes': {
},
'durationTime': {
},
'durationFrames': {
}
},
'udpSrcPortMode': {
},
'dstPort': {
},
'payload': {
'data': {
},
'type': {
},
'dataWidth': {
}
},
'syncIP': {
},
'addrGenMode': {
},
'maxStreams': {
},
'dstPortMask': {
},
'udpDstPortMode': {
},
'advancedUDP': {
'lengthVal': {
},
'lengthField': {
},
'checksumVal': {
},
'checksumField': {
}
},
'delayStart': {
},
'payloadAdvanced': {
'udfMode': {
},
'udfLength': {
},
'udfDataWidth': {
},
'udfOffset': {
}
},
'sizeDist': {
'increment': {
},
'type': {
},
'min': {
},
'rate': {
},
'mixlen2': {
},
'mixweight6': {
},
'mixlen1': {
},
'mixweight7': {
},
'mixlen4': {
},
'mixweight4': {
},
'mixlen3': {
},
'mixweight5': {
},
'mixlen6': {
},
'mixlen5': {
},
'mixlen8': {
},
'mixweight8': {
},
'mixlen7': {
},
'mixweight9': {
},
'mixlen9': {
},
'mixweight2': {
},
'max': {
},
'mixweight3': {
},
'mixweight1': {
},
'mixlen10': {
},
'mixweight10': {
},
'unit': {
}
},
'advancedIPv4': {
'lengthVal': {
},
'optionHeaderField': {
},
'optionHeaderData': {
},
'lengthField': {
},
'checksumVal': {
},
'tos': {
},
'checksumField': {
},
'ttl': {
}
},
'srcPortMask': {
},
'advancedIPv6': {
'flowLabel': {
},
'lengthVal': {
},
'extensionHeaderField': {
},
'lengthField': {
},
'nextHeader': {
},
'trafficClass': {
},
'extensionHeaderData': {
},
'hopLimit': {
}
}
},
'@type:layer4': {
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'initial_congestion_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'ip': {
'tos': {
},
'ttl': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'payload': {
'add_timestamp': {
},
'data': {
},
'http_type': {
},
'transport': {
},
'type': {
}
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
},
'packetsPerSession': {
},
'payloadSizeDist': {
'min': {
},
'max': {
},
'type': {
}
},
'dstPortDist': {
'min': {
},
'max': {
},
'type': {
}
}
},
'@type:playback': {
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'initial_congestion_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'ip': {
'tos': {
},
'ttl': {
}
},
'modification': {
'startpacket': {
},
'originalport': {
},
'newport': {
},
'replay': {
},
'bpfstring': {
},
'single': {
},
'loopcount': {
},
'endpacket': {
},
'independentflows': {
},
'serveripinjection': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'file': {
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
},
'behavior': {
}
},
'@type:layer2': {
'bidirectional': {
},
'maxStreams': {
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'rate': {
},
'increment': {
},
'type': {
},
'ramptype': {
}
},
'advanced': {
'ethTypeField': {
},
'ethTypeVal': {
}
},
'slowStart': {
},
'slowStartFps': {
},
'duration': {
'disable_nd_probes': {
},
'durationTime': {
},
'durationFrames': {
}
},
'delayStart': {
},
'payloadAdvanced': {
'udfMode': {
},
'udfLength': {
},
'udfDataWidth': {
},
'udfOffset': {
}
},
'sizeDist': {
'increment': {
},
'type': {
},
'min': {
},
'rate': {
},
'mixlen2': {
},
'mixweight6': {
},
'mixlen1': {
},
'mixweight7': {
},
'mixlen4': {
},
'mixweight4': {
},
'mixlen3': {
},
'mixweight5': {
},
'mixlen6': {
},
'mixlen5': {
},
'mixlen8': {
},
'mixweight8': {
},
'mixlen7': {
},
'mixweight9': {
},
'mixlen9': {
},
'mixweight2': {
},
'max': {
},
'mixweight3': {
},
'mixweight1': {
},
'mixlen10': {
},
'mixweight10': {
},
'unit': {
}
},
'payload': {
'data': {
},
'type': {
},
'dataWidth': {
}
}
},
'@type:stackscrambler': {
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'initial_congestion_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'scrambleOptions': {
'maxCorruptions': {
},
'badIPFlags': {
},
'badIPFragOffset': {
},
'badIPLength': {
},
'badUrgentPointer': {
},
'badIPFlowLabel': {
},
'badEthType': {
},
'badTCPOptions': {
},
'badGTPNext': {
},
'handshakeTCP': {
},
'badIPChecksum': {
},
'badSCTPLength': {
},
'badTCPFlags': {
},
'badICMPType': {
},
'badIPTTL': {
},
'badIPProtocol': {
},
'badSCTPFlags': {
},
'badGTPFlags': {
},
'badIPVersion': {
},
'badL4HeaderLength': {
},
'badL4Checksum': {
},
'badIPOptions': {
},
'badSCTPType': {
},
'badSCTPChecksum': {
},
'badGTPNpdu': {
},
'badICMPCode': {
},
'badSCTPVerificationTag': {
},
'badIPTOS': {
},
'badIPTotalLength': {
},
'badGTPLen': {
},
| |
#!/usr/bin/env python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: <EMAIL>
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Module-level state shared by the generator functions below.
tcm_dir = ""  # NOTE(review): never written in this chunk — purpose not visible here
fabric_ops = []  # NOTE(review): never written in this chunk — presumably collected fabric op names
fabric_mod_dir = ""  # output directory for the generated fabric module
# Protocol-specific port naming, set by the *_include builders:
# "lport"/"nport" for FC, "tport"/"iport" for SAS and iSCSI.
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
    """Print an error message and terminate the script with exit status 1."""
    # Python-3 print() call (also valid Python 2 for a single argument);
    # the original used the Python-2-only print statement.
    print(msg)
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module output directory.

    Returns 1 if the directory already exists, None after creating it.
    Exits via tcm_mod_err() if the directory cannot be created.
    """
    if os.path.isdir(fabric_mod_dir_var):
        return 1
    print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
    # os.mkdir() returns None and raises OSError on failure, so the old
    # `ret = os.mkdir(...); if ret:` error check could never fire.
    try:
        os.mkdir(fabric_mod_dir_var)
    except OSError:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h with the FC tpg/lport structs.

    Writes the header into fabric_mod_dir_var and records the FC port
    naming convention ("lport"/"nport") in the module-level globals used
    by the other generators.
    """
    global fabric_mod_port
    global fabric_mod_init_port

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)

    # Generated C uses kernel-style tab indentation inside the structs.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* FC lport target portal group tag for TCM */\n"
    buf += "\tu16 lport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += "\tstruct " + fabric_mod_name + "_lport *lport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += "\t/* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += "\tu64 lport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for FC Target Lport */\n"
    buf += "\tchar lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += "\tstruct se_wwn lport_wwn;\n"
    buf += "};\n"

    # open()/write() raise OSError on failure; the old `if not p:` and
    # `if ret:` checks were dead in Python 2 and `if ret:` would wrongly
    # fire under Python 3 (write() returns the character count there).
    with open(f, 'w') as p:
        p.write(buf)

    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h with the SAS tpg/tport structs.

    Writes the header into fabric_mod_dir_var and records the SAS port
    naming convention ("tport"/"iport") in the module-level globals used
    by the other generators.
    """
    global fabric_mod_port
    global fabric_mod_init_port

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)

    # Generated C uses kernel-style tab indentation inside the structs.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* SAS port target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += "\tu64 tport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for SAS Target port */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"

    # open()/write() raise OSError on failure; the old return-value checks
    # were dead in Python 2 and `if ret:` misfires under Python 3.
    with open(f, 'w') as p:
        p.write(buf)

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h with the iSCSI tpg/tport structs.

    Writes the header into fabric_mod_dir_var and records the iSCSI port
    naming convention ("tport"/"iport") in the module-level globals used
    by the other generators.
    """
    global fabric_mod_port
    global fabric_mod_init_port

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)

    # Generated C uses kernel-style tab indentation inside the structs.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* iSCSI target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* ASCII formatted TargetName for IQN */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"

    # open()/write() raise OSError on failure; the old return-value checks
    # were dead in Python 2 and `if ret:` misfires under Python 3.
    with open(f, 'w') as p:
        p.write(buf)

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the per-protocol _base.h generator.

    Exits the script with status 1 for an unsupported proto_ident.
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        # Python-3 print() call; the original used the py2 print statement.
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = | |
<reponame>we-taper/quantum<gh_stars>1-10
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute analytic gradients by using general parameter-shift rule. """
import tensorflow as tf
from tensorflow_quantum.python.differentiators import differentiator
from tensorflow_quantum.python.differentiators import parameter_shift_util
class ParameterShift(differentiator.Differentiator):
"""Calculate the general version of parameter-shift rule based gradients.
This ParameterShift is the gradient estimator of the following paper:
[arXiv:1905.13311](https://arxiv.org/abs/1905.13311), <NAME>.
This ParameterShift is used for any programs with parameterized gates.
It internally decomposes any programs into array of gates with at most
two distinct eigenvalues.
>>> non_diff_op = tfq.get_expectation_op()
>>> linear_differentiator = tfq.differentiators.ParameterShift()
>>> # Get an expectation op, with this differentiator attached.
>>> op = linear_differentiator.generate_differentiable_op(
... analytic_op=non_diff_op
... )
>>> qubit = cirq.GridQubit(0, 0)
>>> circuit = tfq.convert_to_tensor([
... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha'))
... ])
>>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]])
>>> symbol_values_array = np.array([[0.123]], dtype=np.float32)
>>> # Calculate tfq gradient.
>>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
>>> with tf.GradientTape() as g:
... g.watch(symbol_values_tensor)
... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums)
>>> # This value is now computed via the ParameterShift rule.
>>> # https://arxiv.org/abs/1905.13311
>>> grads = g.gradient(expectations, symbol_values_tensor)
>>> grads
tf.Tensor([[-1.1839752]], shape=(1, 1), dtype=float32)
"""
@tf.function
def get_gradient_circuits(self, programs, symbol_names, symbol_values):
"""See base class description."""
raise NotImplementedError(
"Gradient circuits are not currently available for "
"ParameterShift.")
    @tf.function
    def differentiate_analytic(self, programs, symbol_names, symbol_values,
                               pauli_sums, forward_pass_vals, grad):
        """Calculate the gradient.

        The gradient calculation follows these steps:

        1. Compute the decomposition of the incoming circuits so that we
           have their generator information (done using cirq in a
           tf.py_function).
        2. Use formula (31) from the paper inside of TensorFlow to
           calculate gradients from all the decomposed circuits.
        3. Sum up terms and reshape for the total gradient that is
           compatible with TensorFlow.

        **CAUTION**
        Analytic gradient measurements based on this ParameterShift
        generally run at least K(=2) times SLOWER than the original
        circuit.  On top of that, since all parameters of gates are
        shifted individually, the time complexity is linear in the number
        of parameterized gates L.  So, you will see O(KL) slower time &
        space complexity than the original forward pass measurements.

        Args:
            programs: `tf.Tensor` of strings with shape [batch_size]
                containing the string representations of the circuits to
                be executed.
            symbol_names: `tf.Tensor` of strings with shape [n_params],
                which is used to specify the order in which the values in
                `symbol_values` should be placed inside of the circuits
                in `programs`.
            symbol_values: `tf.Tensor` of real numbers with shape
                [batch_size, n_params] specifying parameter values to
                resolve into the circuits specified by programs,
                following the ordering dictated by `symbol_names`.
            pauli_sums: `tf.Tensor` of strings with shape
                [batch_size, n_ops] containing the string representation
                of the operators that will be used on all of the circuits
                in the expectation calculations.
            forward_pass_vals: `tf.Tensor` of real numbers with shape
                [batch_size, n_ops] containing the output of the forward
                pass through the op you are differentiating.
            grad: `tf.Tensor` of real numbers with shape
                [batch_size, n_ops] representing the gradient
                backpropagated to the output of the op you are
                differentiating through.
        Returns:
            Backward gradient values for each program & each pauli sum.
            It has the shape of [batch_size, n_symbols].
        """
        # These sizes get used a lot below.
        n_symbols = tf.gather(tf.shape(symbol_names), 0)
        n_programs = tf.gather(tf.shape(programs), 0)
        n_ops = tf.gather(tf.shape(pauli_sums), 1)
        # Assume cirq.decompose() generates gates with at most two distinct
        # eigenvalues, which results in two parameter shifts.
        n_shifts = 2
        # STEP 1: Generate required inputs for the executor.
        # Deserialize programs and parse all parameterized gates.
        # new_programs has shape [n_symbols, n_param_gates, n_shifts,
        # n_programs]: one parameter-shifted copy of every program per
        # (symbol, decomposed gate, shift direction) combination.
        (new_programs, weights, shifts,
         n_param_gates) = parameter_shift_util.parse_programs(
             programs, symbol_names, symbol_values, n_symbols)
        # Reshape & transpose new_programs, weights and shifts to fit into
        # the input format of the tensorflow_quantum simulator:
        # [n_symbols, n_shifts, n_programs, n_param_gates].
        new_programs = tf.transpose(new_programs, [0, 2, 3, 1])
        weights = tf.transpose(weights, [0, 2, 3, 1])
        shifts = tf.transpose(shifts, [0, 2, 3, 1])
        # Flatten everything so a single expectation-op call can evaluate
        # all shifted circuits at once.
        total_programs = n_programs * n_shifts * n_param_gates * n_symbols
        # tile up and then reshape to order programs correctly
        flat_programs = tf.reshape(new_programs, [total_programs])
        flat_shifts = tf.reshape(shifts, [total_programs])
        # tile up and then reshape to order ops correctly
        n_tile = n_shifts * n_param_gates * n_symbols
        # Each flattened program gets the original symbol values plus its
        # shift value appended as one extra trailing column.
        flat_perturbations = tf.concat([
            tf.reshape(
                tf.tile(tf.expand_dims(symbol_values, 0),
                        tf.stack([n_tile, 1, 1])), [total_programs, n_symbols]),
            tf.expand_dims(flat_shifts, axis=1)
        ],
                                       axis=1)
        flat_ops = tf.reshape(
            tf.tile(tf.expand_dims(pauli_sums, 0), tf.stack([n_tile, 1, 1])),
            [total_programs, n_ops])
        # Append the impurity symbol (which receives the shift column above)
        # to the symbol names.
        new_symbol_names = tf.concat([
            symbol_names,
            tf.expand_dims(tf.constant(
                parameter_shift_util._PARAMETER_IMPURITY_NAME),
                           axis=0)
        ],
                                     axis=0)
        # STEP 2: calculate the required expectation values.
        expectations = self.expectation_op(flat_programs, new_symbol_names,
                                           flat_perturbations, flat_ops)
        # STEP 3: generate gradients according to the results.
        # Rows are grouped according to which parameter was perturbed, so
        # reshape to reflect that.
        grouped_expectations = tf.reshape(
            expectations,
            [n_symbols, n_shifts * n_programs * n_param_gates, -1])

        # Now we can calculate the partial of the circuit output with
        # respect to each perturbed parameter.
        def rearrange_expectations(grouped):
            # Slice the per-symbol row block into per-(gate, shift) chunks
            # of [n_programs, n_ops].
            def split_vertically(i):
                return tf.slice(grouped, [i * n_programs, 0],
                                [n_programs, n_ops])

            return tf.map_fn(split_vertically,
                             tf.range(n_param_gates * n_shifts),
                             dtype=tf.float32)

        # Reshape so that expectations calculated on different programs are
        # separated by a dimension.
        rearranged_expectations = tf.map_fn(rearrange_expectations,
                                            grouped_expectations)
        # Weighted sum over the (param_gate, shift) axis — formula (31)
        # from arXiv:1905.13311.
        partials = tf.einsum(
            'spco,spc->sco', rearranged_expectations,
            tf.cast(
                tf.reshape(weights,
                           [n_symbols, n_param_gates * n_shifts, n_programs]),
                rearranged_expectations.dtype))
        # Chain rule: contract the partials with the incoming gradient to
        # get [batch_size, n_symbols].
        return tf.einsum('sco,co -> cs', partials, grad)
@tf.function
def differentiate_sampled(self, programs, symbol_names, symbol_values,
pauli_sums, num_samples, forward_pass_vals, grad):
"""Calculate the gradient.
The gradient calculations follows the following steps:
1. Compute the decomposition of the incoming circuits so that we have
their generator information (done using cirq in a tf.py_function)
2. Use formula (31) from paper inside of TensorFlow to calculate
gradients from all the decomposed circuits.
3. Sum up terms and reshape for the total gradient that is compatible
with TensorFlow.
**CAUTION**
Analytic gradient measurements based on this ParameterShift generally
run at least K(=2) times SLOW than the original circuit.
On top of it, since all parameters of gates are shifted individually,
the time complexity is linear in the number of parameterized gates L.
So, you will see O(KL) slower time & space complexity than the original
forward pass measurements.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits to be executed.
symbol_names: `tf.Tensor` of strings with shape [n_params], which
is used to specify the order in which the values in
`symbol_values` should be placed inside of the circuits in
`programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
into the circuits specified by programs, following the ordering
dictated by `symbol_names`.
pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
containing the string representation of the operators that will
be used on all of the circuits in the expectation calculations.
num_samples: `tf.Tensor` of positiver integers indicating the number
of samples used per term to calculate the expectation value
in the forward pass.
forward_pass_vals: `tf.Tensor` of real numbers with shape
[batch_size, n_ops] containing the output of the forward pass
through the op you are differentiating.
grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]
representing the gradient backpropagated to the output of the
op you are differentiating through.
Returns:
Backward gradient values for each program & each pauli sum. It has
the shape of [batch_size, n_symbols].
"""
# these get used a lot
n_symbols = tf.gather(tf.shape(symbol_names), 0)
n_programs = tf.gather(tf.shape(programs), 0)
n_ops = tf.gather(tf.shape(pauli_sums), 1)
# Assume cirq.decompose() generates gates | |
<gh_stars>0
# built in modules
import random
import argparse
import typing
import itertools
import os
import sys
import zipfile
# installed modules
import tqdm
import dgl
import mxnet as mx
import numpy as np
import gluonnlp as nlp
from mxnet import gluon
from mxnet.gluon import HybridBlock
import mxnet.autograd
# project modules
import pipeline
from extract_syntactic_rels import (
_subgraph,
parse_tree_to_graph,
parse_tree_to_final_graph,
get_nodes_between_entities,
get_entity_head,
_get_entity_head,
get_edges_between_entites)
from utils.spacy_utils import get_token_root_distance
from utils import mxnet_utils
from utils.webutils import download_file_from_google_drive
from model import TreeLSTMNet, GCNNet, GATNet
# constants
# Local paths of the pre-trained word-embedding files, keyed by embedding
# type; download_embeddings() fetches the archive when the file is absent.
EMBEDDINGS_PATHS = {
    'wiki': 'cache/wiki-news-300d-1M.txt',
    'arxiv': 'cache/arxiv.cs.w8.d100.txt',
    'both': 'cache/merged-wiki-arxiv.txt'
}
# Earlier 13-class label scheme that encoded relation direction with
# separate ':REVERSE' classes; superseded by the 7-class LABELS_MAP below.
# LABELS_MAP = {
#     'NONE': 0,
#     'COMPARE': 1,
#     'COMPARE:REVERSE': 2,
#     'MODEL-FEATURE': 3,
#     'MODEL-FEATURE:REVERSE': 4,
#     'PART_WHOLE': 5,
#     'PART_WHOLE:REVERSE': 6,
#     'RESULT': 7,
#     'RESULT:REVERSE': 8,
#     'TOPIC': 9,
#     'TOPIC:REVERSE': 10,
#     'USAGE': 11,
#     'USAGE:REVERSE': 12
# }
# Directory used to cache downloaded embedding archives.
CACHEDIR = 'cache'
# Relation type -> integer class id fed to the network.
LABELS_MAP = {
    'NONE': 0,
    'COMPARE': 1,
    'MODEL-FEATURE': 2,
    'PART_WHOLE': 3,
    'RESULT': 4,
    'TOPIC': 5,
    'USAGE': 6,
}
# Google Drive file ids for each embedding archive (same keys as
# EMBEDDINGS_PATHS), used by download_embeddings().
EMBEDDINGS_URLS = {
    'wiki': '10tzFRTo2_aAP_zx6QKyp_s8klr9t9op3',
    'arxiv': '17XxsRdDMAVIXT5hgJnCuzFI6nyQiSLts',
    'both': '1XM5WAQlVJ7CDfdG9jcYk2ao6jTHFfg4T'
}
def json_dict(text):
    """argparse ``type=`` helper: parse *text* as JSON and require an object.

    :param text: command-line argument to parse
    :return: the parsed ``dict``
    :raises argparse.ArgumentTypeError: on malformed JSON or any non-object
        top-level value.
    """
    # Local import: the module header never imported json, so the original
    # raised NameError on first use.
    import json

    try:
        parsed = json.loads(text)
        # The original only rejected lists, silently accepting strings and
        # numbers despite this function's dict contract.
        if not isinstance(parsed, dict):
            raise ValueError(
                'Expected a Dictionary, not a {}.'.format(type(parsed).__name__))
    except (json.JSONDecodeError, ValueError) as e:
        raise argparse.ArgumentTypeError(e)
    return parsed
def download_embeddings(path, emb_type, cachedir=CACHEDIR):
    """Download the embeddings archive from Google Drive and extract it.

    :param path: expected local path of the embeddings file; nothing is
        downloaded when it already exists
    :param emb_type: key into EMBEDDINGS_URLS ('wiki', 'arxiv' or 'both')
    :param cachedir: directory used for the temporary zip and extraction
    :return: path of the (possibly freshly extracted) embeddings file
    """
    if not os.path.exists(cachedir):
        os.mkdir(cachedir)
    print(cachedir)
    if not os.path.exists(path):
        path_to_zip_file = os.path.join(cachedir, 'temp-{}.zip'.format(emb_type))
        # BUG FIX: the original never called .format(emb_type), so the
        # literal '{}' was printed instead of the embedding type.
        print('[info] downloading embeddings of type {}...'.format(emb_type),
              end=' ')
        download_file_from_google_drive(EMBEDDINGS_URLS[emb_type], path_to_zip_file)
        print(path_to_zip_file)
        with zipfile.ZipFile(path_to_zip_file, 'r') as zf:
            # The archive is assumed to contain the embeddings file as its
            # first entry; point `path` at it after extraction.
            path = os.path.join(cachedir, zf.namelist()[0])
            zf.extractall(path=cachedir)
        os.remove(path_to_zip_file)
    return path
def prepare_data_for_net(
        vocabulary: dict,
        samples: list,
        labels_map: dict,
        pos_map: dict,
        dependencies_map: dict,
        entity_length_distribution: typing.Tuple[float, float],
        include_entities_nodes: bool=False,
        include_entities_children: bool=False,
        case_sensitive: bool=False,
) -> typing.List[typing.Tuple[tuple, tuple, int, int]]:
    """
    Convert training data to ids in a way so that it
    can be batched and fed into a network.

    Each returned tuple holds (sentence graph, entity-A subgraph,
    entity-B subgraph, path graph placeholder, label id).

    :param vocabulary: vocabulary that maps strings to
        embedding ids
    :type vocabulary: dict
    :param samples: list of [training|test] samples to
        convert to numeric format
    :type samples: list
    :param labels_map: map of ids to use for labels
    :type labels_map: dict
    :return: iterator with training samples
    :rtype: typing.List[typing.Tuple[tuple, tuple, int, int]]

    NOTE(review): pos_map, include_entities_nodes and
    include_entities_children are currently unused in the body below.
    """
    data = []

    # Squash an entity length into (-1, 1) via a logistic centred on the
    # distribution mean/std.
    # NOTE(review): defined but never called in the active code path.
    def normalize_length(l):
        v = 1 / (1 + np.exp((entity_length_distribution[0] - l) /
                            entity_length_distribution[1]))
        return 2 * v - 1

    for sample in samples:
        # spaCy parse of the sentence for this sample.
        parsed = sample['spacy']
        _parsed = parsed
        # Altered to have nodes
        # be indices for the word embeddings
        _sentence_graph = parse_tree_to_final_graph(
            parsed,
            vocabulary,
            dependencies_map,
            case_sensitive
        )
        # Plain (token-level) dependency graph, kept alongside the
        # embedding-indexed one.
        sentence_graph = parse_tree_to_graph(
            parsed,
        )
        # Map a token to its position relative to the sentence start.
        # NOTE(review): unused in the active code path below.
        trueloc= lambda token: token.i - parsed[0].i
        # My additions to get the entity and path subgraphs from
        # the main graph
        span_ent_a = parsed[sample['ent_a_start']:sample['ent_a_end']]
        # head_ent_a = _get_entity_head(span_ent_a, sentence_graph, vocabulary, case_sensitive)
        span_ent_b = parsed[sample['ent_b_start']:sample['ent_b_end']]
        # head_ent_b = _get_entity_head(span_ent_b, sentence_graph, vocabulary, case_sensitive)
        ent_a_graph = _subgraph(
            _sentence_graph,
            _parsed[sample['ent_a_start']:sample['ent_a_end']],
            vocabulary,
            case_sensitive
        )
        ent_b_graph = _subgraph(
            _sentence_graph,
            span_ent_b,
            vocabulary,
            case_sensitive
        )
        '''
        path_graph = _sentence_graph.subgraph(
            get_nodes_between_entities(
                head_ent_a, head_ent_b, _sentence_graph
            )
        )
        '''
        # Placeholder value — the shortest-path subgraph above is disabled.
        path_graph = 0
        label = sample['type'] #+ (':REVERSE' if sample['is_reverse'] else '')
        # if the label is not assigned to this sample
        # (as it is on the evaluation data) we simply ignore this
        # and set the label to -1 instead. (-1 would cause issue if
        # fed into the network, so it is a good error check).
        label_id = labels_map[label] if label is not None else -1
        data.append((
            _sentence_graph,
            ent_a_graph,
            ent_b_graph,
            path_graph,
            label_id
        ))
    return data
def evaluate_on_test_data(
        net,
        test_sentences,
        test_data,
        labels_map,
        vocabulary,
        dependencies_map,
        case_sensitive = True,
        output_for_error_analysis=None,
        evaluate_output=None
):
    """Run *net* over prepared test samples, then score or save predictions.

    :param net: trained model; called with (sentence graph, entity-A graph,
        entity-B graph) DGL graphs and returning class scores
    :param test_sentences: raw samples (with spaCy parses and entity spans),
        aligned one-to-one with test_data
    :param test_data: numeric tuples produced by prepare_data_for_net()
    :param labels_map: relation name -> class id map (inverted here)
    :param output_for_error_analysis: optional path for a human-readable
        per-sample dump (the parent directory must already exist)
    :param evaluate_output: when set, predictions are written to this path
        instead of being scored against the gold labels
    """
    # Invert label -> id so predicted class ids map back to relation names.
    inverse_labels_map = {v: k for k, v in labels_map.items()}
    predictions = []
    if output_for_error_analysis:
        # NB: You need to create the directory first
        f = open(output_for_error_analysis, 'w+')
    else:
        f = None
    for sample, sentence in zip(test_data, test_sentences):
        (
            graph,  # original graph
            ent_a_graph,
            ent_b_graph,
            path_graph,
            label  # the label for this sample
        ) = sample
        # Convert the networkx graphs into DGL graphs for the model.
        graph = dgl.from_networkx(graph)
        ent_a_graph = dgl.from_networkx(ent_a_graph)
        ent_b_graph = dgl.from_networkx(ent_b_graph)
        # path_graph = dgl.from_networkx(path_graph)
        prob = net(
            graph,
            ent_a_graph,
            ent_b_graph,
            # path_graph,
            #False # is_training=False # is testing
        )
        # Highest-scoring class id -> relation name.
        pred_class_id = int(mx.nd.argmax(prob, axis=1).asscalar())
        pred_class = inverse_labels_map[pred_class_id]
        pred_prob = mx.nd.softmax(prob, axis=1).reshape((-1, ))
        # if pred_class == 'NONE':
        #     continue
        # A ':REVERSE' suffix (from the older label scheme) encodes
        # direction; strip it off to get the plain relation type.
        is_reversed = ':' in pred_class
        pred_class = pred_class.split(':')[0]
        predictions.append({
            'ent_a': sentence['ent_a'],
            'ent_b': sentence['ent_b'],
            # 'is_reverse': is_reversed,
            # NOTE(review): the gold direction is reported here rather than
            # the predicted one — confirm this is intended.
            'is_reverse': sentence['is_reverse'],
            'type': pred_class,
        })
        if f:
            # Emit a human-readable record for error analysis.
            tokens = sentence['spacy']
            # NOTE(review): _graph is assigned but never used below.
            _graph = parse_tree_to_final_graph(
                tokens,
                vocabulary,
                dependencies_map,
                case_sensitive
            )
            graph = parse_tree_to_graph(
                tokens,
            )
            ent_a = tokens[sentence['ent_a_start']:sentence['ent_a_end']]
            ent_a_head_id = get_entity_head(ent_a, graph)
            ent_a_head = tokens.doc[ent_a_head_id]
            ent_b = tokens[sentence['ent_b_start']:sentence['ent_b_end']]
            ent_b_head_id = get_entity_head(ent_b, graph)
            ent_b_head = tokens.doc[ent_b_head_id]
            f.write('sentence: "{}"\n'.format(tokens))
            f.write('entity_a: "{}" (head: "{}")\n'.format(ent_a, ent_a_head))
            f.write('entity_b: "{}" (head: "{}")\n'.format(ent_b, ent_b_head))
            # Render the dependency path between the two entity heads with
            # arrows indicating edge direction.
            f.write('tree: {}\n'.format(str(ent_a_head) + ' ' + ' '.join(
                ('-> {}' if direction > 0 else '<- {}').format(tb)
                for _, direction, (_, tb) in get_edges_between_entites(
                    ent_a_head_id, ent_b_head_id, graph, include_terms=True
                )
            )))
            f.write('relation: {}{}\n'.format(
                sentence['type'],
                '-reversed' if sentence['is_reverse'] else ''))
            f.write('predicted: {}{}\n'.format(
                pred_class, '-reversed' if is_reversed else ''
            ))
            f.write('confidence: {:.2%}\n'.format(
                pred_prob[pred_class_id].asscalar())
            )
            f.write('\n\n')
    if f:
        f.close()
    if evaluate_output:
        # Persist predictions for offline evaluation.
        pipeline.write_predictions_to_file(
            predictions=predictions, path=evaluate_output)
    else:
        # Score predictions against the gold labels and print the result.
        resp = pipeline.evaluate(
            predictions=predictions, labels=test_sentences)
        print(resp)
def main(opts):
# set a seed for reproducible network
random.seed(42)
labels_map = opts.labels_map
if not labels_map:
# get a copy of the labels if not provided
labels_map = dict(LABELS_MAP)
if not opts.include_negative_samples:
# pop out the "NONE" label if no negative
# samples are provided
labels_map.pop('NONE')
labels_map = {k: v - 1 for k, v in labels_map.items()}
# load the dataset
dataset = pipeline.load_abstracts_relations(opts.subtask)
# get list of all dependency tags used in the dataset
dependencies_map = pipeline.get_dependencies_map(dataset)
# get list of all pos tags used in the dataset
pos_map = pipeline.get_part_of_speech_map(dataset)
# split it by sentence, potentially include negative samples
sentences_dataset = pipeline.split_dataset_into_sentences(
*dataset, include_negative_samples=opts.include_negative_samples
)
# split sentences between train and test according to the
# official dataset split
train_sentences, validation_sentences = pipeline.split_train_test_sentences(
opts.subtask, sentences_dataset
)
test_dataset = pipeline.load_abstracts_relations(opts.subtask, load_test=True)
test_sentences = pipeline.split_dataset_into_sentences(
*test_dataset, include_negative_samples=opts.include_negative_samples
)
if opts.evaluate_output:
evaluate_dataset = pipeline.load_abstracts_relations(
opts.subtask, load_test=True)
evaluate_sentences_dataset = pipeline.split_dataset_into_sentences(
*evaluate_dataset,
include_negative_samples=opts.include_negative_samples
)
else:
# so that static code analyzers don't freak out!
evaluate_sentences_dataset = None
# get distribution info for entities in training set
ent_distr = pipeline.get_distribution_ent_length(train_sentences)
# get the mxnet context (aka cpu or gpu) as
# provided by the user. if none is provided, use cpu0
context = mxnet_utils.get_context_from_string(opts.mxnet_context)
# path to embeddings file in word2vec text format
# as specified by the user
embeddings_path = os.path.expanduser(EMBEDDINGS_PATHS[opts.embeddings_type])
# download embeddings from google drive
# -- updated path?
# embeddings_path = download_embeddings(path, opts.emb_type)
embeddings_path = download_embeddings(embeddings_path, opts.embeddings_type)
# execute mxnet operations accoring in specified context
with context:
# load embeddings and vocabulary
vocabulary, embeddings = \
mxnet_utils.word2vec_mxnet_embedding_initializer(
embeddings_path, max_embeddings=opts.max_embeddings
)
'''
vocabulary = \
mxnet_utils.get_vocab(
embeddings_path,
max_embeddings=opts.max_embeddings
)
'''
# get training data; has to be executed after vocabulary and
# embeddings (which need to be placed on the GPU if specified,
# hence the context) are loaded.
train_data = prepare_data_for_net(
vocabulary, train_sentences, labels_map,
dependencies_map=dependencies_map, pos_map=pos_map,
include_entities_nodes=opts.include_entities_nodes,
include_entities_children=opts.include_entities_children,
entity_length_distribution=ent_distr,
case_sensitive=opts.case_sensitive
)
# doing the same thing, but with test data
test_data = prepare_data_for_net(
vocabulary, test_sentences, labels_map,
dependencies_map=dependencies_map, pos_map=pos_map,
include_entities_children=opts.include_entities_children,
include_entities_nodes=opts.include_entities_nodes,
entity_length_distribution=ent_distr,
case_sensitive=opts.case_sensitive
)
# doing the same thing, but with test data
validation_data = prepare_data_for_net(
vocabulary, validation_sentences, labels_map,
dependencies_map=dependencies_map, pos_map=pos_map,
include_entities_children=opts.include_entities_children,
include_entities_nodes=opts.include_entities_nodes,
entity_length_distribution=ent_distr,
case_sensitive=opts.case_sensitive
)
# get stats abt average size of parse tree
parse_tree_lengths = [
len(t) for _, _, t, *_ in
itertools.chain(train_data, test_data)
]
print('[info] parse tree length: {:.2f} +/- {:.2f}'.format(
np.mean(parse_tree_lengths), np.std(parse_tree_lengths)
))
# net = TreeLSTMNet(
net = GATNet(
embeddings,
len(labels_map),
dropout=opts.dropout,
trainable_embeddings=opts.trainable_embeddings
)
net.initialize()
# loos and trainer initialized here
softmax_cross_entropy_labels = mx.gluon.loss.SoftmaxCrossEntropyLoss()
trainer = mx.gluon.Trainer(
net.collect_params(),
'adam',
{'learning_rate': opts.learning_rate}
)
# object to calculate F1 metric for the dataset
f1_score_class = mxnet_utils.F1Score(num_classes=len(labels_map))
for epoch in range(1, opts.epochs + 1):
# random.shuffle(train_data)
cumulative_loss = total_steps = 0
probs, labels = [], []
for sample in tqdm.tqdm(train_data, desc='Epoch {}'.format(epoch)):
with mx.autograd.record():
(
graph,
ent_a_graph,
ent_b_graph,
path_graph,
label # the label for this sample
)= sample
graph = dgl.from_networkx(graph)
ent_a_graph = dgl.from_networkx(ent_a_graph)
ent_b_graph = dgl.from_networkx(ent_b_graph)
# path_graph = dgl.from_networkx(path_graph)
out = net(
graph,
ent_a_graph,
ent_b_graph,
# path_graph,
True
)
probs.append(out)
labels.append([label])
if len(probs) == opts.batch_size:
total_steps += opts.batch_size
with mx.autograd.record():
probs = mx.nd.concat(*probs, dim=0)
labels = mx.nd.array(labels)
loss = | |
# Hritikbansal/MultivariateTimeSeries_Generalization -- src/pmsm/train.py
import argparse
import torch
import utils
import datetime
import os
import pickle
import random
import numpy as np
import logging
from torch.utils import data
from torch import nn
import mts_model
import sys
import helper
# Command-line interface.  Option names, types and defaults are unchanged;
# copy-paste help-string mistakes are corrected and the `type=bool` pitfall
# on --use_condenser is fixed (argparse's bool() made "--use_condenser False"
# evaluate to True).
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=8,
                    help='Batch size.')
parser.add_argument('--epochs', type=int, default=100,
                    help='Number of training epochs.')
parser.add_argument('--dec_epochs', type=int, default=100,
                    help='Number of training epochs of decoder in CL+Dec case.')
parser.add_argument('--learning_rate', type=float, default=5e-4,
                    help='Learning rate.')
# Fixed help: these two are regularization weights applied to the decoder
# parameters (see the l1 / group-lasso terms in training), not learning rates.
parser.add_argument('--decoder_l1', type=float, default=1.0,
                    help='L1 regularization weight for the decoder.')
parser.add_argument('--decoder_gl', type=float, default=1.0,
                    help='Group lasso regularization weight for the decoder.')
parser.add_argument('--soft_decoder_l1', action='store_true', default=False,
                    help='soft decoder with l1')
parser.add_argument('--soft_decoder_gl', action='store_true', default=False,
                    help='soft decoder with group lasso')
parser.add_argument('--hard_decoder', action='store_true', default=False,
                    help='hard decoder')
parser.add_argument('--sigma', type=float, default=0.5,
                    help='Energy scale.')
parser.add_argument('--per_node_MLP', action='store_true', default=False,
                    help='different MLP for every node')
parser.add_argument('--layer_l1', action='store_true', default=False,
                    help='L1 within hidden layers')
parser.add_argument('--layer_gl', action='store_true', default=False,
                    help='group lasso within hidden layers')
parser.add_argument('--l1', type=float, default=1.,
                    help='L1 regularization hyperparameter within the hidden layers.')
parser.add_argument('--message_l1', type=float, default=1.,
                    help='L1 regularization hyperparameter for the messages.')
parser.add_argument('--gl', type=float, default=1.,
                    help='group lasso hyperparameter.')
parser.add_argument('--ood', action='store_true', default=False,
                    help='whether testing on OOD data')
parser.add_argument('--hidden-dim', type=int, default=512,
                    help='Number of hidden units in transition MLP.')
parser.add_argument('--ignore-action', action='store_true', default=False,
                    help='Ignore action in GNN transition model.')
parser.add_argument('--copy-action', action='store_true', default=False,
                    help='Apply same action to all object slots.')
parser.add_argument('--normalize', action='store_true', default=False,
                    help='Normalize the embeddings for contrastive loss.')
parser.add_argument('--shuffle', action='store_true', default=False,
                    help='Shuffle data')
parser.add_argument('--recurrent', action='store_true', default=False,
                    help='recurrent transition model')
parser.add_argument('--message_pass', action='store_true', default=False,
                    help='allow nodes to pass messages, i.e. use GNN instead of MLP as transition model')
parser.add_argument('--hierarchical_ls', action='store_true', default=False,
                    help='hierarchical latent structure')
parser.add_argument('--sepCTRL', action='store_true', default=False,
                    help='separate control encoder')
parser.add_argument('--onlyReLU', action='store_true', default=False,
                    help='have ReLUs instead of tanh and bypass LSTMs')
parser.add_argument('--save_predictions', action='store_true', default=False,
                    help='Dump prediction and target csv files')
parser.add_argument('--save_embeddings', action='store_true', default=False,
                    help='Dump embeddings')
parser.add_argument('--path', type=str, default='none',required=True,
                    help='Path to dataset')
parser.add_argument('--num_objects', type=int, default=21,
                    help='Number of object slots in model.')
# Fixed help: this is the number of control variables (copy-paste leftover).
parser.add_argument('--num_cont', type=int, default=7,
                    help='Number of control variables in model.')
parser.add_argument('--stride', type=int, default=1,
                    help='CNN strides.')
parser.add_argument('--length', type=int, default=500,
                    help='input dim.')
parser.add_argument('--shift', type=int, default=1,
                    help='shift length')
parser.add_argument('--window_size', type=int, default=100,
                    help='chunk length.')
parser.add_argument('--embedding_dim', type=int, default=50,
                    help='embedding dim.')
parser.add_argument('--action_dim', type=int, default=15,
                    help='action dim.')
parser.add_argument('--nodes', type=int, default=None,
                    help='Number of nodes.')
parser.add_argument('--horizon', type=int, default=100,
                    help='Horizon.')
parser.add_argument('--steps', type=int, default=1,
                    help='Number of steps.')
parser.add_argument('--setup', type=int, default=0,
                    help='different setups for swat/narma')


def _parse_bool(value):
    """Parse textual booleans for --use_condenser.

    Bug fix: the original used ``type=bool``, which converts any non-empty
    string (including "False") to True.  This converter honours the usual
    spellings; absent flag still yields the False default.
    """
    return str(value).strip().lower() in ('true', '1', 'yes', 'y')


parser.add_argument('--use_condenser', type=_parse_bool, default=False,
                    help='use the condenser.')
parser.add_argument('--split', type=float, default=0.75,
                    help='Train-validation split fraction.')
parser.add_argument('--decoder', action='store_true', default=False,
                    help='Train model using decoder and mse loss.')
parser.add_argument('--full', action='store_true', default=False,
                    help='Have direct transition instead of delta transition')
parser.add_argument('--isControl', action='store_true', default=False,
                    help='GNN should have control or not')
# Fixed help: this is the dropout probability (copy-paste leftover).
parser.add_argument('--dropout', type=float, default=0.0,
                    help='Dropout probability.')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disable CUDA training.')
parser.add_argument('--seed', type=int, default=42,
                    help='Random seed (default: 42).')
# Fixed: missing space between the concatenated string literals.
parser.add_argument('--log_interval', type=int, default=20,
                    help='How many batches to wait before logging '
                         'training status.')
# Fixed help: this interval triggers validation + checkpointing, not logging.
parser.add_argument('--save_interval', type=int, default=20,
                    help='How many batches to wait before validating '
                         'and checkpointing the model.')
parser.add_argument('--dataset', type=str,
                    default='swat', required=True,
                    help='Name of the dataset')
parser.add_argument('--file_name', type=str,
                    default='PMSM_cont_iid.csv', help='PMSM dataset file')
parser.add_argument('--name', type=str, default='MVTS',
                    help='Experiment name.')
parser.add_argument('--save_folder', type=str,
                    default='checkpoints',
                    help='Path to checkpoints.')
parser.add_argument('--baseline', action='store_true', default=False,
                    help='training for forecasting without CL')
parser.add_argument('--forecasting_cl', action='store_true', default=False,
                    help='training for forecasting with CL')
parser.add_argument('--forecasting_M5', action='store_true', default=False,
                    help='training for forecasting with CL in M5')
parser.add_argument('--pastStateOnly', action='store_true', default=False,
                    help='training encoder for CL followed by decoder')
parser.add_argument('--pastControlOnly', action='store_true', default=False,
                    help='training encoder for CL followed by decoder')
parser.add_argument('--futureControlOnly', action='store_true', default=False,
                    help='training encoder for CL followed by decoder')
parser.add_argument('--pastinfo', action='store_true', default=False,
                    help='training encoder for CL followed by decoder')
parser.add_argument('--noise', action='store_true', default=False,
                    help='run the noise/perturbation evaluation')
parser.add_argument('--pert', type=int, default=None,
                    help='Index of the object to perturb.')
args = parser.parse_args()
# Runtime configuration derived from the parsed CLI flags.
args.cuda = not args.no_cuda and torch.cuda.is_available()
now = datetime.datetime.now()
timestamp = now.isoformat()
train_split = args.split
# Experiment name falls back to the launch timestamp.
if args.name == 'none':
    exp_name = timestamp
else:
    exp_name = args.name
# Seeding for reproducibility.
# NOTE(review): numpy and random are seeded with a literal 42 while torch
# uses args.seed, so runs with --seed != 42 are only partially reseeded --
# confirm whether this is intentional.
np.random.seed(42)
torch.manual_seed(args.seed)
random.seed(42)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
    # Deterministic cuDNN kernels: slower but reproducible.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
exp_counter = 0  # NOTE(review): appears unused in this script
#save_folder = '{}\\{}\\'.format(args.save_folder, exp_name)
save_folder = '{}/{}/'.format(args.save_folder, exp_name)
if not os.path.exists(save_folder):
    os.makedirs(save_folder)
    print('created save folder')
print(save_folder)
def _init_fn():
np.random.seed(42)
'''used_params='-'.join([args.dataset, str(args.l1), str(args.message_l1), str(args.length),str(args.window_size),str(args.embedding_dim),str(args.nodes),str(args.decoder),str(args.shift),
                    str(5),str(args.dropout),str(args.message_pass),str(args.isControl),str(args.sepCTRL),
                     str(args.stride),str(args.full),str(args.baseline), str(args.forecasting_cl), str(args.forecasting_M5)])'''
# Hyper-parameter string used to name every artifact of this run
# (metadata pickle, checkpoints, log file, result CSVs).
used_params='-'.join([args.dataset, str(args.batch_size), str(args.embedding_dim),str(args.full),str(args.learning_rate),str(args.baseline), str(args.sepCTRL),str(args.layer_gl), str(args.gl),str(args.hierarchical_ls),str(args.soft_decoder_gl),str(args.decoder_gl),str(args.hard_decoder), str(args.seed)]) #,str(args.onlyReLU)])
meta_file = os.path.join(save_folder, used_params+',metadata.pkl')
model_file = os.path.join(save_folder, used_params+',model.pt')
decoder_file = os.path.join(save_folder, used_params+',decoder.pt')
log_file = os.path.join(save_folder, used_params+',log.txt')
# Root logger writes to the console (basicConfig) and to the run's log file.
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
logger.addHandler(logging.FileHandler(log_file, 'a'))
# Deliberately shadows the builtin so every print() below is also logged.
print = logger.info
# Persist the full argument namespace for later inspection of the run.
pickle.dump({'args': args}, open(meta_file, "wb"))
device = torch.device('cuda' if args.cuda else 'cpu')
# Global counters mutated by eval_changes() to normalize the
# "after a control change" vs "remaining timesteps" losses.
len_change_5=0
len_change_10=0
len_change_20=0
len_remaining_5=0
len_remaining_10=0
len_remaining_20=0
print(device)
print(args)
MSEloss = nn.MSELoss()
# ---------------------------------------------------------------------------
# Dataset loading.  Each branch fills (state_past, cont_past, action,
# next_obs) for training plus the matching test_* arrays, except 'sarcos'
# which returns explicit train/valid/test splits directly.
# ---------------------------------------------------------------------------
if(args.dataset=='sarcos'):
    data_path=args.path+'sarcos_inv.csv'
    train_state, train_control, train_fcontrol, train_fstate, valid_state, valid_control, valid_fcontrol, valid_fstate, test_state, test_control, test_fcontrol, test_fstate = utils.createSarcosDataset(path=data_path, window_length=args.length, horizon=args.horizon, shift=args.shift,shuffle_data=args.shuffle, steps = args.steps)
    print(train_state.shape)
    print(train_control.shape)
    print(train_fstate.shape)
    print(train_fcontrol.shape)
elif(args.dataset=='swat'):
    # setup==3 selects the reduced 3-CV/3-DV variant of the SWaT data.
    data_path=args.path +'Swat3CV3DV_new.csv' if args.setup==3 else args.path+'SWaT_Dataset_Normal_v01.csv'
    state_past, cont_past, action, next_obs = utils.createSwatDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, shift=args.shift,
                                            shuffle_data=args.shuffle, train=True, setup=args.setup)
    test_state_past, test_cont_past, test_action, test_next_obs = utils.createSwatDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, shift=args.shift,
                                            shuffle_data=args.shuffle,train=False,steps=args.steps,setup=args.setup)
elif('narma' in args.dataset):
    data_path=args.path + args.file_name
    state_past, cont_past, action, next_obs = utils.createNarmaDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, num_cont=args.num_cont, num_objects=args.num_objects, shift=args.shift,
                                            shuffle_data=args.shuffle, train=True, setup = args.setup)
    test_state_past, test_cont_past, test_action, test_next_obs = utils.createNarmaDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, num_cont=args.num_cont, num_objects=args.num_objects, shift=args.shift,
                                            shuffle_data=args.shuffle,train=False, setup = args.setup)
    print(state_past.shape)
    print(test_state_past.shape)
elif('PMSM' in args.dataset):
    data_path = args.path + args.file_name
    # `ood` switches between the in-distribution and OOD test split.
    state_past, cont_past, action, next_obs = utils.createPMSMDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, num_cont=args.num_cont, num_objects=args.num_objects, shift=args.shift,
                                            shuffle_data=args.shuffle, train=True, ood = args.ood)
    test_state_past, test_cont_past, test_action, test_next_obs = utils.createPMSMDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, num_cont=args.num_cont, num_objects=args.num_objects, shift=args.shift,
                                            shuffle_data=args.shuffle,train=False, ood = args.ood)
    print(state_past.shape)
    print(cont_past.shape)
    print(action.shape)
    print(test_state_past.shape)
    print(test_cont_past.shape)
# 'swat_full' builds one (train, validation, test) loader triple per shift
# value and stores them in swatMasterDict keyed by the shift.
list_shift = [i for i in range(6,17,1)]
swatMasterDict={}
if(args.dataset == 'swat_full'):
    for shift in list_shift:
        data_path=args.path+'SWaT_Dataset_Normal_v01.csv'
        state_past, cont_past, action, next_obs = utils.createSwatFullDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, shift=shift,
                                            shuffle_data=args.shuffle, train=True)
        test_state_past, test_cont_past, test_action, test_next_obs = utils.createSwatFullDataset(path=data_path, window_length=args.length,
                                            horizon=args.horizon, shift=shift,
                                            shuffle_data=args.shuffle,train=False,steps=args.steps)
        print(state_past.shape)
        print(test_state_past.shape)
        # First train_split fraction of each array is train, rest validation.
        train_dataset = utils.SwatFullDataset(state_past[0:int(train_split*state_past.shape[0])], cont_past[0:int(train_split*state_past.shape[0])],
                                    action[0:int(train_split*state_past.shape[0])], next_obs[0:int(train_split*state_past.shape[0])])
        validation_dataset = utils.SwatFullDataset(state_past[int(train_split*int(state_past.shape[0])):], cont_past[int(train_split*int(state_past.shape[0])):],
                                    action[int(train_split*int(state_past.shape[0])):], next_obs[int(train_split*int(state_past.shape[0])):])
        test_dataset = utils.SwatFullDataset(test_state_past, test_cont_past, test_action, test_next_obs)
        train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=0, worker_init_fn=_init_fn)
        validation_loader = data.DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=0, worker_init_fn=_init_fn)
        test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=0, worker_init_fn=_init_fn)
        swatMasterDict[shift] = [train_loader, validation_loader,test_loader]
# Wrap the loaded arrays in Dataset objects.  For the non-sarcos datasets the
# first train_split fraction of the training arrays becomes the train set and
# the remainder the validation set.
# NOTE(review): for --dataset swat_full none of these branches match, so
# train_dataset/validation_dataset/test_dataset stay undefined and the
# generic loaders below would raise NameError; presumably swat_full runs are
# expected to use swatMasterDict instead -- confirm.
if(args.dataset=='sarcos'):
    train_dataset = utils.SarcosDataset(train_state, train_control, train_fcontrol, train_fstate)
    validation_dataset = utils.SarcosDataset(valid_state, valid_control, valid_fcontrol, valid_fstate)
    test_dataset = utils.SarcosDataset(test_state, test_control, test_fcontrol, test_fstate)
elif(args.dataset=='swat'):
    train_dataset = utils.SwatDataset(state_past[0:int(train_split*state_past.shape[0])], cont_past[0:int(train_split*state_past.shape[0])],
                                action[0:int(train_split*state_past.shape[0])], next_obs[0:int(train_split*state_past.shape[0])])
    validation_dataset = utils.SwatDataset(state_past[int(train_split*int(state_past.shape[0])):], cont_past[int(train_split*int(state_past.shape[0])):],
                                action[int(train_split*int(state_past.shape[0])):], next_obs[int(train_split*int(state_past.shape[0])):])
    test_dataset = utils.SwatDataset(test_state_past, test_cont_past, test_action, test_next_obs)
elif('narma' in args.dataset):
    train_dataset = utils.NarmaDataset(state_past[0:int(train_split*state_past.shape[0])], cont_past[0:int(train_split*state_past.shape[0])],
                                action[0:int(train_split*state_past.shape[0])], next_obs[0:int(train_split*state_past.shape[0])])
    validation_dataset = utils.NarmaDataset(state_past[int(train_split*int(state_past.shape[0])):], cont_past[int(train_split*int(state_past.shape[0])):],
                                action[int(train_split*int(state_past.shape[0])):], next_obs[int(train_split*int(state_past.shape[0])):])
    test_dataset = utils.NarmaDataset(test_state_past, test_cont_past, test_action, test_next_obs)
elif('PMSM' in args.dataset):
    train_dataset = utils.PMSMDataset(state_past[0:int(train_split*state_past.shape[0])], cont_past[0:int(train_split*state_past.shape[0])],
                                action[0:int(train_split*state_past.shape[0])], next_obs[0:int(train_split*state_past.shape[0])])
    validation_dataset = utils.PMSMDataset(state_past[int(train_split*int(state_past.shape[0])):], cont_past[int(train_split*int(state_past.shape[0])):],
                                action[int(train_split*int(state_past.shape[0])):], next_obs[int(train_split*int(state_past.shape[0])):])
    test_dataset = utils.PMSMDataset(test_state_past, test_cont_past, test_action, test_next_obs)
# Loaders are single-process (num_workers=0); _init_fn is therefore never
# actually invoked by PyTorch here.
train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=0, worker_init_fn=_init_fn)
validation_loader = data.DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=0, worker_init_fn=_init_fn)
test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=0, worker_init_fn=_init_fn)
print('Data loaded!')
# Build the multivariate-time-series model; most CLI flags map 1:1 onto
# constructor options, and the model is moved to the selected device.
model = mts_model.MVTS(
            embedding_dim=args.embedding_dim,
            input_dim=args.length,
            num_objects=args.num_objects,
            num_cont=args.num_cont,
            tau = args.horizon,
            window=args.window_size,
            action_dim =args.action_dim,
            final_nodes = args.num_cont,
            steps = args.steps,
            full = args.full,
            isControl=args.isControl,
            dropout = args.dropout,
            stride=args.stride,
            use_condenser=args.use_condenser,
            use_GNN=args.message_pass,
            normalize=args.normalize,
            per_node_MLP=args.per_node_MLP,
            sepCTRL=args.sepCTRL,
            baseline = args.baseline,
            forecasting_cl = args.forecasting_cl,
            recurrent = args.recurrent,
            forecasting_M5 = args.forecasting_M5,
            hierarchical_ls = args.hierarchical_ls,
            pastinfo=args.pastinfo,
            only=args.pastStateOnly or args.pastControlOnly,
            # soft decoder is simply the complement of the hard-decoder flag
            soft_decoder= not args.hard_decoder,
            hard_decoder=args.hard_decoder,
            onlyReLU=args.onlyReLU).to(device)
print(model)
# Single Adam optimizer over all model parameters.
optimizer = torch.optim.Adam(
    model.parameters(),
    lr=args.learning_rate)
def eval_changes(predicted, truth, futureControls, inspection=5):
    """Split the MSE into 'just after a control change' vs 'elsewhere'.

    For every sample, control-change timesteps are found via
    helper.get_indices; each change opens an `inspection`-step window
    (clipped to args.horizon).  The summed, count-weighted MSE inside those
    windows goes into loss_ec, everything else into loss_remaining, both
    per control channel.  Returns the tuple (loss_ec, loss_remaining).

    Side effect: increments the module-level len_change_*/len_remaining_*
    counters (selected by `inspection` in {5, 10, 20}) so callers can
    normalize the sums later.

    # assumes predicted/truth are (batch, channels, horizon) and
    # futureControls is (batch, d, horizon) with d == channels when the
    # per-channel losses are indexed below -- TODO confirm
    """
    # predicted,truth (b,c,horizon)  futureControls (b,d,horizon)
    loss_ec = torch.zeros(futureControls.shape[1])
    loss_remaining = torch.zeros(futureControls.shape[1])
    for j in range(futureControls.shape[0]):
        # Collect change-point indices across all control channels.
        indices = []
        for k in range(futureControls.shape[1]):
            indices += helper.get_indices(futureControls[j][k])
        indices.sort()
        checkpoints = []
        left = []
        prev = 0
        # Walk the sorted change points: timesteps between windows go to
        # `left`, the `inspection`-step windows after each change go to
        # `checkpoints` (overlapping windows simply merge).
        for point in indices:
            left += [a for a in range(prev,point)]
            prev = point+inspection if point+inspection < args.horizon else args.horizon
            checkpoints+=[b for b in range(point,prev)]
        checkpoints=np.array(checkpoints)
        # Tail after the last window (the comprehension's `j` is scoped to
        # the comprehension and does not clobber the outer loop variable).
        left+= [j for j in range(prev,args.horizon)]
        left = np.array(left)
        # Accumulate global counts for later normalization.
        if(inspection==5):
            global len_change_5
            global len_remaining_5
            len_change_5+=len(checkpoints)
            len_remaining_5+=len(left)
        elif(inspection==10):
            global len_change_10
            global len_remaining_10
            len_change_10+=len(checkpoints)
            len_remaining_10+=len(left)
        elif(inspection==20):
            global len_change_20
            global len_remaining_20
            len_change_20+=len(checkpoints)
            len_remaining_20+=len(left)
        # Count-weighted MSE per channel (undoes MSELoss's mean so the sums
        # can be normalized by the global counters).
        for k in range(futureControls.shape[1]):
            if(len(checkpoints)!=0):
                loss_ec[k] += MSEloss(predicted[j][k][checkpoints],truth[j][k][checkpoints])*len(checkpoints)
            if(len(left)!=0):
                loss_remaining[k] += MSEloss(predicted[j][k][left],truth[j][k][left])*len(left)
    return (loss_ec,loss_remaining)
def evaluate_forecasting(model, loader, args, isTest=False, getLoss=False):
    """Evaluate the model on `loader` by multi-step closed-loop rollout.

    When getLoss=True: returns the scalar average loss (incl. regularizers)
    and puts the model back into train mode -- used as the validation hook
    during training.  Otherwise prints and returns a 1-D array of per-object
    MSEs with the overall reconstruction loss appended, and optionally dumps
    predictions (args.save_predictions).

    # assumes batch tensors are (batch, channels, time) given the [:,:,i]
    # indexing below -- TODO confirm against the Dataset classes
    """
    with torch.no_grad():
        model.eval()
        # Debug: print L2 norms of the "stage" transition weights, split at
        # column 40 (inspects how group lasso zeroes input groups).
        for name, param in model.named_parameters():
            if "transition_model" in name and "weight" in name and "stage" in name:
                print(name)
                #print(param)
                print(torch.norm(param[:,:40], p=2, keepdim = True))
                print(torch.norm(param[:,40:], p=2, keepdim = True))
        #sys.exit()
        state_predictions = []
        state_groundTruth = []
        loss_total=0
        objwise_mse=0
        total_len=0
        pred_states = []
        next_states = []
        # input_space=True would roll out by feeding decoded predictions back
        # into the encoder; hard-wired off, so rollout happens in latent space.
        input_space=False
        for batch_idx, data_batch in enumerate(loader):
            data_batch = [tensor.to(device) for tensor in data_batch]
            statePast = data_batch[0].float()
            contPast = data_batch[1].float()
            action = data_batch[2].float()
            nextState = data_batch[3].float()
            # multi step
            loss = 0
            message_loss = 0
            per_obj_loss = 0
            l1_term = 0
            state_pred = []
            state_gt = []
            for i in range(action.shape[2]):
                if input_space:
                    # Closed loop in input space: decoded predictions replace
                    # the true future states in the encoder window.
                    state_encoding, cont_encoding, action_encoding = model.getEncodings(torch.cat([statePast[:,:,i:],nextState[:,:,:i] if i==0 else predicted[:,:,max(0,i-args.window_size):i]],dim=-1), torch.cat([contPast[:,:,i:],action[:,:,max(0,i-args.window_size):i]],dim=-1), action[:,:,i].unsqueeze(2))
                    pred = model.getTransition(state_encoding, cont_encoding, action_encoding) if args.full else state_encoding + model.getTransition(state_encoding if i==0 else pred,cont_encoding,action_encoding)
                    if(i==0):
                        predicted = model.decode(pred)
                    else:
                        predicted = torch.cat([predicted, model.decode(pred)], dim=-1)
                    #print(predicted.size())
                else:
                    # Latent-space rollout: after step 0 the previous latent
                    # `pred` is fed back into the transition model.
                    state_encoding, cont_encoding, action_encoding = model.getEncodings(torch.cat([statePast[:,:,i:],nextState[:,:,:i]],dim=-1), torch.cat([contPast[:,:,i:],action[:,:,max(0,i-args.window_size):i]],dim=-1), action[:,:,i].unsqueeze(2))
                    pred = model.getTransition(state_encoding if i==0 else pred, cont_encoding, action_encoding) if args.full else state_encoding + model.getTransition(state_encoding if i==0 else pred,cont_encoding,action_encoding)
                if(args.message_pass):
                    message_loss += model.get_l1_Message()
                mse_loss = MSEloss(model.decode(pred), nextState[:,:,i].unsqueeze(-1))
                objwise = helper.per_obj_mse(model.decode(pred), nextState[:,:,i].unsqueeze(-1))
                loss += mse_loss
                per_obj_loss += objwise
                if(args.save_predictions):
                    state_gt.append(nextState[:,:,i].unsqueeze(-1))
                    state_pred.append(model.decode(pred))
                    # On the last step, stack this batch's full trajectory.
                    if(i == action.shape[2] - 1):
                        state_groundTruth.append(torch.cat(state_gt, dim = -1))
                        state_predictions.append(torch.cat(state_pred, dim = -1))
                    #helper.save_predictions(nextState[:,:,i].unsqueeze(2), pred, i)
                    # helper.save_predictions(save_folder, used_params, nextState[:,:,i].unsqueeze(-1), model.decode(pred), i)
            # Regularization terms mirror the ones used in training.
            if(args.hierarchical_ls and args.layer_l1):
                l1_term = args.l1*helper.getStages_norm(model)
            elif(args.per_node_MLP and args.layer_l1):
                l1_term = args.l1*helper.getTM_norm(model)
            if(args.soft_decoder_l1):
                decoder_params = [x.view(-1) for x in model.decoder.parameters()][0]
                l1_term = args.decoder_l1*torch.norm(decoder_params,1)/decoder_params.size()[0]
            if(args.message_pass and getLoss):
                message_loss = args.message_l1*message_loss
                loss += message_loss
            # Batch-size weighting (len(pred) is the batch dimension).
            if(getLoss):
                loss_total += ((loss.item())*len(pred)+l1_term)
            else:
                loss_total += (loss.item())*len(pred)
            objwise_mse += per_obj_loss*len(pred)
            total_len+=len(pred)
        # Validation-hook path: scalar loss only, model back to train mode.
        if getLoss:
            model.train()
            return loss_total/float(total_len)
        if(args.save_predictions):
            state_groundTruth = torch.cat(state_groundTruth, dim = 0 )
            state_predictions = torch.cat(state_predictions, dim = 0 )
            helper.save_predictions(save_folder, used_params, state_groundTruth, state_predictions, 0)
        objwise_mse_list = (objwise_mse/float(total_len)).tolist()
        dump_object_wise = ['{}'.format(per_object) for per_object in objwise_mse_list]
        # NOTE(review): save_name is assigned but never used here -- confirm.
        save_name = 'result_M3.txt' if not args.sepCTRL else 'results_M4.txt'
        re_loss = loss_total/float(total_len)
        objwise_mse_list.append(re_loss)
        results = np.expand_dims(np.array(objwise_mse_list),axis=0)
        print('Reconstruction Loss {}'.format(loss_total/float(total_len)))
        print('per_obj_mse: '+str(dump_object_wise))
        return results[0]
def train_forecasting(model, args):
    """Train `model` for forecasting with multi-step latent rollout.

    Uses the module-level train_loader/validation_loader/optimizer and the
    various regularization flags from `args`.  Every args.save_interval
    batches the model is validated (evaluate_forecasting with getLoss=True)
    and checkpointed to model_file when the validation loss improves.
    """
    print('Starting model training...')
    best_loss = 1e9
    model.train()
    for epoch in range(1, args.epochs+1):
        train_loss = 0
        mse_total = 0  # NOTE(review): accumulated nowhere -- appears unused
        for batch_idx, data_batch in enumerate(train_loader):
            optimizer.zero_grad()
            data_batch = [tensor.to(device) for tensor in data_batch]
            statePast = data_batch[0].float()
            contPast = data_batch[1].float()
            action = data_batch[2].float()
            nextState = data_batch[3].float()
            # multi step
            loss = 0
            l1_term = 0
            message_loss = 0
            for i in range(action.shape[2]):
                # Teacher forcing on states (true nextState enters the encoder
                # window) while the latent `pred` is fed back after step 0.
                state_encoding, cont_encoding, action_encoding = model.getEncodings(torch.cat([statePast[:,:,i:],nextState[:,:,:i]],dim=-1),
                                        torch.cat([contPast[:,:,i:],action[:,:,:i]],dim=-1), action[:,:,i].unsqueeze(2))
                pred = model.getTransition(state_encoding if i==0 else pred, cont_encoding, action_encoding) if args.full else state_encoding + model.getTransition(state_encoding if i==0 else pred,cont_encoding,action_encoding)
                if(args.message_pass):
                    message_loss += model.get_l1_Message()
                mse_loss = MSEloss(model.decode(pred), nextState[:,:,i].unsqueeze(-1)) #+ args.l1*torch.norm(decoder_params, 1)
                loss += mse_loss
            # Optional L1 regularizers (mirrors evaluate_forecasting).
            if(args.hierarchical_ls and args.layer_l1):
                l1_term = args.l1*helper.getStages_norm(model)
            elif(args.per_node_MLP and args.layer_l1):
                l1_term = args.l1*helper.getTM_norm(model)
            if(args.soft_decoder_l1):
                decoder_params = [x.view(-1) for x in model.decoder.parameters()][0]
                l1_term = args.decoder_l1*torch.norm(decoder_params,1)/decoder_params.size()[0]
            if(args.message_pass):
                message_loss = args.message_l1*message_loss
                loss += message_loss
            # NOTE(review): this yields loss + loss*batch + l1, i.e. the MSE
            # term is counted (1 + len(pred)) times, whereas evaluation uses
            # loss*len(pred) + l1.  Possibly intended `loss = loss*len(pred)
            # + l1_term` -- confirm before changing, it rescales gradients.
            loss += (loss*len(pred) + l1_term)
            loss.backward()
            train_loss += loss.item()
            optimizer.step()
            # Proximal group-lasso updates applied after the gradient step,
            # only during the first 120 epochs.
            if(epoch<120):
                optimizer.zero_grad()
                if(args.soft_decoder_gl):
                    helper.applyGroupLassoDecoder(model, args.learning_rate, args.decoder_gl, args.embedding_dim, args.num_objects)
                if(args.hierarchical_ls and args.layer_gl):
                    helper.applyGroupLassoStages(model, args.learning_rate, args.gl, args.embedding_dim)
                elif(args.per_node_MLP and args.layer_gl):
                    helper.applyGroupLassoTM(model, args.learning_rate, args.gl, args.embedding_dim)
                elif(args.baseline and args.layer_gl):
                    helper.applyGroupLassoBaseLine(model, args.learning_rate, args.gl, args.embedding_dim)
            if batch_idx % args.log_interval == 0:
                print('Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data_batch[0]),len(train_loader.dataset),100. * batch_idx / len(train_loader),loss.item()))
            # Periodic validation + best-model checkpointing.
            if batch_idx % args.save_interval == 0 and batch_idx > 0:
                Valoss = evaluate_forecasting(model, validation_loader, args, getLoss = True)
                if Valoss < best_loss:
                    best_loss = Valoss
                    torch.save(model.state_dict(), model_file)
                    print("saving model at:"+str(epoch)+","+str(best_loss))
                model.train()
        avg_loss = train_loss / len(train_loader.dataset)
        print('====> Epoch: {} Average loss: {:.6f} '.format(epoch, avg_loss))
# Perturbation ratios swept during the optional --noise evaluation below.
ratio = [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50, 0.6, 0.7, 0.8,0.9]
def save_results():
    """Append validation and test forecasting metrics as one CSV row.

    Evaluates the model on the validation and test loaders (closure
    variables), concatenates the metrics, and appends them to a CSV file
    whose name depends on whether we are in the OOD setting.
    """
    metrics = []
    metrics.extend(evaluate_forecasting(model, validation_loader, args))
    metrics.extend(evaluate_forecasting(model, test_loader, args, isTest=True))
    suffix = '_numbers_test_ood.csv' if args.ood else '_numbers_iid.csv'
    out_path = save_folder + used_params + suffix
    with open(out_path, 'a') as fh:
        fh.write(','.join(str(m) for m in metrics))
        fh.write('\n \n')
if args.noise:
model.load_state_dict(torch.load(model_file))
for r in ratio:
print(r)
print(args.pert)
print("################################################")
if r==0:
print("no noise added!")
# evaluate_forecasting(model, | |
kp=Nz//2 #
ax4.clear()
# note: maxind will be used in the vertical cross sections
if btype == 'w': # not supported
ax4.contourf( xb[kp,:,:], yb[kp,:,:], b[kp,:,:] + Bb[kp,:,:] , blevs, zorder=1)
maxind = np.unravel_index( np.argmax(b[kp,:,:] , axis=None), b[kp,:,:].shape)
elif btype =='c':
cmid = 0.5*(c[kp,:,:]+c[kp-1,:,:])
ax4.contourf( xp[kp,:,:], yp[kp,:,:],cmid, blevs, zorder=1)
maxind = np.unravel_index(np.argmax(cmid, axis=None), cmid.shape)
ax4.contour( xw[kp,:,:], yw[kp,:,:], w[kp,:,:], wlevs, colors=['k',], zorder=3)
zeta = (vu[:,:,1:] - vu[:,:,:-1])/dx - (uv[:,1:,:] - uv[:,:-1,:])/dy
if kz and zeta[kz].max()>zeta[kz].min():
# ax4.contour( xp[kz,:,:], yp[kz,:,:], zeta[kz] , zetalevs, colors =['r',])
ax4.contourf( xp[kz,:,:], yp[kz,:,:], zeta[kz] , zetacolors,zorder=1)
vd = 1
ax4.quiver(xp[ kz, ::vd,::vd], yp[kz,::vd,::vd], U[kz,::vd,::vd], V[kz,::vd,::vd],
scale=1.0*Nx/vd,units='width',zorder=3,color='black')
else:
ax4.contourf( xp[kp,:,:], yp[kp,:,:],cmid, blevs, zorder=1)
#vd=1
#speedmax=1 # anticipate value of max
#_at_p = (u[kp,:,:-1]+u[kp,:,1:])*.5
#_at_p = (v[kp,:-1,:]+v[kp,1:,:])*.5
#Q2 = ax4.quiver(xp[ 1, ::vd,::vd], yp[kp,::vd,::vd], u_at_p[::vd,::vd], v_at_p[::vd,::vd],
# scale=speedmax*Nx/vd,units='width',zorder=3,color='black')
#speedf= "%7.3f" % speedmax
#ax4.quiverkey(Q2,.5,-.04 , speedmax, speedf,zorder=4, color='red', fontproperties={'size':12} )
locz = "z=%4.2f" % zb[kp,0,0]
### ax4.text(.83*xmax, -.04*ymax, locz, fontsize=18)
### ax4.text(0,-.05*ymax,'t={0:5.2f}'.format(t),fontsize=22)
# ax4.text(.83*xmax, 1.04*ymax, locz, fontsize=18)
ax0.text(.81,.8,'t={0:5.2f}'.format(t),fontsize=30)
expt = "%4.1f %d,%d,%d $\Gamma=$%5.3f $\\beta=$%4.1f $\epsilon$=%6.4f $R/Rc$=%5.1f " % (xmax, Nx,Ny,Nz,gamma,beta,diffusion_coef,ray_over_rayc)
ax0.text(.1, .8, expt, fontsize=24)
dtf = 'dt=%.5f k=%d' % (dt,kp)
ax0.text(.7,.5,dtf,fontsize=14)
if True: #xz cross-section
if jp is None: jp=maxind[0]
# ax2.contour(xp[:,jp,:],zp[:,jp,:],p[:,jp,:],.1*np.linspace(-2.1,2.1,22),colors=['w',],zorder=2)
if btype == 'c':
# CF = ax2.contourf(xp[:,jp,:] , zp[:,jp,:] , c[:,jp,:] , bcolors,zorder=1)
CF = ax2.contourf(xextn[:,jp,:] , zextn[:,jp,:] , cextn[:,jp,:] , blevs,zorder=1)
elif btype == 'w':
CF = ax2.contourf(xb[:,jp,:] , zb[:,jp,:] , b[:,jp,:] + Bb[:,jp,:] ,blevs,zorder=1)
# ax2.contour(xp[:,jp,:] , zp[:,jp,:] , c[:,jp,:] , colors=['w',] , zorder=1) #both
vd=1
speedmax=1 # anticipate value of max
# u_at_p = (u[:,jp,:-1]+u[:,jp,1:])*.5
# w_at_p = (w[:-1,jp,:]+w[1:,jp,:])*.5
if vectors:
Q = ax2.quiver(xp[::vd,jp,::vd] , zp[::vd,jp,::vd] , U[::vd,jp,::vd] , W[::vd,jp,::vd],
scale=speedmax*Nx/vd,units='width',zorder=3,width=.002)
# ax2.set_ylim(0,1)
if True: #yz cross-section
if ip is None: ip = maxind[1]
# CF = ax1.contourf(yb[:,:,ip],zb[:,:,ip],this[:,:,ip],bcolors,zorder=1)
# ax1.contour(yp[:,:,ip],zp[:,:,ip],p[:,:,ip],.1*np.linspace(-2.1,2.1,22),colors=['w',],zorder=2)
## ax1.contour(yp[:,:,ip],zp[:,:,ip],c[:,:,ip]- strat*(.5-zp[:,:,ip]) ,bcolors,colors=['k',],zorder=3)
if btype == 'c':
##### CF = ax1.contourf(yp[:,:,ip] , zp[:,:,ip] , c[:,:,ip] , bcolors,zorder=1)
# CF = ax1.contourf(yextn[:,:,ip] , zextn[:,:,ip] , cextn[:,:,ip] , bcolors,zorder=1)
#CF = ax1.contourf( zextn[:,:,ip], yextn[:,-1::-1,ip], cextn[:,:,ip] , bcolors,zorder=1)
CF = ax1.contourf( zextn[:,:,ip], yextn[:,:,ip], cextn[:,:,ip] , blevs,zorder=1)
elif btype == 'w':
ax1.contour(yb[:,:,ip] , zb[:,:,ip] , b[:,:,ip]+Bb[:,:,ip] , blevs,zorder=1)
vd=1
speedmax=1 # anticipate value of max
# v_at_p = (v[:,:-1,ip]+v[:,1:,ip])*.5
# w_at_p = (w[:-1,:,ip]+w[1:,:,ip])*.5
### Q1 = ax1.quiver(yp[::vd,::vd,ip],zp[::vd,::vd,ip],v_at_p[::vd,::vd],w_at_p[::vd,::vd],
### scale=speedmax*Nx/vd,units='width',zorder=3)
if vectors:
Q1 = ax1.quiver(zp[::vd,::vd,ip],yp[::vd,::vd,ip],W[::vd,::vd,ip],V[::vd,::vd,ip],
scale=speedmax*Ny/vd,units='height',color='k',zorder=4,width=.002)
# ax1.set_ylim(0,1)
speedf= "%7.3f" % speedmax
# ax2.quiverkey(Q,.8*xmax,-.04*xmax,speedmax, speedf,zorder=4)
# locy = "j=%d y=%4.2f" % (jp, yb[0,jp,0])
# ax2.text(.03*xmax, -.05*xmax, locy, fontsize=18)
# locx = "i=%d x=%4.2f" % (ip, xb[0,0,ip])
# ax1.text(.53*xmax, -.05*xmax, locx, fontsize=18)
# ax2.text(.53*xmax, -.05*xmax, locx, fontsize=18)
# ax2.quiverkey(Q,-.1,.5, speedmax, speedf, zorder=4, color='red', fontproperties={'size':12})
xlocmax = xp[0,0,ip]
ylocmax = yp[0,jp,0]
ax2.text(xlocmax,1.,'$\\uparrow$',horizontalalignment='center',zorder=0)
ax1.text(1.,ylocmax,'$\\rightarrow$',horizontalalignment='center',verticalalignment='center',zorder=0)
#DIAGNOSTICS
# U=u_to_p(u)
# V=v_to_p(v)
# W=w_to_p(w)
if btype=='c':
B = c
elif btype=='w':
B = w_to_p(b+Bb)
speed = np.sqrt(U*U + V*V + W*W)
kinEn = (.5*speed**2).mean()
ke.append(kinEn)
monitor1.append( zeta.max() )
monitor2.append( zeta.min() )
times.append(t)
minmax = "%4.2f %s %4.2f"
maxes1 = minmax % (U.min() ,'< u <',U.max())
ax0.text(0.1, .5, maxes1 , fontsize=18)
maxes1 = minmax % (V.min() ,'< v <',V.max())
ax0.text(0.3, .5, maxes1 , fontsize=18)
maxes1 = minmax % (W.min() ,'< w <',W.max())
ax0.text(0.5, .5, maxes1 , fontsize=18)
maxes1 = minmax % (cextn.min() ,'< c <',cextn.max())
ax0.text(.1, .3, maxes1 , fontsize=18)
maxes1 = minmax % (p.min() ,'< p <', p.max())
ax0.text(.3, .3, maxes1 , fontsize=18)
if kz : # plot vorticity
maxes1 = minmax % (zeta[kz].min() ,'< $\\zeta$ <', zeta[kz].max())
ax0.text(.5, .3, maxes1 , fontsize=18)
if not cbar_exists: #bad things happen if cbar is called more than once
cbar_exists = True
mycbar = myfig.colorbar(CF,ax=ax3,fraction=0.6,ticks=blevs)
mycbar.ax.yaxis.set_ticks_position('left')
sooner = mycbar.ax.yaxis.get_ticklabels()
for boomer in sooner:
boomer.set_fontsize(18)
deltaw = '$\\Delta w=$%3.1f' % wcinc
ax0.text(.7,.1,deltaw,fontsize=24)
ax1.axis('off')
stop_clipping(ax1)
ax2.axis('off')
stop_clipping(ax2)
ax4.axis('off')
stop_clipping(ax4)
ax0.axis('off')
ax3.axis('off')
clear_output(wait=True)
display(myfig)
if outdir != None:
timestamp = round(t,2)
pngname = outdir+'/%06d.png' % round(10*timestamp)
print(pngname)
if not os.path.exists(pngname) or overwrite:
myfig.savefig(pngname, dpi=72, facecolor='w', edgecolor='w', orientation='portrait')
# In[37]:
## doplot(kz=Nz//8,overwrite=False,kp=Nz//8)
# <hr/>
# # Ready to run at t=0
# In[38]:
#tstart = 6. # if needed to restart with tstart>0
# Cold start (tstart==0): copy the initial fields and (re)create the output
# directory.  Warm start: reload the saved field arrays for time tstart
# from outdir.
if tstart==0:
    u=ui.copy()
    v=vi.copy()
    w=wi.copy()
    p=pi.copy() # not needed?
    c=ci.copy()
    t=0
    if outdir != None:
        if os.path.exists(outdir):
            shutil.rmtree(outdir)
        os.mkdir(outdir)
        print("recreate directory:", outdir)
else:
    t = tstart
    # File names encode 10*t as a zero-padded integer, e.g. t=1.2 -> '000012'
    timestamp = round(t,2)
    timestring = '%06d' % round(10*timestamp)
    print('attempt to load arrays from files in',outdir)
    c = np.load(outdir+'/c'+timestring+'.npy')
    u = np.load(outdir+'/u'+timestring+'.npy')
    v = np.load(outdir+'/v'+timestring+'.npy')
    w = np.load(outdir+'/w'+timestring+'.npy')
    p = np.load(outdir+'/p'+timestring+'.npy')
# Histories of the last three time derivatives, consumed by ab_blend()
# (Adams-Bashforth multistep blending) in the time loop below.
#dbdta=[None]*3
dudta=[None]*3
dvdta=[None]*3
dwdta=[None]*3
dcdta=[None]*3
ke=[] # for time series of total kinetic energy in the domain
monitor1 = [] # for recording a diagnostic quantity at every time step
monitor2 = []
times = [] # time of the diagnostic
nstep = 0
# interval between plots
tplot = t + dplot
# Time-step limits: advective limit from the anticipated max vertical
# velocity, and the diffusive stability limit; use the smaller of the two.
dtlimad = 0.1*dz/anticipated_wmax
dtlimdiff = .5*dz*dz/diffusion_coef
print('dtlimad',dtlimad,' dtlimdiff',dtlimdiff)
dt = min(dtlimad,dtlimdiff )
print("using dt=",dt)
print("now t=",t)
# In[39]:
# Interpolate v to u points and u to v points (periodic) so the initial
# plot can compute vorticity.
vu = v_to_u(v,'per')
uv = u_to_v(u,'per')
if t==0: doplot(kz=kz)
# <hr/>
# # Step forward in time
#
# You can repeatedly increase `tstop` and rerun the following cell.
# In[40]:
#dt=.001
# dplot = .1
tplot = t+ dplot
# normally, don't use commented lines above
tstop = tend # change tstop and rerun this cell if needed
# Main time integration loop: advect, diffuse, add buoyancy/Coriolis,
# project onto a divergence-free field with a pressure solve, then step
# forward with an Adams-Bashforth blend.  The +dt/2 guard avoids missing
# the final step to floating-point roundoff.
while t < tstop + dt/2.:
    # Interpolate velocity components to each other's staggered C-grid
    # locations, with slip walls or periodic lateral boundaries.
    bnd = 'slip'
    uw = u_to_w(u,bnd)
    vw = v_to_w(v,bnd)
    if periodic: bnd='per'
    vu = v_to_u(v,bnd)
    wu = w_to_u(w,bnd)
    uv = u_to_v(u,bnd)
    wv = w_to_v(w,bnd)
    if periodic:
        dudt = advect_3dp(u,u,vu,wu,dx,dy,dz,'u')
        dvdt = advect_3dp(v,uv,v,wv,dx,dy,dz,'v')
##        dbdt = advect_3dp(b,uw,vw,w,dx,dy,dz,'w')
        dwdt = advect_3dp(w,uw,vw,w,dx,dy,dz,'w')
        dcdt = fluxform(c,u,v,w,dx,dy,dz,hfbot,hftop,diffu=diffusion_coef,gamma=gamma) # - w_to_p(w)*strat*(1-zp)
        # Remove the mean tendency implied by unequal boundary heat fluxes
        # so the domain-average of c is conserved.
        dcdtavg= hfbot - hftop
        if dcdtavg!=0.:
            dcdt -= dcdtavg
###        dbdt += diffusion_coef*laplacian(b,dx,dy,dz,il=-1,ir=0,jb=-1,jt=0) # for zero b on boundaries
##        dbdt += diffusion_coef*laplacian(b,dx,dy,dz,il=-1,ir=0,jb=-1,jt=0,kb=1,kt=-2) # for zero flux b on boundaries
        dudt += diffusion_coef*laplacian(u,dx,dy,dz,il=-2,ir=1,jb=-1,jt=0,kb=0,kt=-1)
        dvdt += diffusion_coef*laplacian(v,dx,dy,dz,il=-1,ir=0,jb=-2,jt=1,kb=0,kt=-1)
        dwdt += diffusion_coef*laplacian(w,dx,dy,dz,il=-1,ir=0,jb=-1,jt=0)
    else:
        dudt = advect_3d(u,u,vu,wu,dx,dy,dz)
        dvdt = advect_3d(v,uv,v,wv,dx,dy,dz)
##        dbdt = advect_3d(b,uw,vw,w,dx,dy,dz)
        dwdt = advect_3d(w,uw,vw,w,dx,dy,dz)
    if fcoriolis!=0.: dudt += fcoriolis*vu #Coriolis term
    if fcoriolis!=0.: dvdt += -fcoriolis*uv #Coriolis term
    # Buoyancy forcing on the w equation; the 'w' branch is not supported
    # anymore (it references b/dbdt which are commented out above).
    if btype == 'w': # not supported anymore
        dwdt[1:-1] += b[1:-1]
        dbdt += -w*Bbz
    elif btype == 'c':
        # Average c (cell centers) to w levels before adding as buoyancy.
        dwdt[1:-1] += (c[:-1]+c[1:])*.5
    # Ensure no normal component of velocity develops on the boundaries:
    if not periodic:
        dudt[:,:,0]=0.
        dudt[:,:,-1]=0.
        dvdt[:,0,:]=0.
        dvdt[:,-1,:]=0.
    dwdt[0,:,:]=0.
    dwdt[-1,:,:]=0.
    # Pressure projection: solve a Poisson equation for p so that the
    # corrected acceleration field is (nearly) divergence free.  The
    # .4*divvel/dt term damps any residual divergence of the velocity.
    divaccel= div_Cgrid(dudt,dvdt,dwdt,dx,dy,dz)
    divvel = div_Cgrid(u,v,w,dx,dy,dz)
    p = poisson3d_p_fft(divaccel + .4*divvel/dt, inv_lapl_op, periodic=periodic)
    p = p - p.mean()
    # Add the pressure-gradient force on the interior faces ...
    dudt[:,:,1:-1] += ( p[:,:,:-1] - p[:,:,1:] )/dx
    dvdt[:,1:-1,:] += ( p[:,:-1,:] - p[:,1:,:] )/dy
    dwdt[1:-1,:,:] += ( p[:-1,:,:] - p[1:,:,:] )/dz
    # ... and wrap it around for the periodic lateral boundaries.
    if periodic:
        dudt[:,:,0] += ( p[:,:,-1] - p[:,:,0] )/dx
        dvdt[:,0,:] += ( p[:,-1,:] - p[:,0,:] )/dy
        dudt[:,:,-1] = dudt[:,:,0]
        dvdt[:,-1,:] = dvdt[:,0,:]
    # Push the newest tendencies onto the front of the history lists
    # (newest first), dropping the oldest.
    dudta=[dudt.copy()]+dudta[:-1]
    dvdta=[dvdt.copy()]+dvdta[:-1]
    dwdta=[dwdt.copy()]+dwdta[:-1]
#    dbdta=[dbdt.copy()]+dbdta[:-1]
    dcdta=[dcdt.copy()]+dcdta[:-1]
    # step forward in time:
    nstep += 1
    # Ramp up to the full Adams-Bashforth order over the first steps.
    abnow = min(nstep,aborder)
##    b += dt*ab_blend(dbdta,abnow)
    c += dt*ab_blend(dcdta,abnow)
    u += dt*ab_blend(dudta,abnow)
    v += dt*ab_blend(dvdta,abnow)
    w += dt*ab_blend(dwdta,abnow)
    t += dt
    # Optionally remove any domain-mean horizontal drift.
    if nodrift and periodic:
        u = u - u.mean()
        v = v - v.mean()
    if tplot-dt/2. < t: # make plot and save arrays
        doplot(overwrite=True,kz=kz,kp=kp)
        tplot = tplot + dplot
        timestamp = round(t,2)
        timestring = '%06d' % round(10*timestamp)
        np.save(outdir+'/c'+timestring,c)
        np.save(outdir+'/u'+timestring,u)
        np.save(outdir+'/v'+timestring,v)
        np.save(outdir+'/w'+timestring,w)
        np.save(outdir+'/p'+timestring,p)
plt.close()
# In[41]:
# Rebuild the extended (ghost-level) concentration field for plotting:
# interior from c, top/bottom levels from the flux boundary conditions
# (with the gamma feedback term on the bottom flux when gamma != 0).
cextn[1:-1] = c
hprime = 0.
if gamma!= 0.:
    cbar = cextn[1].mean()
    cprime = cextn[1] - cbar
    hprime = - gamma* cprime
cextn[0] = (hfbot+hprime)/diffusion_coef*dz*.5 + cextn[1]
cextn[-1] = -(hftop)/diffusion_coef*dz*.5 + cextn[-2]
# Vertical (x-z) cross-section of c with w contours overlaid, at slice jp.
jp = 1
xfig = plt.figure(figsize=(16,8),facecolor='lightgrey')
bx2 = xfig.add_axes([0.1, 0.1, 0.8, 1.6*zmax/xmax],frameon=False) # vertical cross-section
#plt.close()
jp=2
bx2.contourf(xextn[:,jp,:] , zextn[:,jp,:] , cextn[:,jp,:] , blevs,zorder=1)
bx2.contour(xw[:,jp,:] , zw[:,jp,:] , w[:,jp,:] , wlevs, colors=['k',], zorder=2)
bx2.axis('off')
stop_clipping(bx2)
clear_output(wait=True)
display(xfig)
plt.close()
print(gamma)
# In[42]:
# check heat flux at bottom
# (bare expression: shown as notebook cell output, not assigned)
diffusion_coef*(cextn[0].mean()-cextn[1].mean())/(.5*dz)
# In[43]:
#kv=2
# Recompute vertical vorticity zeta from the final velocity field.
vu = v_to_u(v,'per')
uv = u_to_v(u,'per')
zeta = (vu[:,:,1:] - vu[:,:,:-1])/dx - (uv[:,1:,:] - uv[:,:-1,:])/dy
#plt.contour( xp[kv,:,:], yp[kv,:,:], zeta[kv] , zetalevs,colors =['k',])
# In[44]:
# Notebook cell output: range of the recomputed vorticity.
zeta.min(),zeta.max()
# In[45]:
# Horizontal slice of pressure at level kh with a velocity quiver overlay.
vd=1
U=u_to_p(u)
V=v_to_p(v)
kh=3
more,this=plt.subplots(figsize=[10,10])
this.contourf(xp[kh],yp[kh],p[kh])
this.quiver(xp[ kh, ::vd,::vd], yp[kh,::vd,::vd], U[kh,::vd,::vd], V[kh,::vd,::vd],
           scale=1.0*Nx/vd,units='width',zorder=3,color='black');
# In[46]:
plt.close()
# Time series of the vorticity extrema recorded every step, plus their
# difference d = max - |min| (symmetry diagnostic).
d = []
for n in range(len(monitor1)):
    d.append(monitor1[n]-abs(monitor2[n]))
plt.plot(times,monitor1)
plt.plot(times,monitor2)
plt.plot(times,d)
plt.title("zeta max,min");
# In[47]:
# Time series of domain-average kinetic energy.
plt.plot(times,ke)
plt.title("KE");
# In[48]:
# Sanity check: the projected velocity field should be nearly divergence free.
divvel=div_Cgrid(u,v,w,dx,dy,dz)
print("should be small:",divvel.max(),divvel.min())
# In[49]:
# Loop the saved PNG frames as an in-notebook animation.
nloopmax = 0 # zero prevent this cell from running, set to 1 to see animation
pngdir = outdir
#pngdir = 'gamma0'
pngs = glob.glob(pngdir+'/*.png')
pngs.sort()
n=0
nloop = 0
while nloop<nloopmax:
    png=pngs[n]
    if not os.path.exists(png):
        print("could not find:",png)
        break
    n+=1
    # Wrap around to the first frame and count a completed loop.
    if n>=len(pngs):
        n = 0
        nloop+=1
    display(Image(filename=png))
    Time.sleep(0.5)
    clear_output(wait=True)
print("completed",nloop,"loops of animation",outdir)
# # External animation
# In[50]:
from janim import makeanim
import glob
# In[51]:
pngdir = outdir
#pngdir = 'r4w32'
pngs = | |
import contextlib
import fnmatch
import glob
import logging
import os
from pathlib import Path
import pkg_resources
import re
import shutil
import shlex
import subprocess
import sys
import tempfile
import threading
import time
import functools
import jellyfish
import pexpect
import requests
import termcolor
from . import _, get_local_path
from ._errors import *
from .authentication import authenticate, logout, run_authenticated
from . import config as lib50_config
__all__ = ["push", "local", "working_area", "files", "connect",
"prepare", "authenticate", "upload", "logout", "ProgressBar",
"fetch_config", "get_local_slugs", "check_github_status", "Slug", "cd"]
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
DEFAULT_PUSH_ORG = "me50"
AUTH_URL = "https://submit.cs50.io"
DEFAULT_FILE_LIMIT = 10000
def push(tool, slug, config_loader, repo=None, data=None, prompt=lambda question, included, excluded: True, file_limit=DEFAULT_FILE_LIMIT):
    """
    Pushes to Github in name of a tool.
    What should be pushed is configured by the tool and its configuration in the .cs50.yml file identified by the slug.
    By default, this function pushes to https://github.com/org=me50/repo=<username>/branch=<slug>.
    ``lib50.push`` executes the workflow: ``lib50.connect``, ``lib50.authenticate``, ``lib50.prepare`` and ``lib50.upload``.
    :param tool: name of the tool that initialized the push
    :type tool: str
    :param slug: the slug identifying a .cs50.yml config file in a GitHub repo. This slug is also the branch in the student's repo to which this will push.
    :type slug: str
    :param config_loader: a config loader for the tool that is able to parse the .cs50.yml config file for the tool.
    :type config_loader: lib50.config.Loader
    :param repo: an alternative repo to push to, otherwise the default is used: github.com/me50/<github_login>
    :type repo: str, optional
    :param data: key value pairs that end up in the commit message. This can be used to communicate data with a backend.
    :type data: dict of strings, optional
    :param prompt: a prompt shown just before the push. In case this prompt returns false, the push is aborted. This lambda function has access to an honesty prompt configured in .cs50.yml, and all files that will be included and excluded in the push.
    :type prompt: lambda str, list, list => bool, optional
    :param file_limit: maximum number of files to be matched by any globbing pattern.
    :type file_limit: int
    :return: GitHub username, the commit hash, and the formatted results message
    :type: tuple(str, str, str)
    Example usage::
        from lib50 import push
        import submit50
        name, hash, message = push("submit50", "cs50/problems/2019/x/hello", submit50.CONFIG_LOADER)
        print(name)
        print(hash)
    """
    if data is None:
        data = {}

    # Propagate the user's language preference to the backend via the commit data.
    language = os.environ.get("LANGUAGE")
    if language:
        data.setdefault("lang", language)

    slug = Slug.normalize_case(slug)

    check_dependencies()

    # Connect to GitHub and parse the config files
    # BUG FIX: forward the caller's file_limit; it was hard-coded to
    # DEFAULT_FILE_LIMIT, silently ignoring the parameter.
    remote, (honesty, included, excluded) = connect(slug, config_loader, file_limit=file_limit)

    # Authenticate the user with GitHub, and prepare the submission
    with authenticate(remote["org"], repo=repo) as user, prepare(tool, slug, user, included):
        # Show any prompt if specified; abort the push if it returns falsy.
        if prompt(honesty, included, excluded):
            username, commit_hash = upload(slug, user, tool, data)
            format_dict = {"username": username, "slug": slug, "commit_hash": commit_hash}
            message = remote["message"].format(results=remote["results"].format(**format_dict), **format_dict)
            return username, commit_hash, message
        else:
            raise Error(_("No files were submitted."))
def local(slug, offline=False, remove_origin=False, github_token=None):
    """
    Create/update local copy of the GitHub repo identified by slug.
    The local copy is shallow and single branch, it contains just the last commit on the branch identified by the slug.
    :param slug: the slug identifying a GitHub repo.
    :type slug: str
    :param offline: a flag that indicates whether the user is offline. If so, then the local copy is only checked, but not updated.
    :type offline: bool, optional
    :param remove_origin: a flag, that when set to True, will remove origin as a remote of the git repo.
    :type remove_origin: bool, optional
    :param github_token: a GitHub authentication token used to verify the slug, only needed if the slug identifies a private repo.
    :type github_token: str, optional
    :return: path to local copy
    :type: pathlib.Path
    :raises InvalidSlugError: if the slug's problem directory does not exist in the repo
    Example usage::
        from lib50 import local
        path = local("cs50/problems/2019/x/hello")
        print(list(path.glob("**/*")))
    """
    # Parse slug
    slug = Slug(slug, offline=offline, github_token=github_token)
    # Local clones live under <lib50 data dir>/<org>/<repo>
    local_path = get_local_path() / slug.org / slug.repo
    # All subsequent git commands run against the local clone via -C
    git = Git().set("-C {path}", path=str(local_path))
    if not local_path.exists():
        run(Git()("init {path}", path=str(local_path)))
        run(git(f"remote add origin {slug.origin}"))
    if not offline:
        # Get latest version of checks
        run(git("fetch origin --depth 1 {branch}", branch=slug.branch))
    # Tolerate checkout failure (e.g., when origin doesn't exist)
    try:
        run(git("checkout -f -B {branch} origin/{branch}", branch=slug.branch))
    except Error:
        pass
    # Ensure that local copy of the repo is identical to remote copy
    run(git("reset --hard HEAD"))
    if remove_origin:
        run(git(f"remote remove origin"))
    problem_path = (local_path / slug.problem).absolute()
    if not problem_path.exists():
        raise InvalidSlugError(_("{} does not exist at {}/{}").format(slug.problem, slug.org, slug.repo))
    return problem_path
@contextlib.contextmanager
def working_area(files, name=""):
    """
    Copy *files* into a fresh temporary directory (the working area).
    The directory and its contents are removed when the context exits.
    :param files: all files to copy to the temporary directory
    :type files: list of string(s) or pathlib.Path(s)
    :param name: name of the temporary directory
    :type name: str, optional
    :return: path to the working area
    :type: pathlib.Path
    Example usage::
        from lib50 import working_area
        with working_area(["foo.c", "bar.py"], name="baz") as area:
            print(list(area.glob("**/*")))
    """
    with tempfile.TemporaryDirectory() as parent:
        area = Path(parent) / name
        area.mkdir(exist_ok=True)
        for item in files:
            destination = (area / item).absolute()
            destination.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy(item, destination)
        yield area
@contextlib.contextmanager
def cd(dest):
    """
    A contextmanager that changes into *dest* and always changes back
    to the previous working directory on exit.
    :param dest: the path to the directory
    :type dest: str or pathlib.Path
    :return: dest unchanged
    :type: str or pathlib.Path
    Example usage::
        from lib50 import cd
        import os
        with cd("foo") as current_dir:
            print(os.getcwd())
    """
    saved = os.getcwd()
    try:
        os.chdir(dest)
        yield dest
    finally:
        # Restore the original directory even if the body raised.
        os.chdir(saved)
def files(patterns,
          require_tags=("require",),
          include_tags=("include",),
          exclude_tags=("exclude",),
          root=".",
          limit=DEFAULT_FILE_LIMIT):
    """
    Based on a list of patterns (``lib50.config.TaggedValue``) determine which files should be included and excluded.
    Any pattern tagged with a tag:
    * from ``include_tags`` will be included
    * from ``require_tags`` can only be a file, that will then be included. ``MissingFilesError`` is raised if missing.
    * from ``exclude_tags`` will be excluded
    :param patterns: patterns that are processed in order, to determine which files should be included and excluded.
    :type patterns: list of lib50.config.TaggedValue
    :param require_tags: tags that mark a file as required and through that included
    :type require_tags: list of strings, optional
    :param include_tags: tags that mark a pattern as included
    :type include_tags: list of strings, optional
    :param exclude_tags: tags that mark a pattern as excluded
    :type exclude_tags: list of strings, optional
    :param root: the root directory from which to look for files. Defaults to the current directory.
    :type root: str or pathlib.Path, optional
    :param limit: Maximum number of files that can be globbed.
    :type limit: int
    :return: all included files and all excluded files
    :type: tuple(set of strings, set of strings)
    Example usage::
        from lib50 import files
        from lib50.config import TaggedValue
        open("foo.py", "w").close()
        open("bar.c", "w").close()
        open("baz.h", "w").close()
        patterns = [TaggedValue("*", "exclude"),
                    TaggedValue("*.c", "include"),
                    TaggedValue("baz.h", "require")]
        print(files(patterns)) # prints ({'bar.c', 'baz.h'}, {'foo.py'})
    """
    # Work on mutable copies so the tag normalization below does not
    # mutate the caller's (possibly shared tuple) defaults.
    require_tags = list(require_tags)
    include_tags = list(include_tags)
    exclude_tags = list(exclude_tags)
    # Ensure tags do not start with !
    for tags in [require_tags, include_tags, exclude_tags]:
        for i, tag in enumerate(tags):
            tags[i] = tag[1:] if tag.startswith("!") else tag
    with cd(root):
        # Include everything but hidden paths by default
        included = _glob("*", limit=limit)
        excluded = set()
        if patterns:
            missing_files = []
            # For each pattern (order matters: later patterns override earlier ones)
            for pattern in patterns:
                # Refuse patterns that escape the root directory.
                if not _is_relative_to(Path(pattern.value).expanduser().resolve(), Path.cwd()):
                    raise Error(_("Cannot include/exclude paths outside the current directory, but such a path ({}) was specified.")
                                .format(pattern.value))
                # Include all files that are tagged with !require
                if pattern.tag in require_tags:
                    file = str(Path(pattern.value))
                    if not Path(file).exists():
                        missing_files.append(file)
                    else:
                        try:
                            excluded.remove(file)
                        except KeyError:
                            pass
                        else:
                            # NOTE(review): the add only runs when the file was
                            # previously excluded; files never excluded rely on
                            # the initial glob already containing them -- confirm
                            # hidden required files are handled upstream.
                            included.add(file)
                # Include all files that are tagged with !include
                elif pattern.tag in include_tags:
                    new_included = _glob(pattern.value, limit=limit)
                    excluded -= new_included
                    included.update(new_included)
                # Exclude all files that are tagged with !exclude
                elif pattern.tag in exclude_tags:
                    new_excluded = _glob(pattern.value, limit=limit)
                    included -= new_excluded
                    excluded.update(new_excluded)
            if missing_files:
                raise MissingFilesError(missing_files)
        # Exclude any files that are not valid utf8
        invalid = set()
        for file in included:
            try:
                file.encode("utf8")
            except UnicodeEncodeError:
                # Keep a lossy (replacement-character) name in excluded so the
                # user can still see which file was dropped.
                excluded.add(file.encode("utf8", "replace").decode())
                invalid.add(file)
        included -= invalid
    return included, excluded
def connect(slug, config_loader, file_limit=DEFAULT_FILE_LIMIT):
"""
Connects to a GitHub repo indentified by slug.
Then parses the ``.cs50.yml`` config file with the ``config_loader``.
If not all required files are present, per the ``files`` tag in ``.cs50.yml``, an ``Error`` is raised.
:param slug: the slug identifying a GitHub repo.
:type slug: str
:param config_loader: a config loader that is able to parse the .cs50.yml config file for a tool.
:type config_loader: lib50.config.Loader
:param file_limit: The maximum number of files that | |
<filename>scripts/runransac.py
#!/usr/bin/env python3
import numpy as np
from numba import cuda, float32, float64, uint16, int32 # GPU Optimizations
from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32
import math
import copy
# My Classes
import libfileio as my_io
import libtimer as my_timer
NUM_ITERATIONS = 100
DISTANCE_THRESHOLD = 0.01#0.1
THREADS_PER_BLOCK = 256
# TODO::HACK::DEBUG: find a way to declare this within a class
RNG_STATES = create_xoroshiro128p_states(THREADS_PER_BLOCK * 6, seed=1)
@cuda.jit
def kernelRANSAC_1(point_cloud,plane_constants,rng_states):
    """
    Kernel 1: Calculate Constants a,b,c,d from the point cloud
    IN: [full_point_cloud]
    OUT: [a,b,c,d]
    Calculates the constants a,b,c,d from the point_cloud that fit the plane
    equation: ax + by + cz + d = 0
    Each thread builds one candidate plane from three randomly sampled points
    and writes (a, b, c, d, |normal|) into its row of plane_constants.
    """
    # Get the current thread ID
    tx = cuda.threadIdx.x
    stride = cuda.blockIdx.x*cuda.blockDim.x
    # Check Bounds: exactly one candidate plane per RANSAC iteration
    if stride + tx >= NUM_ITERATIONS:
        return
    # Create an Array to store the point index
    # rand_idx = cuda.local.array(shape=(3,1),dtype=int32)
    pts = cuda.local.array(shape=(3,3),dtype=float32)
    for i in range(3):
        # Get a random number between [0,1] as a float
        # NOTE(review): the RNG stream index is tx only, so threads with the
        # same threadIdx in different blocks draw identical samples --
        # harmless while the grid is a single block, but confirm intent.
        rand_num = xoroshiro128p_uniform_float32(rng_states, tx)
        # Convert the float to an int for indexing
        # Map: new_val = (old_val-old_min)/(old_max-old_min) * (new_max-new_min) + new_min
        # rand_idx = (rand_num)/(1-0)*(point_cloud.shape[0]) + 0
        # NOTE(review): rand_num == 1.0 would index one past the end --
        # verify the generator's range excludes 1.0.
        rand_idx = int(rand_num*point_cloud.shape[0])
        # Get the Points Corresponding to the random indexs
        pts[i,0] = point_cloud[rand_idx,0]
        pts[i,1] = point_cloud[rand_idx,1]
        pts[i,2] = point_cloud[rand_idx,2]
    # Calculate the Constants for the given points
    # $$ a = [(y_2 - y_1)(z_3 - z_1) - (z_2 - z_1)(y_3 - y_2)] $$
    a = (pts[1,1]-pts[0,1])*(pts[2,2]-pts[0,2]) - (pts[1,2]-pts[0,2])*(pts[2,1]-pts[1,1])
    # $$ b = [(z_2 - z_1)(x_3 - x_1) - (x_2 - x_1)(z_3 - z_2)] $$
    b = (pts[1,2]-pts[0,2])*(pts[2,0]-pts[0,0]) - (pts[1,0]-pts[0,0])*(pts[2,2]-pts[1,2])
    # $$ c = [(x_2 - x_1)(y_3 - y_1) - (y_2 - y_1)(x_3 - x_2)] $$
    c = (pts[1,0]-pts[0,0])*(pts[2,1]-pts[0,1]) - (pts[1,1]-pts[0,1])*(pts[2,0]-pts[1,0])
    # $$ d = -(a * x_n + b * y_n + c * z_n) $$
    d = -(a*pts[0,0] + b*pts[0,1] + c*pts[0,2])
    # $$ psq = sqrt{(a^2) + (b^2 + (c^2)} $$
    # Norm of the plane normal, floored at 0.1 so the distance division in
    # kernel 2 cannot divide by zero when the sampled points are collinear.
    psq = max(0.1,(a*a + b*b + c*c)**0.5)
    # Pass the Points back to the output array
    plane_constants[stride+tx,0] = a
    plane_constants[stride+tx,1] = b
    plane_constants[stride+tx,2] = c
    plane_constants[stride+tx,3] = d
    plane_constants[stride+tx,4] = psq
    return None
@cuda.jit
def kernelRANSAC_2(point_cloud,plane_constants,dist_thresh,count_constants):
    """
    Kernel 2: Evaluate the fit for points a,b,c,d
    IN: [point cloud], [a,b,c,d]
    OUT: [inlier_points]
    Each (x, y) thread pairs one candidate plane (x index) with one cloud
    point (y index), computes the point-to-plane distance, and atomically
    increments that plane's inlier counter when the point is within
    dist_thresh.
    """
    # Get the current thread ID
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    stride_x = cuda.blockIdx.x*cuda.blockDim.x
    stride_y = cuda.blockIdx.y*cuda.blockDim.y
    # Check Boundaries
    # BUG FIX: the x bound previously used '>', which let index
    # NUM_ITERATIONS through and read one row past the end of
    # plane_constants (kernel 1 guards the same index with '>=').
    if stride_x + tx >= NUM_ITERATIONS or stride_y + ty >= point_cloud.shape[0]:
        return
    # Get the plane_constants
    a = plane_constants[stride_x+tx,0]
    b = plane_constants[stride_x+tx,1]
    c = plane_constants[stride_x+tx,2]
    d = plane_constants[stride_x+tx,3]
    psq = plane_constants[stride_x+tx,4]
    # Calc distance between point and plane (psq is floored at 0.1 in
    # kernel 1, so this division is safe)
    dist = math.fabs(a*point_cloud[stride_y+ty,0] + b*point_cloud[stride_y+ty,1] + c*point_cloud[stride_y+ty,2] + d)/psq
    # Check whether or not the point is a good fit
    if (dist <= dist_thresh):
        # Increment the counter for this group of constants
        cuda.atomic.add(count_constants,(stride_x+tx,0),1)
    return None
class RunRANSAC(object):
"""
This is an implementation of the RANSAD plane fitting algorithm for the CPU
and the GPU.
"""
def __init__(self,num_iterations,distance_thresh,path_to_data="../data/",threads_per_block=256):
# super(RunRANSAC, self).__init__()
self.io = my_io.WrapperFileIO(path_to_data,None)
self.num_iters = num_iterations
self.dist_thresh = distance_thresh
self.tpb = threads_per_block
self.x_train = None
self.y_train = None
self.x_eval = None
self.y_eval = None
def getData(self,file_pickle_train, verbose=False):
"""
Opens the pickle files corresponing to the point cloud.
"""
# Open the Pickle Files
train_dict = self.io.loadPickle(file_pickle_train)
if verbose:
print(train_dict)
# Split the Dictionaries
x_train = train_dict["points"]
y_train = train_dict["labels"]
# Perform any Preprocessing / Normalization
x_train = self.procData(x_train)
if verbose:
print(x_train)
# Assign to Class Variables
self.x_train = x_train
self.y_train = y_train
return x_train, y_train
def procData(self,data):
"""
Preprocesses the data before the KNN
Data is in the form [X,Y,Z,R,G,B,NormX,NormY,NormZ]
Data array content is defined in libopen3D
Normalization should occur for each column to aid the classification
"""
# Normalize the numpy array by column
data_norm = (data-data.min(0)) / data.ptp(0)
return data_norm
def saveData(self,file_name,verbose=False):
"""Saves the processes data as a pickle file"""
# Process the data as a dictionary
data_dict = {
"points" : self.x_eval,
"labels" : self.y_eval
}
if verbose: print(data_dict)
# Save the File
self.io.savePickle(data_dict,file_name)
return None
def selectCluster(self,point_cloud_array,label_array,verbose=True):
"""
This function selects the cluster corresponing to the most frequently
occuring label in the point cloud
"""
# Remove the negative labels
if verbose: print(label_array.shape,label_array)
label_clean = label_array[label_array>=0]
if verbose: print(label_clean.shape,label_clean)
# Determine the Biggest Cluster
max_label = np.bincount(label_clean).argmax()
if verbose: print("Most Frequent Label:",max_label)
# Store Internally as the evaluation data
self.x_eval = point_cloud_array[label_array==max_label]
self.y_eval = np.full(self.x_eval.shape[0],max_label)
# Return the max label
return max_label
def cpu(self,x_eval=None,y_eval=None,debug=True):
"""
Runs the CPU version of the RANSAC algorithm on the provided data. If no
arguments are passed for x_eval or y_eval, the algorithm will utilize
the data already loaded by the class with the getData() routine.
Setting the debug flag to [T] will cause statements to be printed to
the command line. It is recommended to set this flag to [F] when timing
THE MATH
A plane is defined as:
$$ ax + by + cz + d = 0 $$
Given three points, the constants can be determined:
$$ a = [(y_2 - y_1)(z_3 - z_1) - (z_2 - z_1)(y_3 - y_2)] $$
$$ b = [(z_2 - z_1)(x_3 - x_1) - (x_2 - x_1)(z_3 - z_2)] $$
$$ c = [(x_2 - x_1)(y_3 - y_1) - (y_2 - y_1)(x_3 - x_2)] $$
$$ d = -(a * x_n + b * y_n + c * z_n) $$
Consider all the other points in the point cloud and calculate the
distance to the fit plane
$$distance = \frac{a*x_4 + b*y_4 + c*z_5 + d}{\sqrt{a^2 + b^2 + c^2}}$$
REFERENCES
https://en.wikipedia.org/wiki/Random_sample_consensus
https://medium.com/@ajithraj_gangadharan/3d-ransac-algorithm-for-lidar-pcd-segmentation-315d2a51351
"""
if debug: print("Running CPU Version")
# Assign the Local Data (If applicable)
if x_eval is not None: self.x_eval = x_eval
if y_eval is not None: self.y_eval = y_eval
# Get up Variables
# num_pts_train = self.x_train.shape[0]
# y_eval = np.zeros(num_pts_eval)
# Perform the Iterations
pts_idx_inliers = [] # For points already on the plane
pts_best = []
constants_best = []
for i in range(self.num_iters):
# Get a Random Sampling of Candidate Indexs
pts_idx = np.random.randint(0,self.x_eval.shape[0],(3))
# pts_idx_inliers(pts_idx)
if debug: print("Candidate Points Index:\n",pts_idx)
# Get the Candidate Points
pts = self.x_eval[pts_idx,:]
if debug: print("Candidate Points:\n",pts)
# Calculate the Constants for the given points
# $$ a = [(y_2 - y_1)(z_3 - z_1) - (z_2 - z_1)(y_3 - y_2)] $$
a = (pts[1,1]-pts[0,1])*(pts[2,2]-pts[0,2]) - (pts[1,2]-pts[0,2])*(pts[2,1]-pts[1,1])
# $$ b = [(z_2 - z_1)(x_3 - x_1) - (x_2 - x_1)(z_3 - z_2)] $$
b = (pts[1,2]-pts[0,2])*(pts[2,0]-pts[0,0]) - (pts[1,0]-pts[0,0])*(pts[2,2]-pts[1,2])
# $$ c = [(x_2 - x_1)(y_3 - y_1) - (y_2 - y_1)(x_3 - x_2)] $$
c = (pts[1,0]-pts[0,0])*(pts[2,1]-pts[0,1]) - (pts[1,1]-pts[0,1])*(pts[2,0]-pts[1,0])
# $$ d = -(a * x_n + b * y_n + c * z_n) $$
d = -(a*pts[0,0] + b*pts[0,1] + c*pts[0,2])
# $$ psq = sqrt{(a^2) + (b^2 + (c^2)} $$
psq = max(0.1,np.sqrt(a*a + b*b + c*c))
if debug: print(a,b,c,d)
# Evaluate the Performance of the Fit
pts_inliers = None
for j in range(self.x_eval.shape[0]):
if debug: print(j,self.x_eval.shape[0])
# We do not want to compare with points used to calc a,b,c,d
# if j in pts_idx: continue
# Calc distance between point and plane
if debug: print(j)
dist = math.fabs(a*self.x_eval[j,0] + b*self.x_eval[j,1] + c*self.x_eval[j,2] + d)/psq
# Check whether or no the point is a good fit
if (dist <= self.dist_thresh):
# Add to the list if inlier points
if pts_inliers is None:
pts_inliers = self.x_eval[j,:]
else:
pts_inliers = np.vstack((pts_inliers,self.x_eval[j,:]))
# Check if the current inliers is better than the best so far
if len(pts) > len(pts_best):
pts_best = pts
constants_best = np.array([a,b,c,d])
return pts_best, constants_best
def gpu(self,x_eval=None,y_eval=None,do_time_gpu=True,debug=True):
"""
This function implements the GPU version of the RANSAC plane fitting
algorithm. This routine calls two separate CUDA kernels. The first
kernel generates a matrix of constants corresponding to the plane. The
second kernel then determine which set of points generates the optimal
fit for the point cloud.
"""
print("Running GPU Version")
# Assign the Local Data (If applicable)
if x_eval is not None: self.x_eval = x_eval
if y_eval is | |
if(verbose) :
print('image too small for cropping (validation) : ', data[1] + '/' + data[0])
else :
break
if crop:
# crop image
if rand_crop:
crop_width = random.randint(0, image.size[0]-self.size-1)
crop_height = random.randint(0, image.size[1]-self.size-1)
else:
crop_width = 0
crop_height = 0
box = (crop_width, crop_height, crop_width+self.size, crop_height+self.size)
image = image.crop(box)
# image transform
#image.save(self.dir+'/'+str(self.train_iterator -1)+'.jpg')
orientation = [ Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270]
flip = [Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM]
if (random_flip_flop == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(flip))
if (random_rotate == True) :
if (random.choice([True, False])) :
image = image.transpose(random.choice(orientation))
# convert the image into a array
image = np.asarray(image)
# extract green
if( self.nb_channels == 1 and len(image.shape) > 2) :
image = self.extract_channel(image,1)
else:
image = np.asarray(image)
# extract green
if( self.nb_channels == 1 and len(image.shape) > 2) :
image = self.extract_channel(image,1)
# convert to float image
image = image.astype(np.float32) / 255.
#image = image.reshape(1, self.size, self.size, 3)
if self.size == None:
image = image.reshape(image.shape[0], image.shape[1], self.nb_channels)
else:
image = image.reshape(self.size, self.size, self.nb_channels)
# buils class label
label = np.zeros(len(self.image_class))
pos = self.image_class.index(data[1])
label[pos] = 1.0
# return image and label
return (image, label)
def get_batch_validation(self, batch_size = 50, crop = True, random_flip_flop = False, random_rotate = False) :
batch_image = np.empty([batch_size, self.size, self.size, self.nb_channels])
batch_label = np.empty([batch_size, self.nb_class])
for i in range(0,batch_size) :
data = self.get_next_validation(crop, self.rand_crop, random_flip_flop,random_rotate, verbose=False)
batch_image[i] = data[0]
batch_label[i] = data[1]
return (batch_image.astype(np.float32),batch_label)
    def export_splicing(self, export_path, nb_images, radius = 300):
        """Create spliced images: a circular patch from a CGG test image
        pasted into a Real test image, saved as JPEGs under export_path.

        Repeatedly collects `batch_size` Real and `batch_size` CGG images
        from the test file list (wrapping around when exhausted), splices
        them pairwise, and writes the results until nb_images are produced.

        :param export_path: output directory (created if missing)
        :param nb_images: total number of spliced images to export
        :param radius: radius in pixels of the transplanted disc
        """
        batch_size = 10
        if not os.path.exists(export_path):
            os.mkdir(export_path)
        i = 0
        while(i < nb_images):
            # batch[0] collects Real images, batch[1] collects CGG images.
            batch = []
            batch.append([])
            batch.append([])
            k1 = 0
            k2 = 0
            # Walk the shuffled test list until both buckets are full.
            while k1 < batch_size or k2 < batch_size:
                data = self.file_test[self.test_iterator]
                self.test_iterator += 1
                if self.test_iterator >= len(self.file_test) :
                    self.test_iterator = 0  # wrap around the test set
                file_name = self.dir + '/test/' + data[1] + '/' + data[0]
                image = np.array(Image.open(file_name))
                # print(data[1])
                if data[1] == 'Real' and k1 < batch_size:
                    batch[0].append(image)
                    k1 += 1
                if data[1] == 'CGG' and k2 < batch_size:
                    batch[1].append(image)
                    k2 += 1
            for j in range(batch_size):
                image_real = batch[0][j]
                image_cgg = batch[1][j]
                shape_cgg = image_cgg.shape
                shape_real = image_real.shape
                adding = np.zeros(shape_real)
                r = radius
                # Random disc centre in the CGG image, kept at least
                # `radius` pixels away from every border.
                a_cgg, b_cgg = random.randint(radius, shape_cgg[0] - radius), random.randint(radius, shape_cgg[1] - radius)
                y,x = np.ogrid[-a_cgg:shape_cgg[0]-a_cgg, -b_cgg:shape_cgg[1]-b_cgg]
                mask_cgg = x*x + y*y <= r*r
                # Random disc centre in the Real image.
                a_real, b_real = random.randint(radius, shape_real[0] - radius), random.randint(radius, shape_real[1] - radius)
                y,x = np.ogrid[-a_real:shape_real[0]-a_real, -b_real:shape_real[1]-b_real]
                mask_real = x*x + y*y <= r*r
                # Blank the Real disc, then paste the CGG disc into it.
                # NOTE(review): assumes both masks select the same pixel
                # count and that every image is larger than 2*radius in
                # both dimensions (randint raises otherwise) — TODO confirm
                # against the test-set image sizes.
                image_real[mask_real] = 0
                adding[mask_real] = image_cgg[mask_cgg]
                result = image_real + adding
                print(result.shape)
                exp = Image.fromarray(result.astype(np.uint8)).convert('RGB')
                exp.save(export_path + str(i) + '.jpg')
                i+=1
            # progress report after each spliced batch
            print(str(i) + " images exported")
def export_database(self, export_path, nb_train, nb_test, nb_validation, proportion = 0.5):
train_dir = export_path + 'train/'
test_dir = export_path + 'test/'
validation_dir = export_path + 'validation/'
if not os.path.exists(export_path):
os.mkdir(export_path)
os.mkdir(train_dir)
os.mkdir(train_dir + 'CGG/')
os.mkdir(train_dir + 'Real/')
os.mkdir(test_dir)
os.mkdir(test_dir + 'CGG/')
os.mkdir(test_dir + 'Real/')
os.mkdir(validation_dir)
os.mkdir(validation_dir + 'CGG/')
os.mkdir(validation_dir + 'Real/')
print("Exporting training set : " + str(nb_train) + " images to process...")
batch_size = 100
i = 0
n_class0 = 0
n_class1 = 0
while(i < nb_train):
batch = self.get_next_train_batch(batch_size)
for j in range(batch_size):
save = True
if batch[1][j][0] == 0.:
name_class = self.image_class[1]
n_class0 += 1
if(n_class0 > int(nb_train/2)):
save = False
else:
name_class = self.image_class[0]
n_class1 += 1
if(n_class1 > int(nb_train/2)):
save = False
if save :
if self.nb_channels == 1:
exp = Image.fromarray((batch[0][j]*255).astype(np.uint8).reshape(self.size, self.size))
else:
exp = Image.fromarray((batch[0][j]*255).astype(np.uint8).reshape(self.size, self.size, self.nb_channels))
exp.save(export_path + '/train/' + name_class + '/' + 'train' + str(i) + '.jpg')
i+=1
print(str(i) + " images exported")
print("Exporting testing set : " + str(nb_test) + " images to process...")
batch_size = 100
i = 0
n_class0 = 0
n_class1 = 0
while(i < nb_test):
batch = self.get_batch_test(batch_size)
for j in range(batch_size):
save = True
if batch[1][j][0] == 0.:
name_class = self.image_class[1]
n_class0 += 1
if(n_class0 > int(nb_test/2)):
save = False
else:
name_class = self.image_class[0]
n_class1 += 1
if(n_class1 > int(nb_test/2)):
save = False
if save:
if self.nb_channels == 1:
exp = Image.fromarray((batch[0][j]*255).astype(np.uint8).reshape(self.size, self.size))
else:
exp = Image.fromarray((batch[0][j]*255).astype(np.uint8).reshape(self.size, self.size, self.nb_channels))
exp.save(export_path + '/test/' + name_class + '/' + 'test' + str(i) + '.jpg')
i+=1
print(str(i) + " images exported")
print("Exporting validation set : " + str(nb_validation) + " images to process...")
batch_size = 100
i = 0
n_class0 = 0
n_class1 = 0
while(i < nb_validation):
batch = self.get_batch_validation(batch_size)
for j in range(batch_size):
save = True
if batch[1][j][0] == 0.:
name_class = self.image_class[1]
n_class0 += 1
if(n_class0 > int(nb_validation/2)):
save = False
else:
name_class = self.image_class[0]
n_class1 += 1
if(n_class1 > int(nb_validation/2)):
save = False
if save:
if self.nb_channels == 1:
exp = Image.fromarray((batch[0][j]*255).astype(np.uint8).reshape(self.size, self.size))
else:
exp = Image.fromarray((batch[0][j]*255).astype(np.uint8).reshape(self.size, self.size, self.nb_channels))
exp.save(export_path + '/validation/' + name_class + '/' + 'validation' + str(i) + '.jpg')
i+=1
print(str(i) + " images exported")
class Test_loader:
    """Serves labelled test images one at a time, tiled into fixed-size
    sub-images for per-patch classification.

    Expects the layout <directory>/<class_name>/<image files> with the
    classes 'Real' and 'CGG'.
    # NOTE(review): paths are built by plain concatenation, so `directory`
    # is presumably expected to end with '/' — confirm against callers.
    """
    def __init__(self, directory, subimage_size, only_green = True):
        self.dir = directory # directory with the train / test / validation sudirectories
        self.subimage_size = subimage_size # size of the sub image that should be croped
        self.nb_channels = 3 # return only the green channel of the images
        if(only_green == True) :
            self.nb_channels = 1
        self.iterator = 0 # iterator over the test images
        self.validation_iterator = 0 # iterator over the validation images
        self.seed = 42  # fixed seed so the shuffled file order is reproducible
        self.image_class = ['Real', 'CGG']
        self.nb_class = len(self.image_class)
        print(' number of classes :', self.nb_class, ' ', self.image_class)
        self.file_test = self.load_images_in_dir(self.dir,self.image_class)
    def get_immediate_subdirectories(self,a_dir) :
        # return the list of sub directories of a directory
        return [name for name in os.listdir(a_dir)
                if os.path.isdir(os.path.join(a_dir, name))]
    def load_images_in_dir(self, dir_name, image_class) :
        """Collect (filename, class) pairs for every image under
        dir_name/<class>, then shuffle them with the fixed seed."""
        # file extensions accepted as image data
        valid_image_extension = [".jpg",".gif",".png",".tga",".tif", ".JPG", ".jpeg"]
        file_list = []
        for c in image_class :
            nb_image_per_class = 0
            file_list_by_class = []
            for filename in os.listdir(dir_name+'/'+c):
                # check if the file is an image
                extension = os.path.splitext(filename)[1]
                if extension.lower() in valid_image_extension:
                    file_list_by_class.append(filename)
            for i in range(int(len(file_list_by_class))):
                file_list.append((file_list_by_class[i],c))
                nb_image_per_class += 1
            print(' ',c,nb_image_per_class,'images loaded')
        # deterministic shuffle so successive runs see the same order
        random.seed(self.seed)
        random.shuffle(file_list)
        return file_list
    def extract_subimages(self, image_file, subimage_size):
        """Tile image_file into non-overlapping subimage_size squares.

        Returns (subimages, width, height) where subimages has shape
        (n, subimage_size, subimage_size, nb_channels). Border pixels
        that do not fill a complete tile are discarded.
        # NOTE(review): a grayscale source (2-D array) is appended as-is
        # even when nb_channels == 3, which would break the final reshape
        # — confirm inputs are always RGB when only_green is False.
        """
        image = Image.open(self.dir + image_file)
        subimages = []
        width = image.size[0]
        height = image.size[1]
        current_height = 0
        while current_height + subimage_size <= height:
            current_width = 0
            while current_width + subimage_size <= width:
                box = (current_width, current_height,
                       current_width + subimage_size,
                       current_height + subimage_size)
                sub = np.asarray(image.crop(box))
                if len(sub.shape) > 2:
                    if self.nb_channels == 1:
                        # keep only the green channel, scaled to [0, 1]
                        subimages.append(sub[:,:,1].astype(np.float32)/255)
                    else:
                        subimages.append(sub.astype(np.float32)/255)
                else:
                    subimages.append(sub.astype(np.float32)/255)
                current_width += subimage_size
            current_height += subimage_size
        nb_subimages = len(subimages)
        print('Image of size ' + str(width) + 'x' + str(height) +
              ' cropped at ' + str(subimage_size) + 'x' + str (subimage_size) +
              ' : ' + str(nb_subimages) + ' outputed subimages.')
        return((np.reshape(np.array(subimages), (nb_subimages, subimage_size, subimage_size, self.nb_channels)), width, height))
    def get_next_image(self):
        """Return the next test image as (subimages, class label, width,
        height, original PIL image, relative file path), wrapping the
        iterator at the end of the file list."""
        if self.iterator >= len(self.file_test):
            self.iterator = 0
        labeled_image = self.file_test[self.iterator]
        image_file = labeled_image[1] + '/' + labeled_image[0]
        self.iterator += 1
        subimages, width, height = self.extract_subimages(image_file, self.subimage_size)
        original = Image.open(self.dir + image_file)
        return((subimages, labeled_image[1], width, height, original, image_file))
def get_image_filename_from_dir(directory_path) :
    """Open every image found directly inside *directory_path*.

    Files whose extension is not a known image extension are skipped;
    each opened image has its metadata printed. Returns the list of
    opened PIL images.
    """
    # file extensions accepted as image data
    accepted = {".jpg", ".gif", ".png", ".tga", ".tif", ".JPG"}
    # random_prefix = ''.join(random.choice('0123456789ABCDEF') for i in range(7))
    print("loading images in: ", directory_path)
    loaded = []
    for entry in os.listdir(directory_path):
        # skip anything that does not look like an image file
        if os.path.splitext(entry)[1].lower() not in accepted:
            continue
        img = Image.open(os.path.join(directory_path, entry))
        print( '\n')
        print( ' filename :', entry)
        print( ' width :', img.size[0] )
        print( ' height :', img.size[1] )
        print( ' mode :', img.mode )
        print( ' format :', img.format )
        loaded.append(img)
    return loaded
def compute_useless_images(directory_path, image_size, nb_images = 100, treshold = 0.3):
data = Database_loader(directory_path, image_size, only_green=True)
i = 0
batch_size = 50
max_height = np.zeros((nb_images,))
while(i < nb_images):
batch = data.get_next_train_batch(batch_size, False)
ind_batch = 0
for image in batch[0]:
image = np.reshape(image, [-1])
hist = np.histogram(image, 256, [0.,1.])[0]
# print(hist)
# print(image)
# print(batch[1][ind_batch])
# plt.imshow(np.reshape(image, [image_size, image_size]))
# plt.show()
max_height[i] = max(hist)/(image_size**2)
i+=1
ind_batch+=1
nb_useless = | |
<filename>src/baskerville/models/base_spark.py
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import datetime
import itertools
import os
from baskerville.models.base import PipelineBase
from baskerville.models.feature_manager import FeatureManager
from baskerville.spark.helpers import save_df_to_table, reset_spark_storage, \
set_unknown_prediction
from baskerville.spark.schemas import rs_cache_schema
from baskerville.util.helpers import TimeBucket, FOLDER_CACHE, \
instantiate_from_str, load_model_from_path
from pyspark.sql import types as T, DataFrame
from baskerville.spark import get_or_create_spark_session
from dateutil.tz import tzutc
from collections import OrderedDict
from baskerville.util.baskerville_tools import BaskervilleDBTools
from baskerville.util.enums import Step
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
from baskerville.db.models import RequestSet
from baskerville.models.request_set_cache import RequestSetSparkCache
class SparkPipelineBase(PipelineBase):
"""
The base class for all pipelines that use spark. It initializes spark
session and provides basic implementation for some of the main methods
"""
def __init__(self,
db_conf,
engine_conf,
spark_conf,
clean_up=True,
group_by_cols=('client_request_host', 'client_ip'),
*args,
**kwargs
):
super().__init__(db_conf, engine_conf, clean_up)
self.start_time = datetime.datetime.utcnow()
self.request_set_cache = None
self.spark = None
self.tools = None
self.spark_conf = spark_conf
self.data_parser = self.engine_conf.data_config.parser
self.group_by_cols = list(set(group_by_cols))
self.group_by_aggs = None
self.post_group_by_aggs = None
self.columns_to_filter_by = None
self._can_predict = False
self._is_initialized = False
self.drop_if_missing_filter = None
self.cols_to_drop = None
self.cache_columns = [
'target',
'ip',
'first_ever_request',
'old_subset_count',
'old_features',
'old_num_requests',
]
self.cache_config = {
'db_url': self.db_url,
'db_driver': self.spark_conf.db_driver,
'user': self.db_conf.user,
'password': <PASSWORD>
}
self.step_to_action = OrderedDict(
zip([
Step.preprocessing,
Step.group_by,
Step.feature_calculation,
Step.label_or_predict,
Step.trigger_challenge,
Step.save
], [
self.preprocessing,
self.group_by,
self.feature_calculation,
self.label_or_predict,
self.trigger_challenge,
self.save
]))
self.remaining_steps = list(self.step_to_action.keys())
self.time_bucket = TimeBucket(self.engine_conf.time_bucket)
self.feature_manager = FeatureManager(self.engine_conf)
self.model_index = None
self.model = None
    def load_test(self):
        """
        If the user has set the load_test configuration, then multiply the
        traffic by `self.engine_conf.load_test` times to do load testing.
        :return:
        """
        if self.engine_conf.load_test:
            # Persist the base logs once; each pass unions a copy with a
            # rewritten client_ip onto self.logs_df.
            df = self.logs_df.persist(self.spark_conf.storage_level)
            for i in range(self.engine_conf.load_test - 1):
                # Replace client_ip with a pseudo-random '0'/'1' string
                # (fixed seed 42, so the multiplication is deterministic).
                df = df.withColumn(
                    'client_ip', F.round(F.rand(42)).cast('string')
                )
                self.logs_df = self.logs_df.union(df).persist(
                    self.spark_conf.storage_level
                )
            df.unpersist()
            del df
            self.logger.info(
                f'---- Count after multiplication: {self.logs_df.count()}'
            )
def reset(self):
"""
Unpersist rdds and dataframes and call GC - see broadcast memory
release issue
:return:
"""
import gc
reset_spark_storage()
gc.collect()
    def initialize(self):
        """
        Set the basics:
        - Connect to the database
        - Initialize spark session
        - Get active model and scaler and set them to broadcast variables
        - Get active features with their active columns, update columns etc and
        set the relevant broadcast variables
        - Set the _can_predict flag
        - Instantiate the accumulators (for metrics)
        - Instantiate request set cache.
        :return:
        """
        # initialize db access tools
        self.tools = BaskervilleDBTools(self.db_conf)
        self.tools.connect_to_db()
        # initialize spark session
        self.spark = self.instantiate_spark_session()
        self.feature_manager.initialize()
        self.drop_if_missing_filter = self.data_parser.drop_if_missing_filter()
        # set up cache
        self.request_set_cache = self.set_up_request_set_cache()
        # gather calculations
        self.group_by_aggs = self.get_group_by_aggs()
        self.columns_to_filter_by = self.get_columns_to_filter_by()
        # everything produced for feature computation that is not a
        # RequestSet column gets dropped before saving
        self.cols_to_drop = set(
            self.feature_manager.active_feature_names +
            self.feature_manager.active_columns +
            list(self.group_by_aggs.keys()) +
            self.feature_manager.update_feature_cols
        ).difference(RequestSet.columns)
        # Model resolution order: model registered in the database first,
        # then a model file on disk, else run without a model.
        if self.engine_conf.model_id:
            self.model_index = self.tools.get_ml_model_from_db(
                self.engine_conf.model_id)
            self.model = instantiate_from_str(self.model_index.algorithm)
            # the classifier is stored as bytes in the db; decode before load
            self.model.load(bytes.decode(
                self.model_index.classifier, 'utf8'), self.spark)
            self.model.set_logger(self.logger)
        elif self.engine_conf.model_path:
            self.model = load_model_from_path(
                self.engine_conf.model_path, self.spark
            )
            self.model.set_logger(self.logger)
        else:
            self.model = None
        self._is_initialized = True
def get_columns_to_filter_by(self):
"""
Gathers all the columns that need to be present in the dataframe
for the processing to complete.
group_by_cols: the columns to group data on
active_columns: the columns that the active features have declared as
necessary
timestamp_column: the time column - all logs need to have a time column
:return: a set of the column names that need to be present in the
dataframe
:rtype: set[str]
"""
cols = self.group_by_cols + self.feature_manager.active_columns
cols.append(self.engine_conf.data_config.timestamp_column)
return set(cols)
    def get_group_by_aggs(self):
        """
        Gathers all the group by arguments:
        basic_aggs:
        - first_request
        - last_request
        - num_requests
        column_aggs: the columns the features need for computation are gathered
        as lists
        feature_aggs: the columns the features need for computation
        Priority: basic_aggs > feature_aggs > column_aggs
        The basic aggs have a priority over the feature and column aggs.
        The feature aggs have a priority over the column aggs (if a feature
        has explicitly asked for a computation for a specific column it relies
        upon, then the computation will be stored instead of the column
        aggregation as list)
        :return: a dictionary with the name of the group by aggregation columns
        as keys and the respective Column aggregation as values
        :rtype: dict[string, pyspark.Column]
        """
        basic_aggs = {
            'first_request': F.min(F.col('@timestamp')).alias('first_request'),
            'last_request': F.max(F.col('@timestamp')).alias('last_request'),
            'num_requests': F.count(F.col('@timestamp')).alias('num_requests')
        }
        # default aggregation for feature inputs: collect each active
        # column's values into a list
        column_aggs = {
            c: F.collect_list(F.col(c)).alias(c)
            for c in self.feature_manager.active_columns
        }
        feature_aggs = self.feature_manager.get_feature_group_by_aggs()
        # Merge in priority order: an existing key is never overwritten by
        # a later, lower-priority aggregation. The second update checks
        # against basic_aggs AFTER the feature merge, which is what gives
        # feature aggs priority over column aggs.
        basic_aggs.update(
            {k: v for k, v in feature_aggs.items() if k not in basic_aggs}
        )
        basic_aggs.update(
            {k: v for k, v in column_aggs.items() if k not in basic_aggs}
        )
        return basic_aggs
    def get_post_group_by_calculations(self):
        """
        Gathers the columns and computations to be performed after the grouping
        of the data (logs_df)
        Basic post group by columns:
        - `id_runtime`
        - `time_bucket`
        - `start`
        - `stop`
        - `subset_count`
        if there is an ML Model defined:
        - `model_version`
        - `classifier`
        - `scaler`
        - `model_features`
        Each feature can also define post group by calculations using the
        post_group_by_calcs dict.
        :return: A dictionary with the name of the result columns as keys and
        their respective computations as values
        :rtype: dict[string, pyspark.Column]
        """
        # NOTE(review): this looks like memoization, but the computed dict
        # is never assigned back to self.post_group_by_aggs in this method
        # (it is initialized to None in __init__) — confirm it is set
        # elsewhere, otherwise the cache never kicks in.
        if self.post_group_by_aggs:
            return self.post_group_by_aggs
        post_group_by_columns = {
            'id_runtime': F.lit(self.runtime.id),
            'time_bucket': F.lit(self.time_bucket.sec),
            # start: prefer the first request ever seen (from the cache),
            # falling back to the first request of the current window
            'start': F.when(
                F.col('first_ever_request').isNotNull(),
                F.col('first_ever_request')
            ).otherwise(F.col('first_request')),
            'stop': F.col('last_request'),
            # carry the running subset counter over from the cache
            'subset_count': F.when(
                F.col('old_subset_count').isNotNull(),
                F.col('old_subset_count')
            ).otherwise(F.lit(0))
        }
        if self.model_index:
            post_group_by_columns['model_version'] = F.lit(
                self.model_index.id
            )
        # todo: what if a feature defines a column name that already exists?
        # e.g. like `subset_count`
        post_group_by_columns.update(
            self.feature_manager.post_group_by_calculations
        )
        return post_group_by_columns
def __getitem__(self, name):
if name == 'run':
if not self._is_initialized:
raise RuntimeError(
f'__getitem__: {self.__class__.__name__} '
f'has not been initialized yet.'
)
return getattr(self, name)
def __getattribute__(self, name):
if name == 'run':
if not self._is_initialized:
raise RuntimeError(
f'__getattribute__:{self.__class__.__name__} '
f'has not been initialized yet.'
)
return super().__getattribute__(name)
    def filter_cache(self):
        """
        Use the current logs to find the past request sets - if any - in the
        request set cache
        :return:
        """
        # distinct (target, ip) pairs present in the current window
        df = self.logs_df.select(
            F.col('target'),
            F.col('ip'),
        ).distinct().alias('a')  # ppp.persist(self.spark_conf.storage_level)
        # narrow the cache down to only those pairs
        self.request_set_cache.filter_by(df)
        df.unpersist()
        del df
def run(self):
"""
Runs the configured steps.
:return:
"""
self.logger.info(
f'Spark UI accessible at:{self.spark.sparkContext.uiWebUrl}'
)
self.create_runtime()
self.get_data()
self.process_data()
    def process_data(self):
        """
        Splits the data into time bucket length windows and executes all the
        steps
        :return:
        """
        if self.logs_df.count() == 0:
            self.logger.info('No data in to process.')
        else:
            for window_df in self.get_window():
                self.logs_df = window_df
                # repartition on the group-by columns so the later groupBy
                # shuffles less, and cache the window for the step chain
                self.logs_df = self.logs_df.repartition(
                    *self.group_by_cols
                ).persist(self.spark_conf.storage_level)
                self.remaining_steps = list(self.step_to_action.keys())
                # execute the configured steps in their declared order
                for step, action in self.step_to_action.items():
                    self.logger.info('Starting step {}'.format(step))
                    action()
                    self.logger.info('Completed step {}'.format(step))
                    self.remaining_steps.remove(step)
                # release cached data before the next window
                self.reset()
    def get_window(self):
        """Generate consecutive, non-overlapping time-bucket windows of
        self.logs_df, yielding only the non-empty ones.

        Window bounds run from the minimum to the maximum '@timestamp' in
        the data, advancing by self.time_bucket.td each iteration; empty
        windows are logged and skipped.
        """
        from pyspark.sql import functions as F
        df = self.logs_df.withColumn('timestamp',
                                     F.col('@timestamp').cast('timestamp'))
        df = df.sort('timestamp')
        # the data range determines the window bounds (two spark actions)
        current_window_start = df.agg({"timestamp": "min"}).collect()[0][0]
        stop = df.agg({"timestamp": "max"}).collect()[0][0]
        window_df = None
        current_end = current_window_start + self.time_bucket.td
        while True:
            # release the previous window before building the next one
            if window_df:
                window_df.unpersist(blocking=True)
                del window_df
            filter_ = (
                (F.col('timestamp') >= current_window_start) &
                (F.col('timestamp') < current_end)
            )
            window_df = df.where(filter_)
            # ppp.persist(
            #     self.spark_conf.storage_level
            # )
            if not window_df.rdd.isEmpty():
                self.logger.info(f'# Request sets = {window_df.count()}')
                yield window_df
            else:
                self.logger.info(f'Empty window df for {str(filter_._jc)}')
            current_window_start = current_window_start + self.time_bucket.td
            current_end = current_window_start + self.time_bucket.td
            if current_window_start >= stop:
                # final cleanup of the last window before terminating
                window_df.unpersist(blocking=True)
                del window_df
                break
def create_runtime(self):
"""
Create a Runtime in the Baskerville database.
:return:
"""
raise NotImplementedError(
'SparkPipelineBase does not have an implementation '
'for _create_runtime.'
)
def get_data(self):
"""
Get dataframe of log data
:return:
"""
raise NotImplementedError(
'SparkPipelineBase does not have an implementation '
'for _get_data.'
)
    def preprocessing(self):
        """
        Fill missing values, add calculation cols, and filter.
        :return:
        """
        # Order matters: each call mutates self.logs_df in place and the
        # later steps rely on columns produced/renamed by the earlier ones.
        self.handle_missing_columns()
        self.normalize_host_names()
        self.rename_columns()
        self.filter_columns()
        self.handle_missing_values()
        self.add_calc_columns()
def group_by(self):
"""
Group the logs df by the given group-by columns (normally IP, host).
:return: None
"""
self.logs_df = self.logs_df.withColumn('ip', F.col('client_ip'))
self.logs_df = self.logs_df.withColumn(
'target', F.col('client_request_host')
)
self.logs_df = self.logs_df.groupBy(
'ip', 'target'
).agg(
*self.group_by_aggs.values()
)
    def feature_calculation(self):
        """
        Add calculation cols, extract features, and update.
        :return:
        """
        # fixed sequence: post-group-by columns must exist before the
        # features are extracted, and updates run on the extracted values
        self.add_post_groupby_columns()
        self.feature_extraction()
        self.feature_update()
    def label_or_predict(self):
        """
        Apply label from MISP or predict label.
        #todo: use separate steps for this
        :return:
        """
        from pyspark.sql import functions as F
        from pyspark.sql.types import IntegerType
        if self.engine_conf.cross_reference:
            # label using the MISP cross-reference
            self.cross_reference()
        else:
            # no labelling source configured: fill label with nulls
            self.logs_df = self.logs_df.withColumn(
                'label', F.lit(None).cast(IntegerType()))
        self.predict()
def get_challenges(self, df, challenge_threshold):
def challenge_decision(num_normals, num_anomalies, threshold):
if num_anomalies >= threshold * (num_anomalies + num_normals):
return 1
return 0
challenge_decision_udf = udf(challenge_decision, IntegerType())
df = df.select(['target', 'prediction'])\
.groupBy(['target', 'prediction'])\
.count()\
.groupBy('target')\
.pivot('prediction').agg(F.first('count'))\
.withColumn(
'challenge',
challenge_decision_udf(
F.col('0'), F.col('1'), F.lit(challenge_threshold)
)
).select(['target', 'challenge'])
return | |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
from tqdm import tqdm
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torchvision.datasets import CIFAR10
import torchvision.models as models
import torch.nn.functional as F
import numpy as np
import moco.loader as loader
import moco.builder as builder
import datetime
import json
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
# All lower-case, callable torchvision model constructors, usable as
# --arch choices (e.g. resnet18, resnet50, ...).
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

# Command-line interface. Grouped below into: generic training options,
# distributed-training options, MoCo hyper-parameters, MoCo v2 /
# augmentation options, mixup options, mixed-precision options and
# kNN-monitor options.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,
                    help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum of SGD solver')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
# distributed-training options
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default=None, type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=1, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--save-dir', default='', type=str, metavar='PATH',
                    help='path to save checkpoint (default: none)')
# moco specific configs:
parser.add_argument('--bimoco', action='store_true',
                    help='use two branches MoCo')
parser.add_argument('--bimoco-gamma', default=0.5, type=float,
                    help='fraction of MoCo v2 loss')
parser.add_argument('--moco-dim', default=128, type=int,
                    help='feature dimension (default: 128)')
parser.add_argument('--moco-k', default=65536, type=int,
                    help='queue size; number of negative keys (default: 65536)')
parser.add_argument('--moco-m', default=0.999, type=float,
                    help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.07, type=float,
                    help='softmax temperature (default: 0.07)')
# options for moco v2
parser.add_argument('--mlp', action='store_true',
                    help='use mlp head')
parser.add_argument('--mixup', action='store_true',
                    help='use mixup data augmentation')
parser.add_argument('--aug-color-only', action='store_true',
                    help='use only color data augmentation')
parser.add_argument('--aug-geo', action='store_true',
                    help='use only geometric data augmentation')
parser.add_argument('--geo-plus', action='store_true',
                    help='use only geometric data augmentation')
parser.add_argument('--cos', action='store_true',
                    help='use cosine lr schedule')
# MixUp augmentation configs:
parser.add_argument('--mixup-p', default=None, type=float,
                    help='the prob to apply a mixup aug in a certain iteration')
parser.add_argument('--replace', action='store_true',
                    help='whether replace the original loss with mixup loss or not')
parser.add_argument('--rui', action='store_true',
                    help='use Rui method')
# options for mix precision training
parser.add_argument('--amp-opt-level', type=str, default='O0', choices=['O0', 'O1', 'O2'],
                    help='mixed precision opt level, if O0, no amp is used')
# knn monitor
parser.add_argument('--knn-k', default=200, type=int, help='k in kNN monitor')
parser.add_argument('--knn-t', default=0.1, type=float, help='softmax temperature in kNN monitor; could be different with moco-t')
def main():
    """Entry point: parse args, persist the config, seed RNGs and launch
    either a single worker or one spawned worker per GPU."""
    global args
    args = parser.parse_args()
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    # persist the full run configuration next to the checkpoints
    path = os.path.join(args.save_dir, "config.json")
    with open(path, 'w') as f:
        json.dump(vars(args), f, indent=2)
    print("Full config saved to {}".format(path))
    if args.seed is not None:
        # deterministic runs also force cudnn.deterministic, which can
        # slow training down (hence the warning)
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    # world size can be supplied via the environment when dist-url is env://
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    print('Using distributed training')
    ngpus_per_node = torch.cuda.device_count()
    print('there is/are {} GPUs per nodes'.format(ngpus_per_node))
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
print("=> creating model '{}'".format(args.arch))
if args.bimoco:
model = builder.BiMoCo(models.__dict__[args.arch],
args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp)
else:
model = builder.MoCo(models.__dict__[args.arch],
args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp)
print(model)
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.amp_opt_level != "O0":
if amp is None:
print("apex is not installed but amp_opt_level is set to {args.amp_opt_level}, ignoring.\n"
"you should install apex from https://github.com/NVIDIA/apex#quick-start first")
args.amp_opt_level = "O0"
else:
model, optimizer = amp.initialize(model.cuda(), optimizer, opt_level=args.amp_opt_level)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
# comment out the following line for debugging
raise NotImplementedError("Only DistributedDataParallel is supported.")
else:
# AllGather implementation (batch shuffle, queue update, etc.) in
# this code only supports DistributedDataParallel.
raise NotImplementedError("Only DistributedDataParallel is supported.")
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'].state_dict())
# optimizer = checkpoint['optimizer']
if args.amp_opt_level != "O0" and checkpoint['args'].amp_opt_level != "O0":
amp.load_state_dict(checkpoint['amp'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
normalize = transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
# MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
augmentation = [
transforms.RandomResizedCrop(32),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
base_augmentation = [
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([
transforms.RandomRotation([90, 90]),
], p=0.5),
transforms.RandomApply([
transforms.RandomRotation([180, 180]),
], p=0.5)
]
key_augmentation = [
transforms.RandomResizedCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
geo_augmentation = [
transforms.RandomResizedCrop(32),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.RandomRotation([90, 90]),
], p=0.5),
transforms.RandomApply([
transforms.RandomRotation([180, 180]),
], p=0.5),
transforms.ToTensor(),
normalize
]
test_aug = [
transforms.RandomResizedCrop(32),
transforms.ToTensor(),
normalize
]
if args.aug_color_only:
if args.bimoco:
train_dataset = CIFAR10(root='data', train=True,
transform=loader.ThreeCropsTransform(transforms.Compose(base_augmentation),
transforms.Compose(augmentation),
transforms.Compose(key_augmentation),
transforms.Compose(augmentation)),
download=True)
print('Using BiMoCo with only MoCo v2 augmentation')
else:
train_dataset = CIFAR10(root='data', train=True,
transform=loader.TwoCropsTransform(transforms.Compose(augmentation)),
download=True)
print('Using MoCo v2')
elif args.aug_geo:
train_dataset = CIFAR10(root='data', train=True,
transform=loader.TwoCropsTransform(transforms.Compose(geo_augmentation)),
download=True)
print('Using MoCo v2 with only geometric augmentation')
else:
if args.bimoco or args.mixup:
train_dataset = CIFAR10(root='data', train=True,
transform=loader.ThreeCropsTransform(transforms.Compose(base_augmentation),
transforms.Compose(geo_augmentation),
transforms.Compose(key_augmentation),
transforms.Compose(augmentation)),
download=True)
print('Using BiMoCo/MoCo-mixup with Geo and Color transformation')
else:
if args.geo_plus:
geo_plus_augmentation = [
transforms.RandomResizedCrop(32),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.RandomRotation([90, 90]),
], p=0.5),
transforms.RandomApply([
transforms.RandomRotation([180, 180]),
], p=0.5),
transforms.ToTensor(),
normalize
]
train_dataset = CIFAR10(root='data', train=True,
transform=loader.TwoCropsTransform(transforms.Compose(geo_plus_augmentation)),
download=True)
print('Using MoCo v2 with a geo-plus augmentation')
else:
train_dataset = CIFAR10(root='data', train=True,
transform=loader.TwoCropsTransform(transforms.Compose(augmentation)),
download=True)
print('Using MoCo v2')
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
if train_sampler is None:
print('loader using shuffle')
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
memory_dataset = CIFAR10(root='data', train=True, transform=transforms.Compose(test_aug), download=True)
memory_loader = torch.utils.data.DataLoader(memory_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=2, pin_memory=True)
test_dataset = CIFAR10(root='data', train=False, transform=transforms.Compose(test_aug), | |
self._expand(
active_ctx, active_property, e, options,
inside_list=inside_list,
inside_index=inside_index,
type_scoped_ctx=type_scoped_ctx)
if inside_list and _is_array(e):
e = {'@list': e}
# drop None values
if e is not None:
if _is_array(e):
rval.extend(e)
else:
rval.append(e)
return rval
# handle scalars
if not _is_object(element):
# drop free-floating scalars that are not in lists
if (not inside_list and (active_property is None or
self._expand_iri(
active_ctx, active_property, vocab=True) == '@graph')):
return None
# expand element according to value expansion rules
return self._expand_value(active_ctx, active_property, element, options)
# expand the active property
expanded_active_property = self._expand_iri(
active_ctx, active_property, vocab=True)
# get any property-scoped context for activeProperty
property_scoped_ctx = JsonLdProcessor.get_context_value(
active_ctx, active_property, '@context')
# second, determine if any type-scoped context should be reverted; it
# should only be reverted when the following are all true:
# 1. `element` is not a value or subject reference
# 2. `inside_index` is false
if not type_scoped_ctx and active_ctx.get('previousContext'):
type_scoped_ctx = active_ctx
must_revert = not inside_index
if (must_revert and type_scoped_ctx and
len(element) <= 2 and '@context' not in element):
for key, value in sorted(element.items()):
expanded_property = self._expand_iri(
type_scoped_ctx, key, vocab=True)
if expanded_property == '@value':
# value found, ensure type-scoped context is used to expand it
must_revert = False
active_ctx = type_scoped_ctx
break
if expanded_property == '@id' and len(element) == 1:
# subject reference found, do not revert
must_revert = False
break
if must_revert:
active_ctx = self._revert_to_previous_context(active_ctx)
# apply property-scoped context after reverting term-scoped context
if property_scoped_ctx is not None:
active_ctx = self._process_context(
active_ctx, property_scoped_ctx, options,
override_protected=True)
# recursively expand object
# if element has a context, process it
if '@context' in element:
active_ctx = self._process_context(
active_ctx, element['@context'], options)
# set the type-scoped context to the context on input, for use later
type_scoped_ctx = active_ctx
# Remember the first key found expanding to @type
type_key = None
# look for scoped context on @type
for key, value in sorted(element.items()):
expanded_property = self._expand_iri(
active_ctx, key, vocab=True)
if expanded_property == '@type':
if not type_key:
type_key = key
# set scoped contexts from @type
types = [t for t in JsonLdProcessor.arrayify(element[key]) if _is_string(t)]
for type_ in sorted(types):
ctx = JsonLdProcessor.get_context_value(
type_scoped_ctx, type_, '@context')
if ctx is not None:
active_ctx = self._process_context(
active_ctx, ctx, options, propagate=False)
# process each key and value in element, ignoring @nest content
rval = {}
self._expand_object(
active_ctx, active_property, expanded_active_property,
element, rval, options,
inside_list,
type_key,
type_scoped_ctx)
# get property count on expanded output
count = len(rval)
if '@value' in rval:
# @value must only have @language or @type
if '@type' in rval and ('@language' in rval or '@direction' in rval):
raise JsonLdError(
'Invalid JSON-LD syntax; an element containing '
'"@value" may not contain both "@type" and either "@language" or "@direction".',
'jsonld.SyntaxError', {'element': rval},
code='invalid value object')
valid_count = count - 1
if '@type' in rval:
valid_count -= 1
if '@index' in rval:
valid_count -= 1
if '@language' in rval:
valid_count -= 1
if '@direction' in rval:
valid_count -= 1
if valid_count != 0:
raise JsonLdError(
'Invalid JSON-LD syntax; an element containing "@value" '
'may only have an "@index" property and at most one other '
'property which can be "@type" or "@language".',
'jsonld.SyntaxError', {'element': rval},
code='invalid value object')
values = JsonLdProcessor.get_values(rval, '@value')
types = JsonLdProcessor.get_values(rval, '@type')
# drop None @values
if self._processing_mode(active_ctx, 1.1) and '@json' in types and len(types) == 1:
# any @value is okay if @type: @json
rval = rval
elif rval['@value'] is None:
rval = None
# if @language is present, @value must be a string
elif '@language' in rval and not all(_is_string(val) or _is_empty_object(val) for val in values):
raise JsonLdError(
'Invalid JSON-LD syntax; only strings may be '
'language-tagged.', 'jsonld.SyntaxError',
{'element': rval}, code='invalid language-tagged value')
elif not all(
_is_empty_object(type) or
_is_string(type) and
_is_absolute_iri(type) and
not type.startswith('_:') for type in types):
raise JsonLdError(
'Invalid JSON-LD syntax; an element containing "@value" '
'and "@type" must have an absolute IRI for the value '
'of "@type".', 'jsonld.SyntaxError', {'element': rval},
code='invalid typed value')
# convert @type to an array
elif '@type' in rval and not _is_array(rval['@type']):
rval['@type'] = [rval['@type']]
# handle @set and @list
elif '@set' in rval or '@list' in rval:
if count > 1 and not (count == 2 and '@index' in rval):
raise JsonLdError(
'Invalid JSON-LD syntax; if an element has the '
'property "@set" or "@list", then it can have at most '
'one other property, which is "@index".',
'jsonld.SyntaxError', {'element': rval},
code='invalid set or list object')
# optimize away @set
if '@set' in rval:
rval = rval['@set']
count = len(rval)
# drop objects with only @language
elif count == 1 and '@language' in rval:
rval = None
# drop certain top-level objects that do not occur in lists
if (_is_object(rval) and not options.get('keepFreeFloatingNodes') and
not inside_list and
(active_property is None or
expanded_active_property == '@graph')):
# drop empty object or top-level @value/@list,
# or object with only @id
if (count == 0 or '@value' in rval or '@list' in rval or
(count == 1 and '@id' in rval)):
rval = None
return rval
def _expand_object(
self, active_ctx, active_property, expanded_active_property,
element, expanded_parent, options,
inside_list=False,
type_key=None,
type_scoped_ctx=None):
"""
Expand each key and value of element adding to result.
:param active_ctx: the context to use.
:param active_property: the property for the element, None for none.
:param expanded_active_property: the expansion of active_property
:param element: the element to expand.
:param expanded_parent: the expanded result into which to add values.
:param options: the expansion options.
:param inside_list: True if the property is a list, False if not.
:return: the expanded value.
"""
nests = []
unexpanded_value = None
is_json_type = False
if type_key in element:
if element[type_key] and _is_array(element[type_key]):
t = element[type_key][0]
else:
t = element[type_key]
is_json_type = self._expand_iri(
active_ctx, t, vocab=True) == '@json'
for key, value in sorted(element.items()):
if key == '@context':
continue
# expand key to IRI
expanded_property = self._expand_iri(
active_ctx, key, vocab=True)
# drop non-absolute IRI keys that aren't keywords
if (expanded_property is None or
not (
_is_absolute_iri(expanded_property) or
_is_keyword(expanded_property))):
continue
if _is_keyword(expanded_property):
if expanded_active_property == '@reverse':
raise JsonLdError(
'Invalid JSON-LD syntax; a keyword cannot be used as '
'a @reverse property.',
'jsonld.SyntaxError', {'value': value},
code='invalid reverse property map')
if (expanded_property in expanded_parent and
expanded_property != '@included' and
expanded_property != '@type'):
raise JsonLdError(
'Invalid JSON-LD syntax; colliding keywords detected.',
'jsonld.SyntaxError', {'keyword': expanded_property},
code='colliding keywords')
# syntax error if @id is not a string
if expanded_property == '@id':
if not _is_string(value):
if not options.get('isFrame'):
raise JsonLdError(
'Invalid JSON-LD syntax; "@id" value must be a '
'string.', 'jsonld.SyntaxError',
{'value': value}, code='invalid @id value')
if _is_object(value):
if not _is_empty_object(value):
raise JsonLdError(
'Invalid JSON-LD syntax; "@id" value must be a '
'string or an empty object or array of strings.',
'jsonld.SyntaxError',
{'value': value}, code='invalid @id value')
elif _is_array(value):
if not all(_is_string(v) for v in value):
raise JsonLdError(
'Invalid JSON-LD syntax; "@id" value an empty object or array of strings, if framing',
'jsonld.SyntaxError',
{'value': value}, code='invalid @id value')
else:
raise JsonLdError(
'Invalid JSON-LD syntax; "@id" value an empty object or array of strings, if framing',
'jsonld.SyntaxError',
{'value': value}, code='invalid @id value')
expanded_values = []
for v in JsonLdProcessor.arrayify(value):
expanded_values.append(v if \
_is_object(v) else \
self._expand_iri(active_ctx, v, base=options.get('base', '')))
JsonLdProcessor.add_value(
expanded_parent, '@id', expanded_values,
{'propertyIsArray': options['isFrame']})
continue
if expanded_property == '@type':
if _is_object(value):
# if framing, can be a default object, but need to expand
# key to determine that
new_value = {}
for k, v in value.items():
ek = self._expand_iri(type_scoped_ctx, k, vocab=True)
ev = [self._expand_iri(type_scoped_ctx, vv, vocab=True, base=options.get('base', ''))
for vv in JsonLdProcessor.arrayify(v)]
new_value[ek] = ev
value = new_value
else:
value = JsonLdProcessor.arrayify(value)
_validate_type_value(value, options.get('isFrame'))
expanded_values = []
for v in JsonLdProcessor.arrayify(value):
expanded_values.append(self._expand_iri(type_scoped_ctx, v, vocab=True, base=options.get('base', '')) if _is_string(v) else v)
JsonLdProcessor.add_value(
expanded_parent, '@type', expanded_values,
{'propertyIsArray': options['isFrame']})
continue
# Included blocks are treated as an array of separate object nodes sharing
# the same referencing active_property.
# For 1.0, it is skipped as are other unknown keywords
if (expanded_property == '@included' and
self._processing_mode(active_ctx, | |
#!/usr/bin/python
##
# @file
# This file is part of SeisSol.
#
# @author <NAME> (rettenbs AT in.tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger,_M.Sc.)
#
# @section LICENSE
# Copyright (c) 2013, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import collections
import datetime
import numpy
class Coords(collections.Sequence):
    """Lazy sequence of vertex coordinates for the regular cube mesh.

    NOTE(review): Python 2 code — relies on the builtin ``reduce`` and on
    integer-division semantics of ``/`` in ``__getitem__``.
    """
    def __init__(self, mesh):
        self.__mesh = mesh
        self.__len1 = [2*i + 1 for i in self.__mesh.size()] # number of coords in each dimension
        # Total vertex count = product of the per-dimension counts.
        self.__len = reduce(lambda x, y: x*y, self.__len1)
    def len1(self):
        # Per-dimension coordinate counts (x, y, z).
        return self.__len1
    def __len__(self):
        return self.__len
    def __getitem__(self, index):
        # Raising StopIteration ends plain for-loop iteration over self.
        if index >= self.__len:
            raise StopIteration
        def scale(x, dim):
            # Shift the grid index so the mesh is centered at the origin,
            # then scale by 5 / size in this dimension.
            return (x - (self.__len1[dim]-1)/2.) * 5. / self.__mesh.size()[dim]
        # Decompose the flat index into (x, y, z) grid indices
        # (x fastest-varying), then map each to a physical coordinate.
        x = scale(index % self.__len1[0], 0)
        y = scale((index / self.__len1[0]) % self.__len1[1], 1)
        z = scale(index / (self.__len1[0] * self.__len1[1]), 2)
        return (x, y, z)
class Elements(collections.Sequence):
    """Lazy sequence of tetrahedral elements covering the cube mesh.

    Each unit cube is split into 5 tetrahedra; the mesh has 2x the mesh
    size in cubes per dimension, giving 40 tetrahedra per mesh cell
    (5 * 2 * 2 * 2).
    NOTE(review): Python 2 code — builtin ``reduce``, integer ``/``, and
    tuple parameters (``def crd((x, y, z))``).
    """
    def __init__(self, mesh):
        self.__mesh = mesh
        self.__len = 40 * reduce(lambda x, y: x*y, self.__mesh.size())
        self.__len1 = [2 * i for i in self.__mesh.size()] # number of cubes in one dimension
        self.__checkOrientation()
    def __checkOrientation(self):
        """Checks the correct orientation for the different tetrahedra"""
        # Sample the first 10 tetrahedra (covers the 5 tets of an even and
        # an odd cube) and verify each has positive orientation via the
        # scalar triple product of its edge vectors.
        for i in range(10):
            vertices = self[i]
            coords = [None]*4
            for j in range(4):
                coords[j] = numpy.array(self.__mesh.coords()[vertices[j]])
            n = numpy.cross(-coords[0]+coords[1], -coords[0]+coords[2])
            orientation = numpy.dot(n, -coords[0]+coords[3])
            if orientation <= 0:
                raise Exception('Wrong orientation in element '+str(i))
    def __len__(self):
        return self.__len
    def tetUnitCube(self, index, even):
        """Returns the coordinates of one of the 5 tetrahedron in an even or odd unit cube"""
        # Even and odd cubes use mirrored decompositions so neighbouring
        # cubes share faces consistently.
        if even:
            if index == 0:
                return ((0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1))
            if index == 1:
                return ((1, 0, 0), (0, 1, 0), (1, 1, 1), (1, 1, 0))
            if index == 2:
                return ((1, 0, 0), (1, 1, 1), (0, 0, 1), (1, 0, 1))
            if index == 3:
                return ((0, 1, 0), (0, 1, 1), (0, 0, 1), (1, 1, 1))
            # index == 4
            return ((1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 1))
        if index == 0:
            return ((0, 0, 0), (0, 1, 0), (0, 1, 1), (1, 1, 0))
        if index == 1:
            return ((0, 0, 0), (1, 1, 0), (1, 0, 1), (1, 0, 0))
        if index == 2:
            return ((0, 0, 0), (1, 0, 1), (0, 1, 1), (0, 0, 1))
        if index == 3:
            return ((1, 1, 0), (1, 0, 1), (1, 1, 1), (0, 1, 1))
        # index == 4
        return ((0, 0, 0), (1, 1, 0), (0, 1, 1), (1, 0, 1))
    def __getitem__(self, index):
        # Sequence protocol: StopIteration ends plain iteration.
        if index >= self.__len:
            raise StopIteration
        # Index of the cube we are currently working on
        cIndex = index/5
        cx = cIndex % self.__len1[0]
        cy = (cIndex / self.__len1[0]) % self.__len1[1]
        cz = cIndex / (self.__len1[0] * self.__len1[1])
        # The index inside the cube
        i = index % 5
        # Odd cube?
        odd = (cx+cy+cz) % 2
        # Number of coords in one dimension
        coordLength = self.__mesh.coords().len1()
        def crd((x, y, z)):
            # Convert unit-cube vertex offsets into a flat vertex index.
            return x+cx + ((y+cy) + (z+cz) * coordLength[1]) * coordLength[0]
        return map(crd, self.tetUnitCube(i, not odd))
class Group:
    """A named material group covering every element of the mesh.

    Fields are exposed dict-style: ``group['name']`` resolves to the
    ``_name`` attribute, and likewise for size, material and cells.
    """
    def __init__(self, name, material, mesh):
        # Cache the group's metadata; the cell list simply enumerates
        # every element index of the mesh.
        self._material = material
        self._name = name
        self._size = len(mesh.elements())
        self._cells = xrange(self._size)
    def __getitem__(self, key):
        # Translate a key such as 'name' into the attribute name '_name'.
        return getattr(self, '_{0}'.format(str(key)))
class Boundary:
class Sides(collections.Sequence):
"""Generates the boundary condition for all face elements"""
def __init__(self, mesh):
self.__len1 = [8 * i for i in # Number of faces on one side
[mesh.size()[0]*mesh.size()[1], # Top / Bottom
mesh.size()[0]*mesh.size()[2], # Left / Right behind
mesh.size()[1]*mesh.size()[2]]] # Right / Left behind
self.__len = 2 * reduce(lambda x, y: x+y, self.__len1)
self.__mesh = mesh
def face(points):
"""Returns the number of the face or None if its not a face"""
if points == [0, 0, 0, 1]:
return 1
if points == [0, 0, 1, 0]:
return 2
if points == [0, 1, 0, 0]:
return 4
if points == [1, 0, 0, 0]:
return 3
return None
self.__top = []
self.__left = []
self.__right = []
self.__bot = []
self.__rightb = []
self.__leftb = []
for i in range(5):
element1 = mesh.elements().tetUnitCube(i, 1)
f = face(map(lambda e: e[0], element1))
if f:
self.__right.append((i, f))
f = face(map(lambda e: e[1], element1))
if f:
self.__left.append((i, f))
f = face(map(lambda e: e[2], element1))
if f:
self.__top.append((i, f))
for i in range(5):
element0 = mesh.elements().tetUnitCube(i, 0)
f = face(map(lambda e: e[0], element0))
if f:
self.__right.append((i, f))
f = face(map(lambda e: e[1], element0))
if f:
self.__left.append((i, f))
f = face(map(lambda e: e[2], element0))
if f:
self.__top.append((i, f))
f = face(map(lambda e: 1-e[0], element0))
if f:
self.__leftb.append((i, f))
f = face(map(lambda e: 1-e[1], element0))
if f:
self.__rightb.append((i, f))
f = face(map(lambda e: 1-e[2], element0))
if f:
self.__bot.append((i, f))
for i in range(5):
element1 = mesh.elements().tetUnitCube(i, 1)
f = face(map(lambda e: 1-e[0], element1))
if f:
self.__leftb.append((i, f))
f = face(map(lambda e: 1-e[1], element1))
if f:
self.__rightb.append((i, f))
f = face(map(lambda e: 1-e[2], element1))
if f:
self.__bot.append((i, f))
assert(len(self.__top) == 4)
assert(len(self.__left) == 4)
assert(len(self.__right) == 4)
assert(len(self.__bot) == 4)
assert(len(self.__rightb) == 4)
assert(len(self.__leftb) == 4)
def __len__(self):
return self.__len
def __getitem__(self, index):
if index >= self.__len:
raise StopIteration
len1All = self.__len1 + self.__len1
for side in range(6):
if index < len1All[side]:
break
index -= len1All[side]
def getFace(index, sizeX, sizeY):
x = (index / 2) % sizeX
y = (index / (2 * sizeX)) % sizeY
face = index % 4
if y%2 == 1:
# odd rows ...
face = (face+2)%4
return (x, y, face)
size = [2 * i for i in self.__mesh.size()] # number of cubes in each dimension
if side == 0: # top (x = x; y = y)
(x, y, face) = getFace(index, size[0], size[1])
offset = (x + y*size[0]) * 5
f = self.__top[face]
elif side == 1: # left (x = x; y = z)
(x, y, face) = getFace(index, size[0], size[2])
offset = (x + y*size[1]*size[0]) * 5
f = self.__left[face]
elif side == 2: # right (x = y; y = z)
(x, y, face) = getFace(index, size[1], size[2])
offset = (x + y*size[1]) * size[0] * 5
f = self.__right[face]
elif side == 3: # bottom (x = x; y = y)
(x, y, face) = getFace(index, size[0], size[1])
offset = (x + (y + (size[2]-1)*size[1])*size[0]) * 5
f = self.__bot[face]
elif side == 4: # right behind (x = | |
55),1]
# Slice historical (M) and simulated (M_sim) records into 5-degree bins on
# column 0.
# NOTE(review): column 0 appears to be a daily temperature index used for
# binning, column 1 of M the observed value and the remaining columns the
# predictors — confirm against where M and M_sim are assembled.
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
# One independent ordinary-least-squares model per bin.
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
# Fit each bin's model only when that bin actually has training rows;
# an empty bin leaves its model unfitted.
if len(y80p) > 0:
    reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
    reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
    reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
    reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
    reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
    reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
    reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
    reg50m.fit(X50,y50)
# Make predictions using the testing set
# Route every historical day to its bin's model. The branches cover all
# values of M[i,0], so y_hat is always assigned before the append.
# NOTE(review): a day falling in a bin that had no training rows would hit
# an unfitted model at predict time — confirm bins are always populated.
predicted = []
for i in range(0,num_days):
    s = M[i,2:]
    s = s.reshape((1,len(s)))  # sklearn predict expects a 2-D (1, n_features) row
    if M[i,0]>=80:
        y_hat = reg80p.predict(s)
    elif M[i,0] >= 75 and M[i,0] < 80:
        y_hat = reg75_80.predict(s)
    elif M[i,0] >= 70 and M[i,0] < 75:
        y_hat = reg70_75.predict(s)
    elif M[i,0] >= 65 and M[i,0] < 70:
        y_hat = reg65_70.predict(s)
    elif M[i,0] >= 60 and M[i,0] < 65:
        y_hat = reg60_65.predict(s)
    elif M[i,0] >= 55 and M[i,0] < 60:
        y_hat = reg55_60.predict(s)
    elif M[i,0] >= 50 and M[i,0] < 55:
        y_hat = reg50_55.predict(s)
    elif M[i,0] < 50:
        y_hat = reg50m.predict(s)
    predicted = np.append(predicted,y_hat)
# Column vector of in-sample fitted values.
PGEB_p = predicted.reshape((len(predicted),1))
# Repeat the bin-routed prediction for every simulated day (M_sim carries
# predictors from column 1 onward; column 0 is the binning variable).
simulated=[]
for i in range(0,sim_days):
    s = M_sim[i,1:]
    s = s.reshape((1,len(s)))  # sklearn predict expects a 2-D (1, n_features) row
    if M_sim[i,0]>=80:
        y_hat = reg80p.predict(s)
    elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
        y_hat = reg75_80.predict(s)
    elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
        y_hat = reg70_75.predict(s)
    elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
        y_hat = reg65_70.predict(s)
    elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
        y_hat = reg60_65.predict(s)
    elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
        y_hat = reg55_60.predict(s)
    elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
        y_hat = reg50_55.predict(s)
    elif M_sim[i,0] < 50:
        y_hat = reg50m.predict(s) #
    simulated = np.append(simulated,y_hat)
PGEB_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,PGEB_p)
#print a[0]**2
# Residuals
PGEBresiduals = PGEB_p - peaks
PGEB_y = peaks
# RMSE
RMSE = (np.sum((PGEBresiduals**2))/len(PGEBresiduals))**.5
#Collect residuals from load regression
# Stack the per-zone residual series and keep the first 3*365 daily rows.
R = np.column_stack((BPAresiduals,SDGEresiduals,SCEresiduals,PGEVresiduals,PGEBresiduals))
ResidualsLoad = R[0:3*365,:]
###################################
# PATH 46
###################################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/46_daily.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
# Gather each city's average-temperature and wind columns into matrices.
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
    n1 = i + '_AvgT'
    n2 = i + '_Wind'
    j = int(cities.index(i))
    AvgT[:,j] = df_data1.loc[:,n1]
    Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
# Heating/cooling degree days relative to a 65-degree balance point.
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
    for j in range(0,num_cities):
        HDD[i,j] = np.max((0,65-AvgT[i,j]))
        CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
# Stack calendar/path columns with the derived weather predictors.
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
# Restore readable names for the columns used below.
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path46'}, inplace=True)
df_data.rename(columns={4:'Weekday'}, inplace=True)
# Month-by-month subsets for the monthly regressions.
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
# NOTE(review): 'oct' shadows the builtin oct() from here on.
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
# Target series: daily Path 46 flow.
y = df_data.loc[:,'Path46']
#multivariate regression
# One ordinary-least-squares model per calendar month.
jan_reg_46 = linear_model.LinearRegression()
feb_reg_46 = linear_model.LinearRegression()
mar_reg_46 = linear_model.LinearRegression()
apr_reg_46 = linear_model.LinearRegression()
may_reg_46 = linear_model.LinearRegression()
jun_reg_46 = linear_model.LinearRegression()
jul_reg_46 = linear_model.LinearRegression()
aug_reg_46 = linear_model.LinearRegression()
sep_reg_46 = linear_model.LinearRegression()
oct_reg_46 = linear_model.LinearRegression()
nov_reg_46 = linear_model.LinearRegression()
dec_reg_46 = linear_model.LinearRegression()
# Train the model using the training sets
# Predictors are every column from 'Weekday' onward; target is 'Path46'.
jan_reg_46.fit(jan.loc[:,'Weekday':],jan.loc[:,'Path46'])
feb_reg_46.fit(feb.loc[:,'Weekday':],feb.loc[:,'Path46'])
mar_reg_46.fit(mar.loc[:,'Weekday':],mar.loc[:,'Path46'])
apr_reg_46.fit(apr.loc[:,'Weekday':],apr.loc[:,'Path46'])
may_reg_46.fit(may.loc[:,'Weekday':],may.loc[:,'Path46'])
jun_reg_46.fit(jun.loc[:,'Weekday':],jun.loc[:,'Path46'])
jul_reg_46.fit(jul.loc[:,'Weekday':],jul.loc[:,'Path46'])
aug_reg_46.fit(aug.loc[:,'Weekday':],aug.loc[:,'Path46'])
sep_reg_46.fit(sep.loc[:,'Weekday':],sep.loc[:,'Path46'])
oct_reg_46.fit(oct.loc[:,'Weekday':],oct.loc[:,'Path46'])
nov_reg_46.fit(nov.loc[:,'Weekday':],nov.loc[:,'Path46'])
dec_reg_46.fit(dec.loc[:,'Weekday':],dec.loc[:,'Path46'])
# Make predictions using the testing set
# Route each day to its calendar month's fitted model. A lookup table
# replaces the original 12-branch if/elif chain that repeated the same
# four statements per month (duplication was the defect; behavior is
# unchanged).
month_models_46 = {
    1: (jan, jan_reg_46),
    2: (feb, feb_reg_46),
    3: (mar, mar_reg_46),
    4: (apr, apr_reg_46),
    5: (may, may_reg_46),
    6: (jun, jun_reg_46),
    7: (jul, jul_reg_46),
    8: (aug, aug_reg_46),
    9: (sep, sep_reg_46),
    10: (oct, oct_reg_46),
    11: (nov, nov_reg_46),
}
predicted = []
rc = np.shape(jan.loc[:,'Weekday':])
n = rc[1]  # number of predictor columns (identical for every month)
for i in range(0,len(y)):
    m = df_data.loc[i,'Month']
    # Any month value not in the table falls through to December,
    # matching the original chain's final else branch.
    month_df, month_reg = month_models_46.get(m, (dec, dec_reg_46))
    s = month_df.loc[i,'Weekday':]
    s = np.reshape(s[:,None],(1,n))  # sklearn predict expects (1, n_features)
    predicted = np.append(predicted, month_reg.predict(s))
Path46_p = predicted
# Residuals
residuals = predicted - y.values
Residuals46 = np.reshape(residuals[730:],(1095,1))
Path46_y = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
##R2
#a=st.pearsonr(y,predicted)
#print a[0]**2
###############################
# NW PATHS
###############################
# import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/NW_Path_data.xlsx',sheet_name='Daily',header=0)
# Collect average temperature and wind speed, one column per city.
# (City-name spellings must match the spreadsheet column prefixes.)
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days, num_cities))
Wind = np.zeros((num_days, num_cities))
for j, city in enumerate(cities):
    AvgT[:, j] = df_data1.loc[:, city + '_AvgT']
    Wind[:, j] = df_data1.loc[:, city + '_Wind']
# Convert temperatures to degree days (base 65F).  Vectorized over the whole
# (days x cities) grid -- elementwise equivalent to the original
# per-element max(0, ...) double loop, without the Python-level O(n*m) cost.
HDD = np.maximum(0.0, 65 - AvgT)
CDD = np.maximum(0.0, AvgT - 65)
# separate wind speed by cooling/heating degree day
binary_CDD = CDD > 0
binary_HDD = HDD > 0
CDD_wind = np.multiply(Wind, binary_CDD)
HDD_wind = np.multiply(Wind, binary_HDD)
# Stack calendar features and weather features into one design matrix.
X1 = np.array(df_data1.loc[:, 'Month':'Weekday'])
X2 = np.column_stack((HDD, CDD, HDD_wind, CDD_wind))
cX = np.column_stack((X1, X2))
df_data = pd.DataFrame(cX)
H = df_data
#df_data.to_excel('Synthetic_demand_pathflows/cX.xlsx')
# Give the stacked design-matrix columns meaningful names.  A single rename
# call with the full mapping replaces eight sequential in-place renames
# (behavior is identical).
df_data.rename(
    columns={
        0: 'Month',
        3: 'Path8',
        4: 'Path14',
        5: 'Path3',
        6: 'BPA_wind',
        7: 'BPA_hydro',
        8: 'Weekday',
        9: 'Salem_HDD',
    },
    inplace=True,
)
# Partition the data by calendar month.
# NOTE: `oct` shadows the builtin oct(); the name is kept unchanged because
# the downstream regression code references these exact variables.
jan = df_data.loc[df_data['Month'] == 1, :]
feb = df_data.loc[df_data['Month'] == 2, :]
mar = df_data.loc[df_data['Month'] == 3, :]
apr = df_data.loc[df_data['Month'] == 4, :]
may = df_data.loc[df_data['Month'] == 5, :]
jun = df_data.loc[df_data['Month'] == 6, :]
jul = df_data.loc[df_data['Month'] == 7, :]
aug = df_data.loc[df_data['Month'] == 8, :]
sep = df_data.loc[df_data['Month'] == 9, :]
oct = df_data.loc[df_data['Month'] == 10, :]
nov = df_data.loc[df_data['Month'] == 11, :]
dec = df_data.loc[df_data['Month'] == 12, :]
# Output containers: one column per transmission line.
lines = ['Path8', 'Path14', 'Path3']
num_lines = len(lines)
export_residuals = np.zeros((len(cX), num_lines))
NWPaths_p = np.zeros((len(cX), num_lines))
NWPaths_y = np.zeros((len(cX), num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name='jan_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='feb_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='mar_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='apr_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='may_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jun_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jul_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='aug_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='sep_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='oct_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='nov_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='dec_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
# Train the model using the training sets
name='jan_reg_NW' + str(line)
locals()[name].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
name='feb_reg_NW' + str(line)
locals()[name].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
name='mar_reg_NW' + str(line)
locals()[name].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
name='apr_reg_NW' + str(line)
locals()[name].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
name='may_reg_NW' + str(line)
locals()[name].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
name='jun_reg_NW' + str(line)
locals()[name].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
name='jul_reg_NW' + str(line)
locals()[name].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
name='aug_reg_NW' + str(line)
locals()[name].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
name='sep_reg_NW' + str(line)
locals()[name].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
name='oct_reg_NW' + str(line)
locals()[name].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
name='nov_reg_NW' + str(line)
locals()[name].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
name='dec_reg_NW' + str(line)
locals()[name].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jan_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='feb_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='mar_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='apr_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='may_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jun_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jul_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='aug_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='sep_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='oct_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='nov_reg_NW' + | |
<filename>src/ny_artsy_date/ny_artsy_date.py<gh_stars>1-10
import googlemaps
import os
import numpy as np
import requests
import lxml
import pandas as pd
import gmaps
from pandas import json_normalize
import re
def geocoding(my_location, google_maps_key):
    """Geocode an address to (latitude, longitude) via the Google Maps API.

    This package interacts with the Google Maps API to create the
    visualization, thus an API key is required.

    Args
    ----
    Required:
        my_location(str): Starting point address - must be within NYC Metro Location
        google_maps_key (str): Google maps API key needed to geocode your location
            To obtain a google maps API key, please refer to https://developers.google.com/maps

    Returns
    ---
    Latitude and Longitude
    Fields:
        lat(float): Latitude of location
        lon(float): Longitude of location

    Raises
    ---
    ValueError: if the key does not look like a Google Maps key, or if the
        address yields no geocoding result (previously this surfaced as an
        opaque IndexError on the empty result list).

    Usage Example
    ---
    [in]:
    geocode_events("Met Museum", google_maps_key)
    [out]:
    [40.7794, -73.9632]
    """
    # Google Maps API keys start with 'AI'; reject anything else early.
    if google_maps_key.startswith('AI'):
        google_maps = googlemaps.Client(key=google_maps_key)
    else:
        raise ValueError("Invalid Google Maps Key. Please Refer to https://developers.google.com/maps to obtain a valid key.")
    geocode_result = google_maps.geocode(my_location)
    # Bug fix: an unresolvable address returns an empty list, which used to
    # crash with IndexError; raise a descriptive error instead.
    if not geocode_result:
        raise ValueError("Could not geocode location: {}".format(my_location))
    location = geocode_result[0]['geometry']['location']
    return location['lat'], location['lng']
def map_events(df, google_maps_key, lat_column, long_column, name_column, start_lon = None, start_lat = None):
    """Plot the rows of *df* as markers on an interactive Google map.

    This package interacts with the Google Maps API to create the
    visualization, thus an API key is required.

    Args
    ----
    Required:
        df(pandas.DataFrame): data with one row per point to plot
        google_maps_key (str): Google maps API key
            To obtain a google maps API key, please refer to https://developers.google.com/maps
        lat_column(str): name of df column containing latitude data
        long_column(str): name of df column containing longitude data
        name_column(str): name of df column containing the point names (events, restaurants, etc)
    Optional:
        start_lon: longitudinal data of starting point
        start_lat: latitudinal data of starting point

    Returns
    ---
    Google Map with markers at the specified points

    Usage Example
    ---
    [in]:
    map_events(df, google_maps_key,'Lat','Long','Name')
    [out]:
    Map: Interactive Google Maps Output with Hover Text Description of Data Point
    """
    # Bug fix: gmaps.configure() must run BEFORE any figure/layer is built;
    # previously it was called last, after the figure had been created.
    gmaps.configure(api_key=google_maps_key)
    marker_locations = df[[lat_column, long_column]]
    hover_info = df[name_column]
    # Center the map on the median coordinate so outliers don't skew the view.
    central_coordinates = df[[lat_column, long_column]].median()
    fig = gmaps.figure(center=central_coordinates, zoom_level=14)
    markers = gmaps.marker_layer(marker_locations, hover_text = hover_info)
    fig.add_layer(markers)
    # Optional starting-point marker -- the default when called from the other
    # package functions.
    if start_lat is not None and start_lon is not None:
        start_locations = [[start_lat, start_lon]]
        start_layer = gmaps.symbol_layer(start_locations, fill_color='blue', stroke_color='blue', hover_text = 'Starting Point')
        fig.add_layer(start_layer)
    return fig
def find_my_art_events(my_location = None, google_maps_key = None, lat = None, lon = None, free_only = 0, max_results = 10, search_range = '500m', mapping = False):
    """Find NYC-area art events near a location via the NY ArtBeat API.

    Queries https://www.nyartbeat.com/resources/doc/api for events within
    *search_range* of the given location (address OR latitude/longitude) and
    returns a table of matches, optionally with an interactive map.

    Args
    ----
    Required (one of the two forms):
        my_location(str): Address starting point - Must be within NYC Metro Location
            (requires google_maps_key for geocoding), OR
        lat(float), lon(float): starting-point coordinates.
    Optional:
        google_maps_key (str): Google maps API key; needed to geocode
            my_location and to reverse-geocode event addresses.
            To obtain a key, please refer to https://developers.google.com/maps
        free_only(bool): only return free events (Default value = 0)
        max_results(int): 5, 10, 20 or 50 (Default value = 10)
        search_range(str/float): '500m', '1000m', '1500m' or '3000m'
            (a bare number is accepted and treated as meters; Default '500m')
        mapping(bool): also return an interactive map of the matches

    Returns
    ---
    DataFrame with columns Event_Name, Event_Description, DateEnd, Distance,
    Event_Lat, Event_Lon, Event_Price_Adult, url, Event_Price_Detailed
    (plus Event_Address when a Google key is supplied), sorted by Distance.
    When mapping is True, returns (DataFrame, map).

    Usage Example
    ---
    [in]:
    find_my_art_events(lat = 40.78, lon = -73.96, search_range = '3000m', max_results = 5)
    [out]:
    df (and optionally an interactive map)
    """
    valid_search_range = {'500m',"1000m","1500m","3000m"}
    # np.nan (lowercase) -- np.NaN was removed in NumPy 2.0.
    valid_max_results = {np.nan, 5, 10, 20, 50}
    # Accept a bare number for search_range and normalize it to "<n>m".
    try:
        float(search_range)
        search_range = str(search_range) + "m"
    except (TypeError, ValueError):
        pass
    if search_range not in valid_search_range:
        raise ValueError("search range must be one of %r." % valid_search_range)
    # NOTE: the original extra guard `max_results != np.NaN` was always True
    # (NaN compares unequal to everything), so the membership test alone
    # preserves behavior.
    if max_results not in valid_max_results:
        raise ValueError("max results must be one of %r." % valid_max_results)
    assert my_location is not None or (lat is not None and lon is not None), 'Please make sure there are valid address parameters (address or lat/lon).'
    if lat is None or lon is None:
        lat, lon = geocoding(my_location, google_maps_key)
    response = requests.get('http://www.nyartbeat.com/list/event_searchNear',
                            params={'Latitude': lat, 'Longitude': lon, 'MaxResults': max_results, 'searchrange': search_range}
                            )
    try:
        df = pd.read_xml(response.content)
    except ValueError:
        print("No Results Found. Please try increasing your search range and make sure you are searching within the metro area.")
        return pd.DataFrame()
    # Extract an adult ticket price from the free-text Price field.
    # Compiled once and matched per row.  Matching per row also fixes a
    # misalignment bug: the old code pre-built a global list of matches
    # (named `list`, shadowing the builtin) and advanced an index only on one
    # branch, so rows taking an earlier branch could shift later prices.
    price_pattern = re.compile(r'(adult|adults|admission)(\s|\s\$|\s\:|\:\s|\:\s\$|\:\$)(\d{1,2})')
    try:
        df['Event_Price_Adult'] = ""
        for i in df.index:
            string = df.at[i, 'Price'].lower()
            match = price_pattern.search(string)
            if string == 'free':
                df.at[i, 'Event_Price_Adult'] = 0
            elif ('suggest' in string) and any(
                word in string for word in ('donate', 'donation', 'contribute', 'contribution')
            ):
                # e.g. "suggested donation" -> treat as free.
                # Bug fix: the original `('donate' or 'donation' or ... in string)`
                # always evaluated truthy, so ANY price mentioning 'suggest'
                # was marked free regardless of a donation keyword.
                df.at[i, 'Event_Price_Adult'] = 0
            elif match:
                df.at[i, 'Event_Price_Adult'] = match.group(3)
            else:
                df.at[i, 'Event_Price_Adult'] = np.nan
    except Exception:
        # Best-effort parsing: leave Event_Price_Adult partially filled on error.
        pass
    try:
        if free_only == 1 or free_only == True:
            df = df[df['Event_Price_Adult'] == 0]
        elif free_only == 0 or free_only == False:
            pass
    except Exception:
        # Bug fix: the old message wrongly referred to "search range".
        raise ValueError("free_only must be 0, 1, True, or False")
    df['url'] = df['href']
    df = df.rename(columns = {'Name':'Event_Name','Description':'Event_Description','Latitude':'Event_Lat','Longitude':'Event_Lon','Price':'Event_Price_Detailed'})
    df = df[['Event_Name','Event_Description','DateEnd','Distance','Event_Lat','Event_Lon','Event_Price_Adult','url','Event_Price_Detailed']]
    if google_maps_key is not None:
        if google_maps_key.startswith('AI'):
            google_maps = googlemaps.Client(key=google_maps_key)
            try:
                df['Event_Address'] = df.apply(lambda x: google_maps.reverse_geocode((x.Event_Lat,\
                    x.Event_Lon)), axis=1)[0][1]['formatted_address']
            except Exception:
                # Reverse geocoding is optional; keep going without addresses.
                pass
        else:
            raise ValueError("Invalid Google Maps Key. Please Refer to https://developers.google.com/maps to obtain a valid key.")
    df = df.sort_values(by = 'Distance')
    if mapping == True:
        nymap = map_events(df, google_maps_key = google_maps_key, start_lat = lat, start_lon = lon, lat_column = "Event_Lat", long_column = "Event_Lon", name_column = 'Event_Name')
        return df, nymap
    else:
        return df
def find_my_dinner(google_maps_key, my_location = None, mapping = False, lat = None, lon = None, search_range = 500, min_rating = 4.3):
"""
Returns Restaurant List within a specified range at a specified minimum rating (address OR latitude and longitude format).
Args
----
Required:
my_location(str): Address starting point - Must be within NYC Metro Location
Default: None
google_maps_key (str): Google maps API key needed to geocode your location
To obtain a google maps API key, please refer to https://developers.google.com/maps
Default: None, OR
lat(float): Latitude of starting point - Must be within NYC Metro Location
Default: None
lon(float): Longitude of starting point - Must be within NYC Metro Location
Default: None
Optional:
search_range(float): Distance from starting point (radius for search, meters)
Default: 500
min_rating(float): should be 1-5
Default: 4.3
mapping(bool): Boolean param specifying whether user wants a simple | |
<filename>phobos/utils/editing.py
#!/usr/bin/python3
# coding=utf-8
# -------------------------------------------------------------------------------
# This file is part of Phobos, a Blender Add-On to edit robot models.
# Copyright (C) 2020 University of Bremen & DFKI GmbH Robotics Innovation Center
#
# You should have received a copy of the 3-Clause BSD License in the LICENSE file.
# If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# -------------------------------------------------------------------------------
"""
Contains the utility functions for editing objects and Phobos models.
"""
import bpy
import mathutils
import math
from phobos.phoboslog import log
import phobos.utils.selection as sUtils
import phobos.utils.naming as nUtils
import phobos.utils.blender as bUtils
import phobos.utils.io as ioUtils
import phobos.defs as defs
def dissolveLink(obj, delete_other=False):
    """Remove the selected link and reparent all links, inertia, visual and collisions to its effective Parent.

    Args:
      obj(bpy.types.Object): the link to dissolve
      delete_other(bool, optional): also delete directly attached motors, controllers,
        sensors and submodels (Default value = False)

    Returns:

    """
    # Validate BEFORE touching view-layer state.  (Previously all layers were
    # un-excluded first, so this early return leaked the "show everything"
    # visibility state.)
    if not obj.phobostype == 'link':
        log('Selected object {} is not a link!'.format(obj.name), 'ERROR')
        return
    # Store original layer visibility and show all layers, so hidden children
    # can be selected and reparented by the operators below.
    originallayers = {}
    for name, coll in bpy.context.window.view_layer.layer_collection.children.items():
        originallayers[name] = coll.exclude
        coll.exclude = False
    try:
        delete = [obj]
        # Get all children that must survive the dissolve.
        children = sUtils.getRecursiveChildren(
            obj, phobostypes=('link', 'inertial', 'visual', 'collision'), include_hidden=True
        )
        if delete_other:
            other_children = sUtils.getRecursiveChildren(
                obj,
                recursion_depth=2,
                phobostypes=('motor', 'controller', 'sensor', 'submodel'),
                include_hidden=True,
            )
            delete += [child for child in other_children if child not in children]
        # Get the parent
        parent = obj.parent
        # If parent is not None ( Root )
        if obj.parent:
            # Reparent
            parentObjectsTo(children, parent, clear=True)
        # Delete the objects
        sUtils.selectObjects(delete, clear=True, active=-1)
        bpy.ops.object.delete()
    finally:
        # Always restore the original layer visibility, even when an operator
        # above raised.
        for key, value in originallayers.items():
            bpy.context.window.view_layer.layer_collection.children[key].exclude = value
def getCombinedTransform(obj, effectiveparent):
    """Compute *obj*'s transform relative to *effectiveparent*.

    Accumulates the local matrices along the parenting chain from *obj* up to
    (but excluding) *effectiveparent*.  Note that the effective parent's
    absolute scale still affects the result, since the immediate parent's
    world scale is folded into the child's local matrix first.

    Args:
      obj(bpy.types.Object): the child object
      effectiveparent(bpy.types.Object): the effective parent of the child object

    Returns:
      : bpy.types.Matrix -- the combined transformations of the child object
    """
    matrix = obj.matrix_local
    ancestor = obj.parent
    # Fold the immediate parent's absolute scale into the local matrix.
    if ancestor:
        scale_mat = mathutils.Matrix.Identity(4)
        sx, sy, sz = ancestor.matrix_world.to_scale()
        scale_mat[0][0] = sx
        scale_mat[1][1] = sy
        scale_mat[2][2] = sz
        matrix = scale_mat @ matrix
    # Walk up the hierarchy, stopping at the effective parent (its own
    # transform is not applied).
    while ancestor is not None and ancestor != effectiveparent:
        matrix = ancestor.matrix_local @ matrix
        ancestor = ancestor.parent
    return matrix
def restructureKinematicTree(link, root=None):
    """Restructures a tree such that the ``link`` provided becomes the root of the tree.

    If no root object is provided, :func:`phobos.utils.selection.getRoot` will be used.

    For instance, the following tree::

           A
          / \\
         B   C
        / \\  \\
       D   E   F

    would, using the call restructureKinematicsTree(C), become::

            C
           / \\
          A   F
         /
        B
       / \\
      D   E

    Currently, this function ignores all options such as unselected or hidden objects.

    Relies on Blender operators (``parent_clear``/``parent_set`` via
    :func:`parentObjectsTo`), so the current selection state is modified.

    Args:
      link(bpy.types.Object): the link which will become the new root object
      root(bpy.types.Object, optional): the current root object (Default value = None)

    Returns:
      None: None
    """
    if not root:
        root = sUtils.getRoot(link)
    links = [link]
    obj = link
    # stop right now when the link is already root
    if not obj.parent:
        log('No restructure necessary. Link is already root.', 'INFO')
        return
    # gather chain of links ascending the tree
    # NOTE(review): assumes `link` is a descendant of `root`; otherwise
    # obj.parent becomes None during the ascent and the name access fails --
    # confirm callers guarantee this.
    while obj.parent.name != root.name:
        obj = obj.parent
        if obj.phobostype == 'link':
            links.append(obj)
    links.append(root)
    log("Unparenting objects for restructure: " + str([link.name for link in links]) + ".", 'DEBUG')
    # unparent all links
    sUtils.selectObjects(links, True)
    bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
    log("Restructuring objects for new hierarchy.", 'DEBUG')
    # Re-chain in reverse order: each gathered link becomes the parent of the
    # next one, so the chain now descends from `link` towards the old root.
    for i in range(len(links) - 1):
        parent = links[i]
        child = links[i + 1]
        parentObjectsTo(child, parent)
    log("Copying model information from old root.", 'DEBUG')
    # copy properties
    # Move the model-level custom properties from the old root to the new one.
    if 'model/name' in root:
        link['model/name'] = root['model/name']
        del root['model/name']
    if 'model/version' in root:
        link['model/version'] = root['model/version']
        del root['model/version']
    log("Restructured kinematic tree to new root: {}.".format(link.name), 'INFO')
def parentObjectsTo(objects, parent, clear=False):
    """Parent the specified object(s) to *parent*.

    Links (phobostype 'link') receive *bone relative* parenting, everything
    else plain *object* parenting.  If *clear* is set, existing parenting is
    removed first while keeping the world transform.

    Args:
      objects(list(bpy.types.Object): objects to set parent of
      parent(bpy.types.Object): parent object
      clear(bool, optional): if True, the parenting of the objects will be cleared (Default value = False)

    Returns:

    """
    # Accept a single object as well as a list.
    if not isinstance(objects, list):
        objects = [objects]
    if clear:
        sUtils.selectObjects(objects, active=0, clear=True)
        bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
    # Blender parents the selection to the active object, so select the
    # parent first and make it active.
    sUtils.selectObjects([parent] + objects, active=0, clear=True)
    parent_type = 'BONE_RELATIVE' if parent.phobostype == 'link' else 'OBJECT'
    bpy.ops.object.parent_set(type=parent_type)
def getNearestCommonParent(objs):
"""Returns hierarchically lowest common parent of the provided objects
Args:
objs: list of objects (bpy_types.Object)
Returns:
"""
anchor = objs[0] # pick one link as the anchor link
rest = objs[1:] # get other links to iterate over
in_all = False # this will be true if all 'rest' branches have parent as a common parent
parent = anchor # the candidate for common parent
inter_objects = set()
while not in_all and parent.parent:
in_all = True
parent = parent.parent # go up the anchor branch
inter_objects.add(parent)
for obj in rest: # start at base of each rest branch
o = obj
while (
o.parent and o.parent != parent
): # as long as there is a parent that is not the candidate parent
o = o.parent
inter_objects.add(o)
if (
o.parent != parent
): # check which break condition happened, break if not arrived at parent
in_all = False
break
if not in_all: # this is only true if none of the branches set it to False and broke afterwards
return None
else:
inter_objects.remove(parent)
return parent, list(inter_objects)
def instantiateSubmodel(submodelname, instancename, size=1.0):
    """Creates an instance of the submodel specified by the submodelname.

    The instance receives the definitions of the group as it is generated.

    Args:
      submodelname: name of the submodel (Blender group) to create an
        instance of
      instancename: name the instance object will receive
      size: empty draw size of the instance object (Default value = 1.0)

    Returns:
      bpy.types.Object or None -- the new submodel instance object, or None
      when the submodel is not defined

    """
    submodel = None
    interfaces = None
    # find the existing group for submodel and interface
    for group in bpy.data.groups:
        # search for namespaced groups with the exact name
        if ':' in group.name and submodelname == group.name:
            submodel = group
        if group.name.startswith('interfaces:') and submodelname.split(':')[1] in group.name:
            interfaces = group
    if not submodel:
        log('Selected submodel is not defined.', 'ERROR')
        # Bug fix: previously execution continued and crashed with an
        # AttributeError on `submodel.name`; bail out instead.
        return None
    if not interfaces:
        log('No interfaces defined for this submodel.', 'INFO')
    # add the submodel and write in data
    bpy.ops.object.group_instance_add(group=submodel.name)
    submodelobj = bpy.context.active_object
    submodelobj.phobostype = 'submodel'
    submodelobj['submodeltype'] = submodel.name.split(':')[0]
    # TODO currently this works only by name binding, we should add links to
    # the group here
    submodelobj['submodel/name'] = submodelname
    submodelobj['submodelname'] = submodelname
    # copy custom props from group to instance
    for key in submodel.keys():
        submodelobj[key] = submodel[key]
    submodelobj.name = instancename
    submodelobj.empty_draw_size = size
    # add the interfaces if available
    if interfaces:
        # create group and make real
        bpy.ops.object.group_instance_add(group=interfaces.name)
        bpy.ops.object.duplicates_make_real()
        # write interface parameters and change namespace
        for obj in bpy.context.selected_objects:
            nUtils.addNamespace(obj, instancename)
            obj.name = obj.name.rsplit('.')[0]
            obj['submodeltype'] = 'interface'
            bUtils.toggleTransformLock(obj, True)
        # parent interfaces to submodel empty
        parentObjectsTo(bpy.context.selected_objects, submodelobj)
        # delete empty parent object of interfaces
        sUtils.selectObjects(
            objects=[
                a
                for a in bpy.context.selected_objects
                if a.type == 'EMPTY' and 'submodeltype' in a and a['submodeltype'] == 'interface'
            ],
            clear=True,
            active=0,
        )
        bpy.ops.object.delete(use_global=False)
    return submodelobj
def defineSubmodel(submodelname, submodeltype, version='', objects=None):
"""Defines a new submodule group with the specified name and type.
The group will be named like so:
'submodeltype:submodelname/version'
Objects with the phobostype 'interface' (if present) are handled separately
and put into a respective submodel group (which features the 'interface'
submodeltype).
If the version is omitted, the respective part of the name is dropped, too.
If no object list is provided the objects are derived from selection.
The submodeltype is also added as dict entry to the group in Blender.
The selected objects are moved to the respective layer for submodels or
interfaces.
Args:
submodelname: descriptive name of the submodel
submodeltype: type of the submodel (e.g. | |
= var055_tangent_b_z
f_log_intermediate[56] = var056_tangent_b_y
f_log_intermediate[57] = var057_tangent_t_z
f_log_intermediate[58] = var058
f_log_intermediate[59] = var059
f_log_intermediate[60] = var060
f_log_intermediate[61] = var061
f_log_intermediate[62] = var062
f_log_intermediate[63] = var063
f_log_intermediate[64] = var064
f_log_intermediate[65] = var065
f_log_intermediate[66] = var066
f_log_intermediate[67] = var067
f_log_intermediate[68] = var068
f_log_intermediate[69] = var069_cross_tangent_z
f_log_intermediate[70] = var070_tangent_t_x
f_log_intermediate[71] = var071
f_log_intermediate[72] = var072
f_log_intermediate[73] = var073_tangent_b_x
f_log_intermediate[74] = var074
f_log_intermediate[75] = var075_cross_tangent_x
f_log_intermediate[76] = var076
f_log_intermediate[77] = var077
f_log_intermediate[78] = var078
f_log_intermediate[79] = var079_cross_tangent_y
f_log_intermediate[80] = var080
f_log_intermediate[81] = var081
f_log_intermediate[82] = var082
f_log_intermediate[83] = var083
f_log_intermediate[84] = var084
f_log_intermediate[85] = var085
f_log_intermediate[86] = var086
f_log_intermediate[87] = var087
f_log_intermediate[88] = var088
f_log_intermediate[89] = var089_dhdu
f_log_intermediate[90] = var090_small_t_x
f_log_intermediate[91] = var091
f_log_intermediate[92] = var092
f_log_intermediate[93] = var093_cos
f_log_intermediate[94] = var094
f_log_intermediate[95] = var095
f_log_intermediate[96] = var096
f_log_intermediate[97] = var097
f_log_intermediate[98] = var098_normal_y
f_log_intermediate[99] = var099_normal_z
f_log_intermediate[100] = var100_dhdv
f_log_intermediate[101] = var101_small_b_x
f_log_intermediate[102] = var102
f_log_intermediate[103] = var103
f_log_intermediate[104] = var104
f_log_intermediate[105] = var105
f_log_intermediate[106] = var106
f_log_intermediate[107] = var107
f_log_intermediate[108] = var108
f_log_intermediate[109] = var109
f_log_intermediate[110] = var110
f_log_intermediate[111] = var111
f_log_intermediate[112] = var112
f_log_intermediate[113] = var113
f_log_intermediate[114] = var114_small_t_y
f_log_intermediate[115] = var115
f_log_intermediate[116] = var116
f_log_intermediate[117] = var117
f_log_intermediate[118] = var118_normal_x
f_log_intermediate[119] = var119_small_b_y
f_log_intermediate[120] = var120
f_log_intermediate[121] = var121
f_log_intermediate[122] = var122
f_log_intermediate[123] = var123
f_log_intermediate[124] = var124
f_log_intermediate[125] = var125
f_log_intermediate[126] = var126
f_log_intermediate[127] = var127
f_log_intermediate[128] = var128
f_log_intermediate[129] = var129_small_t_z
f_log_intermediate[130] = var130
f_log_intermediate[131] = var131
f_log_intermediate[132] = var132
f_log_intermediate[133] = var133_small_b_z
f_log_intermediate[134] = var134
f_log_intermediate[135] = var135
f_log_intermediate[136] = var136
f_log_intermediate[137] = var137
f_log_intermediate[138] = var138
f_log_intermediate[139] = var139
f_log_intermediate[140] = var140
f_log_intermediate[141] = var141
f_log_intermediate[142] = var142
f_log_intermediate[143] = var143
f_log_intermediate[144] = var144
f_log_intermediate[145] = var145
f_log_intermediate[146] = var146
f_log_intermediate[147] = var147
f_log_intermediate[148] = var148
f_log_intermediate[149] = var149
f_log_intermediate[150] = var150
f_log_intermediate[151] = var151
f_log_intermediate[152] = var152
f_log_intermediate[153] = var153
f_log_intermediate[154] = var154
f_log_intermediate[155] = var155
f_log_intermediate[156] = var156
f_log_intermediate[157] = var157
f_log_intermediate[158] = var158
f_log_intermediate[159] = var159
f_log_intermediate[160] = var160
f_log_intermediate[161] = var161
f_log_intermediate[162] = var162
f_log_intermediate[163] = var163
f_log_intermediate[164] = var164
f_log_intermediate[165] = var165
f_log_intermediate[166] = var166
f_log_intermediate[167] = var167
f_log_intermediate[168] = var168
f_log_intermediate[169] = var169
f_log_intermediate[170] = var170
f_log_intermediate[171] = var171
f_log_intermediate[172] = var172
f_log_intermediate[173] = var173
f_log_intermediate[174] = var174
f_log_intermediate[175] = var175
f_log_intermediate[176] = var176
f_log_intermediate[177] = var177
f_log_intermediate[178] = var178
f_log_intermediate[179] = var179_cos
f_log_intermediate[180] = var180
f_log_intermediate[181] = var181
f_log_intermediate[182] = var182
f_log_intermediate[183] = var183_sin
f_log_intermediate[184] = var184
f_log_intermediate[185] = var185
f_log_intermediate[186] = var186
f_log_intermediate[187] = var187
f_log_intermediate[188] = var188_color_2_x
f_log_intermediate[189] = var189
f_log_intermediate[190] = var190
f_log_intermediate[191] = var191
f_log_intermediate[192] = var192
f_log_intermediate[193] = var193
f_log_intermediate[194] = var194
f_log_intermediate[195] = var195
f_log_intermediate[196] = var196_light_dir_x
f_log_intermediate[197] = var197_unit_new_normal_x
f_log_intermediate[198] = var198
f_log_intermediate[199] = var199_Nl
f_log_intermediate[200] = var200
f_log_intermediate[201] = var201
f_log_intermediate[202] = var202
f_log_intermediate[203] = var203
f_log_intermediate[204] = var204
f_log_intermediate[205] = var205
f_log_intermediate[206] = var206_light_dir_y
f_log_intermediate[207] = var207_unit_new_normal_y
f_log_intermediate[208] = var208
f_log_intermediate[209] = var209_light_dir_z
f_log_intermediate[210] = var210_unit_new_normal_z
f_log_intermediate[211] = var211
f_log_intermediate[212] = var212
f_log_intermediate[213] = var213
f_log_intermediate[214] = var214
f_log_intermediate[215] = var215_our_sign
f_log_intermediate[216] = var216
f_log_intermediate[217] = var217
f_log_intermediate[218] = var218
f_log_intermediate[219] = var219
f_log_intermediate[220] = var220_our_sign_down
f_log_intermediate[221] = var221
f_log_intermediate[222] = var222
f_log_intermediate[223] = var223
f_log_intermediate[224] = var224
f_log_intermediate[225] = var225
f_log_intermediate[226] = var226
f_log_intermediate[227] = var227
f_log_intermediate[228] = var228_R_x
f_log_intermediate[229] = var229
f_log_intermediate[230] = var230
f_log_intermediate[231] = var231
f_log_intermediate[232] = var232_R_y
f_log_intermediate[233] = var233
f_log_intermediate[234] = var234
f_log_intermediate[235] = var235_R_z
f_log_intermediate[236] = var236
f_log_intermediate[237] = var237
f_log_intermediate[238] = var238
f_log_intermediate[239] = var239
f_log_intermediate[240] = var240
f_log_intermediate[241] = var241_our_sign
f_log_intermediate[242] = var242_our_select
f_log_intermediate[243] = var243
f_log_intermediate[244] = var244
f_log_intermediate[245] = var245_color_y
f_log_intermediate[246] = var246
f_log_intermediate[247] = var247
f_log_intermediate[248] = var248
f_log_intermediate[249] = var249_color_1_y
f_log_intermediate[250] = var250_color_2_y
f_log_intermediate[251] = var251_our_select
f_log_intermediate[252] = var252
f_log_intermediate[253] = var253
f_log_intermediate[254] = var254_color_z
f_log_intermediate[255] = var255
f_log_intermediate[256] = var256
f_log_intermediate[257] = var257
f_log_intermediate[258] = var258_color_1_z
f_log_intermediate[259] = var259_color_2_z
return
def bricks_f(X, f_log_intermediate):
var006 = X[7]
var005__log_is_intersect = var006
var004_our_sign_up = tf_np_sign(var005__log_is_intersect)
var003 = ((var004_our_sign_up)*(0.5))
var001 = ((var003)+(0.5))
var029 = X[2]
var028_tex_coords_y = var029
var027 = tf.floormod(var028_tex_coords_y, 10.0)
var026 = ((5.0)-(var027))
var025_our_sign = tf_np_sign(var026)
var024 = ((0.5)*(var025_our_sign))
var023 = ((0.5)-(var024))
var021 = ((var023)==(1.0))
var033 = X[1]
var032_tex_coords_x = var033
var030 = tf.floormod(var032_tex_coords_x, 20.0)
var031_our_sign = tf_np_sign(var030)
var022 = ((var030)*(var031_our_sign))
var019 = ((var021)*(var022))
var034 = ((1)-(var021))
var036 = ((var022)+(10.0))
var035 = tf.floormod(var036, 20.0)
var020 = ((var034)*(var035))
var018 = ((var019)+(var020))
var017 = ((0.5)-(var018))
var016_our_sign_up = tf_np_sign(var017)
var015 = ((var016_our_sign_up)*(0.5))
var013 = ((var015)+(0.5))
var014_mortar_color_x = 0.7
var011 = ((var013)*(var014_mortar_color_x))
var037 = ((1)-(var013))
var046 = tf.floormod(var028_tex_coords_y, 5.0)
var047_our_sign = tf_np_sign(var046)
var045 = ((var046)*(var047_our_sign))
var044 = ((0.5)-(var045))
var043_our_sign_up = tf_np_sign(var044)
var042 = ((var043_our_sign_up)*(0.5))
var041 = ((var042)+(0.5))
var039 = ((var041)*(var014_mortar_color_x))
var048 = ((1)-(var041))
var052_brick_color_light_x = 0.49
var056_gabor_noise = simplex_noise(var032_tex_coords_x, var028_tex_coords_y)
#var056_gabor_noise = gabor_noise(1.0,0.05,0.05,0.0,64.0,256,var032_tex_coords_x,var028_tex_coords_y)
var055 = ((0.5)*(var056_gabor_noise))
var054 = ((0.5)+(var055))
var053_noise = var054
var050 = ((var052_brick_color_light_x)*(var053_noise))
var057_brick_color_dark_x = 0.7
var058 = ((1.0)-(var053_noise))
var051 = ((var057_brick_color_dark_x)*(var058))
var049 = ((var050)+(var051))
var040 = ((var048)*(var049))
var038 = ((var039)+(var040))
var012 = ((var037)*(var038))
var009 = ((var011)+(var012))
var066_light_dir_x = 0.228085776381
var067_normal_x = 0.0
var064 = ((var066_light_dir_x)*(var067_normal_x))
var068_light_dir_y = 0.608228737016
var069_normal_y = 0.0
var065 = ((var068_light_dir_y)*(var069_normal_y))
var062 = ((var064)+(var065))
var070_light_dir_z = 0.76028592127
var071_normal_z = 1.0
var063 = ((var070_light_dir_z)*(var071_normal_z))
var060 = ((var062)+(var063))
var072 = ((0.0)-(var060))
var075_our_sign = tf_np_sign(var072)
var074 = ((var075_our_sign)*(0.5))
var073 = ((var074)+(0.5))
var061 = ((var072)*(var073))
var059 = ((var060)+(var061))
var010_LN = var059
var007 = ((var009)*(var010_LN))
var080_our_sign_down = tf_np_sign(var010_LN)
var079 = ((var080_our_sign_down)*(0.5))
var077 = ((var079)+(0.5))
var092 = ((2.0)*(var010_LN))
var091 = ((var092)*(var067_normal_x))
var090 = ((var091)-(var066_light_dir_x))
var088_R_x = var090
var089 = X[4]
var086 = ((var088_R_x)*(var089))
var096 = ((var092)*(var069_normal_y))
var095 = ((var096)-(var068_light_dir_y))
var093_R_y = var095
var094 = X[5]
var087 = ((var093_R_y)*(var094))
var084 = ((var086)+(var087))
var100 = ((var092)*(var071_normal_z))
var099 = ((var100)-(var070_light_dir_z))
var097_R_z = var099
var098 = X[6]
var085 = ((var097_R_z)*(var098))
var082 = ((var084)+(var085))
var101 = ((0.0)-(var082))
var104_our_sign = tf_np_sign(var101)
var103 = ((var104_our_sign)*(0.5))
var102 = ((var103)+(0.5))
var083 = ((var101)*(var102))
var081 = ((var082)+(var083))
var078 = pow(var081, 25.0)
var076 = ((var077)*(var078))
var008_specular_intensity = var076
var002 = ((var007)+(var008_specular_intensity))
var000_our_select = var001 * var002
var111_mortar_color_y = 0.7
var109 = ((var013)*(var111_mortar_color_y))
var113 = ((var041)*(var111_mortar_color_y))
var118_brick_color_light_y = 0.02
var116 = ((var118_brick_color_light_y)*(var053_noise))
var119_brick_color_dark_y = 0.02
var117 = ((var119_brick_color_dark_y)*(var058))
var115 = ((var116)+(var117))
var114 = ((var048)*(var115))
var112 = ((var113)+(var114))
var110 = ((var037)*(var112))
var108 = ((var109)+(var110))
var107 = ((var108)*(var010_LN))
var106 = ((var107)+(var008_specular_intensity))
var105_our_select = var001 * var106
var126_mortar_color_z = 0.7
var124 = ((var013)*(var126_mortar_color_z))
var128 = ((var041)*(var126_mortar_color_z))
var133_brick_color_light_z = 0.11
var131 = ((var133_brick_color_light_z)*(var053_noise))
var134_brick_color_dark_z = 0.05;
var132 = ((var134_brick_color_dark_z)*(var058))
var130 = ((var131)+(var132))
var129 = ((var048)*(var130))
var127 = ((var128)+(var129))
var125 = ((var037)*(var127))
var123 = ((var124)+(var125))
var122 = ((var123)*(var010_LN))
var121 = ((var122)+(var008_specular_intensity))
var120_our_select = var001 * var121
f_log_intermediate[0] = var000_our_select
f_log_intermediate[1] = var001
f_log_intermediate[2] = var002
f_log_intermediate[3] = var003
f_log_intermediate[4] = var004_our_sign_up
f_log_intermediate[5] = var005__log_is_intersect
f_log_intermediate[6] = var006
f_log_intermediate[7] = var007
f_log_intermediate[8] = var008_specular_intensity
f_log_intermediate[9] = var009
f_log_intermediate[10] = var010_LN
f_log_intermediate[11] = var011
f_log_intermediate[12] = var012
f_log_intermediate[13] = var013
f_log_intermediate[14] = var014_mortar_color_x
f_log_intermediate[15] = var015
f_log_intermediate[16] = var016_our_sign_up
f_log_intermediate[17] = var017
f_log_intermediate[18] = var018
f_log_intermediate[19] = var019
f_log_intermediate[20] = var020
f_log_intermediate[21] = var021
f_log_intermediate[22] = var022
f_log_intermediate[23] = var023
f_log_intermediate[24] = var024
f_log_intermediate[25] = var025_our_sign
f_log_intermediate[26] = var026
f_log_intermediate[27] = var027
f_log_intermediate[28] = var028_tex_coords_y
f_log_intermediate[29] = var029
f_log_intermediate[30] = var030
f_log_intermediate[31] = var031_our_sign
f_log_intermediate[32] = var032_tex_coords_x
f_log_intermediate[33] = var033
f_log_intermediate[34] = var034
f_log_intermediate[35] = var035
f_log_intermediate[36] = var036
f_log_intermediate[37] = var037
f_log_intermediate[38] = var038
f_log_intermediate[39] = var039
f_log_intermediate[40] = var040
f_log_intermediate[41] = var041
f_log_intermediate[42] = var042
f_log_intermediate[43] = var043_our_sign_up
f_log_intermediate[44] = var044
f_log_intermediate[45] = var045
f_log_intermediate[46] = var046
f_log_intermediate[47] = var047_our_sign
f_log_intermediate[48] = var048
f_log_intermediate[49] = var049
f_log_intermediate[50] = var050
f_log_intermediate[51] = var051
f_log_intermediate[52] = var052_brick_color_light_x
f_log_intermediate[53] = var053_noise
f_log_intermediate[54] = var054
f_log_intermediate[55] = var055
f_log_intermediate[56] = var056_gabor_noise
f_log_intermediate[57] = var057_brick_color_dark_x
f_log_intermediate[58] = var058
f_log_intermediate[59] = var059
f_log_intermediate[60] = var060
f_log_intermediate[61] = var061
f_log_intermediate[62] = var062
f_log_intermediate[63] = var063
f_log_intermediate[64] = var064
f_log_intermediate[65] = var065
f_log_intermediate[66] = var066_light_dir_x
f_log_intermediate[67] = var067_normal_x
f_log_intermediate[68] = var068_light_dir_y
f_log_intermediate[69] = var069_normal_y
f_log_intermediate[70] = var070_light_dir_z
f_log_intermediate[71] = var071_normal_z
f_log_intermediate[72] = var072
f_log_intermediate[73] = var073
f_log_intermediate[74] = var074
f_log_intermediate[75] = var075_our_sign
f_log_intermediate[76] = var076
f_log_intermediate[77] = var077
f_log_intermediate[78] = var078
f_log_intermediate[79] = var079
f_log_intermediate[80] = | |
5)) + 2) / 2
def_ = ((random.randint(1, 5)) + 3) / 2
spd = ((random.randint(0, 6)) + 7) / 2
class_w = float(9)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 6:
print("So, a healer, got it!")
print(" ")
clase_str = "healer"
time.sleep(.25)
atk = ((random.randint(1, 5)) + .5) / 2
def_ = ((random.randint(1, 6)) + 6.5) / 2
spd = ((random.randint(1, 5)) + 5) / 2
class_w = float(5)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 7:
print("So, an assassin, got it!")
print(" ")
clase_str = "assassin"
time.sleep(.25)
atk = ((random.randint(1, 6)) + 6) / 2
def_ = ((random.randint(1, 5)) + 0) / 2
spd = ((random.randint(1, 5)) + 5) / 2
class_w = float(1)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 8:
print("So, a beast, got it!")
print(" ")
clase_str = "beast"
time.sleep(.25)
atk = ((random.randint(1, 6)) + 3) / 2
def_ = ((random.randint(1, 5)) + 2) / 2
spd = ((random.randint(1, 5)) + 4) / 2
class_w = float(4)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 9:
class_w = float(8)
print("So, a mefolk, got it!")
clase_str = "merfolk"
print(" ")
time.sleep(.25)
atk = ((random.randint(1, 5)) + 3) / 2
def_ = ((random.randint(1, 5)) + 1) / 2
spd = ((random.randint(1, 6)) + 6) / 2
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 10:
print("So, a shador, got it!")
clase_str = "shador"
print(" ")
time.sleep(.25)
atk = ((random.randint(0, 5)) + 6) / 2
def_ = ((random.randint(1, 5)) + 2) / 2
spd = ((random.randint(0, 6)) + 5) / 2
class_w = float(7)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 11:
print("So, a demon, got it!")
print(" ")
clase_str = "demon"
time.sleep(.25)
atk = ((random.randint(0, 9)) + 5) / 2
def_ = ((random.randint(1, 5)) + 0) / 2
spd = ((random.randint(2, 12)) + 0) / 2
class_w = float(13)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 13:
print("So, a brawler, cool!")
print(" ")
clase_str = "brawler"
time.sleep(.25)
atk = ((random.randint(0, 7)) + 7) / 2
def_ = ((random.randint(1, 5)) + 5) / 2
spd = ((random.randint(1, 5)) + 0) / 2
class_w = float(6)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
if clase == 12:
print("So, an angel, got it!")
print(" ")
clase_str = "angel"
atk = ((random.randint(0, 5)) + 0) / 2
def_ = ((random.randint(0, 7)) + 8) / 2
spd = ((random.randint(1, 5)) + 4) / 2
class_w = float(13)
clase = float(12)
print("And the stats are:", atk, "attack", def_, "defense", "and", spd, "speed")
print(" ")
time.sleep(.25)
spd_2 = spd
atk_2 = atk
def_2 = def_
return[clase,atk,def_,spd,class_w,spd_2,atk_2,def_2,clase_str]
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def get_enemy_class(en_1_class,en_1_class2,en1_1_class_w):
    """Roll a random enemy class.

    The incoming arguments are ignored and overwritten; they exist only
    so callers can pass their current values straight through.

    :returns: [class name (str), class number 1-13 (int),
               weakness weight (int for knights, float otherwise)]
    """
    # class number -> (display name, weakness weight); a None weight
    # means the weight is rolled separately below.
    class_table = {
        1: ("knight", None),
        2: ("mage", 10.0),
        3: ("giant", 2.0),
        4: ("gunner", 3.0),
        5: ("techster", 9.0),
        6: ("healer", 5.0),
        7: ("assassin", 1.0),
        8: ("beast", 4.0),
        9: ("merfolk", 8.0),
        10: ("shador", 7.0),
        11: ("demon", 13.0),
        12: ("angel", 13.0),
        13: ("brawler", 6.0),
    }
    roll = random.randint(1, 13)
    name, weight = class_table[roll]
    if weight is None:
        # knights draw their weakness weight at random (11 or 12)
        weight = random.randint(11, 12)
    return [name, roll, weight]
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def get_enemy_stats(e1_atk,e1_def,e1_spd):
    """Roll fresh enemy stats; the incoming values are ignored and
    overwritten (the caller passes its current stats through).

    :returns: [attack (int 1-5), defense (int 1-4), speed (float 1-5)]
    """
    rolled_atk = random.randint(1, 5)
    rolled_def = random.randint(1, 4)
    rolled_spd = float(random.randint(1, 5))
    return [rolled_atk, rolled_def, rolled_spd]
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def Dance(wa):
    """Victory dance: bob the turtle `wa` forward and back four times,
    pen up, with a short pause at the far point of each bob."""
    wa.penup()
    bobs = 0
    while bobs < 4:
        wa.forward(30)
        time.sleep(.1)
        wa.backward(30)
        bobs += 1
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def Attack_sequence1(true_turns,atk,def_,spd,atk2,def2_,e1_atk,e1_def,e1_spd):
    """One combined combat round: the opponent attacks the player, then
    the player chooses an attacker (1 = you, 2 = partner) via console
    input and hits back.

    Interactive: blocks on input() and sleeps between messages.

    :returns: [true_turns, atk, def_, spd, atk2, def2_, e1_atk, e1_def,
        e1_spd] with updated speeds; on the else path combat is over and
        true_turns is reset to 0.
    """
    # NOTE(review): mixed and/or without parentheses -- `and` binds
    # tighter than `or`, so this reads (e1_spd>0 and spd>0) or
    # (e1_spd>0 and def_>e1_atk); confirm that is the intended rule.
    if e1_spd > int(0) and spd > int(0) or e1_spd > int(0) and def_ > e1_atk:
        # player too slow to act, but defense beats the enemy attack
        if spd < 1 and def_ > e1_atk:
            print("Your defense is too strong to break.")
            print(" ")
            time.sleep(.25)
        print("Opponent Turn.")
        print(" ")
        time.sleep(.25)
        # enemy removes a tenth of its attack from the player's speed
        spd = (spd - e1_atk/10)
        print("They attacked" " Your speed is now", spd, ".")
        print(" ")
        time.sleep(.25)
        print("You can attack now though")
        print(" ")
        time.sleep(.25)
        move = int(input("You or your partner(1 or 2):"))
        print(" ")
        time.sleep(.25)
        # coin flip: luck2 == 2 means the next hit is a critical
        luck2 = random.randint(1,2)
        if move == 1:
            if luck2 == 2:
                print("The next attack is a critical hit!")
                print(" ")
                time.sleep(.25)
                atk = (atk * 2)  # crit: double attack for this hit only
            e1_spd = (e1_spd - (atk/10))
            print("Opponent speed is now", e1_spd, ".")
            print(" ")
            time.sleep(.25)
            if luck2 == 2:
                atk = (atk / 2)  # undo the temporary crit doubling
        if move == 2:
            if luck2 == 2:
                print("The next attack is a critical hit!")
                print(" ")
                time.sleep(.25)
                atk2 = (atk2 * 2)  # crit: double partner attack
            e1_spd = (e1_spd - (atk2/10))
            print("Opponent speed is now", e1_spd, ".")
            print(" ")
            time.sleep(.25)
            if luck2 == 2:
                atk2 = (atk2 / 2)  # undo the temporary crit doubling
        return[true_turns,atk,def_,spd,atk2,def2_,e1_atk,e1_def,e1_spd]
    else:
        # one side can no longer fight: signal end of combat
        true_turns = 0
        return[true_turns,atk,def_,spd,atk2,def2_,e1_atk,e1_def,e1_spd]
def Player_attack(atk,atk2,atk3,e1_spd):
    """Player turn: ask which party member attacks (1, 2 or 3) and
    reduce the opponent's speed by that member's attack / 10.

    A coin flip (luck2 == 2) makes the hit a critical: the attacker's
    attack stat is doubled for the hit and halved back afterwards.

    NOTE(review): entering a value below 1 silently does nothing (no
    miss message); only move > 3 prints the miss line -- confirm intent.

    :returns: [atk, atk2, atk3, e1_spd] with e1_spd updated.
    """
    print(" ")
    move = int(input("Who's attacking; you or your partner(1, 2 or 3):"))
    print(" ")
    time.sleep(.25)
    # coin flip: luck2 == 2 means the next hit is a critical
    luck2 = random.randint(1,2)
    if move == 1:
        if luck2 == 2:
            print("The next attack is a critical hit!")
            print(" ")
            time.sleep(.25)
            atk = (atk * 2)  # crit: double attack for this hit only
        e1_spd = (e1_spd - (atk/10))
        print("Opponent speed is now", e1_spd, ".")
        print(" ")
        time.sleep(.25)
        if luck2 == 2:
            atk = (atk / 2)  # undo the temporary crit doubling
    if move == 2:
        if luck2 == 2:
            print("The next attack is a critical hit!")
            print(" ")
            time.sleep(.25)
            atk2 = (atk2 * 2)  # crit: double partner attack
        e1_spd = (e1_spd - (atk2/10))
        print("Opponent speed is now", e1_spd, ".")
        print(" ")
        time.sleep(.25)
        if luck2 == 2:
            atk2 = (atk2 / 2)  # undo the temporary crit doubling
    if move == 3:
        if luck2 == 2:
            print("The next attack is a critical hit!")
            print(" ")
            time.sleep(.25)
            atk3 = (atk3 * 2)  # crit: double third member's attack
        e1_spd = (e1_spd - (atk3/10))
        print("Opponent speed is now", e1_spd, ".")
        print(" ")
        time.sleep(.25)
        if luck2 == 2:
            atk3 = (atk3 / 2)  # undo the temporary crit doubling
    elif move > 3:
        time.sleep(.25)
        print("You missed–bruh.")
        print(" ")
    return[atk,atk2,atk3,e1_spd]
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def Draw_healthbars(position, wa, spd, color):
    """Draw a filled health-bar rectangle (length spd * 30, height 10)
    at `position` with turtle `wa`.

    Note the fill color is applied *after* this bar is filled, so the
    bar itself uses whatever fill color was active on entry; `color`
    takes effect for the next draw.

    :returns: spd unchanged
    """
    wa.penup()
    wa.goto(position)
    wa.pendown()
    wa.begin_fill()
    # rectangle outline: long edge, short edge, twice over
    for edge in (spd * 30, 10, spd * 30, 10):
        wa.forward(edge)
        wa.right(90)
    wa.end_fill()
    wa.penup()
    wa.fillcolor(color)
    return spd
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def Draw_healthbars2(position, wa, spd, color):
    """Redraw a health bar on a maroon background: pre-set the fill to
    maroon, then delegate to Draw_healthbars (which installs `color`
    as the fill for the next draw).

    :returns: spd unchanged
    """
    wa.fillcolor("maroon")
    Draw_healthbars(position, wa, spd, color)
    return spd
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def attack_who(spd,spd2,def2_,Name2,spd3,def3_,Name3,e1_atk):
    """Enemy turn: randomly pick one of the three party members and
    reduce that member's speed by e1_atk / 10.

    Allies 2 and 3 are only valid targets while they can still fight
    (speed > 0) or their defense beats the enemy attack; otherwise the
    hit falls back on the player.

    :returns: the inputs in the order received, with the targeted
        member's speed reduced:
        [spd, spd2, def2_, Name2, spd3, def3_, Name3, e1_atk]
    """
    # BUG FIX: the roll used to be randint(1, 2), which made the `== 3`
    # branch (attacking the third ally) unreachable even though it was
    # fully implemented. Roll 1-3 so every party member can be hit.
    # The local is also renamed so it no longer shadows the function.
    target = random.randint(1, 3)
    if target == 1:
        spd = spd - (e1_atk / 10)
        print("They attack you.")
        print(" ")
        time.sleep(.5)
        print("Your speed is now",spd,".")
    if target == 2:
        if spd2 > 0 or def2_ > e1_atk:
            spd2 = spd2 - (e1_atk / 10)
            print("They attack",Name2,".")
            print(" ")
            time.sleep(.5)
            print("Their speed is now",spd2,".")
        else:
            # ally 2 is down: the attack falls back on the player
            spd = spd - (e1_atk / 10)
            print("They attack you.")
            print(" ")
            time.sleep(.5)
            print("Your speed is now",spd,".")
    elif target == 3:
        if spd3 > 0 or def3_ > e1_atk:
            spd3 = spd3 - (e1_atk / 10)
            print("They attack",Name3,".")
            print(" ")
            time.sleep(.5)
            print(Name3,"' speed is now",spd3,".")
        else:
            # ally 3 is down: the attack falls back on the player
            spd = spd - (e1_atk / 10)
            print("They attack you.")
            print(" ")
            time.sleep(.5)
            print("Your speed is now",spd,".")
    return [spd,spd2,def2_,Name2,spd3,def3_,Name3,e1_atk]
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def Boss_weakness(atk,atk2,atk3,en1_1_class_w,BOSS_name,clase,clase2,clase3,Name2,Name3):
    """Announce the boss's class and double the attack of every party
    member whose class number matches the boss's weakness weight.

    NOTE(review): `en_1_class` is neither a parameter nor a local --
    the prints rely on a module-level global of that name existing at
    call time (NameError otherwise); confirm and consider passing it in.

    NOTE(review): the final `else` pairs only with the last `if`, so the
    plain announcement also prints whenever clase3 does not match --
    even if an earlier branch already printed it; looks unintended.

    :returns: the inputs, with atk/atk2/atk3 possibly doubled.
    """
    if en1_1_class_w == clase:
        atk = (atk * 2)  # player's class exploits the boss weakness
        print(BOSS_name,"is a", en_1_class, ".")
        print(" ")
        time.sleep(.25)
        print("You deal double damage to",BOSS_name,"!")
        print(" ")
        time.sleep(.25)
    if en1_1_class_w == clase2:
        atk2 = (atk2 * 2)  # second member exploits the weakness
        print(BOSS_name,"is a", en_1_class, ".")
        print(" ")
        time.sleep(.25)
        print(Name2, "deals double damage to",BOSS_name,"!")
        print(" ")
        time.sleep(.25)
    if en1_1_class_w == clase3:
        atk3 = (atk3 * 2)  # third member exploits the weakness
        print(BOSS_name,"is a", en_1_class, ".")
        print(" ")
        time.sleep(.25)
        print(Name3, "deals double damage to",BOSS_name,"!")
        print(" ")
        time.sleep(.25)
    else:
        print(BOSS_name,"is a", en_1_class, ".")
        print(" ")
        time.sleep(.25)
    return[atk,atk2,atk3,en1_1_class_w,BOSS_name,clase,clase2,clase3,Name2,Name3]
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
def weakness_enemy(en1_1_class_w,clase,clase2,clase3,atk,atk2,atk3,Name2,Name3):
if en1_1_class_w == clase:
atk = (atk * 2)
print("The enemy | |
import os
import re
import glob
import numpy as np
import matplotlib.pylab as plt
import matplotlib
from scipy.spatial import ConvexHull
from scipy.interpolate import interp1d
from itertools import chain, count
from collections import defaultdict
from os import makedirs
from os.path import isdir, isfile, join
from plot_util import *
from plot_other import *
# ------------------------------------------------------------------------------
# embed fonts as TrueType (Type 42) so text in exported PDF/PS figures
# stays selectable/editable instead of being converted to Type 3 outlines
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# short method identifier (as used in result file names) -> legend label
method_labels_map = {
    'FH': 'FH',
    'FH_Minus': 'FH$^-$',
    'NH': 'NH',
    'FH_wo_S': 'FH-wo-S',
    'FH_Minus_wo_S': 'FH$^{-}$-wo-S',
    'NH_wo_S': 'NH-wo-S',
    'EH': 'EH',
    'Orig_EH': 'EH',
    'BH': 'BH',
    'Orig_BH': 'BH',
    'MH': 'MH',
    'Orig_MH': 'MH',
    'Random_Scan': 'Random-Scan',
    'Sorted_Scan': 'Sorted-Scan',
    'Linear': 'Linear-Scan'
}
# dataset identifier (as used in result file names) -> title/axis label
dataset_labels_map = {
    'Yelp': 'Yelp',
    'Music': 'Music-100',
    'GloVe100': 'GloVe',
    'Tiny1M': 'Tiny-1M',
    'Msong': 'Msong',
    'MovieLens150': 'MovieLens',
    'Netflix300': 'Netflix',
    'Yahoo300': 'Yahoo',
    'Mnist': 'Mnist',
    'Sift': 'Sift',
    'Gaussian': 'Gaussian',
    'Gist': 'Gist',
}
# datasets = ['Yelp', 'GloVe100']
# datasets actually plotted (keys into dataset_labels_map)
datasets = ['Yelp', 'Music', 'GloVe100', 'Tiny1M', 'Msong']
dataset_labels = [dataset_labels_map[dataset] for dataset in datasets]
# per-method line colors and markers, indexed in parallel with the
# `methods` list passed to the plot functions
method_colors = ['red', 'blue', 'green', 'purple', 'deepskyblue', 'darkorange',
    'olive', 'deeppink', 'dodgerblue', 'dimgray']
method_markers = ['o', '^', 's', 'd', '*', 'p', 'x', 'v', 'D', '>']
# ------------------------------------------------------------------------------
def calc_width_and_height(n_datasets, n_rows):
    '''
    compute the overall figure size from the subplot grid shape

    :params n_datasets: number of dataset columns (integer)
    :params n_rows: number of subplot rows (integer)
    :returns: (width, height) of the figure
    '''
    width = 0.55 + 3.333 * n_datasets
    height = 0.80 + 2.5 * n_rows
    return width, height
# ------------------------------------------------------------------------------
def get_filename(input_folder, dataset_name, method_name):
    '''
    build the result-file path '<folder><dataset>_<method>.out'

    :params input_folder: input folder (string)
    :params dataset_name: name of dataset (string)
    :params method_name: name of method (string)
    :returns: full file name (string)
    '''
    return f'{input_folder}{dataset_name}_{method_name}.out'
# ------------------------------------------------------------------------------
def parse_res(filename, chosen_top_k):
    '''
    parse result and get info such as ratio, qtime, recall, index_size,
    chosen_k, and the setting of different methods

    Yields ((cand, params), (top_k, chosen_k, memory_usage, ratio,
    qtime, recall, precision, fraction)) for every record whose top_k
    equals chosen_top_k. Expected file layout (example):

    BH: m=2, l=8, b=0.90
    Indexing Time: 2.708386 Seconds
    Estimated Memory: 347.581116 MB
    cand=10000
    1 5.948251 2.960960 0.000000 0.000000 0.844941
    5 4.475743 2.954690 0.400000 0.000200 0.845279
    10 3.891794 2.953910 0.900000 0.000899 0.845703
    20 3.289422 2.963460 0.950000 0.001896 0.846547
    50 2.642880 2.985980 0.900000 0.004478 0.849082
    100 2.244649 3.012860 0.800000 0.007922 0.853307
    cand=50000
    1 3.905541 14.901140 6.000000 0.000120 4.222926
    5 2.863510 14.905370 4.800000 0.000480 4.223249
    10 2.626913 14.910181 5.300000 0.001061 4.223649
    20 2.392440 14.913270 4.850000 0.001941 4.224458
    50 2.081206 14.931760 4.560000 0.004558 4.227065
    100 1.852284 14.964050 4.500000 0.008987 4.231267
    '''
    # one regex per recognizable line kind; the per-parameter regexes
    # pull (name, value) pairs out of a settings line like "BH: m=2, l=8"
    setting_pattern = re.compile(r'\S+\s+.*=.*')
    setting_m = re.compile(r'.*(m)=(\d+).*')
    setting_l = re.compile(r'.*(l)=(\d+).*')
    setting_M = re.compile(r'.*(M)=(\d+).*')
    setting_s = re.compile(r'.*(s)=(\d+).*')
    setting_b = re.compile(r'.*(b)=(\d+\.\d+).*')
    param_settings = [setting_m, setting_l, setting_M, setting_s, setting_b]
    index_time_pattern = re.compile(r'Indexing Time: (\d+\.\d+).*')
    memory_usage_pattern = re.compile(r'Estimated Memory: (\d+\.\d+).*')
    candidate_pattern = re.compile(r'.*cand=(\d+).*')
    records_pattern = re.compile(r'(\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)')
    params = {}  # accumulates the most recently parsed parameter settings
    with open(filename, 'r') as f:
        for line in f:
            res = setting_pattern.match(line)
            if res:
                for param_setting in param_settings:
                    tmp_res = param_setting.match(line)
                    if tmp_res is not None:
                        # print(tmp_res.groups())
                        params[tmp_res.group(1)] = tmp_res.group(2)
                # print("setting=", line)
            res = index_time_pattern.match(line)
            if res:
                # NOTE(review): despite its name, chosen_k holds the
                # indexing time in seconds (consumed via getindexingtime)
                chosen_k = float(res.group(1))
                # print('chosen_k=', chosen_k)
            res = memory_usage_pattern.match(line)
            if res:
                memory_usage = float(res.group(1))
                # print('memory_usage=', memory_usage)
            res = candidate_pattern.match(line)
            if res:
                cand = int(res.group(1))
                # print('cand=', cand)
            res = records_pattern.match(line)
            if res:
                # a data record: top_k ratio qtime recall precision fraction
                # NOTE(review): chosen_k/memory_usage/cand are assigned
                # lazily from earlier lines; a malformed file that opens
                # with a record line would raise NameError here
                top_k = int(res.group(1))
                ratio = float(res.group(2))
                qtime = float(res.group(3))
                recall = float(res.group(4))
                precision = float(res.group(5))
                fraction = float(res.group(6))
                # print(top_k, ratio, qtime, recall, precision, fraction)
                if top_k == chosen_top_k:
                    yield ((cand, params), (top_k, chosen_k, memory_usage,
                        ratio, qtime, recall, precision, fraction))
# ------------------------------------------------------------------------------
# accessors for the flat record tuple yielded by parse_res:
# (top_k, chosen_k(=indexing time), memory_usage, ratio, qtime,
#  recall, precision, fraction)
def getindexingtime(res):
    return res[1]
def getindexsize(res):
    return res[2]
def getratio(res):
    return res[3]
def gettime(res):
    return res[4]
def getrecall(res):
    return res[5]
def getprecision(res):
    return res[6]
def getfraction(res):
    return res[7]
# accessors for the full ((cand, params), record) pair yielded by
# parse_res: res[0] is the (cand, params) key, res[1] the record tuple
def get_cand(res):
    return int(res[0][0])
def get_l(res):
    return int(res[0][1]['l'])
def get_m(res):
    return int(res[0][1]['m'])
def get_s(res):
    return int(res[0][1]['s'])
def get_time(res):
    return float(res[1][4])
def get_recall(res):
    return float(res[1][5])
def get_precision(res):
    return float(res[1][6])
def get_fraction(res):
    return float(res[1][7])
# ------------------------------------------------------------------------------
def lower_bound_curve(xys):
    '''
    get the time-recall curve by convex hull and interpolation

    :params xys: 2-dim array (np.array) of (time, recall) rows
    :returns: time-recall curve with interpolation
    '''
    # add noise and conduct convex hull to find the curve
    # (the tiny jitter breaks exact ties/collinearity that would make
    # ConvexHull fail; note `xys += eps` mutates the caller's array)
    eps = np.random.normal(size=xys.shape) * 1e-6
    xys += eps
    # print(xys)
    hull = ConvexHull(xys)
    hull_vs = xys[hull.vertices]
    # hull_vs = np.array(sorted(hull_vs, key=lambda x:x[1]))
    # print("hull_vs: ", hull_vs)
    # find max pair (maxv0) and min pairs (v1s) from the convex hull:
    # walk consecutive hull vertices (wrapping around) and keep each
    # successor that decreases in both coordinates; also remember the
    # vertex with the largest recall as the curve's starting point
    v1s = []
    maxv0 = [-1, -1]
    for v0, v1 in zip(hull_vs, chain(hull_vs[1:], hull_vs[:1])):
        # print(v0, v1)
        if v0[1] > v1[1] and v0[0] > v1[0]:
            v1s = np.append(v1s, v1, axis=-1)  # np.append flattens here
        if v0[1] > maxv0[1]:
            maxv0 = v0
    # print(v1s, maxv0)
    # interpolation: vs[:, 1] -> recall (x), vs[:, 0] -> time (y)
    vs = np.array(np.append(maxv0, v1s)).reshape(-1, 2) # 2-dim array
    f = interp1d(vs[:, 1], vs[:, 0])
    minx = np.min(vs[:, 1]) + 1e-6
    maxx = np.max(vs[:, 1]) - 1e-6
    x = np.arange(minx, maxx, 1.0) # the interval of interpolation: 1.0
    y = list(map(f, x)) # get time (y) by interpolation
    return x, y
# ------------------------------------------------------------------------------
def upper_bound_curve(xys, interval, is_sorted):
    '''
    get the time-ratio and precision-recall curves by convex hull and interpolation

    :params xys: 2-dim array (np.array)
    :params interval: the interval of interpolation (float)
    :params is_sorted: sort the convex hull or not (boolean)
    :returns: curve with interpolation
    '''
    # add noise and conduct convex hull to find the curve
    # (jitter breaks degenerate/collinear inputs; `xys += eps` mutates
    # the caller's array in place)
    eps = np.random.normal(size=xys.shape) * 1e-6
    xys += eps
    # print(xys)
    xs = xys[:, 0]
    # a hull needs at least 3 points; otherwise fall back to raw points
    if len(xs) > 2 and xs[-1] > 0:
        hull = ConvexHull(xys)
        hull_vs = xys[hull.vertices]
        if is_sorted:
            hull_vs = np.array(sorted(hull_vs, key=lambda x:x[1]))
        # NOTE(review): these bare prints look like leftover debugging
        print("hull_vs: ", hull_vs)
        # find max pair (maxv0) and min pairs (v1s) from the convex
        # hull: keep successors whose y decreases while x increases
        v1s = []
        maxv0 = [-1, -1]
        for v0, v1 in zip(hull_vs, chain(hull_vs[1:], hull_vs[:1])):
            # print(v0, v1)
            if v0[1] > v1[1] and v0[0] < v1[0]:
                v1s = np.append(v1s, v1, axis=-1)  # np.append flattens
            if v0[1] > maxv0[1]:
                maxv0 = v0
        print(v1s, maxv0)
        # interpolation: vs[:, 1] -> recall (x), vs[:, 0] -> time (y)
        vs = np.array(np.append(maxv0, v1s)).reshape(-1, 2) # 2-dim array
        if len(vs) >= 2:
            f = interp1d(vs[:, 1], vs[:, 0])
            minx = np.min(vs[:, 1]) + 1e-6
            maxx = np.max(vs[:, 1]) - 1e-6
            x = np.arange(minx, maxx, interval)
            y = list(map(f, x)) # get time (y) by interpolation
            return x, y
        else:
            # too few curve points to interpolate: return raw columns
            return xys[:, 0], xys[:, 1]
    else:
        return xys[:, 0], xys[:, 1]
# ------------------------------------------------------------------------------
def lower_bound_curve2(xys):
    '''
    get the querytime-indexsize and querytime-indextime curve by convex hull

    :params xys: 2-dim array (np.array)
    :returns: querytime-indexsize and querytime-indextime curve
    '''
    # jitter in place so ConvexHull never sees exactly-degenerate points
    # (mutates the caller's array, matching the other curve helpers)
    xys += np.random.normal(size=xys.shape) * 1e-6
    first_col = xys[:, 0]
    # a hull needs at least 3 points; otherwise return the raw columns
    if len(first_col) <= 2 or first_col[-1] <= 0:
        return xys[:, 0], xys[:, 1]
    hull_pts = xys[ConvexHull(xys).vertices]
    # keep every hull vertex that is not an interior point of a
    # monotonically-decreasing-x run (i.e. its predecessor or successor
    # lies to its left), walking the hull cyclically
    prev_pts = chain(hull_pts[-1:], hull_pts[:-1])
    next_pts = chain(hull_pts[1:], hull_pts[:1])
    kept = [cur for prv, cur, nxt in zip(prev_pts, hull_pts, next_pts)
            if prv[0] < cur[0] or cur[0] < nxt[0]]
    # sort ascending by x; no interpolation for these curves
    curve = np.asarray(sorted(kept, key=lambda v: v[0])).reshape(-1, 2)
    return curve[:, 0], curve[:, 1]
# ------------------------------------------------------------------------------
def plot_time_fraction_recall(chosen_top_k, methods, input_folder, output_folder):
'''
draw the querytime-recall curves and fraction-recall curves for all methods
on all datasets
:params chosen_top_k: top_k value for drawing figure (integer)
:params methods: a list of method (list)
:params input_folder: input folder (string)
:params output_folder: output folder (string)
:returns: None
'''
n_datasets = len(datasets)
fig_width, fig_height = calc_width_and_height(n_datasets, 2)
plt_helper = PlotHelper(plt, fig_width, fig_height)
plt_helper.plot_subplots_adjust() # define a window for a figure
method_labels = [method_labels_map[method] for method in methods]
for di, (dataset, dataset_label) in enumerate(zip(datasets, dataset_labels)):
# set up two sub-figures
ax_recall = plt.subplot(2, n_datasets, di+1)
plt.title(dataset_label) # title
plt.xlabel('Recall (%)') # label of x-axis
plt.xlim(0, 100) # limit (or range) of x-axis
ax_fraction = plt.subplot(2, n_datasets, n_datasets+di+1)
plt.xlabel('Recall (%)') # label of x-axis
plt.xlim(0, 100) # limit (or range) of x-axis
if di == 0:
ax_recall.set_ylabel('Query Time (ms)')
ax_fraction.set_ylabel('Fraction (%)')
min_t_y = 1e9; max_t_y = -1e9
min_f_y = 1e9; max_f_y = -1e9
for method_idx, method, method_label, method_color, method_marker in \
zip(count(), methods, method_labels, method_colors, method_markers):
# get file name for this method on this dataset
filename = get_filename(input_folder, dataset, method)
if filename is None: continue
print(filename)
# get time-recall and fraction-recall results from disk
time_recalls = []
fraction_recalls = []
for _,res in parse_res(filename, chosen_top_k):
time_recalls += [[gettime(res), getrecall(res)]]
fraction_recalls += [[getfraction(res), getrecall(res)]]
time_recalls = np.array(time_recalls)
fraction_recalls = np.array(fraction_recalls)
# print(time_recalls, fraction_recalls)
# get the time-recall curve by convex hull and interpolation
lower_recalls, lower_times = lower_bound_curve(time_recalls)
min_t_y = min(min_t_y, np.min(lower_times))
max_t_y = max(max_t_y, np.max(lower_times))
ax_recall.semilogy(lower_recalls, lower_times, '-',
color=method_color, marker=method_marker,
label=method_label if di==0 else "", markevery=10,
markerfacecolor='none', markersize=10)
| |
# <gh_stars>1-10  (repository-metadata artifact from a dataset scraper; commented out so the module parses)
#!/usr/bin/env python
"""
Spherical Brain Mapping of 3D Brain Images.
3D brain imaging, such as MRI or PET produces a huge amount of data that is
currently analysed using uni or multivariate approaches.
The main aim of SBM is to provide a new framework that allows the mapping
of a 3D brain image to a two-dimensional space by means of some statistical
measures. The system is based on a conversion from 3D spherical to 2D rectangular
coordinates. For each spherical coordinate pair (theta,phi), a vector
containing all voxels in the radius is selected, and a number of values are
computed, including statistical values (average, entropy, kurtosis) and
morphological values (tissue thickness, distance to the central point, number of
non-zero blocks). These values conform a two-dimensional image that can be
computationally or even visually analysed.
The simplest approach is to use whichever measure that we want, and then apply
SBM to a brain image object, for example, imported using nibabel:
import nibabel as nib
img = nib.load('MRIimage.nii')
We create an sbm object:
import mapBrain
sbm = mapBrain.SphericalBrainMapping()
And then, perform the SBM using 'average':
map = sbm.doSBM(img.get_data(), measure='average', show=True)
<NAME>, Spring 2015
REFs:
[1] - <NAME>-Murcia et al. Projecting MRI Brain images for the
detection of Alzheimer's Disease. Innovation in Medicine and
Healthcare 207:225 2014.
[2] - <NAME>-Murcia et al. A Spherical Brain Mapping of MR Images
for the Detection of Alzheimer's Disease. Journal
of Current Alzheimer's Research. 13(5):575-88. 2016.
"""
import numpy
from scipy.stats import kurtosis, skew
class SphericalBrainMapping(object):
"""
Performs a Spherical Brain Mapping of a 3D Brain Image
"""
def __init__(self, resolution=1, deformation=0.0, ithreshold=0, nlayers=1):
"""
Initializes a SBM instance, saving all parameters as attributes of the
instance.
resolution: Angle resolution at which each mapping vector is
computed (default 1 degree)
deformation: Rate of unequally distributed mapping vectors, to be used
when the surface to be mapped is not spherical but ellipsoid (a float
between 0-1, default 0 -> no deformation).
ithreshold: Intensity threshold for the projections needing it (default 0)
nlayers: Nummber of equally distributed layers (default 1)
"""
self.resolution = resolution
self.deformation = deformation
self.ithreshold = ithreshold
self.nlayers = nlayers
def vsetResolution(self, resolution=1):
""" vsets the angular resolution at which the map will be computed
:param resolution: Angular resolution at which each mapping vector
will be computed (default 1).
"""
self.resolution = resolution
def vsetDeformation(self, deformation=0.0):
""" vsets the deformation rate to be used in SBM.
:param deformation: Deformation rate (float 0-1)
"""
self.deformation = deformation
def vsetIThreshold(self, ithreshold=0):
""" vsets the intensity threshold to be used in SBM.
:param ithreshold: Intensity Threshold
"""
self.ithreshold = ithreshold
def vsetNLayers(self, nlayers=1):
""" vsets the number of layers to be mapped
:param nlayers: Nummber of equally distributed layers (default 1)
"""
self.nlayers = nlayers
def getResolution(self):
""" Returns the resolution used in SBM. """
return self.resolution
def getDeformation(self):
""" Returns the current deformation rate used in SBM """
return self.deformation
def getIThreshold(self):
""" Returns the Intensity Threshold used in SBM """
return self.ithreshold
def getNLayers(self):
""" Returns the number of layers used in SBM """
return self.nlayers
    def computeMappingVectors(self):
        """ Computes the mapping vectors azim and elev.

        Azimuth spacing is warped by ``self.deformation`` (cosine-weighted
        cumulative sum) so that mapping vectors can be spread unevenly for
        ellipsoid surfaces; with deformation == 0 the spacing is uniform.
        Elevation is a uniform grid from -90 to 90 degrees in steps of
        ``self.resolution``.

        :return: tuple (azim, elev) of 1-D numpy arrays, both in radians.
        """
        # Per-step spacing factor; equals 1 everywhere when deformation == 0.
        spaceVector = 1 - self.deformation*numpy.cos(numpy.deg2rad(numpy.arange(-2*180,2*180+self.resolution,self.resolution*2)))
        # Cumulative sum turns per-step spacing into absolute angles
        # (degrees, shifted by -270 before converting to radians).
        azim = numpy.deg2rad(numpy.cumsum(spaceVector)*self.resolution-270)
        elev = numpy.deg2rad(numpy.arange(-90, 90+self.resolution, self.resolution))
        return azim, elev
def surface(self, vset):
""" Returns the surface of all mapped voxels
:param vset: set of mapped voxels' intensity
"""
val = numpy.argwhere(vset>self.ithreshold)
if len(val)==0:
val=numpy.zeros(1)
return numpy.nanmax(val)
def thickness(self, vset):
""" Returns the thickness of the layer of mapped voxels
:param vset: set of mapped voxels' intensity
"""
aux = numpy.argwhere(vset>self.ithreshold)
if aux.size>0:
thickness = numpy.nanmax(aux) - numpy.nanmin(aux)
else:
thickness = 0
return thickness
def numfold(self, vset):
""" Returns the number of non-connected subvsets in the mapped voxels
:param vset: set of mapped voxels' intensity
"""
return numpy.ceil(len(numpy.argwhere(numpy.bitwise_xor(vset[:-1]>self.ithreshold, vset[1:]>self.ithreshold)))/2.)
def average(self, vset):
""" Returns the average of the sampling vset
:param vset: set of mapped voxels' intensity
"""
return numpy.nanmean(vset)
def variance(self, vset):
""" Returns the variance of the sampling vset
:param vset: set of mapped voxels' intensity
"""
return numpy.nanvar(vset)
def skewness(self, vset):
""" Returns the skewness of the sampling vset
:param vset: set of mapped voxels' intensity
"""
return skew(vset, bias=False)
def entropy(self, vset):
""" Returns the entropy of the sampling vset
:param vset: set of mapped voxels' intensity
"""
return sum(numpy.multiply(vset[vset>self.ithreshold],numpy.log(vset[vset>self.ithreshold])))
def kurtosis(self, vset):
""" Returns the kurtosis of the sampling vset
:param vset: set of mapped voxels' intensity
"""
return kurtosis(vset, fisher=False, bias=False)
def _interp_single_gray_level(self, p, imag):
''' Interpolates the gray level found at the point p
using interpolation by percentage of superposition of
pixels.
'''
# We create the array of surrounding points:
a = numpy.array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]])
points = (numpy.floor(p)+a).astype(int)
# Extract the colours at each point:
c = imag[points[:,0], points[:,1], points[:,2]]
# Calculate the weights as distances:
w = numpy.prod(1-numpy.abs(p-points),axis=1)
# And sum the different values:
ci = numpy.sum(c*w)
return ci
def _interp_gray_level(self, p, imag):
''' Interpolates the gray level found at the point array p
by using superposition interpolation.
'''
if p.ndim==3:
ci = numpy.zeros(p.shape[:2])
for i in range(p.shape[0]):
ci[i,:] = numpy.array([self._interp_single_gray_level(punto,imag) for punto in p[i,:,:]])
elif p.ndim==2:
ci = numpy.array([self._interp_single_gray_level(punto,imag) for punto in p])
return ci
    def _get_points_centering(self, center, n):
        '''
        Gets the array of centerpoints of a 3x3 grid perpendicular to the
        direction vector n, in this order:
         -------------
         | 1 | 2 | 3 |
         |------------
         | 8 | 0 | 4 |
         |-----------|
         | 7 | 6 | 5 |
         -------------
        u is an in-plane (z = 0) unit vector chosen to be orthogonal to n,
        and v = n x u completes the local frame.
        NOTE(review): divides by n[0]**2 + n[1]**2 -- degenerate when n is
        parallel to the z axis; confirm callers never pass such a vector.
        '''
        # Two candidate in-plane unit vectors (mirror images in x).
        u11 = numpy.sqrt(n[1]**2/(n[0]**2+n[1]**2))
        u12 = -numpy.sqrt(n[1]**2/(n[0]**2+n[1]**2))
        u21 = numpy.sqrt(1-u11**2)
        u22 = numpy.sqrt(1-u12**2)
        u1 = numpy.array([u11, u21, 0])
        u2 = numpy.array([u12, u22, 0])
        # Keep whichever candidate is (numerically) orthogonal to n.
        if numpy.dot(u1,n)<1e-10:
            u = u1
        else:
            u = u2
        v = numpy.cross(n,u)
        # Grid points laid out in the order documented above.
        p0 = center
        p1 = center - u + v
        p2 = center + v
        p3 = center + u + v
        p4 = center + u
        p5 = center + u - v
        p6 = center - v
        p7 = center - u - v
        p8 = center - u
        return numpy.array([p0, p1, p2, p3, p4, p5, p6, p7, p8])
def _posterizeImage(self, ndarray, numLevels = 16 ):
'''
Posterizes the image to number of levels
'''
#Gray-level resizing
numLevels = numLevels-1 #don't touch. Logical adding issue.
minImage = numpy.nanmin(ndarray)
ndarray = ndarray-(minImage)
maxImage = numpy.nanmax(ndarray)
tempShift = maxImage/numLevels
ndarray = numpy.floor(ndarray/tempShift)
ndarray=ndarray + 1
numLevels = numLevels + 1 # don't touch. Logical adding issue.
return ndarray
    def computeTexture(self, p, imag, center, distances=1):
        '''
        Computes Haralick texture features around mapping vector p.

        The vector is replicated at the 9 centre points returned by
        _get_points_centering, sampled from the image by superposition
        interpolation, posterized to 16 gray levels, and fed through
        mahotas' co-occurrence / Haralick routines.

        :param p: points along a mapping vector; p[1,:] is used as the
            centering direction (assumes len(p) >= 2 -- TODO confirm)
        :param imag: 3-D image volume
        :param center: central point of the mapping
        :param distances: co-occurrence distance(s); scalar, list or array
        :return: (features, labels) as produced by mahotas
        '''
        from mahotas.features import texture
        origins = self._get_points_centering(center, p[1,:])
        puntos = numpy.array([p+cent for cent in origins])
        # Keep only columns whose replicated points all fall inside the volume.
        select = (puntos<numpy.array(imag.shape)-1).all(axis=2).all(axis=0)
        p_real = puntos[:,select,:]
        colors = self._interp_gray_level(p_real, imag)
        ndarray = self._posterizeImage(colors)
        # Prevent errors with iterative list:
        if (type(distances) is not list) and (type(distances) is not numpy.ndarray):
            distances = [distances]
        glcm=[]
        for dis in distances:
            # NOTE: "cooccurence" is mahotas' own (misspelled) API name.
            glcm.append(texture.cooccurence(ndarray.astype(int)-1, 0, distance=dis, symmetric=False))
        features = texture.haralick_features(glcm)
        labels = texture.haralick_labels
        return features, labels
def showMap(self, map, measure, cmap='gray'):
""" Shows the computed maps in a window using pyplot
:param map: map or array of maps to be shown
"""
import matplotlib.pyplot as plt
minimum = numpy.min(map)
maximum = numpy.max(map)
if self.nlayers>1:
imgplot = plt.figure()
ncol = numpy.floor(self.nlayers/numpy.ceil(self.nlayers**(1.0/3)))
nrow = numpy.ceil(self.nlayers/ncol)
for nl in range(self.nlayers):
plt.subplot(nrow,ncol,nl+1)
plt.imshow(numpy.rot90(map[nl]),cmap=cmap, vmin=minimum, vmax=maximum)
plt.title(measure+'-SBM ('+str(nl)+')')
plt.colorbar()
plt.show()
elif measure=='texture':
imgplot = plt.figure()
ncol = numpy.floor(13/numpy.ceil(13**(1.0/3)))
nrow = numpy.ceil(13/ncol)
for nl in range(13):
plt.subplot(nrow,ncol,nl+1)
plt.imshow(numpy.rot90(map[nl,:,:]),cmap=cmap, vmin=minimum, vmax=maximum)
plt.title(measure+'-SBM ('+str(nl)+')')
plt.colorbar()
plt.show()
else:
imgplot | |
<reponame>okazaki2007/Proyecto-final_Jeff-Juan-david
"""
**********************************************************
* *
* Instituto Tecnológico de Costa Rica *
* Área Académica de Ing. en Computadores *
* *
* Desarrollo de juego simple con Python *
* *
* Estudiantes: *
* <NAME>-2019044684 *
* <NAME> - 2021012480 *
* *
* *
* *
* II Proyecto Taller de Programacion *
* *
**********************************************************
"""
'''Modules imports'''
from tkinter import *
from tkinter import messagebox
import pygame
import random
import time
from ordenamiento_puntaje import *
pygame.init()
#====================================================Ordenamiento de Puntajes=============================================================#
# Parallel global lists: names_list[i] is the player who scored Scores_list[i].
names_list=[]
Scores_list=[]
def ordenar_puntajes():
    """Load "Scores.txt" into the global lists and sort them by score.

    Each line has the form "<name>$<score>": everything before the first
    '$' is the player name and everything after it is the score.  The two
    global lists are rebuilt from scratch and then sorted in descending
    score order via quick_sort_desc (names follow their scores).
    """
    global names_list, Scores_list
    names_list = []
    Scores_list = []
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked it).
    with open("Scores.txt", "r") as file:
        for line in file.readlines():
            # Split on the first '$' only, matching the original per-char
            # scan; int() tolerates the trailing newline in the score part.
            name, _, score = line.partition("$")
            names_list.append(name)
            Scores_list.append(int(score))
    quick_sort_desc(Scores_list, names_list, 0, len(Scores_list) - 1)
#=======================================================Clase para el jugador=======================================================================================#
class Nave():
    """Player ship: lives, HP, position, velocity, score and sprite."""
    def __init__(self, x, y): #Constructor
        #Hit points / lives
        self.vidas = 3
        self.hp = 20
        self.immune = False #immunity window after crashing into a meteor
        self.x = x #Position on the horizontal axis
        self.y = y #Position on the vertical axis
        self. speedy = 0 #Speed on the vertical axis
        self. speedx = 0 #Speed on the horizontal axis
        self.score = 0 #score
        self.scorespeed = 1#points gained per second
        self.rect = None #None for now; a pygame.Rect is assigned later
        self.master = None #game screen
        img = pygame.image.load("player.png")
        self.img = pygame.transform.scale(img,(50,50))
        self.color = (100,100,100)
    #=============================================draws the player on the screen for the first time=========================================================================#
    def create_self(self):
        self.rect = pygame.Rect(self.x,self.y,50,50)
        #pygame.draw.rect(self.master,self.color, self.rect)
        self.master.blit(self.img,(self.rect.left,self.rect.top))
    #move checks the player's position relative to the screen and moves it according to its speed
    #E: screen width and height
    #S: player movement
    #R: Ints //not validated
    def move(self,width,height):
        borderx= self.rect.left +self.speedx #player's edge on the horizontal axis
        bordery = self.rect.top +self.speedy #player's edge on the vertical axis
        if (-10>=borderx or borderx>=width-40) or (-10>=bordery or bordery>=height-50): #If the player would leave the screen
            self.speedy = 0
            self.speedx = 0
            #Both velocity components are reset to 0, since the player must not be able to leave the screen
        else:#If the player is inside the screen
            self.rect.move_ip(self.speedx,self.speedy)#move the player
            #pygame.draw.rect(self.master,self.color, self.rect) #draw the player on the screen
            self.master.blit(self.img,(self.rect.left,self.rect.top))
#========================================================Clase para los proyectiles que esquiva el jugador========================================================#
class Meteoro():
    """Falling meteor the player must dodge; may bounce off the side borders."""
    def __init__(self,x,master,width,soundon): #Constructor
        self.soundon = soundon
        self.x = x #position on the x axis
        self.speedx = random.randint(-15,15) #random speed to the left or right
        a=30 +random.randint(0,10) #meteor size (minimum 30, maximum 40)
        self.y=-40 #initially placed above the screen so meteors appear to come from space
        self.rect = pygame.Rect(self.x,self.y,a,a)#bounding rectangle
        self.master = master#game screen
        self.width = width#width of the game screen
        #pygame.draw.rect(master,(110,0,0),self.rect)#Draw the rectangle that represents the meteor's border on the game screen
        self.bounce = 2 #Number of times the meteor may bounce off the screen border
        self.speedy = random.randint(3,8) #Random speed on the vertical axis
        self.not_crashed = True #Flag: has it crashed into the player / left the screen?
        img= pygame.image.load("meteor.png")#Meteor image
        self.img = pygame.transform.scale(img,(a,a))
        #Meteor sound
        self.sonido = pygame.mixer.Sound("meteor.wav")
    #================Class Meteoro Methods================================================================================================================================#
    def move(self):#move the meteor
        if self.not_crashed:#Only moves while it has not crashed
            x_Check = (self.rect.left < 10 or self.rect.left > self.width - 10)
            if x_Check and self.bounce>=0 : #May bounce off the screen border up to 2 times
                if self.soundon: #Play the bounce sound
                    self.sonido.play()
                self.bounce-=1
                self.speedx = -1*self.speedx
            #The meteor stops being drawn once it has crashed
            self.rect.move_ip(self.speedx,self.speedy)
            self.master.blit(self.img,(self.rect.left,self.rect.top))
            if self.rect.top > 530 or ( x_Check and self.bounce < 0):#Check whether the meteor left the screen
                self.not_crashed = False#Marked as crashed when off screen so it is no longer drawn
#Colours as (red, green, blue) tuples
white = (255,255,255)
black =(0,0,0)
#Background music per level, indexed by (level - 1)
Music_list = ["Level1.wav","Level2.wav","Level3.wav"]
#==============================================Clase principal del juego==============================================================================================#
class Juego():
    def __init__(self,width,height,level,player_name,window,soundon):#Constructor
        pygame.init()
        self.soundon = soundon #Whether sound is enabled
        self.playername = player_name #Player name
        self.width = width #Screen width
        self.height = height #Screen height
        self.playing = True #Flag: the player is still in the game
        self.Screen = None #Later becomes a pygame display surface
        self.window = window #Main window (Tkinter)
        self.level=level #Current level
        self.title = f"Game: Nivel {self.level}" #Screen title, shows the game level
        self.FPS = 30 #30fps
        self.clock = pygame.time.Clock() #pygame clock
        self.player = Nave(self.width/2-15,self.height-60) #Player
        self.Meteor_list=[] #Meteor list, initially empty
        self.font = pygame.font.SysFont("TimesNewRoman",16) #Font used to render on-screen text
        self.current_time = 0 #Current play time
        bg_img = pygame.image.load("bg.png")#Background image
        self.bg = pygame.transform.scale(bg_img,(self.width,self.height))
        self.current_frame = 0 #Current frame (the game runs at 30fps)
#=======================================Class Juego Methods===================================#
def Crear_meteoro(self): #Crear_meteoro crea un meteoro en la pantalla de juego en una posicion horizontal aleatoria
if self.current_frame<(30+7.5*(self.level-1)):
self.current_frame +=1
else:
self.current_frame=0
for j in range(0,self.level+2):
self.Meteor_list.append(Meteoro(random.randint(100,self.width-100),self.Screen,self.width,self.soundon)) #Cada meteorito se añade a la lista de meteoros del juego
    def Show_Score(self):#Show_Score displays the player's current score on the game screen
        self.score = self.font.render(f"Puntaje: {self.player.score}",True,black)
        self.Screen.blit(self.score,(10,self.height-20))
    def Show_HP(self): #Show_HP displays both the player's lives and HP on the game screen
        self.player_HP =self.font.render(f"HP: {self.player.hp}",True,black)
        self.player_vidas =self.font.render(f"Vidas: {self.player.vidas}",True,black)
        self.Screen.blit(self.player_HP,(100,self.height-20))
        self.Screen.blit(self.player_vidas,(170,self.height-20))
    def Show_player_name(self): #Show_player_name displays the player's name on the game screen
        self.player_name = self.font.render(f"Jugador: {self.playername}",True,black)
        self.Screen.blit(self.player_name,(340,self.height-20))
    def Show_Time(self): #Show_Time displays the seconds elapsed since the game started
        #It also updates the player's score once per second
        if time.time()- self.timer > self.current_time+1:
            self.current_time+=1
            self.player.score+=self.player.scorespeed
            self.current_level_score+=self.player.scorespeed
        self.Time = self.font.render(f"Tiempo: {self.current_time}s",True,black)
        self.Screen.blit(self.Time,(250,self.height-20))
def Save_Score(self):#Save score salva el nombre del jugador y su puntaje separados por un $, ademas informa al jugador
#si esta entre los 10 mejores puntajes
ordenar_puntajes()
for i in range(0,11 if len(Scores_list)>10 else len(Scores_list)):
if self.player.score > Scores_list[i]:
messagebox.showinfo(title= f"Felicidades {self.playername}!", message = f"Has obtenido la posicion {i+1} en los mejores puntajes!")
break
file = open("Scores.txt","a")
file.write(f"\n{self.playername}${self.player.score}")
    def Reset_level(self): #Reset_level resets some per-level game state and pauses the game for 2 seconds
        if self.soundon:
            self.music.stop()
        self.Meteor_list=[]
        self.player.hp = 20
        self.current_time = 0
        time.sleep(2)
    def End_Game(self): #End_Game ends the main game loop and saves the player's score
        self.playing = False
        if self.soundon:
            self.music.stop()
            pygame.mixer.unpause()
        self.Save_Score()
        self.window.deiconify() #bring the main Tkinter window back
    def Win(self):# Win handles what happens at the end of each level
        if self.level >2: #If the player is on the third level, the game ends
            self.Message = self.font.render("Has vencido el juego!",True,white)
            self.Screen.blit(self.Message,(200,100))
            self.End_Game()
        else: #Otherwise, tell the player they have cleared the level
            self.Message = self.font.render(f"Has vencido el nivel {self.level}!",True,white)
            self.Screen.blit(self.Message,(200,100))
            pygame.display.update()
            self.level+=1
            self.title = f"Game: Nivel {self.level}"
            self.Reset_level()
    def play_music(self): #Pause any playing channels and loop this level's background track
        if self.soundon:
            pygame.mixer.pause()
            self.music = pygame.mixer.Sound(Music_list[self.level-1])
            self.music.play(-1) # -1 => loop forever
def Start(self): #Start comienza el juego y contiene el ciclo principal del juego
self.Screen = pygame.display.set_mode((self.width,self.height)) #Crea pantalla de pygame
pygame.display.set_caption(self.title) #Titulo de la pantalla
self.Screen.fill(black) #Fondo de la pantalla
self.player.master = self.Screen
self.player.create_self()
self.play_music()
self.player.scorespeed = 1 + 2*(self.level-1)*(self.level>1)
self.timer = time.time()
self.current_level_score = 0
while self.playing: #Si el jugador no ha dejado de jugar
#https://www.geeksforgeeks.org/how-to-create-buttons-in-a-game-using-pygame/ para la creacion del boton de regreso al menu
mouse = pygame.mouse.get_pos()
self.clock.tick(self.FPS)#para que el juego vaya a 30fps
self.Screen.fill(black) #Se redibuja el fondo de pantalla cada frame
self.Screen.blit(self.bg,(0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT: #Salir del juego si el jugador quiere, y volver a la pantalla principal
self.End_Game()
break
if event.type == pygame.KEYDOWN:#Cada tecla que se presiona
if event.key == pygame.K_RIGHT:#moverse a la derecha con la flecha derecha
self.player.speedx=10
if event.key == pygame.K_LEFT : #moverse a la izquierda con la flecha izquierda
self.player.speedx=-10
if event.key == pygame.K_DOWN : #moverse hacia abajo con la flecha de abajo
self.player.speedy=10
if event.key == pygame.K_UP: #moverse hacia arriba con la felcha de arriba
self.player.speedy=-10
if event.type == pygame.KEYUP: #Si se suelta una tecla
if event.key in | |
from collections import defaultdict, namedtuple
from dataclasses import dataclass
import distutils.util
import functools
import itertools
import json
import math
import operator
import os
import random
import uuid
import shutil
import logging
import time
from typing import List, Dict, NamedTuple, Optional
from django.db.models import Q
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, get_list_or_404
from django.conf import settings
from google.cloud import storage
from rest_framework.decorators import api_view
import requests
from expiringdict import ExpiringDict
from .models import (
Dataset,
DatasetItem,
Category,
Mode,
User,
Annotation,
DNNModel,
CategoryCount,
)
# Annotation modes that ship with the app (as opposed to user-defined ones).
BUILTIN_MODES = ["POSITIVE", "NEGATIVE", "HARD_NEGATIVE", "UNSURE"]
logger = logging.getLogger(__name__)
@api_view(["POST"])
@csrf_exempt
def start_cluster(request):
    """Ask the embedding server to start a new cluster; return its id."""
    # TODO(mihirg): Remove this setting from Django; it's now managed by Terraform
    # (or figure out how to set it from the frontend if we need that)
    response = requests.post(settings.EMBEDDING_SERVER_ADDRESS + "/start_cluster")
    body = response.json()
    return JsonResponse({"status": "success", "cluster_id": body["cluster_id"]})
@api_view(["GET"])
@csrf_exempt
def get_cluster_status(request, cluster_id):
    """Proxy the embedding server's status report for one cluster."""
    response = requests.get(
        settings.EMBEDDING_SERVER_ADDRESS + "/cluster_status",
        params={"cluster_id": cluster_id},
    )
    return JsonResponse(response.json())
@api_view(["POST"])
@csrf_exempt
def stop_cluster(request, cluster_id):
    """Tell the embedding server to tear down the given cluster."""
    requests.post(
        settings.EMBEDDING_SERVER_ADDRESS + "/stop_cluster",
        json={"cluster_id": cluster_id},
    )
    return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def create_model(request, dataset_name, dataset=None):
    """Start training a background-split DNN model on the embedding server.

    Reads tag sets (train/val, pos/neg) from the POST body, partitions the
    dataset's non-validation items accordingly, optionally augments the
    negatives with randomly sampled unlabeled items, forwards everything to
    the embedding server's /start_bgsplit_job endpoint, and records the new
    model in the DNNModel table.

    :param request: POST whose body carries model_name, cluster_id, bucket,
        index_id, pos/neg/val_pos/val_neg tag specs, augment_negs,
        model_kwargs and an optional "resume" model id.
    :param dataset_name: name of the Dataset to train on.
    :param dataset: unused; the dataset is always re-fetched by name below.
    :return: JsonResponse with the backend-assigned model_id, or the
        failure reason forwarded from the embedding server.
    """
    payload = json.loads(request.body)
    model_name = payload["model_name"]
    cluster_id = payload["cluster_id"]
    bucket_name = payload["bucket"]
    index_id = payload["index_id"]
    pos_tags = parse_tag_set_from_query_v2(payload["pos_tags"])
    neg_tags = parse_tag_set_from_query_v2(payload["neg_tags"])
    val_pos_tags = parse_tag_set_from_query_v2(payload["val_pos_tags"])
    val_neg_tags = parse_tag_set_from_query_v2(payload["val_neg_tags"])
    augment_negs = bool(payload["augment_negs"])
    model_kwargs = payload["model_kwargs"]
    resume_model_id = payload.get("resume", None)
    dataset = get_object_or_404(Dataset, name=dataset_name)
    eligible_images = DatasetItem.objects.filter(dataset=dataset, is_val=False)
    categories = Category.objects.filter(
        tag_sets_to_query(pos_tags, neg_tags, val_pos_tags, val_neg_tags)
    )
    annotations = Annotation.objects.filter(
        dataset_item__in=eligible_images,
        category__in=categories,
    )
    tags_by_pk = get_tags_from_annotations_v2(annotations)
    # Partition items by tag set; priority order is pos > neg > val_pos >
    # val_neg (the first matching branch wins).
    pos_dataset_item_pks = []
    neg_dataset_item_pks = []
    val_pos_dataset_item_pks = []
    val_neg_dataset_item_pks = []
    for pk, tags in tags_by_pk.items():
        if any(t in pos_tags for t in tags):
            pos_dataset_item_pks.append(pk)
        elif any(t in neg_tags for t in tags):
            neg_dataset_item_pks.append(pk)
        elif any(t in val_pos_tags for t in tags):
            val_pos_dataset_item_pks.append(pk)
        elif any(t in val_neg_tags for t in tags):
            val_neg_dataset_item_pks.append(pk)
    # Augment with randomly sampled negatives if requested
    num_extra_negs = settings.BGSPLIT_NUM_NEGS_MULTIPLIER * len(
        pos_dataset_item_pks
    ) - len(neg_dataset_item_pks)
    if augment_negs and num_extra_negs > 0:
        # Uses "include" and "exclude" category sets from request
        all_eligible_pks = filtered_images_v2(
            request,
            dataset,
            exclude_pks=(
                pos_dataset_item_pks
                + neg_dataset_item_pks
                + val_pos_dataset_item_pks
                + val_neg_dataset_item_pks
            ),
        )
        sampled_pks = random.sample(
            all_eligible_pks, min(len(all_eligible_pks), num_extra_negs)
        )
        neg_dataset_item_pks.extend(sampled_pks)
    # Resolve pks to the internal identifiers the embedding server expects.
    pos_dataset_item_internal_identifiers = list(
        DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list(
            "identifier", flat=True
        )
    )
    neg_dataset_item_internal_identifiers = list(
        DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list(
            "identifier", flat=True
        )
    )
    val_pos_dataset_item_internal_identifiers = list(
        DatasetItem.objects.filter(pk__in=val_pos_dataset_item_pks).values_list(
            "identifier", flat=True
        )
    )
    val_neg_dataset_item_internal_identifiers = list(
        DatasetItem.objects.filter(pk__in=val_neg_dataset_item_pks).values_list(
            "identifier", flat=True
        )
    )
    if resume_model_id:
        resume_model = get_object_or_404(DNNModel, model_id=resume_model_id)
        resume_model_path = resume_model.checkpoint_path
    else:
        resume_model = None
        resume_model_path = None
    params = {
        "pos_identifiers": pos_dataset_item_internal_identifiers,
        "neg_identifiers": neg_dataset_item_internal_identifiers,
        "val_pos_identifiers": val_pos_dataset_item_internal_identifiers,
        "val_neg_identifiers": val_neg_dataset_item_internal_identifiers,
        "augment_negs": augment_negs,
        "model_kwargs": model_kwargs,
        "model_name": model_name,
        "bucket": bucket_name,
        "cluster_id": cluster_id,
        "index_id": index_id,
        "resume_from": resume_model_path,
    }
    r = requests.post(
        settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_job",
        json=params,
    )
    response_data = r.json()
    if r.status_code != 200:
        # Forward the backend failure (and its status code) to the client.
        return JsonResponse(
            {"status": "failure", "reason": response_data.get("reason", "")},
            status=r.status_code,
        )
    m = DNNModel(
        dataset=dataset,
        name=model_name,
        model_id=response_data["model_id"],
        category_spec={
            "augment_negs": augment_negs,
            "pos_tags": payload["pos_tags"],
            "neg_tags": payload["neg_tags"],
            "augment_negs_include": payload.get("include", []) if augment_negs else [],
            "augment_negs_exclude": payload.get("exclude", []) if augment_negs else [],
        },
    )
    # Final epoch index this run will reach (0-based); when resuming a
    # previous training run, continue counting from the old model's epoch.
    model_epoch = -1 + model_kwargs.get("epochs_to_run", 1)
    if resume_model_id:
        m.resume_model_id = resume_model_id
        if model_kwargs.get("resume_training", False):
            model_epoch += resume_model.epoch + 1
    m.epoch = model_epoch
    m.save()
    return JsonResponse(
        {
            "status": "success",
            "model_id": response_data["model_id"],
        }
    )
@api_view(["GET"])
@csrf_exempt
def get_model_status(request, model_id):
    """Query training status; persist the checkpoint path once available."""
    response = requests.get(
        settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_job_status",
        params={"model_id": model_id},
    )
    status = response.json()
    if status["has_model"]:
        # Index has been successfully created & uploaded -> persist
        model = get_object_or_404(DNNModel, model_id=model_id)
        model.checkpoint_path = status["checkpoint_path"]
        model.save()
    return JsonResponse(status)
@api_view(["POST"])
@csrf_exempt
def update_model_v2(request):
    """Rename every DNNModel row matching old_model_name to new_model_name."""
    payload = json.loads(request.body)
    # user = payload["user"]
    renamed_from = payload["old_model_name"]
    renamed_to = payload["new_model_name"]
    for model in get_list_or_404(DNNModel, name=renamed_from):
        model.name = renamed_to
        model.save()
    return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def delete_model_v2(request):
    """Delete every DNNModel with the given name, plus its output files."""
    payload = json.loads(request.body)
    # cluster_id = payload['cluster_id']
    for model in get_list_or_404(DNNModel, name=payload["model_name"]):
        # TODO(fpoms): delete model data stored on NFS?
        # shutil.rmtree(os.path.join(m.checkpoint_path, '..'))
        shutil.rmtree(model.output_directory, ignore_errors=True)
        model.delete()
    return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def run_model_inference(request, dataset_name, dataset=None):
    """Kick off a background-split inference job for a trained model."""
    payload = json.loads(request.body)
    model_id = payload["model_id"]
    # 404s if the dataset does not exist (the object itself is unused here).
    dataset = get_object_or_404(Dataset, name=dataset_name)
    checkpoint = get_object_or_404(DNNModel, model_id=model_id).checkpoint_path
    if checkpoint is None or len(checkpoint) == 0:
        return JsonResponse(
            {
                "status": "failure",
                "reason": f"Model {model_id} does not have a model checkpoint.",
            },
            status=400,
        )
    job_request = {
        "bucket": payload["bucket"],
        "model_id": model_id,
        "checkpoint_path": checkpoint,
        "cluster_id": payload["cluster_id"],
        "index_id": payload["index_id"],
    }
    response = requests.post(
        settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_inference_job",
        json=job_request,
    )
    return JsonResponse(
        {
            "status": "success",
            "job_id": response.json()["job_id"],
        }
    )
@api_view(["GET"])
@csrf_exempt
def get_model_inference_status(request, job_id):
    """Query inference-job status; persist the output dir once finished."""
    response = requests.get(
        settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_inference_job_status",
        params={"job_id": job_id},
    )
    status = response.json()
    if status["has_output"]:
        # Index has been successfully created & uploaded -> persist
        model = get_object_or_404(DNNModel, model_id=status["model_id"])
        model.output_directory = status["output_dir"]
        model.save()
    return JsonResponse(status)
@api_view(["POST"])
@csrf_exempt
def stop_model_inference(request, job_id):
    """Ask the embedding server to cancel a running inference job."""
    response = requests.post(
        settings.EMBEDDING_SERVER_ADDRESS + "/stop_bgsplit_inference_job",
        json={"job_id": job_id},
    )
    return JsonResponse(response.json(), status=response.status_code)
#
# V2 ENDPOINTS
# TODO(mihirg): Make these faster
#
# Lightweight value types shared by the v2 endpoints below.
Tag = namedtuple("Tag", "category value") # type: NamedTuple[str, str]
Box = namedtuple(
    "Box", "category value x1 y1 x2 y2"
) # type: NamedTuple[str, str, float, float, float, float]
# DatasetItem primary keys are plain ints.
PkType = int
@dataclass
class ResultSet:
    """One cached query result: an ordered ranking of dataset-item pks."""
    type: str  # kind of query that produced the ranking
    ranking: List[PkType]  # dataset-item pks, best match first
    distances: List[float]  # one distance per ranked pk (parallel lists)
    model: Optional[str]  # model id used for the query, if any
# TODO(fpoms): this needs to be wrapped in a lock so that
# updates are atomic across concurrent requests
# In-memory cache of recent result sets keyed by a uuid4 string; entries
# expire after 30 minutes or once more than 50 are stored.
current_result_sets = ExpiringDict(
    max_age_seconds=30 * 60,
    max_len=50,
) # type: Dict[str, ResultSet]
def parse_tag_set_from_query_v2(s):
    """Parse a tag specification into a set of Tag tuples.

    Accepts either a list of "category:value" strings or a single
    comma-separated string; falsy input yields an empty set.
    """
    if isinstance(s, list):
        raw_tags = s
    elif isinstance(s, str) and s:
        raw_tags = s.split(",")
    else:
        raw_tags = []
    tags = set()
    for raw in raw_tags:
        if not raw:
            continue
        category, value = raw.split(":")
        tags.add(Tag(category, value))
    return tags
def tag_sets_to_query(*tagsets):
    """Build a Django Q matching objects annotated with any of the tags.

    Merges all given tag sets; an empty merge returns an empty Q (which
    matches everything).  Otherwise matches rows linked to an Annotation
    whose (category name, mode name) equals one of the merged tags.
    """
    merged = set().union(*tagsets)
    if not merged:
        return Q()
    return Q(
        annotation__in=Annotation.objects.filter(
            # OR together one exact (category, mode) match per tag.
            functools.reduce(
                operator.or_,
                [Q(category__name=t.category, mode__name=t.value) for t in merged],
            )
        )
    )
def serialize_tag_set_for_client_v2(ts):
    """Convert a set of Tag tuples into a sorted, JSON-friendly list of dicts."""
    serialized = []
    for tag in sorted(ts):
        serialized.append({"category": tag.category, "value": tag.value})
    return serialized
def serialize_boxes_for_client_v2(bs):
    """Convert a collection of Box tuples into a sorted list of JSON dicts."""
    serialized = []
    for box in sorted(bs):
        serialized.append(
            {
                "category": box.category,
                "value": box.value,
                "x1": box.x1,
                "y1": box.y1,
                "x2": box.x2,
                "y2": box.y2,
            }
        )
    return serialized
def get_tags_from_annotations_v2(annotations):
    """Group non-box annotations by dataset-item pk as Tag tuples."""
    tags_by_pk = defaultdict(list)
    tag_rows = annotations.filter(is_box=False).values(
        "dataset_item__pk", "category__name", "mode__name"
    )
    for row in tag_rows:
        tags_by_pk[row["dataset_item__pk"]].append(
            Tag(row["category__name"], row["mode__name"])
        )
    return tags_by_pk
def get_boxes_from_annotations_v2(annotations):
    """Group box annotations by dataset-item pk as Box tuples."""
    boxes_by_pk = defaultdict(list)
    box_rows = annotations.filter(is_box=True).values(
        "dataset_item__pk",
        "category__name",
        "mode__name",
        "bbox_x1",
        "bbox_y1",
        "bbox_x2",
        "bbox_y2",
    )
    for row in box_rows:
        boxes_by_pk[row["dataset_item__pk"]].append(
            Box(
                row["category__name"],
                row["mode__name"],
                row["bbox_x1"],
                row["bbox_y1"],
                row["bbox_x2"],
                row["bbox_y2"],
            )
        )
    return boxes_by_pk
def filtered_images_v2(request, dataset, exclude_pks=None) -> List[PkType]:
    """Return pks of dataset items matching the request's tag/subset filters.

    Reads ``include``/``exclude`` tag specs, an optional explicit ``subset``
    of pks, a ``split`` ("train"/"val") and pagination (``offset``/``num``,
    where num == -1 means "no limit") from the POST body or GET query string.

    :param request: Django request carrying the filter parameters.
    :param dataset: Dataset whose items are filtered.
    :param exclude_pks: optional iterable of pks to drop from the result.
    :return: paginated list of matching DatasetItem pks.
    """
    filt_start = time.time()
    if request.method == "POST":
        payload = json.loads(request.body)
        include_tags = parse_tag_set_from_query_v2(payload.get("include"))
        exclude_tags = parse_tag_set_from_query_v2(payload.get("exclude"))
        pks = [i for i in payload.get("subset", []) if i]
        split = payload.get("split", "train")
        offset_to_return = int(payload.get("offset", 0))
        num_to_return = int(payload.get("num", -1))
    else:
        include_tags = parse_tag_set_from_query_v2(request.GET.get("include"))
        exclude_tags = parse_tag_set_from_query_v2(request.GET.get("exclude"))
        pks = [i for i in request.GET.get("subset", "").split(",") if i]
        split = request.GET.get("split", "train")
        offset_to_return = int(request.GET.get("offset", 0))
        num_to_return = int(request.GET.get("num", -1))
    end_to_return = None if num_to_return == -1 else offset_to_return + num_to_return
    dataset_items = None
    is_val = split == "val"
    db_start = time.time()
    # Get pks for dataset items of interest
    if pks and exclude_pks:
        # Get specific pks - excluded pks if requested
        exclude_pks = set(exclude_pks)
        pks = [pk for pk in pks if pk not in exclude_pks]
    elif not pks:
        # Otherwise get all dataset items - exclude pks
        dataset_items = DatasetItem.objects.filter(dataset=dataset, is_val=is_val)
        if exclude_pks:
            dataset_items = dataset_items.exclude(pk__in=exclude_pks)
        pks = dataset_items.values_list("pk", flat=True)
    db_end = time.time()
    result = None
    db_tag_start = time.time()
    if not include_tags and not exclude_tags:
        # If no tags specified, just return retrieved pks
        result = pks
    else:
        # Otherwise, filter using include and exclude tags
        if dataset_items is None:
            dataset_items = DatasetItem.objects.filter(pk__in=pks)
        if include_tags:
            dataset_items = dataset_items.filter(tag_sets_to_query(include_tags))
        if exclude_tags:
            dataset_items = dataset_items.exclude(tag_sets_to_query(exclude_tags))
        result = dataset_items.values_list("pk", flat=True)
    db_tag_end = time.time()
    result = list(result[offset_to_return:end_to_return])
    filt_end = time.time()
    # Use the module logger instead of print() so these timing diagnostics
    # respect the deployment's logging configuration (lazy %-formatting).
    logger.info(
        "filtered_images_v2: tot: %s, db (%d items): %s, db tag: %s",
        filt_end - filt_start,
        len(result),
        db_end - db_start,
        db_tag_end - db_tag_start,
    )
    return result
def process_image_query_results_v2(request, dataset, query_response):
    """Map backend query results onto dataset-item pks, keeping query order.

    Only results whose label (a path) belongs to the filtered dataset items
    are kept; each surviving result contributes its pk and its distance.
    """
    filtered_pks = filtered_images_v2(request, dataset)
    # TODO(mihirg): Eliminate this database call by directly returning pks from backend
    items = DatasetItem.objects.filter(pk__in=filtered_pks)
    items_by_path = {item.path: item for item in items}

    ordered_pks = []
    distances = []
    for entry in query_response["results"]:
        item = items_by_path.get(entry["label"])
        if item is not None:
            ordered_pks.append(item.pk)
            distances.append(entry["dist"])

    return dict(
        pks=ordered_pks,
        distances=distances,
    )
def create_result_set_v2(results, type, model=None):
pks = results["pks"]
distances = results["distances"]
result_set_id = str(uuid.uuid4())
current_result_sets[result_set_id] = ResultSet(
type=type, ranking=pks, distances=distances, model=model
)
| |
'g3', 'g0')] = 4
# Goal-pattern -> extra moves contributed by linear conflicts on one
# row/column (two extra moves per conflict).  Keys are 4-tuples of
# 'g<goal-slot>' / 'x' markers as built by linear_conflicts().
conflict_table.update({
    ('g2', 'x', 'g3', 'g1'): 4,
    ('g2', 'x', 'g3', 'x'): 0,
    ('g2', 'x', 'x', 'g0'): 2,
    ('g2', 'x', 'x', 'g1'): 2,
    ('g2', 'x', 'x', 'g3'): 0,
    ('g3', 'g0', 'g1', 'g2'): 6,
    ('g3', 'g0', 'g1', 'x'): 4,
    ('g3', 'g0', 'g2', 'g1'): 6,
    ('g3', 'g0', 'g2', 'x'): 4,
    ('g3', 'g0', 'x', 'g1'): 4,
    ('g3', 'g0', 'x', 'g2'): 4,
    ('g3', 'g0', 'x', 'x'): 2,
    ('g3', 'g1', 'g0', 'g2'): 6,
    ('g3', 'g1', 'g0', 'x'): 4,
    ('g3', 'g1', 'g2', 'g0'): 6,
    ('g3', 'g1', 'g2', 'x'): 4,
    ('g3', 'g1', 'x', 'g0'): 4,
    ('g3', 'g1', 'x', 'g2'): 4,
    ('g3', 'g1', 'x', 'x'): 2,
    ('g3', 'g2', 'g0', 'g1'): 6,
    ('g3', 'g2', 'g0', 'x'): 4,
    ('g3', 'g2', 'g1', 'g0'): 6,
    ('g3', 'g2', 'g1', 'x'): 4,
    ('g3', 'g2', 'x', 'g0'): 4,
    ('g3', 'g2', 'x', 'g1'): 4,
    ('g3', 'g2', 'x', 'x'): 2,
    ('g3', 'x', 'g0', 'g1'): 4,
    ('g3', 'x', 'g0', 'g2'): 4,
    ('g3', 'x', 'g0', 'x'): 2,
    ('g3', 'x', 'g1', 'g0'): 4,
    ('g3', 'x', 'g1', 'g2'): 4,
    ('g3', 'x', 'g1', 'x'): 2,
    ('g3', 'x', 'g2', 'g0'): 4,
    ('g3', 'x', 'g2', 'g1'): 4,
    ('g3', 'x', 'g2', 'x'): 2,
    ('g3', 'x', 'x', 'g0'): 2,
    ('g3', 'x', 'x', 'g1'): 2,
    ('g3', 'x', 'x', 'g2'): 2,
    ('x', 'g0', 'g1', 'g2'): 0,
    ('x', 'g0', 'g1', 'g3'): 0,
    ('x', 'g0', 'g1', 'x'): 0,
    ('x', 'g0', 'g2', 'g1'): 2,
    ('x', 'g0', 'g2', 'g3'): 0,
    ('x', 'g0', 'g2', 'x'): 0,
    ('x', 'g0', 'g3', 'g1'): 2,
    ('x', 'g0', 'g3', 'g2'): 2,
    ('x', 'g0', 'g3', 'x'): 0,
    ('x', 'g0', 'x', 'g1'): 0,
    ('x', 'g0', 'x', 'g2'): 0,
    ('x', 'g0', 'x', 'g3'): 0,
    ('x', 'g1', 'g0', 'g2'): 2,
    ('x', 'g1', 'g0', 'g3'): 2,
    ('x', 'g1', 'g0', 'x'): 2,
    ('x', 'g1', 'g2', 'g0'): 4,
    ('x', 'g1', 'g2', 'g3'): 0,
    ('x', 'g1', 'g2', 'x'): 0,
    ('x', 'g1', 'g3', 'g0'): 4,
    ('x', 'g1', 'g3', 'g2'): 2,
    ('x', 'g1', 'g3', 'x'): 0,
    ('x', 'g1', 'x', 'g0'): 2,
    ('x', 'g1', 'x', 'g2'): 0,
    ('x', 'g1', 'x', 'g3'): 0,
    ('x', 'g2', 'g0', 'g1'): 4,
    ('x', 'g2', 'g0', 'g3'): 2,
    ('x', 'g2', 'g0', 'x'): 2,
    ('x', 'g2', 'g1', 'g0'): 4,
    ('x', 'g2', 'g1', 'g3'): 2,
    ('x', 'g2', 'g1', 'x'): 2,
    ('x', 'g2', 'g3', 'g0'): 4,
    ('x', 'g2', 'g3', 'g1'): 4,
    ('x', 'g2', 'g3', 'x'): 0,
    ('x', 'g2', 'x', 'g0'): 2,
    ('x', 'g2', 'x', 'g1'): 2,
    ('x', 'g2', 'x', 'g3'): 0,
    ('x', 'g3', 'g0', 'g1'): 4,
    ('x', 'g3', 'g0', 'g2'): 4,
    ('x', 'g3', 'g0', 'x'): 2,
    ('x', 'g3', 'g1', 'g0'): 4,
    ('x', 'g3', 'g1', 'g2'): 4,
    ('x', 'g3', 'g1', 'x'): 2,
    ('x', 'g3', 'g2', 'g0'): 4,
    ('x', 'g3', 'g2', 'g1'): 4,
    ('x', 'g3', 'g2', 'x'): 2,
    ('x', 'g3', 'x', 'g0'): 2,
    ('x', 'g3', 'x', 'g1'): 2,
    ('x', 'g3', 'x', 'g2'): 2,
    ('x', 'x', 'g0', 'g1'): 0,
    ('x', 'x', 'g0', 'g2'): 0,
    ('x', 'x', 'g0', 'g3'): 0,
    ('x', 'x', 'g1', 'g0'): 2,
    ('x', 'x', 'g1', 'g2'): 0,
    ('x', 'x', 'g1', 'g3'): 0,
    ('x', 'x', 'g2', 'g0'): 2,
    ('x', 'x', 'g2', 'g1'): 2,
    ('x', 'x', 'g2', 'g3'): 0,
    ('x', 'x', 'g3', 'g0'): 2,
    ('x', 'x', 'g3', 'g1'): 2,
    ('x', 'x', 'g3', 'g2'): 2,
})
def linear_conflicts(start_list, goal_list):
    """
    Calculates the number of moves to add to the estimate of
    the moves to get from start to goal based on the number
    of conflicts on a given row or column. start_list
    represents the current location and goal_list represents
    the final goal.
    """
    # Find which of the tiles in start_list have their goals on this line
    # and build a pattern to use in a lookup table, of the form
    # ('g0', 'g1', 'g3', 'g2') with 'x' filled in where the tile in that
    # slot has no goal on this line.  Tile 0 (the blank) never counts.
    goal_pattern = ['x', 'x', 'x', 'x']
    for g in range(4):
        for s in range(4):
            start_tile_num = start_list[s]
            if start_tile_num == goal_list[g] and start_tile_num != 0:
                goal_pattern[s] = 'g' + str(g)  # i.e. g0
    # Patterns absent from the table contribute no extra moves.
    # (Single .get replaces the previous membership test + second lookup;
    # the needless `global` declaration for a read-only access is dropped.)
    return conflict_table.get(tuple(goal_pattern), 0)
class lcmap(dict):
    """Dictionary variant that yields 0 for absent keys instead of raising.

    Missing keys are NOT inserted on lookup -- only the default value is
    returned.
    """
    def __missing__(self, key):
        # A pattern with no recorded conflicts costs nothing extra.
        return 0
def listconflicts(goal_list):
"""
list all possible start lists that will have at least
one linear conflict.
Possible goal tile configurations
g g g g
g g g x
g g x g
g x g g
x g g g
g g x x
g x g x
g x x g
x g g x
x g x g
x x g g
"""
all_tiles = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
non_goal_tiles = []
for t in all_tiles:
if t not in goal_list:
non_goal_tiles.append(t)
combinations = lcmap()
# g g g g
for i in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(i)
for j in tile_list2:
tile_list3 = tile_list2[:]
tile_list3.remove(j)
for k in tile_list3:
tile_list4 = tile_list3[:]
tile_list4.remove(k)
for l in tile_list4:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# g g g x
for i in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(i)
for j in tile_list2:
tile_list3 = tile_list2[:]
tile_list3.remove(j)
for k in tile_list3:
for l in non_goal_tiles:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# g g x g
for i in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(i)
for j in tile_list2:
tile_list3 = tile_list2[:]
tile_list3.remove(j)
for k in non_goal_tiles:
for l in tile_list3:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# g x g g
for i in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(i)
for j in non_goal_tiles:
for k in tile_list2:
tile_list3 = tile_list2[:]
tile_list3.remove(k)
for l in tile_list3:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# x g g g
for i in non_goal_tiles:
for j in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(j)
for k in tile_list2:
tile_list3 = tile_list2[:]
tile_list3.remove(k)
for l in tile_list3:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# g g x x
for i in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(i)
for j in tile_list2:
tile_list3 = tile_list2[:]
tile_list3.remove(j)
for k in non_goal_tiles:
tile_list4 = non_goal_tiles[:]
tile_list4.remove(k)
for l in tile_list4:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# g x g x
for i in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(i)
for j in non_goal_tiles:
tile_list3 = non_goal_tiles[:]
tile_list3.remove(j)
for k in tile_list2:
for l in tile_list3:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# g x x g
for i in goal_list:
tile_list2 = goal_list[:]
tile_list2.remove(i)
for j in non_goal_tiles:
tile_list3 = non_goal_tiles[:]
tile_list3.remove(j)
for k in tile_list2:
for l in tile_list3:
start_list = (i, j, k, l)
conflictadd = linear_conflicts(start_list,goal_list)
if conflictadd > 0:
combinations[start_list]=conflictadd
# x g g x
for i in non_goal_tiles:
tile_list2 = non_goal_tiles[:]
tile_list2.remove(i)
for j in goal_list:
tile_list3 = goal_list[:]
tile_list3.remove(j)
for k in tile_list3:
for l in tile_list2:
start_list = (i, | |
# Repository: ma-kast/AMfe
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Module for updating bases
"""
import numpy as np
import scipy as sp
from scipy.linalg import eigh, svd
from amfe.linalg.orth import m_orthogonalize
__all__ = ['ifpks',
'ifpks_modified',
'update_modes',
]
def update_modes(K, M, V_old, Kinv_operator, r=2, tol=1e-6, verbose=False, modified_ifpks=True):
    """
    Update eigenmodes with the Inverse Free Preconditioned Krylov Subspace method.

    Parameters
    ----------
    K : array_like
        Stiffness matrix.
    M : array_like
        Mass matrix.
    V_old : numpy.ndarray
        Matrix whose columns are the old eigenmodes.
    Kinv_operator : LinearOperator
        Linear operator solving K_old x = b for a similar stiffness matrix
        K_old.  This should be a cheap operation, e.g. an already factorized
        K_old from a direct solver.
    r : int
        Number of Krylov search directions.
    tol : float
        Desired tolerance for the squared eigenfrequency omega**2.
    verbose : bool
        Flag if verbose version shall be used.
    modified_ifpks : bool (default: True)
        Flag if the modified (SVD-based, recommended) variant shall be used.

    Returns
    -------
    omega : ndarray
        Updated angular eigenfrequencies.
    X : numpy.ndarray
        Matrix with the updated eigenmodes as column vectors.
    """
    # Both variants share the same call signature; pick one and delegate.
    solver = ifpks_modified if modified_ifpks else ifpks
    rho, V = solver(K, M, Kinv_operator, V_old, r, tol, verbose=verbose)
    return np.sqrt(rho), V
def ifpks(K, M, P, X_0, r=2, tol=1e-6, verbose=False, m_orth=m_orthogonalize):
    """
    Inverse Free Preconditioned Krylov Subspace Method according to [Voormeeren2013]_

    This method can be helpful to update modes if some parameters of the model
    have changed slightly.

    Parameters
    ----------
    K : sparse_matrix
        Stiffness matrix of the current problem
    M : sparse_matrix
        Mass matrix of the current problem
    P : LinearOperator
        Linear Operator for Preconditioning,
        e.g. solving K_old x = b for a similar stiffness matrix K_old.
        This should be a cheap operation e.g. from a direct solver, K_old already factorized
    X_0 : numpy.ndarray
        Matrix with Eigenmodes of the old/reference problem
    r : int
        number of Krylov search directions
    tol : float
        desired tolerance for the squared eigenfrequency omega**2
    verbose : bool
        Flag if verbose version shall be used
    m_orth : function
        Pointer to orthogonalization scheme

    Returns
    -------
    rho : ndarray
        converged omega**2 values (last iterate)
    X : numpy.ndarray
        matrix with eigenmodes as column vectors

    References
    ----------
    .. [Voormeeren2013] <NAME>, <NAME>,
       "Updating component reduction bases of static and vibration modes using preconditioned iterative techniques",
       Computer Methods in Applied Mechanics and Engineering, Volume 253, 2013, Pages 39-59.
    """
    X = X_0
    no_of_modes = X.shape[1]
    X = m_orth(X, M)
    # Workspace for the block-Krylov basis: r+1 blocks of no_of_modes columns.
    Zm = np.zeros((X.shape[0], X.shape[1] * (r + 1)))
    rho = list()
    k = 0
    rho.append(np.diag(X.T @ K @ X))
    if verbose:
        print('IFPKS Iteration No. {}, rho: {}'.format(k, rho[k]))
    # Iterate until the relative change of the Ritz values falls below tol.
    while k == 0 or np.linalg.norm(rho[k] - rho[k - 1]) > tol * np.linalg.norm(rho[k]):
        # np.inf instead of the np.Inf alias (removed in NumPy 2.0).
        rho.append(np.inf)
        # First block of the Krylov basis is the current (M-orthonormal) X.
        Zm[:, 0:no_of_modes] = X
        for j in np.arange(r):
            # Preconditioned residual of the current Ritz pairs.
            Zm[:, no_of_modes * (j + 1):no_of_modes * (j + 2)] = P.dot(
                K @ Zm[:, no_of_modes * j:no_of_modes * (j + 1)]
                - M @ Zm[:, no_of_modes * j:no_of_modes * (j + 1)] * rho[k])
            # Keep the growing Krylov block M-orthonormal.
            Zm[:, :no_of_modes * (j + 2)] = m_orth(Zm[:, :no_of_modes * (j + 2)], M)
        # Rayleigh-Ritz projection onto the Krylov subspace.
        Kr = Zm.T @ K @ Zm
        lam, V = eigh(Kr)
        # NOTE(review): m_orth is called here without the mass matrix --
        # presumably orthonormalizing the small eigenvector block w.r.t. the
        # identity; confirm against m_orthogonalize's default argument.
        V = m_orth(V[:, :no_of_modes])
        X = Zm @ V
        X = m_orth(X, M)
        rho[k + 1] = np.diag(X.T @ K @ X)
        k = k + 1
        if verbose:
            # Fix: this print previously ran unconditionally, ignoring the
            # verbose flag (unlike the print before the loop).
            print('IFPKS Iteration No. {}, rho: {}'.format(k, rho[k]))
    return rho[-1], X
def ifpks_modified(K, M, P, X_0, r=2, tol=1e-6, verbose=False, m_orth=m_orthogonalize):
    """
    Modified Inverse Free Preconditioned Krylov Subspace Method

    This method can be helpful to update modes if some parameters of the model
    have changed slightly.
    It is slightly modified compared to [Voormeeren2013]_.
    Instead of M-orthogonalizing all Krylov vectors, an orthogonalization and
    truncation using an SVD is used.
    This is more stable as the Krylov vectors can be almost linearly dependent.
    Additionally an M-orthogonalization would slow down the algorithm significantly.

    Parameters
    ----------
    K : sparse_matrix
        Stiffness matrix of the current problem
    M : sparse_matrix
        Mass matrix of the current problem
    P : LinearOperator
        Linear Operator for Preconditioning,
        e.g. solving K_old x = b for a similar stiffness matrix K_old.
        This should be a cheap operation e.g. from a direct solver, K_old already factorized
    X_0 : numpy.ndarray
        Matrix with Eigenmodes of the old/reference problem
    r : int
        number of Krylov search directions
    tol : float
        desired tolerance for the squared eigenfrequency omega**2
    verbose : bool
        Flag if verbose version shall be used
    m_orth : function
        Pointer to orthogonalization scheme

    Returns
    -------
    rho : ndarray
        converged omega**2 values (last iterate)
    X : numpy.ndarray
        matrix with eigenmodes as column vectors

    References
    ----------
    .. [Voormeeren2013] <NAME>, <NAME>,
       "Updating component reduction bases of static and vibration modes using preconditioned iterative techniques",
       Computer Methods in Applied Mechanics and Engineering, Volume 253, 2013, Pages 39-59.
    """
    X = X_0
    no_of_modes = X.shape[1]
    X = m_orth(X, M)
    # Workspace for the block-Krylov basis: r+1 blocks of no_of_modes columns.
    Zm = np.zeros((X.shape[0], X.shape[1] * (r + 1)))
    rho = list()
    k = 0
    rho.append(np.diag(X.T @ K @ X))
    if verbose:
        print('IFPKS Iteration No. {}, rho: {}'.format(k, rho[k]))
    # Iterate until the relative change of the Ritz values falls below tol.
    while k == 0 or np.linalg.norm(rho[k] - rho[k - 1]) > tol * np.linalg.norm(rho[k]):
        # np.inf instead of the np.Inf alias (removed in NumPy 2.0).
        rho.append(np.inf)
        Zm[:, 0:no_of_modes] = X
        for j in np.arange(r):
            # Preconditioned residual of the current Ritz pairs.
            Zm[:, no_of_modes * (j + 1):no_of_modes * (j + 2)] = P.solve(
                K.dot(Zm[:, no_of_modes * j:no_of_modes * (j + 1)])
                - M.dot(Zm[:, no_of_modes * j:no_of_modes * (j + 1)]) * rho[k])
            Zm[:, :no_of_modes * (j + 2)] = m_orth(Zm[:, :no_of_modes * (j + 2)], M)
            # Orthogonalize/truncate the Krylov block with an SVD (scipy);
            # a randomized pca (Facebook svd) could be substituted here.
            Zm[:, :no_of_modes * (j + 2)], s, _ = svd(
                Zm[:, :no_of_modes * (j + 2)], full_matrices=False)
        # Drop directions with negligible singular values before projecting.
        # (Mask hoisted into `keep` -- previously recomputed five times.)
        keep = s > 1e-8
        Kr = Zm[:, keep].T.dot(K).dot(Zm[:, keep])
        Mr = Zm[:, keep].T.dot(M).dot(Zm[:, keep])
        lam, V = eigh(Kr, Mr)
        X = Zm[:, keep].dot(V)
        X = m_orth(X[:, :no_of_modes], M)
        rho[k + 1] = np.diag(X.T.dot(K).dot(X))
        k = k + 1
        if verbose:
            # Fix: this print previously ran unconditionally, ignoring the
            # verbose flag (unlike the print before the loop).
            print('IFPKS Iteration No. {}, rho: {}'.format(k, rho[k]))
    return rho[-1], X
def update_static_derivatives(V, K_func, Kinv_operator, Theta_0, M=None, omega=0.0, h=1.0, verbose=False, symmetric=True,
finite_diff='central'):
"""
Update the static correction derivatives for the given basis V.
Optionally, a frequency shift can be performed.
Parameters
----------
V : ndarray
array containing the linear basis
K_func : function
function returning the tangential stiffness matrix for a given
displacement. Has to work like `K = K_func(u)`.
Kinv_operator : LinearOperator
Linear Operator solving K_old x = b for a similar stiffness matrix K_old
This should be a cheap operation e.g. from a direct solver, K_old already factorized
M : ndarray, optional
mass matrix. Can be sparse or dense. If `None` is given, the mass of 0
is assumed. Default value is `None`.
omega : float, optional
shift frequency. Default value is 0.
h : float, optional
step width for finite difference scheme. Default value is 500 * machine
epsilon
verbose : bool, optional
flag for verbosity. Default value: True
finite_diff : str {'central', 'forward', backward}
Method for finite difference scheme. 'central' computes the finite
difference based on a central difference scheme, 'forward' based on an
forward scheme etc. Note that the upwind scheme can cause severe
distortions of the static correction derivative.
Returns
-------
Theta : ndarray
three dimensional array of static corrections derivatives. Theta[:,i,j]
contains the static derivative 1/2 * dx_i / dx_j. As the static
derivatives are symmetric, Theta[:,i,j] == Theta[:,j,i].
See Also
--------
static_derivatives
"""
P = Kinv_operator
no_of_dofs = V.shape[0]
no_of_modes = V.shape[1]
Theta = np.zeros((no_of_dofs, no_of_modes, no_of_modes))
K = K_func(np.zeros(no_of_dofs))
if (omega > 0) and (M is not None):
K_dyn = K - omega ** 2 * M
else:
K_dyn = K
for i in range(no_of_modes):
if verbose:
print('Computing finite difference K-matrix')
| |
# File: main backup.py
import pygame
from pygame.locals import *
# from socket import *
# from threading import Thread
import tj
import json
# import pickle
import time
import sys
# import random
import tj
# import zlib
# USER CHANGABLE CONSTANTS
# AQUIRING CONSTANTS ----------------------------------------
# Default settings; overridden below by Settings.json when it exists.
D = {
    'RES': [500, 500],          # window resolution [width, height]
    'SIZE': [40, 40],           # outer player rectangle size
    'SIZE_SMALL': [32, 32],     # inner (hollow) rectangle size
    'ADDER': 6,                 # player speed in pixels per frame
    'HIT_POINTS': 5.3,          # life lost per projectile hit
    'T_SKIP': [1, 1, 1],        # per-channel skip passed to tj.transform_color
    'PROJ_RADIUS': 5,           # projectile draw radius
    'FPS': 60,
    'BG_COLOR': [40, 40, 40],
    'COLOR_1': [255, 205, 220],  # My color
    'COLOR_0': [80, 130, 80],  # Opponent Color
    'FONT': "fc.ttf",
    'WARNING_FONT': "warning.ttf",
    'WARNING_COLOR': [220, 40, 40],
    'SPLASH_TIME': 2,           # seconds the low-health warning is shown
    'SHOW_WATERMARK': True,
    'WATERMARK_FONT': "watermark.ttf",
    'WATERMARK_COLOR': [48, 48, 48],
    'SHOW_FPS': True}
# Load user settings on top of the defaults above.  Using D.update keeps the
# default value for any key missing from an older Settings.json instead of
# crashing later with a KeyError.
try:
    with open('Settings.json', 'r') as f:
        D.update(json.loads(f.read()))
except (OSError, ValueError):
    # Missing or unreadable/corrupt settings file: (re)write the defaults.
    # ValueError also covers json.JSONDecodeError; the previous bare
    # `except:` swallowed even KeyboardInterrupt/SystemExit.
    with open('Settings.json', 'w') as f:
        f.write(json.dumps(D))
# Unpack the settings dict into module-level constants.
RES = D['RES']
SIZE = D['SIZE']
SIZE_SMALL = D['SIZE_SMALL']
ADDER = D['ADDER']
HIT_POINTS = D['HIT_POINTS']
T_SKIP = D['T_SKIP']
PROJ_RADIUS = D['PROJ_RADIUS']
FPS = D['FPS']
BG_COLOR = D['BG_COLOR']
COLOR_1 = D['COLOR_1']  # My color
COLOR_0 = D['COLOR_0']  # Opponent Color
FONT = D['FONT']
WARNING_FONT = D['WARNING_FONT']
WARNING_COLOR = D['WARNING_COLOR']
SPLASH_TIME = D['SPLASH_TIME']
SHOW_WATERMARK = D['SHOW_WATERMARK']
WATERMARK_COLOR = D['WATERMARK_COLOR']
SHOW_FPS = D['SHOW_FPS']
WATERMARK_FONT = D['WATERMARK_FONT']
# ------------------------------------------------------------------
# AUTO CALCULATED CONSTANTS
# Fade palettes between player color and background, used to make a
# low-health player blink (see Player.update_player).
TRANSFORMED_COLOR_1 = tj.transform_color(
    COLOR_1, BG_COLOR, skipR=T_SKIP[0], skipG=T_SKIP[1], skipB=T_SKIP[2])
TRANSFORMED_COLOR_0 = tj.transform_color(
    COLOR_0, BG_COLOR, skipR=T_SKIP[0], skipG=T_SKIP[1], skipB=T_SKIP[2])
# Append the reversed palette so cycling fades out and back in smoothly.
TRANSFORMED_COLOR_1 = TRANSFORMED_COLOR_1+TRANSFORMED_COLOR_1[::-1]
TRANSFORMED_COLOR_0 = TRANSFORMED_COLOR_0+TRANSFORMED_COLOR_0[::-1]
# Diagonal speed component: ADDER / sqrt(2), so diagonal motion keeps
# the same overall speed as straight motion.
ADDER2 = round(ADDER/1.414, 2)
PROJ_MULTI = 1.414  # projectiles move this many times faster than players
# Mutable state for the low-health warning splash (see display_info).
SPLASHED = False
temp_splash = 0
time_started = False
# Offset that centers the small inner rectangle inside the player rectangle.
DIFF = [(SIZE[0]-SIZE_SMALL[0])//2, (SIZE[1]-SIZE_SMALL[1])//2]
def cycle(L):  # A simple function to put the first element of the list, at last
    """Rotate *L* in place by one position and return the same list."""
    L.append(L.pop(0))
    return L
def display_text(screen, text, size, font, color, pos):
    """Render *text* with the given TTF font file, point size and color,
    then blit it onto *screen* at *pos* (top-left corner)."""
    face = pygame.font.Font(font, size)
    surface = face.render(text, True, color)
    screen.blit(surface, pos)
def display_other_info(screen, fps):
    """Draw the optional background watermark and the FPS counter overlay."""
    if SHOW_WATERMARK:
        # Three staggered strings composing the "TJ Productions 2019" watermark.
        display_text(screen, f"TJ ", 220, WATERMARK_FONT,
                     WATERMARK_COLOR, [RES[0]//2-100, 10])
        display_text(screen, f"Productions", 92, WATERMARK_FONT,
                     WATERMARK_COLOR, [10, RES[1]//3+20])
        display_text(screen, f"2019", 220, WATERMARK_FONT,
                     WATERMARK_COLOR, [50, RES[1]//2])
    if SHOW_FPS:
        fps = round(fps, 1)
        display_text(screen, f"FPS | {fps}", 15, FONT,
                     [200, 200, 200], [RES[0]//2-35, RES[1]-25])
def display_info(screen, Player1, Player0, Proj):
    """Draw the HUD: both players' health, a timed low-health warning for
    the local player, and the live projectile counts.

    Player1/Player0 are Player objects; Proj is the shared Projectile pool.
    """
    # Fix: the previous declaration also listed TEMP_SPLASH, a name that is
    # never defined or used anywhere in the module.
    global SPLASHED, time_started, temp_splash
    # Player1,2 and Proj are all objects
    # Firstly, display Health inf of both players
    Life1 = round(Player1.life, 1)
    Life0 = round(Player0.life, 1)
    if Player1.life < 25 and not SPLASHED:
        if not time_started:
            # Start the splash timer the first time health drops below 25.
            temp_splash = time.time()
            time_started = True
        display_text(screen, f"HEALTH LOW!", RES[0]//5,
                     WARNING_FONT, WARNING_COLOR, [5, RES[1]//2-30])
        if time.time()-temp_splash > SPLASH_TIME:
            # Hide the warning permanently after SPLASH_TIME seconds.
            SPLASHED = True
    display_text(screen, f" You | {Life1} %", 15, FONT, COLOR_1, [10, 10])
    display_text(screen, f"Opponent | {Life0} %", 15, FONT, COLOR_0, [10, 28])
    # Next, display number of live projectiles
    display_text(screen, f" Your | {Proj.num_Type1} %",
                 17, FONT, COLOR_1, [RES[0]-130, 10])
    display_text(screen, f"Opponent | {Proj.num_Type0} %",
                 17, FONT, COLOR_0, [RES[0]-130, 28])
# class Receiver:
# def __init__(self):
# self.my_ip = tj.get_ip_address() # This PC's IP address
# self.port = 8211
# self.buffer = 1300
# self.my_addr = (self.my_ip, self.port)
# self.socket = socket(AF_INET, SOCK_DGRAM)
# self.socket.bind(self.my_addr)
# def recv_var(self):
# global DATA_RECEIVED
# """Run this function in a thread"""
# while True:
# data = self.socket.recv(self.buffer)
# DATA_RECEIVED = pickle.loads(data)
# def close(self):
# self.socket.close()
# class Sender:
# def __init__(self):
# self.partner_ip = self.__get_pip() # Get IP address of partner (p_ip)
# self.port = 8211
# self.p_addr = (self.partner_ip, self.port)
# self.socket = socket(AF_INET, SOCK_DGRAM) # Make a UDP socket
# @staticmethod
# def __get_pip():
# ip = tj.get_ip_address() # input('Enter the IP address of the opponent computer: ')
# return ip
# def send_var(self, variable):
# data = pickle.dumps(variable)
# self.socket.sendto(data, self.p_addr)
# def close(self):
# self.socket.close()
class Player:
    """A square player avatar: movement, drawing, shooting and health.

    Type 1 is the local player (arrow keys + space); Type 0 is the
    opponent (WASD + x).  Geometry and speeds come from the module-level
    constants loaded from Settings.json.
    """

    def __init__(self, Type):  # Type: 1 is me , Type: 0 is enemy
        if Type:
            self.controls = {'UP': K_UP, 'DOWN': K_DOWN,
                             'LEFT': K_LEFT, 'RIGHT': K_RIGHT, 'SHOOT': K_SPACE}
            # Spawn offset so the two players do not start on top of each other.
            s = [-SIZE[0]*2, -SIZE[1]*2]
            self.color = COLOR_1
        else:
            self.controls = {'UP': K_w, 'DOWN': K_s,
                             'LEFT': K_a, 'RIGHT': K_d, 'SHOOT': K_x}
            s = [0, 0]
            self.color = COLOR_0
        self.Type = Type
        self.coord = [(RES[0]+s[0])//2, (RES[1]+s[1]) //
                      2]  # Coordinates of the upper left corner of the Player
        self.center = None
        self.update_center()
        self.player = None  # it is the pygame rectangle object
        self.vel = None  # last frame's [x_vel, y_vel]; None until first update
        self.PLAYER_SHOT = False  # debounce: True while SHOOT is held down
        self.life = 100

    def update_center(self):
        """Recompute the player's center point from its top-left corner."""
        self.center = [self.coord[0]+SIZE[0]/2, self.coord[1]+SIZE[1]/2]

    def update_player(self, screen):
        """Draw the player as a hollow rectangle; flash its color when life < 25."""
        global TRANSFORMED_COLOR_0, TRANSFORMED_COLOR_1
        if self.life < 25:
            # Low health: step through the precomputed fade palette each frame
            # so the player appears to blink.
            if self.Type:
                self.color = TRANSFORMED_COLOR_1[0]
                TRANSFORMED_COLOR_1 = cycle(TRANSFORMED_COLOR_1)
            else:
                self.color = TRANSFORMED_COLOR_0[0]
                TRANSFORMED_COLOR_0 = cycle(TRANSFORMED_COLOR_0)
        self.player = pygame.draw.rect(
            screen, self.color, [int(self.coord[0]), int(self.coord[1]), *SIZE])
        # Inner rectangle in the background color makes the player hollow.
        pygame.draw.rect(screen, BG_COLOR,
                         [int(self.coord[0])+DIFF[0], int(self.coord[1])+DIFF[1], *SIZE_SMALL])

    @staticmethod
    def __check_boundary(coord, vel):
        """Apply *vel* to *coord*, suppressing movement past the screen edges."""
        x_vel, y_vel = vel
        x_coord, y_coord = coord
        if y_vel < 0 and 0 <= y_coord-ADDER2-1:
            # Means if player is going UP and player is below the UPPER boundary
            y_coord += y_vel
        elif y_vel > 0 and y_coord <= (RES[1]-SIZE[1]-ADDER2-1):
            # Means if player is going DOWN and player is above the LOWER boundary
            y_coord += y_vel
        if x_vel < 0 and 0 <= x_coord-ADDER2-1:
            # Means if player is going LEFT and player is below the LEFT boundary
            x_coord += x_vel
        elif x_vel > 0 and x_coord <= (RES[0]-SIZE[0]-ADDER2-1):
            # Means if player is going RIGHT and player is above the RIGHT boundary
            x_coord += x_vel
        return [x_coord, y_coord]

    def handle_events(self, Proj):
        """Poll the keyboard, update velocity/position and trigger shooting.

        NOTE(review): the *Proj* parameter is unused -- add_projectile()
        operates on the module-level Proj global instead; confirm intent.
        """
        # global DATA_TO_SEND
        # Proj is the Projectile object here, Sender is the Sender object
        D = pygame.key.get_pressed()
        x_vel, y_vel = 0, 0
        if D[self.controls['UP']]:
            y_vel = -ADDER
        elif D[self.controls['DOWN']]:
            y_vel = ADDER
        if D[self.controls['LEFT']]:
            # Diagonal movement uses ADDER2 (~ADDER/sqrt(2)) on both axes so
            # the overall speed stays constant.
            if y_vel < 0:
                x_vel = -ADDER2
                y_vel = -ADDER2
            elif y_vel > 0:
                x_vel = -ADDER2
                y_vel = ADDER2
            else:
                x_vel = -ADDER
        elif D[self.controls['RIGHT']]:
            if y_vel < 0:
                x_vel = ADDER2
                y_vel = -ADDER2
            elif y_vel > 0:
                x_vel = ADDER2
                y_vel = ADDER2
            else:
                x_vel = ADDER
        if D[self.controls['SHOOT']]:
            player_shoots = True
        else:
            player_shoots = False
        # NOTE(review): shooting happens before self.vel is refreshed below,
        # so a projectile inherits the PREVIOUS frame's velocity -- and
        # self.vel is still None if SHOOT is pressed on the very first frame.
        self.add_projectile(player_shoots)
        self.coord = self.__check_boundary(self.coord, [x_vel, y_vel])
        # DATA_TO_SEND.append(self.coord)
        self.update_center()
        self.vel = [x_vel, y_vel]

    def add_projectile(self, player_shoots):
        """Fire one projectile per SHOOT key press (edge-triggered)."""
        global Proj
        if player_shoots:
            if not self.PLAYER_SHOT:
                # Means Player hits the shoot button, and the shoot button is
                # not already pressed, this approach allows for only 1 shot at a time
                vel = self.vel
                coord = self.center
                Type = self.Type
                proj = [coord, vel, Type]  # NOTE(review): unused; leftover from the commented-out networking code
                Proj.add_projectile(coord, vel, Type)
                # DATA_TO_SEND.append(proj)
                self.PLAYER_SHOT = True
        else:
            self.PLAYER_SHOT = False

    def check_died(self):
        """Print a message to stdout when this player's health reaches zero."""
        if self.life <= 0:
            print(f'Player {self.Type} died!!!')
class Projectile:
    """Pool of all live projectiles, stored as [coord, vel, Type] triples."""

    def __init__(self):
        self.projectiles = []  # each entry: [[x, y], [x_vel, y_vel], Type]
        # Live projectile counts per owner, refreshed by draw_proj().
        self.num_Type1, self.num_Type0 = 0, 0

    def add_projectile(self, coord, vel, Type):
        """Register a new projectile fired from *coord* with velocity *vel*."""
        proj = [coord, vel, Type]
        self.projectiles.append(proj)

    @staticmethod
    def __check_proj_boundary(coord, vel):
        """Check if the projectile is in the boundary or not.

        If it is in the boundary, then return updated coordinates.
        Else return None, as a signal to destroy that projectile.
        """
        x_vel, y_vel = vel
        # Projectiles travel PROJ_MULTI times faster than the player.
        x_vel, y_vel = x_vel*PROJ_MULTI, y_vel*PROJ_MULTI
        x_coord, y_coord = coord
        if y_vel < 0 and 0 <= y_coord:
            # Means if proj is going UP and player is below the UPPER boundary
            y_coord += y_vel
        elif y_vel > 0 and y_coord <= RES[1]:
            # Means if proj is going DOWN and player is above the LOWER boundary
            y_coord += y_vel
        if x_vel < 0 and 0 <= x_coord:
            # Means if proj is going LEFT and player is below the LEFT boundary
            x_coord += x_vel
        elif x_vel > 0 and x_coord <= RES[0]:
            # Means if proj is going RIGHT and player is above the RIGHT boundary
            x_coord += x_vel
        new_coord = [x_coord, y_coord]
        # NOTE(review): a projectile whose position did not change (e.g. fired
        # with zero velocity, or blocked at every border) is treated as
        # out-of-bounds and destroyed -- confirm this is the intended rule.
        if new_coord != coord:
            return new_coord
        else:
            return None

    def draw_proj(self, screen, Player1, Player2):
        """Advance, draw and cull every projectile; refresh per-owner counts."""
        new_projectiles = []
        self.num_Type1, self.num_Type0 = 0, 0
        for proj in self.projectiles:
            coord = proj[0]  # Coordinates of the projectile
            vel = proj[1]  # velocity of the projectile
            Type = proj[2]  # Color of the projectile
            to_not_draw = False
            if Type:  # Means is Type: 1, means projectile is fired by player 1
                color = COLOR_1
            else:
                color = COLOR_0
            new_coord = self.__check_proj_boundary(coord, vel)
            if new_coord:
                if Type != Player1.Type:
                    # That is, if the projectile and player type is different
                    to_not_draw = self.check_collision(Player1, coord)
                elif Type != Player2.Type:
                    # That is, if the projectile and player type is different
                    to_not_draw = self.check_collision(Player2, coord)
                if not to_not_draw:
                    pygame.draw.circle(
                        screen, color, [int(new_coord[0]), int(new_coord[1])], PROJ_RADIUS)
                    new_projectiles.append([new_coord, vel, Type])
                    if Type:
                        self.num_Type1 += 1
                    else:
                        self.num_Type0 += 1
        # Projectiles that left the screen or hit a player are dropped here.
        self.projectiles = new_projectiles

    def check_collision(self, Player, proj_coord):
        """Apply damage if *proj_coord* hits *Player*; return True on a hit."""
        # Here Player is the Player object
        # proj_coord are the coord. of projectile
        if Player.player.collidepoint(proj_coord):
            Player.life -= HIT_POINTS
            if Player.life < 0:
                # Clamp so the HUD never shows negative health.
                Player.life = 0
            return True
        return False
# def handle_enemy():
# # This is the function, specifically | |
ax[0].set_title(f'Band {band} Subband {sb}')
if subband_plot_with_slow:
ff = np.arange(-3, 3.1, .05)
rr, ii = self.eta_scan(band, sb, ff, 10, write_log=False)
dd = rr + 1.j*ii
sbc = self.get_subband_centers(band)
ax[1].plot(ff+sbc[1][sb], np.abs(dd)/2.5E6)
if save_plot:
pna = plotname_append
save_name = f'{timestamp}_find_freq_b{band}_sb{sb:03}{pna}.png'
os.path.join(self.plot_dir, save_name)
plt.savefig(path, bbox_inches='tight')
self.pub.register_file(path, 'find_freq', plot=True)
plt.close()
else:
self.log(f'No data for subband {sb}')
return freq[peak]
@set_action()
def find_flag_blocks(self, flag, minimum=None, min_gap=None):
    """Locate runs of adjacent True values in a boolean array.

    Args
    ----
    flag : array-like of bool
        The array in which to find blocks.
    minimum : int or None, optional, default None
        Discard blocks shorter than this length.
    min_gap : int or None, optional, default None
        Gaps between blocks smaller than this are filled in first
        (via pad_flags).

    Returns
    -------
    starts : array of int
        The start index of each block.
    ends : array of int
        The last index of each block. NOTE: the end index is inclusive --
        add 1 when slicing, where the upper limit should be after the block.
    """
    if min_gap is not None:
        # Close small gaps first so nearby blocks merge into one.
        arr = self.pad_flags(np.asarray(flag, dtype=bool),
                             min_gap=min_gap).astype(np.int8)
    else:
        arr = np.asarray(flag).astype(int)

    # +1/-1 transitions in the 0/1 array mark block boundaries.
    transitions = np.diff(arr)
    block_starts = np.where(transitions == 1)[0] + 1
    if arr[0]:
        # The array begins inside a block.
        block_starts = np.concatenate([[0], block_starts])
    block_ends = np.where(transitions == -1)[0]
    if arr[-1]:
        # The array ends inside a block.
        block_ends = np.concatenate([block_ends, [len(arr) - 1]])

    if minimum is None:
        return block_starts, block_ends
    keep = np.where(block_ends - block_starts + 1 > minimum)[0]
    return block_starts[keep], block_ends[keep]
@set_action()
def pad_flags(self, f, before_pad=0, after_pad=0, min_gap=0, min_length=0):
    """
    Adds and combines flagging.

    Args
    ----
    f : list of bool
        The flag array to pad.
    before_pad : int, optional, default 0
        The number of samples to pad before a flag.
    after_pad : int, optional, default 0
        The number of samples to pad after a flag.
    min_gap : int, optional, default 0
        Gaps between flag blocks smaller than this are merged into one block.
    min_length : int, optional, default 0
        The smallest length (before padding) a block must exceed to be kept.

    Returns
    -------
    pad_flag : array of bool
        The padded boolean array.
    """
    before, after = self.find_flag_blocks(f)
    # find_flag_blocks returns inclusive end indices; make them exclusive.
    after += 1
    # Merge blocks separated by a gap smaller than min_gap by extending
    # each such block's end to the start of the following block.
    inds = np.where(np.subtract(before[1:],after[:-1]) < min_gap)[0]
    after[inds] = before[inds+1]
    # Apply the requested padding on both sides of every block.
    before -= before_pad
    after += after_pad
    padded = np.zeros_like(f)
    for b, a in zip(before, after):
        # Keep only blocks whose un-padded length exceeds min_length;
        # clamp the start at 0 so padding cannot run off the front.
        if (a-after_pad)-(b+before_pad) > min_length:
            padded[np.max([0,b]):a] = True
    return padded
@set_action()
def plot_find_peak(self, freq, resp, peak_ind, save_plot=True,
                   save_name=None):
    """
    Plots the output of find_freq: amplitude/I/Q in the top panel and
    unwrapped phase in the bottom panel, with detected peaks marked.

    Args
    ----
    freq : float array
        The frequency data.
    resp : float array
        The (complex) response to full_band_resp.
    peak_ind : int array
        The indices of peaks found.
    save_plot : bool, optional, default True
        Whether to save the plot.
    save_name : str or None, optional, default None
        The name of the plot.
    """
    if save_plot:
        # Non-interactive mode: the figure is only written to disk.
        plt.ioff()
    else:
        plt.ion()
    # Break out components
    Idat = np.real(resp)
    Qdat = np.imag(resp)
    phase = np.unwrap(np.arctan2(Qdat, Idat))
    # Plot
    fig, ax = plt.subplots(2, sharex=True, figsize=(6,4))
    ax[0].plot(freq, np.abs(resp), label='amp', color='b')
    ax[0].plot(freq, Idat, label='I', color='r', linestyle=':', alpha=.5)
    ax[0].plot(freq, Qdat, label='Q', color='g', linestyle=':', alpha=.5)
    ax[0].legend(loc='lower right')
    ax[1].plot(freq, phase, color='b')
    ax[1].set_ylim((-np.pi, np.pi))
    if len(peak_ind):  # empty array returns False
        # Mark each detected peak on both the amplitude and phase panels.
        ax[0].plot(freq[peak_ind], np.abs(resp[peak_ind]), 'x', color='k')
        ax[1].plot(freq[peak_ind], phase[peak_ind], 'x', color='k')
    else:
        self.log('No peak_ind values.', self.LOG_USER)
    fig.suptitle("Peak Finding")
    ax[1].set_xlabel("Frequency offset from Subband Center (MHz)")
    ax[0].set_ylabel("Response")
    ax[1].set_ylabel("Phase [rad]")
    if save_plot:
        if save_name is None:
            self.log('Using default name for saving: find_peak.png \n' +
                     'Highly recommended that you input a non-default name')
            save_name = 'find_peak.png'
        else:
            self.log(f'Plotting saved to {save_name}')
        path = os.path.join(self.plot_dir, save_name)
        plt.savefig(path, bbox_inches='tight')
        # Register the saved figure with the publisher for bookkeeping.
        self.pub.register_file(path, 'find_freq', plot=True)
        plt.close()
@set_action()
def eta_fit(self, freq, resp, peak_freq, delta_freq,
make_plot=False, plot_chans=[], save_plot=True,
band=None, timestamp=None, res_num=None,
use_slow_eta=False):
"""
Cyndia's eta finding code.
Args
----
freq : float array
The frequency data.
resp : float array
The response data.
peak_freq : float
The frequency of the resonance peak.
delta_freq : float
The width of frequency to calculate values.
make_plot : bool, optional, default False
Whether to make plots.
plot_chans : int array, optional, default []
The channels to plot. If an empty array, it will make
plots for all channels.
save_plot : bool, optional, default True
Whether to save plots.
band : int or None, optional, default None
Only used for plotting - the band number of the resontaor.
timestamp : str or None, optional, default None
The timestamp of the data.
res_num : int or None, optional, default None
The resonator number.
Returns
-------
eta : complex
The eta parameter.
eta_scaled : complex
The eta parameter divided by subband_half_width.
eta_phase_deg : float
The angle to rotate IQ circle.
r2 : float
The R^2 value compared to the resonator fit.
eta_mag : float
The amplitude of eta.
latency : float
The delay.
Q : float
The resonator quality factor.
"""
if band is None:
# assume all bands have the same number of channels, and
# pull the number of channels from the first band in the
# list of bands specified in experiment.cfg.
band = self._bands[0]
n_subbands = self.get_number_sub_bands(band)
digitizer_frequency_mhz = self.get_digitizer_frequency_mhz(band)
subband_half_width = digitizer_frequency_mhz/\
n_subbands
if timestamp is None:
timestamp = self.get_timestamp()
amp = np.abs(resp)
try:
left = np.where(freq < peak_freq - delta_freq)[0][-1]
except IndexError:
left = 0
try:
left_plot = np.where(freq < peak_freq - 5*delta_freq)[0][-1]
except IndexError:
left = 0
right = np.where(freq > peak_freq + delta_freq)[0][0]
right_plot = np.where(freq > peak_freq + 5*delta_freq)[0][0]
eta = (freq[right] - freq[left]) / (resp[right] - resp[left])
if use_slow_eta:
band_center = self.get_band_center_mhz(band)
f_sweep_half = 0.3
df_sweep = 0.002
subband, offset = self.freq_to_subband(band, peak_freq*1e-6+band_center)
f_sweep = np.arange(offset - f_sweep_half, offset + f_sweep_half, df_sweep)
f, resp = self.fast_eta_scan(band, subband, f_sweep, 2, tone_power=12)
f_slow, resp_slow, eta_slow = self.eta_estimator(band, subband, f, resp)
# Get eta parameters
latency = (np.unwrap(np.angle(resp))[-1] -
np.unwrap(np.angle(resp))[0]) / (freq[-1] - freq[0])/2/np.pi
eta_mag = np.abs(eta)
eta_angle = np.angle(eta)
eta_scaled = eta_mag / subband_half_width
eta_phase_deg = eta_angle * 180 / np.pi
if left != right:
sk_fit = tools.fit_skewed_lorentzian(freq[left_plot:right_plot],
amp[left_plot:right_plot])
r2 = np.sum((amp[left_plot:right_plot] -
tools.skewed_lorentzian(freq[left_plot:right_plot],
*sk_fit))**2)
Q = sk_fit[5]
else:
r2 = np.nan
Q = np.nan
if make_plot:
if len(plot_chans) == 0:
self.log('Making plot for band' +
f' {band} res {res_num:03}')
self.plot_eta_fit(freq[left_plot:right_plot],
resp[left_plot:right_plot],
eta=eta, eta_mag=eta_mag, r2=r2,
save_plot=save_plot, timestamp=timestamp, band=band,
res_num=res_num, sk_fit=sk_fit, f_slow=f_slow, resp_slow=resp_slow)
else:
if res_num in plot_chans:
self.log(
'Making plot for band ' +
f'{band} res {res_num:03}')
self.plot_eta_fit(freq[left_plot:right_plot],
resp[left_plot:right_plot],
eta=eta, eta_mag=eta_mag, eta_phase_deg=eta_phase_deg,
r2=r2, save_plot=save_plot, timestamp=timestamp,
band=band, res_num=res_num, sk_fit=sk_fit,
f_slow=f_slow, resp_slow=resp_slow)
return eta, eta_scaled, eta_phase_deg, r2, eta_mag, latency, Q
@set_action()
def plot_eta_fit(self, freq, resp, eta=None, eta_mag=None, peak_freq=None,
eta_phase_deg=None, r2=None, save_plot=True, plotname_append='',
show_plot=False, timestamp=None, res_num=None, band=None,
sk_fit=None, f_slow=None, resp_slow=None, channel=None):
"""
Plots the eta parameter fits. This is called by self.eta_fit or
plot_tune_summary.
Args
----
freq : float array
The frequency data.
resp : complex array
The response data.
eta : complex or None, optional, default None
The eta parameter.
eta_mag : complex or None, optional, default None
The amplitude of the eta parameter.
eta_phase_deg : float or None, optional, default None
The angle of the eta parameter in degrees.
r2 : float or None, optional, default None
The R^2 value.
save_plot : bool, optional, default True
Whether to save the plot.
plotname_append : str, optional, default ''
Appended to the default plot filename.
timestamp : str or None, optional, default None
The timestamp to name the file.
res_num : int or None, optional, default None
The resonator number to label the plot.
band : int or None, optional, default None
The band number to label the plot.
sk_fit : float array or None, optional, default None
The fit parameters for the skewed lorentzian.
"""
if timestamp is None:
timestamp = self.get_timestamp()
if show_plot:
plt.ion()
else:
plt.ioff()
I = np.real(resp)
Q = np.imag(resp)
amp = np.sqrt(I**2 + Q**2)
phase = np.unwrap(np.arctan2(Q, I)) # radians
if peak_freq is not None:
plot_freq = freq - peak_freq
else:
plot_freq = freq
plot_freq = plot_freq * 1.0E3
fig = plt.figure(figsize=(9,4.5))
gs=GridSpec(2,3)
ax0 = fig.add_subplot(gs[0,0])
ax1 = fig.add_subplot(gs[1,0], sharex=ax0)
ax2 = fig.add_subplot(gs[:,1:])
ax0.plot(plot_freq, I, label='I', linestyle=':', color='k')
ax0.plot(plot_freq, Q, label='Q', linestyle='--', color='k')
ax0.scatter(plot_freq, amp, c=np.arange(len(freq)), s=3,
label='amp')
zero_idx = np.ravel(np.where(plot_freq == 0))[0]
ax0.plot(plot_freq[zero_idx], amp[zero_idx], 'x', color='r')
if | |
form=BlobEditForm(request.POST)
#
metadata = DMetadata.load_by_uuid(uuid=UUID, coldoc=coldoc)
env = metadata.environ
from ColDoc.utils import tree_environ_helper
teh = tree_environ_helper(blobs_dir = blobs_dir)
## https://docs.djangoproject.com/en/dev/topics/forms/
a = teh.list_allowed_choices(env)
form.fields['split_environment'].choices = a
if not a:
form.fields['split_environment'].required = False
#
if not form.is_valid():
a = "Invalid form: "+repr(form.errors)
if 'save_no_reload' in request.POST:
return JsonResponse({"message":a})
return HttpResponse(a,status=http.HTTPStatus.BAD_REQUEST)
prologue = form.cleaned_data['prologue']
# convert to UNIX line ending
blobcontent = re.sub("\r\n", '\n', form.cleaned_data['blobcontent'] )
blobeditarea = re.sub("\r\n", '\n', form.cleaned_data['BlobEditTextarea'] )
uuid_ = form.cleaned_data['UUID']
nick_ = form.cleaned_data['NICK']
lang_ = form.cleaned_data['lang']
ext_ = form.cleaned_data['ext']
file_md5 = form.cleaned_data['file_md5']
split_selection_ = form.cleaned_data['split_selection']
split_environment_ = form.cleaned_data['split_environment']
selection_start_ = int(form.cleaned_data['selection_start'])
selection_end_ = int(form.cleaned_data['selection_end'])
split_add_beginend_ = form.cleaned_data['split_add_beginend']
assert UUID == uuid_ and NICK == nick_
#
filename, uuid, metadata, lang, ext = \
ColDoc.utils.choose_blob(uuid=UUID, blobs_dir = blobs_dir,
metadata = metadata,
ext = ext_, lang = lang_,
metadata_class=DMetadata, coldoc=NICK)
env = metadata.environ
#
request.user.associate_coldoc_blob_for_has_perm(metadata.coldoc, metadata)
can_change_blob = request.user.has_perm('UUID.change_blob')
can_add_blob = request.user.has_perm('ColDocApp.add_blob')
if not can_change_blob and not can_add_blob:
logger.error('Hacking attempt %r',request.META)
raise SuspiciousOperation("Permission denied")
#
if split_selection_ and not can_add_blob:
logger.error('Hacking attempt %r',request.META)
#messages.add_message(request,messages.WARNING,'No permission to split selection')
#split_selection_ = False
raise SuspiciousOperation("Permission denied (add_blob)")
#
real_file_md5 = hashlib.md5(open(filename,'rb').read()).hexdigest()
if file_md5 != real_file_md5 and 'compile' in request.POST:
a = "The file was changed on disk: compile aborted"
messages.add_message(request,messages.ERROR, a)
return redirect(django.urls.reverse('UUID:index', kwargs={'NICK':NICK,'UUID':UUID}) + '?lang=%s&ext=%s'%(lang_,ext_) + '#blob')
# put back prologue in place
blobcontent, newprologue, sources , weird_prologue = _put_back_prologue(prologue, blobeditarea, env, UUID)
form.cleaned_data['blobcontent'] = blobcontent
# some checks
try:
a = json.loads(prologue)
shortprologue, prologue = a
except:
shortprologue = None
logger.exception('cannot json decode %r', prologue)
weird_prologue.append('Internal JSON error')
if split_selection_:
if shortprologue is None:
weird_prologue.append('Cannot split material when there are internal errors')
split_selection_ = False
elif weird_prologue:
weird_prologue.append('Cannot split material when there are header errors')
split_selection_ = False
elif shortprologue and not blobeditarea.startswith(shortprologue + '\n'):
weird_prologue.append('Sorry, cannot split material when the first line was changed')
split_selection_ = False
else:
displacement = len(prologue) - len(shortprologue)
selection_start_ = max(selection_start_ + displacement, 0)
selection_end_ = max(selection_end_ + displacement, selection_end_)
#
for wp in weird_prologue:
logger.warning(' in %r %s', UUID, wp)
# save state of edit form
if can_change_blob:
user_id = str(request.user.id)
file_editstate = filename[:-4] + '_' + user_id + '_editstate.json'
json.dump(form.cleaned_data, open(file_editstate,'w'))
#
a = '' if ( file_md5 == real_file_md5 ) else "The file was changed on disk: check the diff"
if 'save_no_reload' in request.POST:
H = difflib.HtmlDiff()
blobdiff = H.make_table(open(filename).readlines(),
blobcontent.split('\n'),
'Orig','New', True)
for wp in weird_prologue:
a += '\n' + wp
return JsonResponse({"message":a, 'blobdiff':blobdiff, 'blob_md5': real_file_md5})
for wp in weird_prologue:
messages.add_message(request,messages.WARNING, wp)
if 'save' in request.POST:
messages.add_message(request,messages.INFO,'Saved')
if a:
messages.add_message(request,messages.WARNING, a)
return redirect(django.urls.reverse('UUID:index', kwargs={'NICK':NICK,'UUID':UUID}) + '?lang=%s&ext=%s'%(lang_,ext_) + '#blob')
# diff
file_lines_before = open(filename).readlines()
shutil.copy(filename, filename+'~~')
#
uuid_as_html = '<a href="%s">%s</a>' %(
request.build_absolute_uri(django.urls.reverse('UUID:index', kwargs={'NICK':coldoc.nickname,'UUID':uuid})), uuid)
# write new content
if can_change_blob:
open(filename,'w').write(blobcontent)
metadata.blob_modification_time_update()
if sources is not None:
metadata.optarg = json.dumps(sources)
metadata.save()
else:
pass # may want to check that form was not changed...
# TODO we should have two copies, for html and for text form of each message
all_messages = []
#
from ColDoc.latex import environments_we_wont_latex
from ColDoc.utils import reparse_blob
#
if split_selection_:
from helper import add_blob
addsuccess, addmessage, addnew_uuid = \
add_blob(logger, request.user, settings.COLDOC_SITE_ROOT, nick_, uuid_,
split_environment_, lang_, selection_start_ , selection_end_, split_add_beginend_)
if addsuccess:
new_uuid_as_html = '<a href="%s">%s</a>' %(
request.build_absolute_uri(django.urls.reverse('UUID:index', kwargs={'NICK':coldoc.nickname,'UUID':addnew_uuid})),
addnew_uuid)
addmessage = ("Created blob with UUID %s, please edit %s to properly input it (a stub \\input was inserted for your convenience)"%
(new_uuid_as_html, uuid_as_html))
messages.add_message(request,messages.INFO,addmessage)
addmetadata = DMetadata.load_by_uuid(uuid=addnew_uuid,coldoc=coldoc)
add_extension = addmetadata.get('extension')
else:
add_extension = []
messages.add_message(request,messages.WARNING,addmessage)
all_messages.append(addmessage)
if '.tex' in add_extension:
addfilename, adduuid, addmetadata, addlang, addext = \
ColDoc.utils.choose_blob(uuid=addnew_uuid, blobs_dir = blobs_dir,
ext = ext_, lang = lang_,
metadata_class=DMetadata, coldoc=NICK)
# parse it for metadata
def warn(msg):
all_messages.append('Metadata change in new blob: '+msg)
messages.add_message(request,messages.INFO,'In new blob: '+msg)
reparse_blob(addfilename, addmetadata, blobs_dir, warn)
# compile it
if split_environment_ not in environments_we_wont_latex:
rh, rp = _latex_blob(request, coldoc_dir, blobs_dir, coldoc, lang, addmetadata)
if rh and rp:
a = 'Compilation of new blob succeded'
messages.add_message(request,messages.INFO,a)
else:
a = 'Compilation of new blob failed'
messages.add_message(request,messages.WARNING,a)
all_messages.append(a)
#
# parse it to refresh metadata (after splitting)
def warn(msg):
all_messages.append('Metadata change in blob: '+msg)
messages.add_message(request,messages.INFO,msg)
reparse_blob(filename, metadata, blobs_dir, warn)
#
if ext_ == '.tex' and metadata.environ not in environments_we_wont_latex:
rh, rp = _latex_blob(request, coldoc_dir, blobs_dir, coldoc, lang, metadata)
if rh and rp:
a = 'Compilation of LaTeX succeded'
messages.add_message(request,messages.INFO,a)
else:
a = 'Compilation of LaTeX failed'
messages.add_message(request,messages.WARNING,a)
all_messages.append(a)
logger.info('ip=%r user=%r coldoc=%r uuid=%r ',
request.META.get('REMOTE_ADDR'), request.user.username, NICK, UUID)
email_to = _interested_emails(coldoc,metadata)
#
if not email_to:
logger.warning('No author has a validated email %r', metadata)
else:
a = "User '%s' changed %s - %s" % (request.user , metadata.coldoc.nickname, metadata.uuid)
r = get_email_for_user(request.user)
if r is not None: r = [r]
E = EmailMultiAlternatives(subject = a,
from_email = settings.DEFAULT_FROM_EMAIL,
to= email_to,
reply_to = r)
# html version
H = difflib.HtmlDiff()
file_lines_after = open(filename).readlines()
blobdiff = H.make_file(file_lines_before,
file_lines_after,
'Orig','New', True)
try:
j = blobdiff.index('<body>') + 6
blobdiff = blobdiff[:j] + '<ul><li>' + '\n<li>'.join(all_messages) + \
'</ul>\n<h1>File differences for ' + uuid_as_html + '</h1>\n' + blobdiff[j:]
except:
logger.exception('While preparing ')
else:
E.attach_alternative(blobdiff, 'text/html')
# text version
try:
all_messages = map(html2text, all_messages)
except:
logger.exception('While preparing ')
P = subprocess.run(['diff', '-u', filename + '~~', filename, ], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
check=False, universal_newlines=True )
message = '*) ' + '\n*) '.join(all_messages) + '\n\n*** File differences ***\n\n' + P.stdout
E.attach_alternative(message, 'text/plain')
# send it
try:
E.send()
except:
logger.exception('email failed')
# re-save form data, to account for possible splitting
if can_change_blob:
D = {}
D['file_md5'] = hashlib.md5(open(filename,'rb').read()).hexdigest()
D['blobcontent'] = open(filename).read()
D['split_selection'] = False
D['selection_start'] = str(selection_start_)
D['selection_end'] = str(selection_start_)
json.dump(D, open(file_editstate,'w'))
#
return redirect(django.urls.reverse('UUID:index', kwargs={'NICK':NICK,'UUID':UUID}) + '?lang=%s&ext=%s'%(lang_,ext_) + '#blob')
def postmetadataedit(request, NICK, UUID):
    """
    Handle POST of the metadata edit form for blob `UUID` in coldoc `NICK`.

    Checks permissions, validates the form, saves the metadata, recompiles
    the blob's LaTeX when appropriate, and emails a diff of the metadata
    backup file to interested authors. Always redirects back to the blob
    index page on success.
    """
    # Only POST is meaningful here; anything else bounces to the index page.
    if request.method != 'POST' :
        return redirect(django.urls.reverse('UUID:index', kwargs={'NICK':NICK,'UUID':UUID}))
    #
    coldoc, coldoc_dir, blobs_dir = common_checks(request, NICK, UUID)
    #
    from ColDoc.utils import tree_environ_helper
    teh = tree_environ_helper(blobs_dir = blobs_dir)
    #
    uuid, uuid_dir, metadata = ColDoc.utils.resolve_uuid(uuid=UUID, uuid_dir=None,
                                                       blobs_dir = blobs_dir, coldoc = NICK,
                                                       metadata_class=DMetadata)
    #
    request.user.associate_coldoc_blob_for_has_perm(metadata.coldoc, metadata)
    if not request.user.has_perm('UUID.change_dmetadata'):
        # Bug fix: the format placeholder was missing, which made the
        # logging call itself fail to format ('Hacking attempt',request.META).
        logger.error('Hacking attempt %r', request.META)
        raise SuspiciousOperation("Permission denied")
    #
    # Snapshot the metadata backup file before saving, both as lines (for the
    # HTML diff) and as a '~~' copy on disk (for the text diff below).
    baF = metadata.backup_filename()
    before = open(baF).readlines()
    shutil.copy(baF, baF+'~~')
    #
    form=MetadataForm(request.POST, instance=metadata)
    #
    # Restrict the 'environ' choices to what the parent environment allows;
    # a blob without a parent can only be a main_file.
    j = metadata.get('parent_uuid')
    if j:
        parent_metadata = DMetadata.load_by_uuid(j[0], metadata.coldoc)
        choices = teh.list_allowed_choices(parent_metadata.environ)
    else:
        choices = [('main_file','main_file',)] # == teh.list_allowed_choices(False)
    # useless
    form.fields['environ'].choices = choices
    # useful
    form.fields['environ'].widget.choices = choices
    #
    if not form.is_valid():
        return HttpResponse("Invalid form: "+repr(form.errors),status=http.HTTPStatus.BAD_REQUEST)
    #
    uuid_ = form.cleaned_data['uuid_']
    lang_ = form.cleaned_data['lang_']
    ext_ = form.cleaned_data['ext_']
    environ_ = form.cleaned_data['environ']
    # The UUID in the form must match the URL; a mismatch means tampering.
    if uuid != uuid_ :
        logger.error('Hacking attempt %r',request.META)
        raise SuspiciousOperation('UUID Mismatch')
    if ext_ not in metadata.get('extension') :
        messages.add_message(request,messages.WARNING,'Internal problem, check the metadata again %r != %r' %([ext_], metadata.extension))
    #
    form.save()
    messages.add_message(request,messages.INFO,'Changes saved')
    #
    # Recompile the blob so the new metadata is reflected in the output.
    from ColDoc.latex import environments_we_wont_latex
    if ext_ =='.tex' and metadata.environ not in environments_we_wont_latex:
        ret = _latex_uuid(request, coldoc_dir, blobs_dir, coldoc, uuid, metadata)
        if ret :
            messages.add_message(request,messages.INFO,'Compilation of LaTeX succeded')
        else:
            messages.add_message(request,messages.WARNING,'Compilation of LaTeX failed')
    logger.info('ip=%r user=%r coldoc=%r uuid=%r ',
                request.META.get('REMOTE_ADDR'), request.user.username, NICK, UUID)
    #
    # Notify authors with validated emails, attaching both a unified text
    # diff and an HTML diff of the metadata backup file.
    email_to = _interested_emails(coldoc,metadata)
    if not email_to:
        logger.warning('No author has a validated email %r', metadata)
    else:
        a = "User '%s' changed metadata in %s - %s" % (request.user , metadata.coldoc.nickname, metadata.uuid)
        r = get_email_for_user(request.user)
        P = subprocess.run(['diff', '-u', baF+'~~', baF, ], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           check=False, universal_newlines=True )
        message = P.stdout
        after = open(metadata.backup_filename()).readlines()
        H = difflib.HtmlDiff()
        html_message = H.make_file(before, after, 'Orig','New', True)
        if r is not None: r = [r]
        E = EmailMultiAlternatives(subject = a,
                                   from_email = settings.DEFAULT_FROM_EMAIL,
                                   to= email_to, reply_to = r)
        E.attach_alternative(message, 'text/plain')
        E.attach_alternative(html_message, 'text/html')
        try:
            E.send()
        except:
            # Best effort: a failed notification must not fail the edit.
            logger.exception('email failed')
    #
    return redirect(django.urls.reverse('UUID:index', kwargs={'NICK':NICK,'UUID':UUID}) + '?lang=%s&ext=%s'%(lang_,ext_))
def _prepare_latex_options(request, coldoc_dir, blobs_dir, coldoc):
    """Build the options dict consumed by the LaTeX compilation helpers."""
    from ColDoc.latex import prepare_options_for_latex
    options = prepare_options_for_latex(coldoc_dir, blobs_dir, DMetadata, coldoc)
    #
    # Absolute URL prefix of UUID pages: reverse with a placeholder UUID and
    # strip its four characters. Used when generating PDF hyperlinks.
    base_url = django.urls.reverse('UUID:index', kwargs={'NICK':coldoc.nickname,'UUID':'000'})[:-4]
    options['url_UUID'] = request.build_absolute_uri(base_url)
    #
    from ColDocDjango.transform import squash_helper_ref
    def _make_squash_helper(*v, **k):
        " helper factory"
        return squash_helper_ref(coldoc, *v, **k)
    options["squash_helper"] = _make_squash_helper
    options['metadata_class'] = DMetadata
    # used by the deduping mechanism
    options['coldoc_site_root'] = settings.COLDOC_SITE_ROOT
    options['dedup_root'] = settings.DEDUP_ROOT
    options['dedup_url'] = settings.DEDUP_URL
    return options
def _latex_blob(request, coldoc_dir, blobs_dir, coldoc, lang, metadata):
options = _prepare_latex_options(request, coldoc_dir, blobs_dir, coldoc)
from ColDoc import latex
return | |
from lib.nodes import RootNode, TextNode, PartialNode, SectionNode, Node
from lib.misc import TYPES, type_of, evalf, format_value, is_array
from lib.directives import DIRECTIVES, SYMBOLS
from lib.template import Template
from lib.domain import Domain
import json
# Default render options; per-call options passed to Interface.render are
# overlaid on top of these (see Interface._parse_options).
DEFAULT = {
    "error_on_func_failure": False,
    "eval_zero_as_true": False,
    "escape_all": False,
    "error_on_missing_tags": False
}
class Result:
    """Outcome of resolving a template node against a data domain."""

    def __init__(self):
        # Fully-qualified key of the resolved binding, if any.
        self.fullkey = ""
        # Whether the value came from dynamic (function/repeating) data.
        self.isdynamic = False
        # Whether the value is a repeating (array-like) context.
        self.isrepeating = False
        # Number of repetitions for repeating contexts.
        self.length = 0
        # Resolved node domain, function domain, and computed value.
        self.node = None
        self.value = None
        self.func = None

    def __len__(self):
        return self.length

    def get_domain(self):
        """Return the data domain for this result, creating a dynamic
        sub-domain when the value came from a function."""
        if self.isrepeating:
            return (self.node.dynamic.create(self.func.fullkey, self.value)
                    if self.func else self.node)
        if not self.isdynamic:
            return self.node
        return self.node.dynamic.create(self.func.fullkey, self.value)
class Interface:
def __init__(self, template, options=None):
self._parse_options(options)
self._template = template
self._root = None
self._partials = {}
self._options = {}
self._errorHandler = None
self._spawn_error_handler = None
    # Read-only views of the current option flags; the matching setters
    # below ignore None so re-parsing options never erases a flag that was
    # not supplied.

    @property
    def error_on_func_failure(self):
        # When True, exceptions raised by binding functions propagate.
        return self._error_on_func_failure

    @property
    def eval_zero_as_true(self):
        # When True, numeric 0 counts as truthy in section display checks.
        return self._eval_zero_as_true

    @property
    def escape_all(self):
        # When True, all rendered values are escaped by default.
        return self._escape_all

    @property
    def error_on_missing_tags(self):
        # When True, unresolved tags/partials raise instead of rendering "".
        return self._error_on_missing_tags

    @error_on_func_failure.setter
    def error_on_func_failure(self, to):
        if to is not None:
            self._error_on_func_failure = bool(to)

    @eval_zero_as_true.setter
    def eval_zero_as_true(self, to):
        if to is not None:
            self._eval_zero_as_true = bool(to)

    @escape_all.setter
    def escape_all(self, to):
        if to is not None:
            self._escape_all = bool(to)

    @error_on_missing_tags.setter
    def error_on_missing_tags(self, to):
        if to is not None:
            self._error_on_missing_tags = bool(to)
def _parse_options(self, options=None):
if not options:
options = {}
self.error_on_func_failure = options["error_on_func_failure"] if "error_on_func_failure" in options \
else DEFAULT["error_on_func_failure"]
self.eval_zero_as_true = options["eval_zero_as_true" ] if "eval_zero_as_true" in options \
else DEFAULT["eval_zero_as_true"]
self.escape_all = options["escape_all"] if "escape_all" in options \
else DEFAULT["escape_all"]
self.error_on_missing_tags = options["error_on_missing_tags"] if "error_on_missing_tags" in options \
else DEFAULT["error_on_missing_tags"]
# snapshot options
self._options = {
"error_on_func_failure": self.error_on_func_failure,
"eval_zero_as_true": self.eval_zero_as_true,
"escape_all": self.escape_all,
"error_on_missing_tags": self.error_on_missing_tags
}
def _missing_handler(self, key, throw_error=False):
if throw_error or self._error_on_missing_tags:
raise Exception("Render error: missing binding for {0}".format(key))
return ""
def _error_handler_inner(self, key, exception):
if self.error_on_func_failure:
raise exception
print("Error evaluating bindings at {0}".format(key))
print(exception)
return ""
    def render(self, bindings, options=None):
        """
        Render the template against `bindings`.

        Args
        ----
        bindings : Domain or dict-like
            Data to bind. A Domain is re-rooted; anything else is wrapped
            in a new Domain.
        options : dict or None
            Per-call options; may also carry "partials" mapping partial
            names to Template instances or template strings.

        Returns
        -------
        str
            The rendered output.
        """
        self._parse_options(options)
        # Per-key error handler factory is only installed when failures
        # should propagate (handlers built by it re-raise via
        # _error_handler_inner).
        if self.error_on_func_failure:
            self._spawn_error_handler = lambda key : lambda exception : self._error_handler_inner(key, exception)
        try:
            if isinstance(bindings, Domain):
                self._root = bindings.reroot()
            else:
                self._root = Domain(bindings)
            # map partials
            self._partials = {}
            if options and "partials" in options and options["partials"]:
                self._partials = options["partials"]
            # Compile string partials into Templates; reject anything else.
            for pkey, partial in self._partials.items():
                if isinstance(partial, str):
                    try:
                        self._partials[pkey] = Template(partial)
                    except Exception as e:
                        # NOTE(review): this message is built but never used
                        # (bare expression statement) -- probably meant to be
                        # printed or attached to the re-raised exception.
                        "Invalid partial template for '{0}'".format(pkey)
                        raise e
                elif not isinstance(partial, Template):
                    raise Exception("Invalid partial: must be instance of Template or template string ('{0}' is {1})".format(pkey, type(partial)))
            # Two-pass render: outside-in resolves what it can, inside-out
            # finishes nodes deferred into dynamic contexts.
            return self._render_inside_out(self._render_outside_in(self._template.root))
        finally:
            # clean up references and temporary variables
            self._root = None
            self._partials = {}
            self._spawn_error_handler = None
    def _process_context(self, node, domain, dynamics=None):
        """
        Resolve `node` (and its optional function) against `domain`,
        consulting the stack of dynamic domains first for non-in-context
        nodes.

        Returns a Result describing the resolved value/domain, or None when
        the node's binding cannot be found (caller defers processing).
        Raises when a context is piped into an unresolved or non-function
        binding.
        """
        # Build the per-node error handler only if a factory was installed.
        on_func_error = self._spawn_error_handler(node.raw) if self._spawn_error_handler else None

        def search(snode):
            # Innermost dynamic domain that contains the key wins; fall back
            # to the supplied domain otherwise.
            if not snode.incontext and dynamics and len(dynamics):
                for dy in reversed(dynamics):
                    if dy.incontext(snode.key):
                        return dy.search(snode, on_func_error)
            return domain.search(snode, on_func_error)

        result = Result()
        # get domain of node
        result.node = search(node)
        if not result.node:
            return None
        if not node.func:
            # Plain binding: take its value and repetition info directly.
            result.value = result.node.value(on_func_error)
            result.isdynamic = result.isrepeating = result.node.isrepeating
            result.length = len(result.node.dynamic)
        else:
            # get domain of function
            result.func = search(node.func)
            result.isdynamic = True
            if not result.func:
                raise Exception("Context passed to unresolved function at {0}".format(node.raw))
            if not result.func.function:
                raise Exception("Context passed to non-function at {0}".format(node.raw))
            # Evaluate the function with the node's value and the root data.
            result.value = evalf(
                result.func.function,
                result.node.value(on_func_error),
                self._root.data,
                on_func_error
            )
            if is_array(result.value):
                result.isrepeating = True
                result.length = len(result.value)
        return result
    def _render_outside_in(self, root, domain=None, processed=None, unresolved=None):
        """
        First render pass: walk `root`'s children top-down, rendering every
        node whose binding is already resolvable and appending the rest
        unchanged to `processed` for the inside-out pass.

        Args
        ----
        root : node with an `inner` list of child nodes.
        domain : data domain to resolve against (defaults to the root).
        processed : accumulator node (defaults to a fresh RootNode).
        unresolved : stack of dynamic domains that cannot be resolved yet.

        Returns the `processed` accumulator.
        """
        domain = domain if domain else self._root
        processed = processed if processed else RootNode()
        unresolved = unresolved if unresolved else []
        for node in root.inner:
            # skip comments (shouldn't exist but just in case)
            if node.directive == DIRECTIVES.COMMENT:
                continue
            # text doesn't need processing
            if isinstance(node, TextNode):
                processed.inner.append(node)
                continue
            # render partial as sub-render with passed data domain and duplicate options
            if isinstance(node, PartialNode):
                processed.inner.append(self._partial(node, domain))
                continue
            # handling nodes in an unresolved context, some exceptions for sections and lists
            # (precedence: `(node.func and node.func.incontext) or node.incontext`)
            if domain.isrepeating and (node.func and node.func.incontext or node.incontext):
                processed.inner.append(node)
                continue
            for_section = isinstance(node, SectionNode)
            check_node = not for_section and node.directive != DIRECTIVES.LIST
            # Defer any node whose key (or function key) lives inside an
            # unresolved dynamic domain.
            if len(unresolved) and check_node or node.func:
                cant_resolve = False
                for u in unresolved:
                    if check_node and u.incontext(node.key):
                        cant_resolve = True
                    elif node.func and u.incontext(node.func.key):
                        cant_resolve = True
                if cant_resolve:
                    processed.inner.append(node)
                    continue
            # get data context -- if null, likely due to nesting into dynamic data, so defer processing
            context = self._process_context(node, domain)
            if context is None:
                processed.inner.append(node)
                continue
            # render sections (handler split out, but basically recurses here)
            if for_section:
                self._section(node, context, processed, unresolved)
                continue
            # render straight values unless it depends on dynamic context (those defer till 2nd round)
            processed.inner.append(self._render_value(node, context.value))
        return processed
    def _render_inside_out(self, root, domain=None, dynamics=None):
        """
        Second render pass: resolve everything the outside-in pass deferred,
        recursing into sections first (innermost contexts render first) and
        then flattening all remaining nodes into the output string.

        Args
        ----
        root : accumulator produced by _render_outside_in.
        domain : data domain to resolve against (defaults to the root).
        dynamics : stack of dynamic domains currently in scope.

        Returns the rendered string for `root`.
        """
        domain = domain if domain else self._root
        dynamics = dynamics if dynamics else []
        processed = []
        for node in root.inner:
            # only handle sections for this first outside-in loop
            if not isinstance(node, SectionNode):
                processed.append(node)
                continue
            # get context, missing here is either skip or exception thrown
            context = self._process_context(node, domain, dynamics)
            if context is None:
                self._missing_handler(node.raw)
                continue
            # convert to dynamic domain, if necessary
            use_domain = context.get_domain()
            # standard section bound to context within a dynamic data domain
            if not context.isrepeating:
                if self._display(node.inclusive, use_domain):
                    processed.append(self._render_inside_out(node, use_domain, dynamics))
                continue
            # only thing left is repeating sections: render each repetition
            # with its own dynamic domain pushed onto the stack.
            pieces = []
            for i in range(context.length):
                dydom = use_domain.dynamic.get(i)
                dynamics.append(dydom)
                if self._display(True, dydom):
                    pieces.append(self._render_inside_out(node, dydom, dynamics))
                dynamics.pop(-1)
            # either just add nodes to processed or convert to grammatic list
            # (e.g. "a, b, and c" for list-directive sections)
            if not node.list:
                processed += pieces
            else:
                plen = len(pieces)
                if plen == 0:
                    pass
                elif plen == 1:
                    processed.append(pieces[0])
                elif plen == 2:
                    processed.append("{0} and {1}".format(pieces[0], pieces[1]))
                else:
                    last = pieces.pop(-1)
                    processed.append("{0}, and {1}".format(", ".join(pieces), last))
        # this part will run from inner-most out on all remaining nodes
        text = ""
        for node in processed:
            if isinstance(node, TextNode):
                text += node.text
            elif isinstance(node, str):
                text += node
            elif not isinstance(node, Node):
                # already-rendered non-string values
                text += str(node)
            else:
                # leftover tag nodes: resolve now or invoke missing handling
                context = self._process_context(node, domain, dynamics)
                if context is None:
                    text += self._missing_handler(node.raw)
                else:
                    text += self._render_value(node, context.value)
        return text
    def _section(self, node, context, processed, unresolved):
        """
        Handle a section node during the outside-in pass.

        Repeating sections are partially processed and deferred; standard
        sections recurse immediately when their display check passes.
        """
        # Repeating sections recurse inner content to process any non-dynamic referencing tags, but also add
        # node to processing array for final processing in inside-out rendering.
        if context.isrepeating:
            if node.inclusive and context.length:
                # Copy section node and replace any in-context shortcuts with full path as it will be handled
                # later, potentially out of context.
                dynode = SectionNode(node, None)
                if dynode.incontext:
                    dynode.key = context.node.fullkey
                    dynode.incontext = False
                    dynode._finish()
                if dynode.func and dynode.func.incontext:
                    dynode.func.key = context.func.fullkey
                    dynode.func.incontext = False
                    dynode.func._finish()
                domain = context.get_domain()
                # Add to unresolved domains, recurse, pop unresolved domain, add to processing
                # (dynode acts as the `processed` accumulator for the recursion).
                unresolved.append(domain)
                self._render_outside_in(node, domain, dynode, unresolved)
                unresolved.pop(-1)
                processed.inner.append(dynode)
        # Standard sections simple recurse inner content to render. Only thing is checking for creation of
        # dynamic data context first.
        else:
            domain = context.get_domain()
            if self._display(node.inclusive, domain):
                self._render_outside_in(node, domain, processed, unresolved)
def _display(self, inclusive, domain):
display = domain.value()
if domain.type == TYPES.OBJECT:
_display = domain.get("_display")
if _display is not None:
return _display.value()
elif domain.type == TYPES.ARRAY and not len(display):
# discrepancy from javascript where empty arrays are still truthy
display = True
else:
if isinstance(display, str):
display = display.strip()
elif isinstance(display, (int, float)):
display = display if display != 0 else self.eval_zero_as_true
return inclusive == bool(display)
def _partial(self, node, context):
if not self._partials[node.key]:
if self.error_on_missing_tags:
raise Exception("Render error: missing partial for {0}".format(node.key))
print("Render error: missing partial for {0}".format(node.key))
return ""
try:
return self._partials[node.key].render(
context if node.incontext else self._root,
self._options
)
except Exception as e:
print("Partial render error for {0}".format(node.key))
print(e)
return ""
def _render_value(self, node, value):
nformat = node.format
vtype = type_of(value)
if vtype <= TYPES.NULL:
return ""
# format list (unless not array, then normal handling)
if node.directive == DIRECTIVES.LIST and vtype == TYPES.ARRAY:
value = [
str(vi) if is_array(vi) else
format_value(vi, nformat, node.escape if node.escape else self.escape_all)
for vi in value
]
vlen = len(value)
if vlen == 0:
return ""
if vlen == 1:
return value[0]
if vlen == 2:
return "{0} and {1}".format(value[0], value[1])
else:
last = value.pop(-1)
return "{0}, and {1}".format(", ".join(value), last)
# other non-value types, convert to string
if vtype == | |
# Repository: Daymorn/StealthUO-Scripts
from __future__ import division
import datetime as _datetime
import struct as _struct
import time as _time
from os import linesep as _linesep
from ._datatypes import *
from ._protocol import EVENTS_NAMES as _EVENTS_NAMES
from ._protocol import ScriptMethod as _ScriptMethod
from ._protocol import get_connection as _get_connection
from .utils import ddt2pdt as _ddt2pdt
from .utils import pdt2ddt as _pdt2ddt
from .utils import iterable as _iterable
# Remote-procedure bindings for event-callback management.
_clear_event_callback = _ScriptMethod(7)  # ClearEventProc
_clear_event_callback.argtypes = [_ubyte]  # EventIndex

_set_event_callback = _ScriptMethod(11)  # SetEventProc
_set_event_callback.argtypes = [_ubyte]  # EventIndex


def SetEventProc(EventName, Callback=None):
    """
    Register (or clear) the callback for the named Stealth event.

    Args:
        EventName: case-insensitive event name (looked up in the
            protocol's EVENTS_NAMES list).
        Callback: callable invoked when the event fires, or None to clear
            the server-side handler.

    Raises:
        ValueError: if EventName is not a known event.
    """
    conn = _get_connection()
    try:
        index = _EVENTS_NAMES.index(EventName.lower())
    except ValueError:
        raise ValueError('Unknown event "' + EventName + '".')
    # clear event
    if Callback is None:
        _clear_event_callback(index)
        # NOTE(review): the local callback slot is left untouched here
        # (the reset below is commented out) -- confirm whether it should
        # also be set to None.
        # conn.callbacks[index] = None
    # set event
    else:
        # Only notify the server the first time a callback is installed
        # for this event; afterwards just swap the local callable.
        if conn.callbacks[index] is None:
            _set_event_callback(index)
        conn.callbacks[index] = Callback
_connected = _ScriptMethod(9)  # GetConnectedStatus
_connected.restype = _bool


def Connected():
    """Return the connection status reported by Stealth (bool)."""
    return _connected()
_add_to_system_journal = _ScriptMethod(10)  # AddToSystemJournal
_add_to_system_journal.argtypes = [_str]  # Text


def AddToSystemJournal(*args, **kwargs):
    """Join all positional and keyword arguments into a single line and
    write it to the Stealth system journal. Accepts print-like 'sep' and
    'end' keyword options; remaining keywords are rendered as key=value."""
    sep = kwargs.pop('sep', ', ')
    end = kwargs.pop('end', '')
    s_args = sep.join([str(arg) for arg in args])
    s_kwargs = sep.join([str(k) + '=' + str(v) for k, v in kwargs.items()])
    # Join only the non-empty halves so the separator appears exactly when
    # both positional and keyword text are present.
    pieces = [part for part in (s_args, s_kwargs) if part]
    _add_to_system_journal(sep.join(pieces) + end)
_get_stealth_info = _ScriptMethod(12) # GetStealthInfo
_get_stealth_info.restype = _buffer # TAboutData
def GetStealthInfo():
    """Return a dict describing the running Stealth client build.

    Unpacks the TAboutData record: bytes 0-5 three ushort version
    components, 6-7 build number, 8-15 build date (double, Delphi
    TDateTime), 16-17 GIT revision number, remainder GIT revision
    string.
    """
    data = _get_stealth_info()
    result = dict()
    result['StealthVersion'] = _struct.unpack('3H', data[:6])
    result['Build'] = _struct.unpack('H', data[6:8])[0]
    result['BuildDate'] = _ddt2pdt(_struct.unpack('d', data[8:16])[0]) # Delphi TDateTime -> datetime
    result['GITRevNumber'] = _struct.unpack('H', data[16:18])[0]
    result['GITRevision'] = _str.from_buffer(data[18:]).value
    return result
# ---- connection and profile management: thin wrappers over Stealth RPCs ----
_connect = _ScriptMethod(45) # Connect
def Connect():
    _connect()
_disconnect = _ScriptMethod(46) # Disconnect
def Disconnect():
    _disconnect()
_set_pause_on_disc = _ScriptMethod(24) # SetPauseScriptOnDisconnectStatus
_set_pause_on_disc.argtypes = [_bool] # Value
def SetPauseScriptOnDisconnectStatus(Value):
    _set_pause_on_disc(Value)
_get_pause_on_disc = _ScriptMethod(23) # GetPauseScriptOnDisconnectStatus
_get_pause_on_disc.restype = _bool
def GetPauseScriptOnDisconnectStatus():
    return _get_pause_on_disc()
# "AR" = auto-reconnect, per the SetARStatus/GetARStatus RPC names.
_set_reconnector = _ScriptMethod(22) # SetARStatus
_set_reconnector.argtypes = [_bool] # Value
def SetARStatus(Value):
    _set_reconnector(Value)
_get_reconnector = _ScriptMethod(21) # GetARStatus
_get_reconnector.restype = _bool
def GetARStatus():
    return _get_reconnector()
_get_self_name = _ScriptMethod(19) # GetCharName
_get_self_name.restype = _str
def CharName():
    """Return the logged-in character's name."""
    return _get_self_name()
_change_profile = _ScriptMethod(20) # ChangeProfile
_change_profile.restype = _int
_change_profile.argtypes = [_str] # PName
def ChangeProfile(PName):
    """Switch to profile PName; returns the RPC's int result code."""
    return _change_profile(PName)
_change_profile_ex = _ScriptMethod(352) # ChangeProfileEx
_change_profile_ex.restype = _int
_change_profile_ex.argtypes = [_str, # PName
                               _str, # ShardName
                               _str] # CharName
def ChangeProfileEx(PName, ShardName, CharName):
    return _change_profile_ex(PName, ShardName, CharName)
_get_profile_name = _ScriptMethod(8) # ProfileName
_get_profile_name.restype = _str
def ProfileName():
    return _get_profile_name()
_get_self_id = _ScriptMethod(14) # GetSelfID
_get_self_id.restype = _uint
def Self():
    """Return the player character's object id (serial)."""
    return _get_self_id()
# ---- character attributes and "last ..." ids: thin RPC wrappers ----
_get_self_sex = _ScriptMethod(25) # GetSelfSex
_get_self_sex.restype = _ubyte
def Sex():
    # Raw byte code from the client; meaning of the values not defined here.
    return _get_self_sex()
_get_char_title = _ScriptMethod(26) # GetCharTitle
_get_char_title.restype = _str
def GetCharTitle():
    return _get_char_title()
_get_gold_count = _ScriptMethod(27) # GetSelfGold
_get_gold_count.restype = _ushort
def Gold():
    return _get_gold_count()
_get_armor_points = _ScriptMethod(28) # GetSelfArmor
_get_armor_points.restype = _ushort
def Armor():
    return _get_armor_points()
_get_weight = _ScriptMethod(29) # GetSelfWeight
_get_weight.restype = _ushort
def Weight():
    return _get_weight()
_get_max_weight = _ScriptMethod(30) # GetSelfMaxWeight
_get_max_weight.restype = _ushort
def MaxWeight():
    return _get_max_weight()
_get_world_number = _ScriptMethod(18) # GetWorldNum
_get_world_number.restype = _ubyte
def WorldNum():
    return _get_world_number()
_get_self_race = _ScriptMethod(31) # GetSelfRace
_get_self_race.restype = _ubyte
def Race():
    return _get_self_race()
_get_max_pets = _ScriptMethod(32) # GetSelfPetsMax
_get_max_pets.restype = _ubyte
def MaxPets():
    return _get_max_pets()
_get_pets_count = _ScriptMethod(33) # GetSelfPetsCurrent
_get_pets_count.restype = _ubyte
def PetsCurrent():
    return _get_pets_count()
_get_fire_resist = _ScriptMethod(34) # GetSelfFireResist
_get_fire_resist.restype = _ushort
def FireResist():
    return _get_fire_resist()
_get_cold_resist = _ScriptMethod(35) # GetSelfColdResist
_get_cold_resist.restype = _ushort
def ColdResist():
    return _get_cold_resist()
_get_poison_resist = _ScriptMethod(36) # GetSelfPoisonResist
_get_poison_resist.restype = _ushort
def PoisonResist():
    return _get_poison_resist()
_get_energy_resist = _ScriptMethod(37) # GetSelfEnergyResist
_get_energy_resist.restype = _ushort
def EnergyResist():
    return _get_energy_resist()
# Connection timestamps arrive as Delphi TDateTime doubles and are
# converted to Python datetimes via ddt2pdt.
_get_last_connection_time = _ScriptMethod(38) # GetConnectedTime
_get_last_connection_time.restype = _double
def ConnectedTime():
    return _ddt2pdt(_get_last_connection_time())
_get_last_disconnection_time = _ScriptMethod(39) # GetDisconnectedTime
_get_last_disconnection_time.restype = _double
def DisconnectedTime():
    return _ddt2pdt(_get_last_disconnection_time())
# "Last ..." helpers return object ids (serials) tracked by the client.
_get_last_opened_container = _ScriptMethod(40) # GetLastContainer
_get_last_opened_container.restype = _uint
def LastContainer():
    return _get_last_opened_container()
_get_last_targeted_object = _ScriptMethod(41) # GetLastTarget
_get_last_targeted_object.restype = _uint
def LastTarget():
    return _get_last_targeted_object()
_get_last_attacked_object = _ScriptMethod(42) # GetLastAttack
_get_last_attacked_object.restype = _uint
def LastAttack():
    return _get_last_attacked_object()
_get_last_status = _ScriptMethod(43) # GetLastStatus
_get_last_status.restype = _uint
def LastStatus():
    return _get_last_status()
_get_last_used_object = _ScriptMethod(44) # GetLastObject
_get_last_used_object.restype = _uint
def LastObject():
    return _get_last_used_object()
_get_buff_bar_info = _ScriptMethod(349) # GetBuffBarInfo
_get_buff_bar_info.restype = _buffer # TBuffBarInfo
def GetBuffBarInfo():
    """Return a list of dicts, one per active buff/debuff icon.

    Each dict has keys 'Attribute_ID', 'TimeStart' (converted from a
    Delphi TDateTime double to a Python datetime), 'Seconds',
    'ClilocID1' and 'ClilocID2'. The raw buffer is a one-byte record
    count followed by fixed-size '=HdHII' records.
    """
    result = []
    fmt = '=HdHII'
    size = _struct.calcsize(fmt)
    keys = ('Attribute_ID', 'TimeStart', 'Seconds', 'ClilocID1', 'ClilocID2')
    data = _get_buff_bar_info()
    # 'str is bytes' is the explicit Python 2 check (replaces the
    # obscure "b'' == ''" trick); on py2 the buffer must be coerced
    # to bytes before struct can read it.
    if str is bytes:
        data = bytes(data)
    count = _struct.unpack('B', data[:1])[0]
    data = data[1:]
    for i in range(count):
        # unpack_from reads in place instead of slicing a copy per record.
        values = _struct.unpack_from(fmt, data, i * size)
        buff = dict(zip(keys, values))
        buff['TimeStart'] = _ddt2pdt(buff['TimeStart'])
        result.append(buff)
    return result
# ---- shard / proxy info and root containers ----
_get_shard_name = _ScriptMethod(47) # GetShardName
_get_shard_name.restype = _str
def ShardName():
    return _get_shard_name()
_get_profile_shard_name = _ScriptMethod(343) # GetProfileShardName
_get_profile_shard_name.restype = _str
def ProfileShardName():
    return _get_profile_shard_name()
_get_proxy_ip = _ScriptMethod(60) # GetProxyIP
_get_proxy_ip.restype = _str
def ProxyIP():
    return _get_proxy_ip()
_get_proxy_port = _ScriptMethod(61) # GetProxyPort
_get_proxy_port.restype = _ushort
def ProxyPort():
    return _get_proxy_port()
_is_proxy_using = _ScriptMethod(62) # GetUseProxy
_is_proxy_using.restype = _bool
def UseProxy():
    return _is_proxy_using()
_get_backpack_id = _ScriptMethod(48) # GetBackpackID
_get_backpack_id.restype = _uint
def Backpack():
    """Return the object id of the character's backpack."""
    return _get_backpack_id()
def Ground():
    # Constant container id 0 -- used to address "the ground" in item
    # calls (presumably mirrors the Stealth API convention; confirm).
    return 0
# ---- primary stats; HP/MaxHP are aliases of Life/MaxLife ----
_get_char_strength = _ScriptMethod(49) # GetSelfStr
_get_char_strength.restype = _int
def Str():
    return _get_char_strength()
_get_char_intelligence = _ScriptMethod(50) # GetSelfInt
_get_char_intelligence.restype = _int
def Int():
    return _get_char_intelligence()
_get_char_dexterity = _ScriptMethod(51) # GetSelfDex
_get_char_dexterity.restype = _int
def Dex():
    return _get_char_dexterity()
_get_char_hp = _ScriptMethod(52) # GetSelfLife
_get_char_hp.restype = _int
def Life():
    return _get_char_hp()
def HP():
    # Alias of Life() for script convenience.
    return _get_char_hp()
_get_char_mana = _ScriptMethod(53) # GetSelfMana
_get_char_mana.restype = _int
def Mana():
    return _get_char_mana()
_get_char_stamina = _ScriptMethod(54) # GetSelfStam
_get_char_stamina.restype = _int
def Stam():
    return _get_char_stamina()
_get_char_max_hp = _ScriptMethod(55) # GetSelfMaxLife
_get_char_max_hp.restype = _int
def MaxLife():
    return _get_char_max_hp()
def MaxHP():
    # Alias of MaxLife() for script convenience.
    return _get_char_max_hp()
_get_char_max_mana = _ScriptMethod(56) # GetSelfMaxMana
_get_char_max_mana.restype = _int
def MaxMana():
    return _get_char_max_mana()
_get_char_max_stamina = _ScriptMethod(57) # GetMaxStam
_get_char_max_stamina.restype = _int
def MaxStam():
    return _get_char_max_stamina()
_get_char_luck = _ScriptMethod(58) # GetSelfLuck
_get_char_luck.restype = _int
def Luck():
    return _get_char_luck()
_get_extended_info = _ScriptMethod(59) # GetExtInfo
_get_extended_info.restype = _buffer # TExtendedInfo
def GetExtInfo():
    """Return the character's extended stats as a dict.

    Unpacks the fixed-layout TExtendedInfo record ('=HBH2B4Hh2Hi26H')
    into a dict; key order matches the record's field order. (The
    'DefensceChanceMax' spelling is kept as-is: it is a public key
    existing scripts rely on.)
    """
    keys = ('MaxWeight', 'Race', 'StatCap', 'PetsCurrent', 'PetsMax',
            'FireResist', 'ColdResist', 'PoisonResist', 'EnergyResist',
            'Luck', 'DamageMin', 'DamageMax', 'Tithing_points',
            'ArmorMax', 'fireresistMax', 'coldresistMax',
            'poisonresistMax', 'energyresistMax', 'DefenseChance',
            'DefensceChanceMax', 'Hit_Chance_Incr', 'Damage_Incr',
            'Swing_Speed_Incr', 'Lower_Reagent_Cost', 'Spell_Damage_Incr',
            'Faster_Cast_Recovery', 'Faster_Casting', 'Lower_Mana_Cost',
            'HP_Regen', 'Stam_Regen', 'Mana_Regen', 'Reflect_Phys_Damage',
            'Enhance_Potions', 'Strength_Incr', 'Dex_Incr', 'Int_Incr',
            'HP_Incr', 'Mana_Incr')
    fmt = '=HBH2B4Hh2Hi26H'
    data = _get_extended_info()
    # 'str is bytes' is the explicit Python 2 check (replaces the
    # obscure "b'' == ''" trick); on py2 the buffer must be coerced
    # to bytes before struct can read it.
    if str is bytes:
        data = bytes(data)
    values = _struct.unpack(fmt, data)
    return dict(zip(keys, values))
# ---- character status flags, war mode and the target cursor ----
_is_hidden = _ScriptMethod(63) # GetHiddenStatus
_is_hidden.restype = _bool
def Hidden():
    return _is_hidden()
_is_poisoned = _ScriptMethod(64) # GetPoisonedStatus
_is_poisoned.restype = _bool
def Poisoned():
    return _is_poisoned()
_is_paralyzed = _ScriptMethod(65) # GetParalyzedStatus
_is_paralyzed.restype = _bool
def Paralyzed():
    return _is_paralyzed()
_is_dead = _ScriptMethod(66) # GetDeadStatus
_is_dead.restype = _bool
def Dead():
    return _is_dead()
_get_warmode = _ScriptMethod(171) # IsWarMode
_get_warmode.restype = _bool
_get_warmode.argtypes = [_uint] # ObjID
def WarMode():
    # IsWarMode takes any object id; here it is queried for the player.
    return _get_warmode(Self())
_get_war_target = _ScriptMethod(67) # GetWarTargetID
_get_war_target.restype = _uint
def WarTargetID():
    return _get_war_target()
_set_warmode = _ScriptMethod(68) # SetWarMode
_set_warmode.argtypes = [_bool] # Value
def SetWarMode(Value):
    _set_warmode(Value)
_attack = _ScriptMethod(69) # Attack
_attack.argtypes = [_uint] # AttackedID
def Attack(AttackedID):
    _attack(AttackedID)
_use_self_paperdoll = _ScriptMethod(70) # UseSelfPaperdollScroll
def UseSelfPaperdollScroll():
    _use_self_paperdoll()
_use_paperdoll = _ScriptMethod(71) # UseOtherPaperdollScroll
_use_paperdoll.argtypes = [_uint] # ID
def UseOtherPaperdollScroll(ID):
    _use_paperdoll(ID)
_target_id = _ScriptMethod(72) # GetTargetID
_target_id.restype = _uint
def TargetID():
    return _target_id()
def TargetPresent(): # GetTargetStatus
    # A zero target id means no target cursor is currently open.
    return bool(_target_id())
def WaitForTarget(MaxWaitTimeMS):
    """Wait until a target cursor appears or MaxWaitTimeMS elapses.

    Returns True if a target cursor is present when the wait ends.
    The deadline is computed once (the original recomputed it three
    times), and the return value now reports the actual presence of
    the cursor instead of racing against the clock: previously a
    cursor that appeared exactly at the deadline was reported as a
    timeout.
    """
    deadline = _time.time() + MaxWaitTimeMS / 1000
    while not _target_id() and _time.time() < deadline:
        Wait(10)
    return bool(_target_id())
_cancel_target = _ScriptMethod(73) # CancelTarget
def CancelTarget():
    """Cancel the open target cursor and block until it is gone.

    NOTE(review): polls with no timeout -- if the client never clears
    the cursor this loops forever; confirm that is acceptable.
    """
    _cancel_target()
    while _target_id():
        Wait(10)
# ---- targeting: TargetTo* answer the currently open cursor, while
# WaitTarget* appear to pre-load the answer for the next cursor
# (naming mirrors the Stealth API; confirm against its docs) ----
_target_to_object = _ScriptMethod(74) # TargetToObject
_target_to_object.argtypes = [_uint] # ObjectID
def TargetToObject(ObjectID):
    _target_to_object(ObjectID)
_target_xyz = _ScriptMethod(75) # TargetToXYZ
_target_xyz.argtypes = [_ushort, # X
                        _ushort, # Y
                        _byte] # Z
def TargetToXYZ(X, Y, Z):
    _target_xyz(X, Y, Z)
_target_tile = _ScriptMethod(76) # TargetToTile
_target_tile.argtypes = [_ushort, # TileModel
                         _ushort, # X
                         _ushort, # Y
                         _byte] # Z
def TargetToTile(TileModel, X, Y, Z):
    _target_tile(TileModel, X, Y, Z)
_wait_target_object = _ScriptMethod(77) # WaitTargetObject
_wait_target_object.argtypes = [_uint] # ObjID
def WaitTargetObject(ObjID):
    _wait_target_object(ObjID)
_wait_target_tile = _ScriptMethod(78) # WaitTargetTile
_wait_target_tile.argtypes = [_ushort, # Tile
                              _ushort, # X
                              _ushort, # Y
                              _byte] # Z
def WaitTargetTile(Tile, X, Y, Z):
    _wait_target_tile(Tile, X, Y, Z)
_wait_target_xyz = _ScriptMethod(79) # WaitTargetXYZ
_wait_target_xyz.argtypes = [_ushort, # X
                             _ushort, # Y
                             _byte] # Z
def WaitTargetXYZ(X, Y, Z):
    _wait_target_xyz(X, Y, Z)
_wait_target_self = _ScriptMethod(80) # WaitTargetSelf
def WaitTargetSelf():
    _wait_target_self()
_wait_target_graphic = _ScriptMethod(81) # WaitTargetType
_wait_target_graphic.argtypes = [_ushort] # ObjType
def WaitTargetType(ObjType):
    _wait_target_graphic(ObjType)
_cancel_wait_target = _ScriptMethod(82) # CancelWaitTarget
def CancelWaitTarget():
    _cancel_wait_target()
_wait_target_ground = _ScriptMethod(83) # WaitTargetGround
_wait_target_ground.argtypes = [_ushort] # ObjType
def WaitTargetGround(ObjType):
    _wait_target_ground(ObjType)
_wait_target_last = _ScriptMethod(84) # WaitTargetLast
def WaitTargetLast():
    _wait_target_last()
_wait = _ScriptMethod(0) # Wait
def Wait(WaitTimeMS):
    """Sleep for WaitTimeMS milliseconds while keeping events flowing.

    Sleeps in 10 ms slices, calling the Wait RPC each slice so the
    client keeps processing script pauses and event checks.
    """
    end = _time.time() + WaitTimeMS / 1000
    while _time.time() < end:
        _wait() # pause script and event checks
        _time.sleep(0.010)
    else:
        # while/else: this branch always runs (the loop has no break),
        # guaranteeing at least one _wait() even when WaitTimeMS is so
        # small that the loop body never executes.
        _wait() # condition does not work while delay is a very small number
# ---- weapon abilities and skill-id resolution ----
_use_primary_ability = _ScriptMethod(85) # UsePrimaryAbility
def UsePrimaryAbility():
    _use_primary_ability()
_use_secondary_ability = _ScriptMethod(86) # UseSecondaryAbility
def UseSecondaryAbility():
    _use_secondary_ability()
_get_ability = _ScriptMethod(87) # GetAbility
_get_ability.restype = _str
def GetActiveAbility():
    return _get_ability()
_toggle_fly = _ScriptMethod(88) # ToggleFly
def ToggleFly():
    _toggle_fly()
_get_skill_id_from_socket = _ScriptMethod(89) # GetSkillID
_get_skill_id_from_socket.restype = _int # SkillID
_get_skill_id_from_socket.argtypes = [_str] # SkillName
def _get_skill_id(name):
    """Resolve a skill name to its id; raise ValueError if unknown.

    The client returns a negative id for unknown skill names.
    """
    skill_id = _get_skill_id_from_socket(name)
    if skill_id < 0:
        raise ValueError('Unknown skill name "' + name + '".')
    return skill_id
_use_skill = _ScriptMethod(90) # UseSkill
_use_skill.argtypes = [_int] # SkillID
def UseSkill(SkillName):
    """Use SkillName; raises ValueError for an unknown skill name."""
    _use_skill(_get_skill_id(SkillName))
    # Always True: an invalid name raises before reaching this point.
    return True
_lock_skill = _ScriptMethod(91) # ChangeSkillLockState
_lock_skill.argtypes = [_int, # SkillID
                        _ubyte] # SkillState
def ChangeSkillLockState(SkillName, skillState):
    """Set the lock state of SkillName to skillState.

    Uses the validating _get_skill_id (as UseSkill does) so an unknown
    skill name raises ValueError instead of sending the client's
    "unknown" id of -1 to the server.
    """
    _lock_skill(_get_skill_id(SkillName), skillState)
_get_skill_cap = _ScriptMethod(92) # GetSkillCap
_get_skill_cap.restype = _double
_get_skill_cap.argtypes = [_int] # SkillID
def GetSkillCap(SkillName):
    """Return the cap of SkillName; ValueError for unknown names."""
    return _get_skill_cap(_get_skill_id(SkillName))
_get_skill_value = _ScriptMethod(93) # GetSkillValue
_get_skill_value.restype = _double
_get_skill_value.argtypes = [_int] # SkillID
def GetSkillValue(SkillName):
    """Return the (real) value of SkillName; ValueError for unknown names."""
    return _get_skill_value(_get_skill_id(SkillName))
_get_skill_current_value = _ScriptMethod(351) # GetSkillCurrentValue
_get_skill_current_value.restype = _double
_get_skill_current_value.argtypes = [_int] # SkillID
def GetSkillCurrentValue(SkillName):
    """Return the current (modified) value of SkillName; ValueError for unknown names."""
    return _get_skill_current_value(_get_skill_id(SkillName))
_request_virtues = _ScriptMethod(94) # ReqVirtuesGump
def ReqVirtuesGump():
    """Ask the client to open the virtues gump."""
    _request_virtues()
# Virtue name -> numeric id (presumably the id used by the UseVirtue
# RPC below in this module -- confirm against the Stealth API docs).
_VIRTUES = {
    'compassion': 0x69,
    'honesty': 0x6A,
    'honor': 0x6B,
    'humility': 0x6C,
    'justice': 0x6D,
    'sacrifice': 0x6E,
    'spirituality': 0x6F,
    'valor': 0x70,
}
_use_virtue = _ScriptMethod(95) | |
at least one must be thermostated.
is_thermostated = False
if isinstance(integrator, openmm.CompoundIntegrator):
for integrator_id in range(integrator.getNumIntegrators()):
_integrator = integrator.getIntegrator(integrator_id)
is_thermostated = is_thermostated or self._is_integrator_thermostated(_integrator)
else:
is_thermostated = self._is_integrator_thermostated(integrator)
# If integrator is coupled to heat bath, remove system thermostat.
system = copy.deepcopy(self._system)
if is_thermostated:
self._remove_thermostat(system)
# Create platform.
if platform is None:
return openmm.Context(system, integrator)
else:
return openmm.Context(system, integrator, platform)
    def apply_to_context(self, context):
        """Apply this ThermodynamicState to the context.
        The method apply_to_context does *not* check for the compatibility
        of the context. The user is responsible for this. Depending on the
        system size, is_context_compatible can be an expensive operation,
        so is_state_compatible should be preferred when possible.
        Parameters
        ----------
        context : simtk.openmm.Context
            The OpenMM Context to be set to this ThermodynamicState.
        Raises
        ------
        ThermodynamicsError
            If the context is in a different thermodynamic ensemble w.r.t.
            this state. This is just a quick check which does not substitute
            is_state_compatible or is_context_compatible.
        See Also
        --------
        ThermodynamicState.is_state_compatible
        ThermodynamicState.is_context_compatible
        Examples
        --------
        The method doesn't verify compatibility with the context, it is
        the user's responsibility to do so, possibly with is_state_compatible
        rather than is_context_compatible which is slower.
        >>> from simtk import openmm, unit
        >>> from openmmtools import testsystems
        >>> toluene = testsystems.TolueneVacuum()
        >>> state1 = ThermodynamicState(toluene.system, 273.0*unit.kelvin)
        >>> state2 = ThermodynamicState(toluene.system, 310.0*unit.kelvin)
        >>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
        >>> context = state1.create_context(integrator)
        >>> if state2.is_state_compatible(state1):
        ...     state2.apply_to_context(context)
        >>> context.getParameter(openmm.AndersenThermostat.Temperature())
        310.0
        """
        system = context.getSystem()
        # Apply pressure and temperature to barostat.
        barostat = self._find_barostat(system)
        if barostat is not None:
            if self._barostat is None:
                # The context is NPT but this is NVT.
                raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
            # Parameters are only pushed to the Context when the system
            # copy's barostat actually changed, avoiding redundant updates.
            has_changed = self._set_barostat_pressure(barostat, self.pressure)
            if has_changed:
                context.setParameter(barostat.Pressure(), self.pressure)
            has_changed = self._set_barostat_temperature(barostat, self.temperature)
            if has_changed:
                # TODO remove try except when drop openmm7.0 support
                try:
                    context.setParameter(barostat.Temperature(), self.temperature)
                except AttributeError:  # OpenMM < 7.1
                    # Barostat temperature is not a Context parameter before
                    # OpenMM 7.1: rebuild the Context, preserving its state.
                    openmm_state = context.getState(getPositions=True, getVelocities=True,
                                                    getParameters=True)
                    context.reinitialize()
                    context.setState(openmm_state)
        elif self._barostat is not None:
            # The context is NVT but this is NPT.
            raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
        # Apply temperature to thermostat or integrator.
        thermostat = self._find_thermostat(system)
        if thermostat is not None:
            if thermostat.getDefaultTemperature() != self.temperature:
                thermostat.setDefaultTemperature(self.temperature)
                context.setParameter(thermostat.Temperature(), self.temperature)
        else:
            # No thermostat force: temperature is enforced by the integrator.
            integrator = context.getIntegrator()
            self._set_integrator_temperature(integrator)
    # -------------------------------------------------------------------------
    # Internal-usage: system handling
    # -------------------------------------------------------------------------
    # Standard values are not standard in a physical sense, they are
    # just consistent between ThermodynamicStates to make comparison
    # of standard system hashes possible. We set this to round floats
    # and use OpenMM units to avoid funniness due to precision errors
    # caused by periodic binary representation/unit conversion.
    _STANDARD_PRESSURE = 1.0*unit.bar
    _STANDARD_TEMPERATURE = 273.0*unit.kelvin
    # Nonbonded methods that imply a non-periodic system; a barostat on
    # such a system is an error (see _check_system_consistency).
    _NONPERIODIC_NONBONDED_METHODS = {openmm.NonbondedForce.NoCutoff,
                                      openmm.NonbondedForce.CutoffNonPeriodic}
    def _check_internal_consistency(self):
        """Shortcut self._check_system_consistency(self._system)."""
        self._check_system_consistency(self._system)
    def _check_system_consistency(self, system):
        """Check system consistency with this ThermodynamicState.
        Raise an error if the system is inconsistent. Currently checks
        that there's 1 and only 1 thermostat at the correct temperature,
        that there's only 1 barostat (or none in case this is in NVT),
        that the barostat is supported, has the correct temperature and
        pressure, and that it is not associated to a non-periodic system.
        Parameters
        ----------
        system : simtk.openmm.System
            The system to test.
        Raises
        ------
        ThermodynamicsError
            If the system is inconsistent with this state.
        """
        TE = ThermodynamicsError  # shortcut
        # This raises MULTIPLE_THERMOSTATS
        thermostat = self._find_thermostat(system)
        # When system is self._system, we check the presence of a
        # thermostat before the barostat to avoid crashes when
        # checking the barostat temperature.
        if thermostat is None:
            raise TE(TE.NO_THERMOSTAT)
        elif thermostat.getDefaultTemperature() != self.temperature:
            raise TE(TE.INCONSISTENT_THERMOSTAT)
        # This raises MULTIPLE_BAROSTATS and UNSUPPORTED_BAROSTAT.
        barostat = self._find_barostat(system)
        if barostat is not None:
            if not self._is_barostat_consistent(barostat):
                raise TE(TE.INCONSISTENT_BAROSTAT)
            # Check that barostat is not added to non-periodic system. We
            # cannot use System.usesPeriodicBoundaryConditions() because
            # in OpenMM < 7.1 that returns True when a barostat is added.
            # TODO just use usesPeriodicBoundaryConditions when drop openmm7.0
            for force in system.getForces():
                if isinstance(force, openmm.NonbondedForce):
                    nonbonded_method = force.getNonbondedMethod()
                    if nonbonded_method in self._NONPERIODIC_NONBONDED_METHODS:
                        raise TE(TE.BAROSTATED_NONPERIODIC)
        elif self._barostat is not None:
            # The system has no barostat but this state expects NPT.
            raise TE(TE.NO_BAROSTAT)
    @classmethod
    def _standardize_system(cls, system):
        """Return a copy of the system in a standard representation.
        This effectively defines which ThermodynamicStates are compatible
        between each other. Compatible ThermodynamicStates have the same
        standard systems, and is_state_compatible will return True if
        the (cached) serialization of the standard systems are identical.
        Here, the standard system has the barostat pressure/temperature
        set to _STANDARD_PRESSURE/TEMPERATURE (if a barostat exist), and
        the thermostat removed (if it is present). Removing the thermostat
        means that systems that will enforce a temperature through an
        integrator coupled to a heat bath will be compatible as well. The
        method apply_to_context then sets the parameters in the Context.
        Effectively this means that only same systems in the same ensemble
        (NPT or NVT) are compatible between each other.
        Parameters
        ----------
        system : simtk.openmm.System
            The system to standardize.
        See Also
        --------
        ThermodynamicState.apply_to_context
        ThermodynamicState.is_state_compatible
        ThermodynamicState.is_context_compatible
        """
        # NOTE: modifies `system` in place despite the docstring's
        # "return a copy" phrasing -- callers deep-copy beforehand.
        cls._remove_thermostat(system)
        barostat = cls._find_barostat(system)
        if barostat is not None:
            barostat.setDefaultPressure(cls._STANDARD_PRESSURE)
            cls._set_barostat_temperature(barostat, cls._STANDARD_TEMPERATURE)
@classmethod
def _get_standard_system_hash(cls, system):
"""Return the serialization hash of the standard system."""
standard_system = copy.deepcopy(system)
cls._standardize_system(standard_system)
system_serialization = openmm.XmlSerializer.serialize(standard_system)
return system_serialization.__hash__()
    @property
    def _standard_system_hash(self):
        """Shortcut for _get_standard_system_hash(self._system)."""
        # Computed lazily and memoized. NOTE(review): assumes
        # _cached_standard_system_hash is reset to None whenever
        # self._system changes -- confirm in the setters.
        if self._cached_standard_system_hash is None:
            self._cached_standard_system_hash = self._get_standard_system_hash(self._system)
        return self._cached_standard_system_hash
# -------------------------------------------------------------------------
# Internal-usage: integrator handling
# -------------------------------------------------------------------------
def _is_integrator_thermostated(self, integrator):
"""True if integrator is coupled to a heat bath.
Raises
------
ThermodynamicsError
If integrator is couple to a heat bath at a different
temperature than this thermodynamic state.
"""
is_thermostated = False
try:
temperature = integrator.getTemperature()
except AttributeError:
pass
else:
if temperature != self.temperature:
err_code = ThermodynamicsError.INCONSISTENT_INTEGRATOR
raise ThermodynamicsError(err_code)
is_thermostated = True
return is_thermostated
def _set_integrator_temperature(self, integrator):
"""Set heat bath temperature of the integrator.
If integrator is a CompoundIntegrator, it sets the temperature
of every sub-integrator.
Returns
-------
has_changed : bool
True if the integrator temperature has changed.
"""
def set_temp(_integrator):
try:
if _integrator.getTemperature() != self.temperature:
_integrator.setTemperature(self.temperature)
return True
except AttributeError:
pass
return False
if isinstance(integrator, openmm.CompoundIntegrator):
has_changed = False
for integrator_id in range(integrator.getNumIntegrators()):
_integrator = integrator.getIntegrator(integrator_id)
has_changed = has_changed or set_temp(_integrator)
return has_changed
else:
return set_temp(integrator)
    # -------------------------------------------------------------------------
    # Internal-usage: barostat handling
    # -------------------------------------------------------------------------
    # Barostat class names accepted by _find_barostat; any other
    # barostat raises UNSUPPORTED_BAROSTAT there.
    _SUPPORTED_BAROSTATS = {'MonteCarloBarostat'}
    @property
    def _barostat(self):
        """Shortcut for self._find_barostat(self._system)."""
        return self._find_barostat(self._system)
    @classmethod
    def _find_barostat(cls, system):
        """Return the first barostat found in the system.
        Returns
        -------
        barostat : OpenMM Force object
            The barostat in system, or None if no barostat is found.
        Raises
        ------
        ThermodynamicsError
            If the system contains unsupported barostats.
        """
        barostat_id = cls._find_barostat_index(system)
        if barostat_id is None:
            return None
        barostat = system.getForce(barostat_id)
        # Only barostats whose class name is whitelisted are supported.
        if barostat.__class__.__name__ not in cls._SUPPORTED_BAROSTATS:
            raise ThermodynamicsError(ThermodynamicsError.UNSUPPORTED_BAROSTAT,
                                      barostat.__class__.__name__)
        return barostat
@classmethod
def _remove_barostat(cls, system):
"""Remove the system barostat.
Returns
-------
True if the barostat was found and removed, False otherwise.
"""
barostat_id = cls._find_barostat_index(system)
if barostat_id is not None:
system.removeForce(barostat_id)
return True
return False
@staticmethod
def _find_barostat_index(system):
"""Return the index of the first barostat found in the system.
Returns
-------
barostat_id : int
The index of the barostat force in self._system or None if
no barostat is found.
Raises
------
ThermodynamicsError
If the system contains multiple barostats.
"""
barostat_ids = [i for i, force in enumerate(system.getForces())
if 'Barostat' in force.__class__.__name__]
if len(barostat_ids) == 0:
return None
if len(barostat_ids) > 1:
raise ThermodynamicsError(ThermodynamicsError.MULTIPLE_BAROSTATS)
return barostat_ids[0]
def _is_barostat_consistent(self, barostat):
"""Check the barostat's temperature and pressure."""
try:
barostat_temperature = barostat.getDefaultTemperature()
except AttributeError: # versions previous to OpenMM 7.1
barostat_temperature = barostat.getTemperature()
barostat_pressure = barostat.getDefaultPressure()
is_consistent = barostat_temperature == self.temperature
is_consistent = is_consistent and barostat_pressure == self.pressure
return is_consistent
def _set_system_pressure(self, system, pressure):
"""Add or configure the system barostat to the given pressure.
If a new barostat is added, its temperature is set to
self.temperature.
Parameters
----------
system : simtk.openmm.System
The system's barostat will be added/configured.
pressure : simtk.unit.Quantity or None
The pressure with units compatible to bars. If None, the
barostat of the system is removed.
Returns
-------
barostat : OpenMM Force object or None
The current barostat of the system. None if the barostat
was removed.
Raises
------
ThermodynamicsError
If pressure needs to be set for a non-periodic system.
"""
if pressure is None: # If new pressure is None, remove barostat.
self._remove_barostat(system)
return | |
resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success. If
the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
  # NOTE: appears to be apitools-generated proto message code; prefer
  # regenerating from the discovery document over hand edits.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.
    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # NOTE: appears to be apitools-generated proto message code; prefer
  # regenerating from the discovery document over hand edits.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    r"""The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResponseValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class Policy(_messages.Message):
  r"""An Identity and Access Management (IAM) policy, which specifies access
  controls for Google Cloud resources.

  A `Policy` is a collection of `bindings`.  A `binding` binds one or more
  `members`, or principals, to a single `role`; a binding can also carry a
  `condition` expression that must evaluate to `true` for access to be
  granted.  For the full JSON/YAML policy format and which resources support
  conditions, see the IAM documentation:
  https://cloud.google.com/iam/docs/ and
  https://cloud.google.com/iam/help/conditions/resource-policies.

  Fields:
    auditConfigs: Specifies cloud audit logging configuration for this policy.
    bindings: Associates a list of `members`, or principals, with a `role`.
      Optionally, may specify a `condition` that determines how and when the
      `bindings` are applied.  Each of the `bindings` must contain at least
      one principal.  The `bindings` in a `Policy` can refer to up to 1,500
      principals; up to 250 of these principals can be Google groups.  Each
      occurrence of a principal counts towards these limits.
    etag: `etag` is used for optimistic concurrency control to help prevent
      simultaneous updates of a policy from overwriting each other: put the
      `etag` returned by `getIamPolicy` into the `setIamPolicy` request so
      the change applies to the same version of the policy.  **Important:**
      if you use IAM Conditions you must include the `etag` whenever you
      call `setIamPolicy`; omitting it allows a version `3` policy to be
      overwritten by a version `1` policy, losing all conditions.
    version: Specifies the format of the policy.  Valid values are `0`, `1`,
      and `3`; requests that specify an invalid value are rejected.  Any
      operation that affects conditional role bindings must specify version
      `3` (getting, adding, changing, or removing a binding in a policy that
      includes conditions).  If a policy has no conditions, operations on it
      may specify any valid version or leave the field unset.
  """

  auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
  bindings = _messages.MessageField('Binding', 2, repeated=True)
  etag = _messages.BytesField(3)
  version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
# Empty request messages: these generated RPC request bodies declare no
# fields of their own.
class PromoteMigrationJobRequest(_messages.Message):
  r"""Request message for 'PromoteMigrationJob' request."""


class RestartMigrationJobRequest(_messages.Message):
  r"""Request message for 'RestartMigrationJob' request."""


class ResumeMigrationJobRequest(_messages.Message):
  r"""Request message for 'ResumeMigrationJob' request."""
class ReverseSshConnectivity(_messages.Message):
r"""The details needed to configure a reverse SSH tunnel between the source
and destination databases. These details will be used when calling the
generateSshScript method (see https://cloud.google.com/database-migration/do
cs/reference/rest/v1alpha2/projects.locations.migrationJobs/generateSshScrip
t) to produce the script that will help set up the reverse SSH | |
<filename>src/rogerthat/bizz/service/mfd/sub.py<gh_stars>0
#!/usr/bin/env python
# @@xxx_skip_license@@
# @PydevCodeAnalysisIgnore
#
# Generated Tue Nov 6 11:26:05 2018 by generateDS.py version 2.29.24.
# Python 3.7.0 (default, Jul 23 2018, 20:22:55) [Clang 9.1.0 (clang-902.0.39.2)]
#
# Command line options:
# ('-f', '')
# ('--no-questions', '')
# ('--external-encoding', 'utf-8')
# ('-o', 'gen.py')
# ('-s', 'sub.py')
#
# Command line arguments:
# ../../../service/api/MessageFlow.1.xsd
#
# Command line:
# /usr/local/bin/generateDS -f --no-questions --external-encoding="utf-8" -o "gen.py" -s "sub.py" ../../../service/api/MessageFlow.1.xsd
#
# Current working directory (os.getcwd()):
# mfd
#
import sys
from lxml import etree as etree_
from rogerthat.bizz.service.mfd import gen as supermod
def parsexml_(infile, parser=None, **kwargs):
    """Parse *infile* and return the resulting lxml document.

    When no *parser* is supplied, an ``ETCompatXMLParser`` is used so the
    parse behaves like ElementTree (e.g. XML comments are ignored).
    Extra keyword arguments are forwarded to ``etree_.parse``.
    """
    active_parser = parser if parser is not None else etree_.ETCompatXMLParser()
    return etree_.parse(infile, parser=active_parser, **kwargs)
#
# Globals
#
# Matches the --external-encoding="utf-8" option recorded in the generation
# header at the top of this file.
ExternalEncoding = 'utf-8'
#
# Data representation classes
#
# ---------------------------------------------------------------------------
# Auto-generated subclass stubs (generateDS.py; see the file header).
# Each *Sub class extends the matching superclass in gen.py and then installs
# itself via ``supermod.<Name>.subclass = <Name>Sub`` — the generateDS hook
# that makes the gen.py parser instantiate these subclasses instead of the
# plain generated classes.  Behaviour is intentionally unchanged from the
# generator output; regenerate from MessageFlow.1.xsd rather than editing.
# ---------------------------------------------------------------------------
class AttachmentSub(supermod.Attachment):
    def __init__(self, name=None, url=None, contentType=None, size=None):
        super(AttachmentSub, self).__init__(name, url, contentType, size, )
supermod.Attachment.subclass = AttachmentSub
# end class AttachmentSub


class FlowElementSub(supermod.FlowElement):
    def __init__(self, id=None, extensiontype_=None):
        super(FlowElementSub, self).__init__(id, extensiontype_, )
supermod.FlowElement.subclass = FlowElementSub
# end class FlowElementSub


class AnswerSub(supermod.Answer):
    def __init__(self, caption=None, action=None, id=None, reference=None, color=None):
        super(AnswerSub, self).__init__(caption, action, id, reference, color, )
supermod.Answer.subclass = AnswerSub
# end class AnswerSub


class MessageSub(supermod.Message):
    def __init__(self, id=None, allowDismiss=None, dismissReference=None, brandingKey=None, autoLock=None, vibrate=None, alertType=None, alertIntervalType=None, content=None, answer=None, attachment=None):
        super(MessageSub, self).__init__(id, allowDismiss, dismissReference, brandingKey, autoLock, vibrate, alertType, alertIntervalType, content, answer, attachment, )
supermod.Message.subclass = MessageSub
# end class MessageSub


class ResultsFlushSub(supermod.ResultsFlush):
    def __init__(self, id=None, reference=None):
        super(ResultsFlushSub, self).__init__(id, reference, )
supermod.ResultsFlush.subclass = ResultsFlushSub
# end class ResultsFlushSub


class ResultsEmailSub(supermod.ResultsEmail):
    def __init__(self, id=None, reference=None, emailAdmins=None, email=None):
        super(ResultsEmailSub, self).__init__(id, reference, emailAdmins, email, )
supermod.ResultsEmail.subclass = ResultsEmailSub
# end class ResultsEmailSub


class FlowCodeSub(supermod.FlowCode):
    def __init__(self, id=None, exceptionReference=None, outlet=None, javascriptCode=None):
        super(FlowCodeSub, self).__init__(id, exceptionReference, outlet, javascriptCode, )
supermod.FlowCode.subclass = FlowCodeSub
# end class FlowCodeSub


class WidgetSub(supermod.Widget):
    def __init__(self, extensiontype_=None):
        super(WidgetSub, self).__init__(extensiontype_, )
supermod.Widget.subclass = WidgetSub
# end class WidgetSub


class BaseSliderWidgetSub(supermod.BaseSliderWidget):
    def __init__(self, min=None, max=None, step=None, precision=None, unit=None, extensiontype_=None):
        super(BaseSliderWidgetSub, self).__init__(min, max, step, precision, unit, extensiontype_, )
supermod.BaseSliderWidget.subclass = BaseSliderWidgetSub
# end class BaseSliderWidgetSub


class SliderWidgetSub(supermod.SliderWidget):
    def __init__(self, min=None, max=None, step=None, precision=None, unit=None, value=None):
        super(SliderWidgetSub, self).__init__(min, max, step, precision, unit, value, )
supermod.SliderWidget.subclass = SliderWidgetSub
# end class SliderWidgetSub


class RangeSliderWidgetSub(supermod.RangeSliderWidget):
    def __init__(self, min=None, max=None, step=None, precision=None, unit=None, lowValue=None, highValue=None):
        super(RangeSliderWidgetSub, self).__init__(min, max, step, precision, unit, lowValue, highValue, )
supermod.RangeSliderWidget.subclass = RangeSliderWidgetSub
# end class RangeSliderWidgetSub


class PhotoUploadWidgetSub(supermod.PhotoUploadWidget):
    def __init__(self, quality=None, gallery=None, camera=None, ratio=None):
        super(PhotoUploadWidgetSub, self).__init__(quality, gallery, camera, ratio, )
supermod.PhotoUploadWidget.subclass = PhotoUploadWidgetSub
# end class PhotoUploadWidgetSub


class GPSLocationWidgetSub(supermod.GPSLocationWidget):
    def __init__(self, gps=None):
        super(GPSLocationWidgetSub, self).__init__(gps, )
supermod.GPSLocationWidget.subclass = GPSLocationWidgetSub
# end class GPSLocationWidgetSub


class TextWidgetSub(supermod.TextWidget):
    def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None, extensiontype_=None):
        super(TextWidgetSub, self).__init__(maxChars, placeholder, value, keyboardType, extensiontype_, )
supermod.TextWidget.subclass = TextWidgetSub
# end class TextWidgetSub


class TextLineWidgetSub(supermod.TextLineWidget):
    def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None):
        super(TextLineWidgetSub, self).__init__(maxChars, placeholder, value, keyboardType, )
supermod.TextLineWidget.subclass = TextLineWidgetSub
# end class TextLineWidgetSub


class TextBlockWidgetSub(supermod.TextBlockWidget):
    def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None):
        super(TextBlockWidgetSub, self).__init__(maxChars, placeholder, value, keyboardType, )
supermod.TextBlockWidget.subclass = TextBlockWidgetSub
# end class TextBlockWidgetSub


class ValueSub(supermod.Value):
    def __init__(self, value=None, extensiontype_=None):
        super(ValueSub, self).__init__(value, extensiontype_, )
supermod.Value.subclass = ValueSub
# end class ValueSub


class FloatValueSub(supermod.FloatValue):
    def __init__(self, value=None):
        super(FloatValueSub, self).__init__(value, )
supermod.FloatValue.subclass = FloatValueSub
# end class FloatValueSub


class AdvancedOrderCategorySub(supermod.AdvancedOrderCategory):
    def __init__(self, id=None, name=None, item=None):
        super(AdvancedOrderCategorySub, self).__init__(id, name, item, )
supermod.AdvancedOrderCategory.subclass = AdvancedOrderCategorySub
# end class AdvancedOrderCategorySub


class AdvancedOrderItemSub(supermod.AdvancedOrderItem):
    def __init__(self, id=None, value=None, unit=None, unitPrice=None, hasPrice=True, step=None, stepUnit=None, stepUnitConversion=None, imageUrl=None, name=None, description=None):
        super(AdvancedOrderItemSub, self).__init__(id, value, unit, unitPrice, hasPrice, step, stepUnit, stepUnitConversion, imageUrl, name, description, )
supermod.AdvancedOrderItem.subclass = AdvancedOrderItemSub
# end class AdvancedOrderItemSub


class BasePaymentMethodSub(supermod.BasePaymentMethod):
    def __init__(self, id=None, currency=None, amount=None, precision=None, extensiontype_=None):
        super(BasePaymentMethodSub, self).__init__(id, currency, amount, precision, extensiontype_, )
supermod.BasePaymentMethod.subclass = BasePaymentMethodSub
# end class BasePaymentMethodSub


class PaymentMethodSub(supermod.PaymentMethod):
    def __init__(self, id=None, currency=None, amount=None, precision=None, provider_id=None, calculateAmount=False, target=None):
        super(PaymentMethodSub, self).__init__(id, currency, amount, precision, provider_id, calculateAmount, target, )
supermod.PaymentMethod.subclass = PaymentMethodSub
# end class PaymentMethodSub


class TextAutocompleteWidgetSub(supermod.TextAutocompleteWidget):
    def __init__(self, maxChars=None, placeholder=None, value=None, keyboardType=None, suggestion=None):
        super(TextAutocompleteWidgetSub, self).__init__(maxChars, placeholder, value, keyboardType, suggestion, )
supermod.TextAutocompleteWidget.subclass = TextAutocompleteWidgetSub
# end class TextAutocompleteWidgetSub


class ChoiceSub(supermod.Choice):
    def __init__(self, value=None, label=None):
        super(ChoiceSub, self).__init__(value, label, )
supermod.Choice.subclass = ChoiceSub
# end class ChoiceSub


class SelectWidgetSub(supermod.SelectWidget):
    def __init__(self, choice=None, extensiontype_=None):
        super(SelectWidgetSub, self).__init__(choice, extensiontype_, )
supermod.SelectWidget.subclass = SelectWidgetSub
# end class SelectWidgetSub


class SelectSingleWidgetSub(supermod.SelectSingleWidget):
    def __init__(self, choice=None, value=None):
        super(SelectSingleWidgetSub, self).__init__(choice, value, )
supermod.SelectSingleWidget.subclass = SelectSingleWidgetSub
# end class SelectSingleWidgetSub


class SelectMultiWidgetSub(supermod.SelectMultiWidget):
    def __init__(self, choice=None, value=None):
        super(SelectMultiWidgetSub, self).__init__(choice, value, )
supermod.SelectMultiWidget.subclass = SelectMultiWidgetSub
# end class SelectMultiWidgetSub


class SelectDateWidgetSub(supermod.SelectDateWidget):
    def __init__(self, minDate=None, maxDate=None, date=None, minuteInterval=None, mode=None, unit=None):
        super(SelectDateWidgetSub, self).__init__(minDate, maxDate, date, minuteInterval, mode, unit, )
supermod.SelectDateWidget.subclass = SelectDateWidgetSub
# end class SelectDateWidgetSub


class SelectFriendWidgetSub(supermod.SelectFriendWidget):
    def __init__(self, selectionRequired=None, multiSelect=None):
        super(SelectFriendWidgetSub, self).__init__(selectionRequired, multiSelect, )
supermod.SelectFriendWidget.subclass = SelectFriendWidgetSub
# end class SelectFriendWidgetSub


class MyDigiPassWidgetSub(supermod.MyDigiPassWidget):
    def __init__(self, scope=None):
        super(MyDigiPassWidgetSub, self).__init__(scope, )
supermod.MyDigiPassWidget.subclass = MyDigiPassWidgetSub
# end class MyDigiPassWidgetSub


class OpenIdWidgetSub(supermod.OpenIdWidget):
    def __init__(self, provider=None, scope=None):
        super(OpenIdWidgetSub, self).__init__(provider, scope, )
supermod.OpenIdWidget.subclass = OpenIdWidgetSub
# end class OpenIdWidgetSub


class AdvancedOrderWidgetSub(supermod.AdvancedOrderWidget):
    def __init__(self, currency=None, leapTime=None, category=None):
        super(AdvancedOrderWidgetSub, self).__init__(currency, leapTime, category, )
supermod.AdvancedOrderWidget.subclass = AdvancedOrderWidgetSub
# end class AdvancedOrderWidgetSub


class SignWidgetSub(supermod.SignWidget):
    def __init__(self, caption=None, algorithm=None, keyName=None, index=None, payload=None):
        super(SignWidgetSub, self).__init__(caption, algorithm, keyName, index, payload, )
supermod.SignWidget.subclass = SignWidgetSub
# end class SignWidgetSub


class OauthWidgetSub(supermod.OauthWidget):
    def __init__(self, url=None, caption=None, successMessage=None):
        super(OauthWidgetSub, self).__init__(url, caption, successMessage, )
supermod.OauthWidget.subclass = OauthWidgetSub
# end class OauthWidgetSub


class PayWidgetSub(supermod.PayWidget):
    def __init__(self, memo=None, target=None, autoSubmit=True, testMode=False, embeddedAppId=None, method=None, baseMethod=None):
        super(PayWidgetSub, self).__init__(memo, target, autoSubmit, testMode, embeddedAppId, method, baseMethod, )
supermod.PayWidget.subclass = PayWidgetSub
# end class PayWidgetSub


class FormSub(supermod.Form):
    def __init__(self, positiveButtonCaption=None, positiveButtonConfirmation=None, negativeButtonCaption=None, negativeButtonConfirmation=None, widget=None, javascriptValidation=None):
        super(FormSub, self).__init__(positiveButtonCaption, positiveButtonConfirmation, negativeButtonCaption, negativeButtonConfirmation, widget, javascriptValidation, )
supermod.Form.subclass = FormSub
# end class FormSub


class FormMessageSub(supermod.FormMessage):
    def __init__(self, id=None, member=None, brandingKey=None, autoLock=None, vibrate=None, alertType=None, alertIntervalType=None, positiveReference=None, negativeReference=None, content=None, form=None, attachment=None):
        super(FormMessageSub, self).__init__(id, member, brandingKey, autoLock, vibrate, alertType, alertIntervalType, positiveReference, negativeReference, content, form, attachment, )
supermod.FormMessage.subclass = FormMessageSub
# end class FormMessageSub


class OutletSub(supermod.Outlet):
    def __init__(self, value=None, name=None, reference=None):
        super(OutletSub, self).__init__(value, name, reference, )
supermod.Outlet.subclass = OutletSub
# end class OutletSub


class EndSub(supermod.End):
    def __init__(self, id=None, waitForFollowUpMessage=False):
        super(EndSub, self).__init__(id, waitForFollowUpMessage, )
supermod.End.subclass = EndSub
# end class EndSub


class MessageFlowDefinitionSub(supermod.MessageFlowDefinition):
    def __init__(self, name=None, startReference=None, language=None, end=None, message=None, formMessage=None, resultsFlush=None, resultsEmail=None, flowCode=None):
        super(MessageFlowDefinitionSub, self).__init__(name, startReference, language, end, message, formMessage, resultsFlush, resultsEmail, flowCode, )
supermod.MessageFlowDefinition.subclass = MessageFlowDefinitionSub
# end class MessageFlowDefinitionSub


class MessageFlowDefinitionSetSub(supermod.MessageFlowDefinitionSet):
    def __init__(self, definition=None):
        super(MessageFlowDefinitionSetSub, self).__init__(definition, )
supermod.MessageFlowDefinitionSet.subclass = MessageFlowDefinitionSetSub
# end class MessageFlowDefinitionSetSub


class StepSub(supermod.Step):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, extensiontype_=None):
        super(StepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, extensiontype_, )
supermod.Step.subclass = StepSub
# end class StepSub


class BaseMessageStepSub(supermod.BaseMessageStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, extensiontype_=None):
        super(BaseMessageStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, extensiontype_, )
supermod.BaseMessageStep.subclass = BaseMessageStepSub
# end class BaseMessageStepSub


class MessageStepSub(supermod.MessageStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, answer=None):
        super(MessageStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, answer, )
supermod.MessageStep.subclass = MessageStepSub
# end class MessageStepSub


class WidgetStepSub(supermod.WidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, extensiontype_=None):
        super(WidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, extensiontype_, )
supermod.WidgetStep.subclass = WidgetStepSub
# end class WidgetStepSub


class TextWidgetStepSub(supermod.TextWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None, extensiontype_=None):
        super(TextWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, extensiontype_, )
supermod.TextWidgetStep.subclass = TextWidgetStepSub
# end class TextWidgetStepSub


class TextLineWidgetStepSub(supermod.TextLineWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
        super(TextLineWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
supermod.TextLineWidgetStep.subclass = TextLineWidgetStepSub
# end class TextLineWidgetStepSub


class TextBlockWidgetStepSub(supermod.TextBlockWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
        super(TextBlockWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
supermod.TextBlockWidgetStep.subclass = TextBlockWidgetStepSub
# end class TextBlockWidgetStepSub


class TextAutoCompleteWidgetStepSub(supermod.TextAutoCompleteWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
        super(TextAutoCompleteWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
supermod.TextAutoCompleteWidgetStep.subclass = TextAutoCompleteWidgetStepSub
# end class TextAutoCompleteWidgetStepSub


class SliderWidgetStepSub(supermod.SliderWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
        super(SliderWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
supermod.SliderWidgetStep.subclass = SliderWidgetStepSub
# end class SliderWidgetStepSub


class RangeSliderWidgetStepSub(supermod.RangeSliderWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
        super(RangeSliderWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
supermod.RangeSliderWidgetStep.subclass = RangeSliderWidgetStepSub
# end class RangeSliderWidgetStepSub


class PhotoUploadWidgetStepSub(supermod.PhotoUploadWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, value=None):
        super(PhotoUploadWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, value, )
supermod.PhotoUploadWidgetStep.subclass = PhotoUploadWidgetStepSub
# end class PhotoUploadWidgetStepSub


class GPSLocationWidgetStepSub(supermod.GPSLocationWidgetStep):
    def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, horizontalAccuracy=None, verticalAccuracy=None, latitude=None, longitude=None, altitude=None, timestamp=None):
        super(GPSLocationWidgetStepSub, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, horizontalAccuracy, verticalAccuracy, latitude, longitude, altitude, timestamp, )
supermod.GPSLocationWidgetStep.subclass = GPSLocationWidgetStepSub
# end class GPSLocationWidgetStepSub
class MyDigiPassEidProfileSub(supermod.MyDigiPassEidProfile):
def __init__(self, firstName=None, firstName3=None, lastName=None, gender=None, nationality=None, dateOfBirth=None, locationOfBirth=None, nobleCondition=None, issuingMunicipality=None, cardNumber=None, | |
<gh_stars>1000+
#!/usr/bin/env python3
import sqlite3, re, cgi, os, sys
import pandas as pd
# Configurable Setting
ReportsDirectory = "./"
# End

# Make sure the output directory exists before any report file is written.
if not os.path.exists(ReportsDirectory):
    os.makedirs(ReportsDirectory)

# The PoshC2 SQLite database to report on is the single required CLI argument.
DB = sys.argv[1] if len(sys.argv) > 1 else ""

if not DB:
    print("Usage: python OfflineReportGenerator.py PowershellC2.SQLite")
    # fix: sys.exit(1) instead of bare exit() — exit() comes from the `site`
    # module (absent under python -S / frozen builds) and returned status 0
    # even on this error path.
    sys.exit(1)
if not os.path.exists(DB):
    print("%s Does not exist" % DB)
    sys.exit(1)
# Main program
def replace_tabs(s):
s = s.replace("\t", " ")
return s
PayloadCommsHost = "1.1.1.1"
ServerTAG = "\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nPoshC2 Server\\n%s" % PayloadCommsHost
GV = GV.replace("POSHSERVER", ServerTAG)
implants = get_implants_all_db()
hosts = ""
daisyhosts = ""
for i in implants:
if "Daisy" not in i[15]:
if i[3] not in hosts:
hostname = i[11].replace("\\", "\\\\")
hosts += "\"%s\" -> \"%s \\n %s\\n\\n\\n\\n \"; \n" % (ServerTAG, hostname, i[3])
for i in implants:
if "Daisy" in i[15]:
hostname = i[11].replace("\\", "\\\\")
if "\"%s\\n\\n\\n\\n \" -> \"%s \\n %s\\n\\n\\n\\n \"; \n" % (i[9].replace('\x00', '').replace("\\", "\\\\").replace('@', ' \\n '), hostname, i[3]) not in daisyhosts:
daisyhosts += "\"%s\\n\\n\\n\\n \" -> \"%s \\n %s\\n\\n\\n\\n \"; \n" % (i[9].replace('\x00', '').replace("\\", "\\\\").replace('@', ' \\n '), hostname, i[3])
GV = GV.replace("DAISYHOSTS", daisyhosts)
GV = GV.replace("IMPLANTHOSTS", hosts)
def get_implants_all_db():
    """Return every row of the Implants table, or None if the table is empty.

    Rows are sqlite3.Row objects (column access by name or index).  Reads the
    module-level ``DB`` path.  Preserves the historical None-for-empty return
    contract of this script.
    """
    conn = sqlite3.connect(DB)
    try:
        conn.row_factory = sqlite3.Row
        c = conn.cursor()
        c.execute("SELECT * FROM Implants")
        result = c.fetchall()
        # fetchall() yields [] when empty; keep returning None in that case.
        return result if result else None
    finally:
        # fix: the connection was previously never closed (resource leak).
        conn.close()
def get_htmlimplant(randomuri):
    """Return the Implants row whose RandomURI matches, or None if absent.

    Uses a parameterised query, so *randomuri* cannot inject SQL.  Reads the
    module-level ``DB`` path; the row is a sqlite3.Row object.
    """
    conn = sqlite3.connect(DB)
    try:
        conn.row_factory = sqlite3.Row
        c = conn.cursor()
        c.execute("SELECT * FROM Implants WHERE RandomURI=?", (randomuri,))
        # fetchone() already returns None when no row matches.
        return c.fetchone()
    finally:
        # fix: the connection was previously never closed (resource leak).
        conn.close()
def generate_table(table):
HTMLPre = """<script>
function SearchUser() {
// Declare variables
var input, filter, table, tr, td, i;
input = document.getElementById("SearchUser");
filter = input.value.toUpperCase();
table = document.getElementById("PoshTable");
tr = table.getElementsByTagName("tr");
// Loop through all table rows, and hide those who don't match the search query
for (i = 0; i < tr.length; i++) {
td = tr[i].getElementsByTagName("td")[2];
if (td) {
if (td.innerHTML.toUpperCase().indexOf(filter) > -1) {
tr[i].style.display = "";
} else {
tr[i].style.display = "none";
}
}
}
}
function SearchHost() {
// Declare variables
var input, filter, table, tr, td, i;
input = document.getElementById("SearchHost");
filter = input.value.toUpperCase();
table = document.getElementById("PoshTable");
tr = table.getElementsByTagName("tr");
// Loop through all table rows, and hide those who don't match the search query
for (i = 0; i < tr.length; i++) {
td = tr[i].getElementsByTagName("td")[3];
if (td) {
if (td.innerHTML.toUpperCase().indexOf(filter) > -1) {
tr[i].style.display = "";
} else {
tr[i].style.display = "none";
}
}
}
}
function SearchURL() {
// Declare variables
var input, filter, table, tr, td, i;
input = document.getElementById("SearchURL");
filter = input.value.toUpperCase();
table = document.getElementById("PoshTable");
tr = table.getElementsByTagName("tr");
// Loop through all table rows, and hide those who don't match the search query
for (i = 0; i < tr.length; i++) {
td = tr[i].getElementsByTagName("td")[9];
if (td) {
if (td.innerHTML.toUpperCase().indexOf(filter) > -1) {
tr[i].style.display = "";
} else {
tr[i].style.display = "none";
}
}
}
}
function SearchCommand() {
// Declare variables
var input, filter, table, tr, td, i;
input = document.getElementById("CommandInput");
filter = input.value.toUpperCase();
table = document.getElementById("PoshTable");
tr = table.getElementsByTagName("tr");
// Loop through all table rows, and hide those who don't match the search query
for (i = 0; i < tr.length; i++) {
td = tr[i].getElementsByTagName("td")[3];
if (td) {
if (td.innerHTML.toUpperCase().indexOf(filter) > -1) {
tr[i].style.display = "";
} else {
tr[i].style.display = "none";
}
}
}
}
function SearchOutput() {
// Declare variables
var input, filter, table, tr, td, i;
input = document.getElementById("OutputInput");
filter = input.value.toUpperCase();
table = document.getElementById("PoshTable");
tr = table.getElementsByTagName("tr");
// Loop through all table rows, and hide those who don't match the search query
for (i = 0; i < tr.length; i++) {
td = tr[i].getElementsByTagName("td")[4];
if (td) {
if (td.innerHTML.toUpperCase().indexOf(filter) > -1) {
tr[i].style.display = "";
} else {
tr[i].style.display = "none";
}
}
}
}
function SearchTask() {
// Declare variables
var input, filter, table, tr, td, i;
input = document.getElementById("SearchTask");
filter = input.value.toUpperCase();
table = document.getElementById("PoshTable");
tr = table.getElementsByTagName("tr");
// Loop through all table rows, and hide those who don't match the search query
for (i = 0; i < tr.length; i++) {
td = tr[i].getElementsByTagName("td")[0];
if (td) {
if (td.innerHTML.toUpperCase().indexOf(filter) > -1) {
tr[i].style.display = "";
} else {
tr[i].style.display = "none";
}
}
}
}
// Do some tweaking to markup to make things easier
function tweakMarkup(){
// Add classes to columns
var classes = ['id', 'Label', 'taskid', 'randomuri', 'command', 'output', 'user','ImplantID','RandomURI','User','Hostname','IpAddress','Key','FirstSeen','LastSeen','PID','Proxy','Arch','Domain','Alive','Sleep','ModsLoaded','Pivot']
tbl = document.getElementById("PoshTable");
ths = tbl.getElementsByTagName("th");
for(i=0; i<ths.length; i++){
th = ths[i];
th.className = classes[i]
}
trs = tbl.getElementsByTagName("tr");
for(i=0; i<trs.length; i++){
tr = trs[i]
tds = tr.getElementsByTagName('td');
if(i % 2 == 0){
tr.className = 'even';
}else{
tr.className = 'odd';
}
for(j=0; j<tds.length; j++){
td = tds[j];
td.className = classes[j]
if(td.className.match(/output|Hostname|IpAddress|Key|FirstSeen|LastSeen|PID|Proxy|Arch|Domain|Alive|Sleep|ModsLoaded|Pivot|id|taskid|randomuri|command|output|user|ImplantID|RandomURI|User|Hostname|IpAddress|Key|FirstSeen|LastSeen|PID|Proxy|Arch|Domain|Alive|Sleep|ModsLoaded|Pivot|Label/)){
td.className += ' hidden';
td.innerHTML = '<div>' + td.innerHTML + '</div>';
td.onclick = toggleHide
}
}
}
}
// Click handler for collapsible report cells: flip the owning <td> between
// the 'hidden' (collapsed) and 'shown' (expanded) states.
function toggleHide(evnt){
    // The click may land on the inner wrapper <div>; walk up to the <td>.
    // BUG FIX: `td` and `cls` previously leaked as implicit globals
    // (clobbering the same names used elsewhere, and a TypeError under
    // strict mode); declare them locally.
    var td = evnt.target;
    if (td.nodeName == 'DIV') {
        td = td.parentElement;
    }
    var cls = td.className;
    if (cls.match(/hidden/)) {
        cls = cls.replace('hidden', 'shown');
    } else {
        cls = cls.replace('shown', 'hidden');
    }
    td.className = cls;
}
</script>
<style>
#CommandInput, #OutputInput, #SearchTask, #SearchHost, #SearchUser, #SearchURL {
background-image: url('/css/searchicon.png'); /* Add a search icon to input */
background-position: 10px 12px; /* Position the search icon */
background-repeat: no-repeat; /* Do not repeat the icon image */
width: 100%; /* Full-width */
font-size: 16px; /* Increase font-size */
padding: 12px 20px 12px 40px; /* Add some padding */
border: 1px solid #ddd; /* Add a grey border */
margin-bottom: 12px; /* Add some space below the input */
}
body {
font-family: Verdana, Geneva, Arial, Helvetica, sans-serif;
}
table {
font-family: monospace;
margin: 1em 0;
white-space: pre;
border-spacing: 0;
width: 100%;
table-layout: fixed;
}
table tr {}
table tr.even{
background-color: #f2f2f2
}
table tr th,
table tr td {
text-align: left;
padding: 0.5em;
border: 1px solid #ccc;
}
table tr th {
background-color: #4CAF50;
color: white;
}
table tr td {
vertical-align: top;
}
table tr td.command {
}
table tr td.hidden div,
table tr td.shown div {
    cursor: pointer;
    background: top right url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAHkSURBVDjL3ZNvT1JhGMafb3G+TQqKECNFRIEDcvgXmB5IPNJmTdbC1SQ0S1xzZKXyT41TdpCOMyYtiXS9aW2uD8EbPsHV87RRmyLrdc92vbt/1/U8930/ZLYxASbpSwgz9SCin2+CHtJJwYoLgbITvvcOeN7a4S6NgTB45+cmCucvu8JMFOZCZQHpr0tYO12Ga9cKwpJz5xvIfH+GR2dxRGp+uSOs8Jxv39GKV+/gYS2OlXoSfNECMnMSRKw+hdS3BLI/Mlho3MPUR88lE+++ozlfjWG1kYJUCcNRsMCWM4NM02vf/hTgwsf+1uLpfTw4mcOtQ0G9aCDINiWmRiAdiAz+HTC6Nfi3QKx6uckjT3Pi0K1c1QPnzojahtsi3Zr2L/rfDGin5fE3o+pVxeYXRmVw3dA0Pddzfwz8Co82LFVERMuTbEyXJjGUMaqBgoBQ0Qfjmq5lWO3n9E/76IK8s4PCYHCytoDZgwhsWXPzosGNdYPszY1jTonBnxVgSuuhe6KhyfRDJGsJ3P0gQSqLDG7RBeE6PeF6Wie7X/MI5N2YLonoX+oFce1ZsXicQOJoHs68FdbNznBbAytaREthSHIE2lQPCF8cgT0/jLHtIQbD8sqEbrBuWYM+mqx93ANN8hp+AQOPtI0tirA3AAAAAElFTkSuQmCC);
    background-repeat: no-repeat;
    overflow: scroll;
    /* BUG FIX: 'break-all' is not a valid value for word-wrap
       (overflow-wrap), so browsers dropped the declaration entirely.
       'break-word' gives the intended wrapping of long unbroken output. */
    word-wrap: break-word;
    white-space: normal;
    min-height: 25px;
    width: 100%;
}
table tr td.shown div {
background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAHqSURBVDjL3ZHbThpRFIZ5i3kcLRYPqIgUGcDhNKBAqyKCobTR2NhiKmCstcWmBmtLPaCO4CQ6SBWVKInx0N70KbjhCf7O3ia0ZS686F0vVrL3Xvv7VvIvFQBVuOITQxfe6tj5IEPu9xW/ZxGcu2aJnAksxW9eYP42hmB5oBY48zAjJ240QoP7HH3j8xYhWgwiUgiAyxpFlTxZmL2ewvrPNBJX0wid+TF0zCsEHtEKGcbT4igWK0k8OwzBumGo0uZoeUCYuZzE0vUcVn6k8OSbUyFwyfDbSgKvShOIFsZgWTfU2K96pv5huOSm8KfvS/AXHAqBQ2CxcJFAsjwDe5YFgWkGdzCPoSMXHhed8BXs8B7YFALbVh/6Nx+RyWAzevR91qEu+Jf6X<KEY>);
}
table tr td.output {
width: 100px;
}
table tr td.hidden div {
height: 1em;
overflow: hidden;
cursor: pointer;
}
table tr th.id {
width: 3%;
min-width: 3em;
}
table tr th.taskid {
width: 12%;
}
table tr th.randomuri {
width: 15%;
}
table tr th.user {
width: 10%;
}
p {
margin-left: 20px;
font-size: 12px;
}
</style>
<pre>
__________ .__. _________ ________
\_______ \____ _____| |__ \_ ___ \ \_____ \
| ___/ _ \/ ___/ | \ / \ \/ / ____/
| | ( <_>)___ \| Y \ \ \____/ \
|____| \____/____ >___| / \______ /\_______
\/ \/ \/ \/
================= www.PoshC2.co.uk ===============
</pre>
"""
if table == "Tasks":
HTMLPre += """<input type="text" id="SearchTask" onkeyup="SearchTask()" placeholder="Search for task..">
<input type="text" id="CommandInput" onkeyup="SearchCommand()" placeholder="Search for command..">
<input type="text" id="OutputInput" onkeyup="SearchOutput()" placeholder="Search for output..">
"""
if table == "Implants":
HTMLPre += """<input type="text" id="SearchHost" onkeyup="SearchHost()" placeholder="Search for host..">
<input type="text" id="SearchUser" onkeyup="SearchUser()" placeholder="Search for user..">
<input type="text" id="SearchURL" onkeyup="SearchURL()" placeholder="Search for URL..">
"""
conn = sqlite3.connect(DB)
pd.set_option('display.max_colwidth', -1)
pd.options.mode.chained_assignment = None
frame = pd.read_sql_query("SELECT * FROM %s" % table, conn)
# encode the Output column
if table == "Tasks":
for index, row in frame.iterrows():
frame.loc[index, "Command"] = replace_tabs(cgi.escape(row["Command"]))
frame.loc[index, "Output"] = replace_tabs(cgi.escape(row["Output"]))
# convert the random uri to original hostname
if table == "Tasks":
framelen = frame['RandomURI'].count()
for x in range(0, framelen):
try:
frame['RandomURI'][x]
a = get_htmlimplant(str(frame['RandomURI'][x]))
frame['RandomURI'][x] = a[2] + " @ " + a[3]
except Exception as e:
print(e)
a = "None"
reportname = "%s%s.html" % (ReportsDirectory, table)
output_file = open(reportname, 'w')
HTMLPost = (frame.to_html(classes='table', index=False, escape=False)).replace("\\r\\n", "</br>")
HTMLPost = HTMLPost.replace("\\n", "</br>")
HTMLPost = re.sub(u'\x00', '', HTMLPost)
HTMLPost = HTMLPost.replace(" <td>", " <td class=\"TableColumn\">")
HTMLPost = HTMLPost.replace("<tr style=\"text-align: right;\">", "<tr>")
HTMLPost = HTMLPost.replace("<table border=\"1\" class=\"dataframe table\">", "<table id=\"PoshTable\" border=\"1\" class=\"PoshTableClass\">")
HTMLPost = HTMLPost.replace("<th>CompletedTaskID</th>", "<th class=\"CompletedTaskID\">ID</th>")
HTMLPost = HTMLPost.replace("<th>ID</th>", "<th class=\"ID\">ID</th>")
HTMLPost = HTMLPost.replace("<th>Label</th>", "<th class=\"Label\">Label</th>")
HTMLPost = HTMLPost.replace("<th>TaskID</th>", "<th class=\"TaskID\">TaskID</th>")
HTMLPost = HTMLPost.replace("<th>RandomURI</th>", "<th class=\"RandomURI\">RandomURI</th>")
HTMLPost = HTMLPost.replace("<th>Command</th>", "<th class=\"Command\">Command</th>")
HTMLPost = HTMLPost.replace("<th>Output</th>", "<th class=\"Output\">Output</th>")
HTMLPost = HTMLPost.replace("<th>User</th>", "<th class=\"User\">User</th>")
HTMLPost = HTMLPost.replace("<th>ImplantID</th>", "<th class=\"ImplantID\">ImplantID</th>")
HTMLPost = HTMLPost.replace("<th>User</th>", "<th class=\"User\">User</th>")
HTMLPost = HTMLPost.replace("<th>Hostname</th>", "<th class=\"Hostname\">Hostname</th>")
HTMLPost = HTMLPost.replace("<th>IpAddress</th>", "<th class=\"IpAddress\">IpAddress</th>")
HTMLPost = HTMLPost.replace("<th>Key</th>", "<th | |
self.res_layer_5 = nn.Sequential(
resnet.Bottleneck(2048, 256, 256, 1, True, 2, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 256, 256, 1, True, 1, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 256, 512, 1, True, 1, 1, nn.BatchNorm2d),
)
self.res_layer_6 = nn.Sequential(
resnet.Bottleneck(512, 256, 256, 1, True, 2, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 256, 256, 1, True, 1, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 256, 256, 1, True, 1, 1, nn.BatchNorm2d),
)
self.init_pyramid = nn.Sequential(
resnet.Bottleneck(256, 256, 256, 1, True, 1, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 256, 256, 1, True, 1, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 256, 256, 1, True, 1, 1, nn.BatchNorm2d),
)
self.class_logits = nn.Sequential(
resnet.Bottleneck(256, 64, 256, 1, True, 1, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 64, 256, 1, True, 1, 1, nn.BatchNorm2d),
resnet.Bottleneck(256, 64, num_classes, 1, True, 1, 1, nn.BatchNorm2d),
)
self.conv_6 = resnet.Bottleneck(256, 64, 256, 1, True, 1, 1, nn.BatchNorm2d)
self.conv_5 = resnet.Bottleneck(256, 64, 256, 1, True, 1, 1, nn.BatchNorm2d)
self.conv_4 = resnet.Bottleneck(256, 64, 256, 1, True, 1, 1, nn.BatchNorm2d)
self.conv_3 = resnet.Bottleneck(256, 64, 256, 1, True, 1, 1, nn.BatchNorm2d)
self.conv_2 = resnet.Bottleneck(256, 64, 256, 1, True, 1, 1, nn.BatchNorm2d)
self.conv_s = [self.conv_6, self.conv_5, self.conv_4, self.conv_3, self.conv_2]
self.out_6 = nn.Conv2d(256, 2, 1)
self.out_5 = nn.Conv2d(256, 2, 1)
self.out_4 = nn.Conv2d(256, 2, 1)
self.out_3 = nn.Conv2d(256, 2, 1)
self.out_2 = nn.Conv2d(256, 2, 1)
self.out_s = [self.out_6, self.out_5, self.out_4, self.out_3, self.out_2]
self.chn256_6 = nn.Conv2d(256+1, 256, 1)
self.chn256_5 = nn.Conv2d(512+1, 256, 1)
self.chn256_4 = nn.Conv2d(2048+1, 256, 1)
self.chn256_3 = nn.Conv2d(1024+1, 256, 1)
self.chn256_2 = nn.Conv2d(512+1, 256, 1)
self.chn256_s = [self.chn256_6, self.chn256_5, self.chn256_4, self.chn256_3, self.chn256_2]
self.upmix256_5 = nn.Conv2d(256+512+1, 256, 1)
self.upmix256_4 = nn.Conv2d(256+2048+1, 256, 1)
self.upmix256_3 = nn.Conv2d(256+1024+1, 256, 1)
self.upmix256_2 = nn.Conv2d(256+512+1, 256, 1)
self.upmix256_s = [None, self.upmix256_5, self.upmix256_4, self.upmix256_3, self.upmix256_2]
self.test0 = {}
self.test1 = {}
self.log_dict = {}
self.loss_evaluators = []
self.mask_conv_0 = nn.Sequential(
res_conv(256+1, 256),
res_conv(256, 256),
nn.Conv2d(256, 2, 1)
)
self.mask_conv_1 = nn.Conv2d(256+1, 1, 3, padding=1)
self.mask_conv_2 = nn.Conv2d(256+1, 1, 3, padding=1)
self.mask_conv_3 = nn.Conv2d(256+1, 1, 3, padding=1)
self.mask_conv_4 = nn.Conv2d(256+1, 1, 3, padding=1)
self.mask_conv_0_bn_0 = nn.BatchNorm2d(2)
self.cs_criteron = nn.CrossEntropyLoss()
self.class_criteron = nn.CrossEntropyLoss()
self.mask_convs = [self.mask_conv_0, self.mask_conv_1, self.mask_conv_2, self.mask_conv_3, self.mask_conv_4]
self.low_thresh = 0.2
self.cs_loss_factor = 1.0
self.miss_loss_factor = 1.0
self.class_loss_factor = 1.0
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _init_target(self, img_tensor_shape, device, target=None):
target_ori_mask = target.get_field('masks').get_mask_tensor().unsqueeze(0).to(device)
target_shape = (1, target_ori_mask.shape[-3]) + img_tensor_shape
target_mask_pad_to_img = target_ori_mask.new(*target_shape).zero_()
target_mask_pad_to_img[:,:,:target.size[1], :target.size[0]] = target_ori_mask
target_levels = {}
target_levels[0] = target_mask_pad_to_img
level_shape = ((img_tensor_shape[0]+1)//2, (img_tensor_shape[1]+1)//2)
target_levels[1] = F.interpolate(target_mask_pad_to_img.float(), level_shape, mode='nearest').type(target_mask_pad_to_img.dtype)
level_shape = ((level_shape[0]+1)//2, (level_shape[1]+1)//2)
target_levels[2] = F.interpolate(target_mask_pad_to_img.float(), level_shape, mode='nearest').type(target_mask_pad_to_img.dtype)
level_shape = ((level_shape[0]+1)//2, (level_shape[1]+1)//2)
target_levels[3] = F.interpolate(target_mask_pad_to_img.float(), level_shape, mode='nearest').type(target_mask_pad_to_img.dtype)
level_shape = ((level_shape[0]+1)//2, (level_shape[1]+1)//2)
target_levels[4] = F.interpolate(target_mask_pad_to_img.float(), level_shape, mode='nearest').type(target_mask_pad_to_img.dtype)
level_shape = ((level_shape[0]+1)//2, (level_shape[1]+1)//2)
target_levels[5] = F.interpolate(target_mask_pad_to_img.float(), level_shape, mode='nearest').type(target_mask_pad_to_img.dtype)
level_shape = ((level_shape[0]+1)//2, (level_shape[1]+1)//2)
target_levels[6] = F.interpolate(target_mask_pad_to_img.float(), level_shape, mode='nearest').type(target_mask_pad_to_img.dtype)
level_shape = ((level_shape[0]+1)//2, (level_shape[1]+1)//2)
target_levels[7] = F.interpolate(target_mask_pad_to_img.float(), level_shape, mode='nearest').type(target_mask_pad_to_img.dtype)
# import pdb; pdb.set_trace()
# print([t.shape for t in target_levels.values()])
target_levels['labels'] = target.get_field('labels')
return target_levels
    def compute_mask(self, level, feature, pyramids, is_init=False):
        """Run the mask head at one level for every instance pyramid.

        Each pyramid contributes a per-instance gaussian location mask
        (presumably centred on its seed position -- see InstancePyramid)
        that is concatenated to the feature map as an extra channel.  On a
        pyramid's first level (``is_init=True``) the init branch also
        produces classification logits; on later levels the previous
        level's mask logits are upsampled and mixed back in.  Results are
        stored on the pyramid objects via ``set_mask_logits`` /
        ``set_mask`` (side effects only; nothing is returned).

        Args:
            level: index into the per-level conv stacks (0 = coarsest).
            feature: feature map for a single image, shape (1, C, H, W).
            pyramids: InstancePyramid objects updated in place.
            is_init: True when these pyramids were newly created at `level`.
        """
        for j, pyramid in enumerate(pyramids):
            # feature_gaussian_mask = pyramid.get_feature_gaussian_mask(level, feature.shape[-2:]).to(feature.device)
            feature_gaussian_mask = pyramid.get_feature_gaussian_mask(level, feature[0,0]).to(feature.device)
            if is_init:
                # Init branch: feature + gaussian channel -> per-level 1x1
                # mix -> shared init stack -> per-level conv -> mask.
                conv_in = torch.cat((feature, feature_gaussian_mask[None, None,:,:]), dim=1)
                chn256 = self.chn256_s[level](conv_in)
                x_init = self.init_pyramid(chn256)
                mask_logits = self.conv_s[level](x_init)
                pyramid.set_mask_logits(level, mask_logits)
                mask = self.out_s[level](mask_logits)
                pyramid.set_mask(level, mask)
                # import pdb; pdb.set_trace()
                # Classification logits exist only at the init level; global
                # average pooling collapses the spatial dimensions.
                class_logits = self.class_logits(chn256)
                class_logits = F.adaptive_avg_pool2d(class_logits, (1,1)).squeeze(-1).squeeze(-1)
                pyramid.class_logits = class_logits
            else:
                # Refine branch: upsample last level's logits to this
                # level's resolution and mix with feature + gaussian channel.
                last_mask_logits = pyramid.get_mask_logits(level-1)
                up_size = tuple(feature.shape[-2:])
                last_mask_logits_up = F.interpolate(last_mask_logits, up_size, mode='bilinear', align_corners=False)
                conv_in = torch.cat((last_mask_logits_up, feature, feature_gaussian_mask[None, None,:,:]), dim=1)
                out = self.upmix256_s[level](conv_in)
                mask_logits = self.conv_s[level](out)
                pyramid.set_mask_logits(level, mask_logits)
                mask = self.out_s[level](mask_logits)
                pyramid.set_mask(level, mask)
            # if mask_logits.max() > 1e20 or torch.isnan(mask_logits.max()):
            # Debug trap: drop into pdb as soon as the logits go NaN.
            if torch.isnan(mask_logits.max()):
                import pdb; pdb.set_trace()
        # return feature_gaussian_mask
def compute_loss(self, level, pyramids, target_levels, target_support_pyramids):
# TODO: multi target cross entropy loss
losses = []
miss_losses = []
class_losses = []
# covered_idx = []
for pyramid in pyramids:
mask_logits = pyramid.get_mask(level)
if pyramid.target_idx:
target_mask = target_levels[7-level][0, [pyramid.target_idx]]
loss_cs = self.cs_criteron(mask_logits, target_mask.squeeze(1).long())
losses.append(loss_cs)
# if loss_cs > 1e20 or torch.isnan(loss_cs):
if torch.isnan(loss_cs):
import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
if pyramid.init_level == level:
target_label = target_levels['labels'][[pyramid.target_idx]]
loss_class = self.class_criteron(pyramid.class_logits, target_label)
class_losses.append(loss_class)
# import pdb; pdb.set_trace()
# TODO: 检查未被追踪的target_idx
# TODO: 避免惩罚该target 指导的pyramid, 不存在这个问题。。。
for i, t_match_list in enumerate(target_support_pyramids):
if not t_match_list: # target 没有match到pyramid
miss_target_map = target_levels[7-level][0, i]
if miss_target_map.sum():
miss_pos = miss_target_map.nonzero()
# import pdb; pdb.set_trace()
all_masks = torch.cat([i_p.get_mask(level) for i_p in pyramids], dim=1)
loss_miss = all_masks[0,:,miss_pos[:,0], miss_pos[:,1]].mean()
cs_loss = sum(loss for loss in losses)/len(losses)\
if len(losses) else mask_logits.sum()*0
miss_loss = sum(loss for loss in miss_losses) / len(miss_losses)\
if len(miss_losses) else mask_logits.sum()*0
class_loss = sum(loss for loss in class_losses) / len(class_losses)\
if len(class_losses) else mask_logits.sum()*0
resloss = cs_loss * self.cs_loss_factor + miss_loss * self.miss_loss_factor + \
class_loss * self.class_loss_factor
# resloss = (sum(loss for loss in losses)/len(losses)\
# if len(losses) else mask_logits.sum()*0) * self.cs_loss_factor + \
# (sum(loss for loss in miss_losses) / len(miss_losses)\
# if len(miss_losses) else mask_logits.sum()*0) * self.miss_loss_factor
return resloss
    # Only newly created pyramids may be matched to targets here; existing
    # matches must stay stable across levels for consistency.
    def match_target(self, level, pyramids, target_levels, target_support_pyramids):
        """Bind pyramids to ground-truth targets at the given level.

        For every pyramid, look up which target masks are "on" at the
        pyramid's seed pixel and register the pyramid as a supporter of
        each such target.  When one pixel is covered by several targets the
        conflict is resolved with a smaller-target-first policy, and
        already-assigned bindings are kept where possible.

        Mutates ``target_support_pyramids`` (one list of pyramid ids per
        target) and the pyramids' target bindings in place.
        """
        for pyr in pyramids:
            target_idxs = target_levels[7-level][0, :, pyr.pos[0], pyr.pos[1]].nonzero()
            for i, target_idx in enumerate(target_idxs):
                target_idx_int = target_idx[0].item()
                target_support_pyramids[target_idx_int].append(pyr.idx)
                # Resolve one pixel hitting multiple targets.  Core policy:
                # smaller targets take priority; existing bindings are not
                # changed unless they must yield.
                # NOTE(review): pyr.target_idx == 0 is falsy, so a pyramid
                # bound to target 0 would take the "unbound" branch below --
                # confirm target index 0 cannot occur here.
                if pyr.target_idx:
                    # print('target_idxs', target_idxs)
                    # print('pyr.target_idx:', pyr.target_idx)
                    target_map_last = target_levels[7-level][0, pyr.target_idx]
                    target_map_now = target_levels[7-level][0, target_idx_int]
                    # The previously bound target already has other
                    # supporters, so this pyramid may yield to the new one.
                    if len(target_support_pyramids[pyr.target_idx]) > 1:
                        target_support_pyramids[pyr.target_idx].remove(pyr.idx)
                        pyr.bind_target(target_idx_int)
                    # The new target already has other supporters: stay put.
                    elif len(target_support_pyramids[target_idx_int]) > 1:
                        target_support_pyramids[target_idx_int].remove(pyr.idx)
                        continue
                    # Identical masks (duplicate target): keep old binding.
                    elif (target_map_now == target_map_last).all():
                        target_support_pyramids[target_idx_int].remove(pyr.idx)
                        continue
                    # Smaller target wins the pyramid.
                    elif target_map_now.sum() < target_map_last.sum():
                        target_support_pyramids[pyr.target_idx].remove(pyr.idx)
                        pyr.bind_target(target_idx_int)
                    elif target_map_now.sum() > target_map_last.sum():
                        target_support_pyramids[target_idx_int].remove(pyr.idx)
                        continue
                    else:
                        target_support_pyramids[target_idx_int].remove(pyr.idx)
                        continue
                    # import pdb; pdb.set_trace()
                else:
                    pyr.bind_target(target_idx_int)
def forward_ori(self, image, targets=None):
x_img = image.tensors
xs_r50 = self.r50(x_img)
xs_r50.append(self.res_layer_5(xs_r50[-1]))
xs_r50.append(self.res_layer_6(xs_r50[-1]))
new_pos_limit_1 = 50
new_pos_limit_2 = 50
new_pos_limit_3 = 50
N, _, img_size_h, img_size_w = x_img.shape
device = x_img.device
level_sizes = [tuple(f.shape[-2:]) for f in xs_r50[::-1]]
losses = {}
losses_0 = []
losses_1 = []
losses_2 = []
losses_3 = []
losses_4 = []
test_masks = []
for i in range(N):
InstancePyramid.inst_count = 0
curr_level = 0
x_curr = xs_r50[-1]
init_pos = torch.nonzero(torch.ones_like(x_curr[0][0]))
inst_pyramids = [InstancePyramid(pos, curr_level, level_sizes) for pos in init_pos]
if x_curr[[i]].abs().max() > 1e19 or torch.isnan(x_curr[[i]].max()):
import pdb; pdb.set_trace()
self.compute_mask(curr_level, x_curr[[i]], inst_pyramids, True)
self.log_dict.update({'pyr_num_l0': len(inst_pyramids)})
if self.training:
target_levels = self._init_target((img_size_h, img_size_w ), device, targets[i])
target_support_pyramids_0 = [[] for k in range(target_levels[7].shape[1])]
# 统计 target 匹配
self.match_target(0, inst_pyramids, target_levels, target_support_pyramids_0)
loss_0 = self.compute_loss(0, inst_pyramids, target_levels, target_support_pyramids_0)
losses_0.append(loss_0)
curr_level = 1
x_curr = xs_r50[-2]
if x_curr[[i]].abs().max() > 1e20 or torch.isnan(x_curr[[i]].max()):
import pdb; pdb.set_trace()
# 生成 upsample mask,对现有的mask pyramids
self.compute_mask(curr_level, x_curr[[i]], inst_pyramids)
# TODO: 考虑其他的new_masks计算方法,比如说 multi target cross entropy loss 中的单一channel
new_masks_minus = torch.cat([i_p.get_mask(curr_level)[:,[1]] - i_p.get_mask(curr_level)[:,[0]] for i_p in inst_pyramids], dim=1)
new_masks_softmax = F.softmax(new_masks_minus,dim=1)
avg_sharing = 1.0 / len(inst_pyramids)
num_pixels = int(new_masks_softmax.shape[-1]*new_masks_softmax.shape[-2])
# top_percent = new_masks_softmax.view(-1).topk(int(num_pixels*(1-0.3)))[0][-1].item()
# max_topk = new_masks_softmax.max(dim=1)[0].view(-1).topk(num_pixels-3)[0][-1].item()
max_topk = new_masks_softmax.max(dim=1)[0].view(-1).topk(8, largest=False)[0][-1].item()
# 这里非常的有趣,保证最少选拔8人,如果KOL话语权占不到5%,那就诞生新的KOL proposal
# pending_thresh越高,新增的new_pos越多 所以 max_topk 应该是保底, 应该配合比例
pending_thresh = max(0.02, max_topk)
new_pos = torch.nonzero(new_masks_softmax[0].max(dim=0)[0] < pending_thresh)
if len(new_pos) > new_pos_limit_1:
# import pdb; pdb.set_trace()
raw_pos = new_masks_softmax.max(dim=1)[0].view(-1).topk(new_pos_limit_1, largest=False)[1]
new_pos_0 = raw_pos // x_curr.shape[-1]
new_pos_1 = raw_pos % x_curr.shape[-1]
new_pos = torch.cat((new_pos_0.view(-1,1), new_pos_1.view(-1,1)), dim=1)
new_occupy = 1.0*len(new_pos) / x_curr.shape[-2] / x_curr.shape[-1]
# if new_occupy > 0.5 or len(new_pos) <8:
if new_occupy > 0.5 or len(new_pos) <8-1:
print('new_occupy:{}| len(new_pos):{}'.format(new_occupy, len(new_pos)))
# import pdb; pdb.set_trace()
new_pyramids = [InstancePyramid(pos, curr_level, level_sizes) for pos in new_pos]
self.compute_mask(curr_level, x_curr[[i]], new_pyramids, True)
# 出清没有领地的pyramid 在所有pixel都进不了前3
# 统计没有pyramid的targets
# 额外惩罚霸占位置的pyramid,保护弱势应得的 pyramid
merit_pyramids_idx = new_masks_softmax.topk(2, dim=1)[1].unique()
# merit_pyramids_idx = new_masks_softmax.topk(3, dim=1)[1].unique()
merit_pyramids = [inst_pyramids[i] for i in range(len(inst_pyramids)) if i in merit_pyramids_idx]
target_len_1_before = sum([len(l) for l in target_support_pyramids_0])
for i2 in range(len(inst_pyramids)):
if i2 not in merit_pyramids_idx:
die_id = inst_pyramids[i2].idx
die_target_idx = inst_pyramids[i2].target_idx
if die_target_idx:
target_support_pyramids_0[die_target_idx].remove(die_id)
target_len_1_after = sum([len(l) for l in target_support_pyramids_0])
# if target_len_1_before != target_len_1_after:
# import pdb; pdb.set_trace()
inst_pyramids = merit_pyramids + new_pyramids
self.log_dict.update({'pyr_num_l1': len(inst_pyramids)})
if self.training:
self.match_target(curr_level, new_pyramids, target_levels, target_support_pyramids_0)
loss_1 = self.compute_loss(curr_level, inst_pyramids, target_levels, target_support_pyramids_0)
losses_1.append(loss_1)
# import pdb; pdb.set_trace()
curr_level = 2
x_curr = xs_r50[-3]
if x_curr[[i]].abs().max() > 1e20 or torch.isnan(x_curr[[i]].max()):
# import pdb; pdb.set_trace()
continue
self.compute_mask(curr_level, x_curr[[i]], inst_pyramids)
new_masks_minus = torch.cat([i_p.get_mask(curr_level)[:,[1]] - i_p.get_mask(curr_level)[:,[0]] for i_p in inst_pyramids], dim=1)
new_masks_softmax = F.softmax(new_masks_minus,dim=1)
max_topk | |
isinstance(tensor_in, tuple):
tensor_in = tensor_in[0]
with tf.variable_scope("deconv_bn_relu1_"+str(layer)) as scope:
bn = deconv_bn1(tensor_in, 0, params, mtrain)
tensor_out = relu1(bn, 0, params, mtrain)
return tensor_out
def conv_bn_relu_max_pool1(tensor_in=None, layer=0, params=None, mtrain=None):
    """conv -> batch-norm -> relu followed by max pooling, in its own scope."""
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    scope_name = "conv_bn_relu_max_pool1_" + str(layer)
    with tf.variable_scope(scope_name) as scope:
        activated = conv_bn_relu1(tensor_in, 0, params, mtrain)
        pooled = max_pool1(activated, 0, params, mtrain)
    return pooled
def resnet_unit1(tensor_in=None, layer=0, params=None, mtrain=None):
    """
    ResNet residual unit, variant 1.
    Residual unit with 2 sub layers, using Plan A for shortcut connection
    (parameter-free: average pooling carries the stride, zero padding
    covers any channel increase).
    Note: mutates params["conv"] and params["avg_pool"] in place for the
    helper calls below.
    """
    params["conv"]["padding"] = "SAME"
    stride = params["conv"]["stride"]
    # The shortcut's avg-pool mirrors the conv stride so shapes line up.
    params["avg_pool"]["shape"] = [stride[0], stride[1]]
    params["avg_pool"]["stride"] = [stride[0], stride[1]]
    params["avg_pool"]["padding"] = "SAME"
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    x_shape = tensor_in.get_shape()
    in_filter = int(x_shape[3])
    out_filter = params["conv"]["number"]
    with tf.variable_scope("resnet_unit1_"+str(layer)) as scope:
        relu = conv_bn_relu1(tensor_in, 0, params, mtrain)
        # Only the first conv is strided; the second runs at stride 1.
        params["conv"]["stride"] = [1, 1]
        bn = conv_bn1(relu, 0, params, mtrain)
        orig_x = avg_pool1(tensor_in, 0, params, mtrain)
        if in_filter != out_filter:
            # Plan A: zero-pad the channel dim instead of a projection conv.
            pad1 = (out_filter - in_filter) // 2
            pad2 = out_filter - in_filter - pad1
            orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [pad1, pad2]])
        tensor_out = relu1(tf.add(bn, orig_x), 0, params, mtrain)
    return tensor_out
def resnet_unit2(tensor_in=None, layer=0, params=None, mtrain=None):
    """Bottleneck residual unit (variant 2): 1x1 reduce -> 3x3 (optionally
    dilated) -> 1x1 expand, with an optional projection conv on the
    shortcut branch.

    Reads its configuration from params["resnet_unit"] and rewrites
    params["conv"] before each helper call.
    """
    cfg = params["resnet_unit"]
    depth_output = cfg["depth_output"]
    depth_bottle = cfg["depth_bottle"]
    use_branch = cfg["use_branch"]
    shape = cfg["shape"]
    stride = cfg["stride"]
    rate = cfg["rate"]
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    depth_input = tensor_in.get_shape().as_list()[-1]
    with tf.variable_scope("resnet_unit2_" + str(layer)) as scope:
        # Shortcut path: projection conv when requested, identity otherwise.
        if use_branch:
            params["conv"] = {"number": depth_output, "shape": shape, "rate": 1, "stride": stride, "padding": "VALID"}
            shortcut = conv_bn1(tensor_in, 0, params, mtrain)
        else:
            shortcut = tensor_in
        # Residual path: reduce, transform, expand.
        params["conv"] = {"number": depth_bottle, "shape": shape, "rate": 1, "stride": stride, "padding": "VALID"}
        residual = conv_bn_relu1(tensor_in, 0, params, mtrain)
        params["conv"] = {"number": depth_bottle, "shape": [3, 3], "rate": rate, "stride": [1, 1], "padding": "SAME"}
        residual = conv_bn_relu1(residual, 1, params, mtrain)
        params["conv"] = {"number": depth_output, "shape": [1, 1], "rate": 1, "stride": [1, 1], "padding": "VALID"}
        residual = conv_bn1(residual, 1, params, mtrain)
        tensor_out = relu1(shortcut + residual, 0, params, mtrain)
    return tensor_out
def resnet_unit3(tensor_in=None, layer=0, params=None, mtrain=None):
    """
    ResNet residual unit, variant 3: pre-activation bottleneck
    (bn-relu-conv ordering).  The shortcut is identity (or a 1x1 max-pool
    when strided) if the depth is unchanged, otherwise a 1x1 projection
    taken after a shared bn+relu.
    Note: mutates params["conv"] and params["max_pool"] in place.
    """
    depth_output = params["resnet_unit"]["depth_output"]
    depth_bottle = params["resnet_unit"]["depth_bottle"]
    stride = params["resnet_unit"]["stride"]
    rate = params["resnet_unit"]["rate"]
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    x_shape = tensor_in.get_shape().as_list()
    depth_input = x_shape[-1]
    with tf.variable_scope("resnet_unit3_"+str(layer)) as scope:
        if depth_output == depth_input:
            # Same depth: identity shortcut, or a 1x1 max-pool to match a
            # strided residual path.
            if stride == 1:
                shortcut = tensor_in
            else:
                params["max_pool"] = {"shape": [1, 1], "stride": [stride, stride], "padding": "VALID"}
                shortcut = max_pool1(tensor_in, 0, params, mtrain)
            params["conv"] = {"number": depth_bottle, "shape": [1, 1], "rate": 1, "stride": [stride, stride], "padding": "VALID"}
            residual = bn_relu_conv1(tensor_in, 0, params, mtrain)
        else:
            # Depth change: one shared pre-activation feeds both the 1x1
            # projection shortcut and the first bottleneck conv.
            tensor_in = bn_relu1(tensor_in, 0, params, mtrain)
            params["conv"] = {"number": depth_output, "shape": [1, 1], "rate": 1, "stride": [stride, stride], "padding": "VALID"}
            shortcut = conv1(tensor_in, 0, params, mtrain)
            params["conv"] = {"number": depth_bottle, "shape": [1, 1], "rate": 1, "stride": [stride, stride], "padding": "VALID"}
            residual = conv1(tensor_in, 1, params, mtrain)
        params["conv"] = {"number": depth_bottle, "shape": [3, 3], "rate": rate, "stride": [1, 1], "padding": "SAME"}
        residual = bn_relu_conv1(residual, 1, params, mtrain)
        params["conv"] = {"number": depth_output, "shape": [1, 1], "rate": 1, "stride": [1, 1], "padding": "VALID"}
        residual = bn_relu_conv1(residual, 2, params, mtrain)
        tensor_out = shortcut + residual
    print_activations(tensor_out)
    return tensor_out
def resnet_block2(tensor_in=None, layer=0, params=None, mtrain=None):
    """Stack of type-2 resnet units driven by a block_setting table.

    Each entry of params["resnet_block"]["block_setting"] is
    (depth_output, depth_bottle, shape, stride, unit_number, unit_trainable).
    Once the accumulated stride reaches ``output_stride``, further striding
    is replaced by dilation (atrous convolution) so spatial resolution is
    preserved.  Returns the output tensor of every block as a list.
    """
    block_setting = params["resnet_block"]["block_setting"]
    output_stride = params["resnet_block"]["output_stride"]
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    rate = 1            # current dilation rate
    current_stride = 1  # accumulated output stride so far
    tensor_out = tensor_in
    out_list = []
    for i, block in enumerate(block_setting):
        depth_output, depth_bottle, shape, stride, unit_number, unit_trainable = block
        params["com"]["trainable"] = unit_trainable
        with tf.variable_scope("resnet_block2_"+str(layer)+"_"+str(i)) as scope:
            for j in range(unit_number):
                if j == 0: # the first unit in the block carries stride + projection
                    if current_stride == output_stride:
                        stride = [1, 1]
                        # NOTE(review): this runs after stride was reset to
                        # [1, 1], so it always multiplies by 1 -- compare
                        # resnet_block3, which multiplies by the original
                        # unit stride.  Confirm whether the dilation rate
                        # was meant to grow here.
                        rate *= stride[0]
                    else:
                        rate = 1
                        current_stride *= stride[0]
                    params["resnet_unit"] = {"depth_output": depth_output, "depth_bottle": depth_bottle, "use_branch": True, \
                        "shape": shape, "stride": stride, "rate": rate}
                else: # identity mapping
                    params["resnet_unit"] = {"depth_output": depth_output, "depth_bottle": depth_bottle, "use_branch": False, \
                        "shape": [1, 1], "stride": [1, 1], "rate": rate}
                tensor_out = resnet_unit2(tensor_out, j, params, mtrain)
        out_list.append(tensor_out)
    if output_stride is not None and current_stride != output_stride:
        raise ValueError('The target output_stride cannot be reached.')
    return out_list
def resnet_block3(tensor_in=None, layer=0, params=None, mtrain=None):
    """Stack of type-3 (pre-activation) resnet units.

    Each params["resnet_block"]["block_setting"] entry is a list of units,
    each unit being (depth_output, depth_bottle, stride, trainable).
    Strides are converted to dilation once ``output_stride`` is reached.
    Returns the bn+relu'd output of every block as a list.
    """
    block_setting = params["resnet_block"]["block_setting"]
    output_stride = params["resnet_block"]["output_stride"]
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    current_stride = 1
    rate = 1
    tensor_out = tensor_in
    out_list = []
    for i, block in enumerate(block_setting):
        with tf.variable_scope("resnet_block3_"+str(layer)+"_"+str(i)) as scope:
            for j, unit in enumerate(block):
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError('The target output_stride cannot be reached.')
                unit_depth_output, unit_depth_bottle, unit_stride, unit_trainable = unit
                params["com"]["trainable"] = unit_trainable
                if output_stride is not None and current_stride == output_stride:
                    # Resolution target reached: run at stride 1 and grow
                    # the dilation rate instead.
                    params["resnet_unit"] = {"depth_output": unit_depth_output, "depth_bottle": unit_depth_bottle,
                        "stride": 1, "rate": rate}
                    tensor_out = resnet_unit3(tensor_out, j, params, mtrain)
                    rate *= unit_stride # update bookkeeping after the unit runs
                else:
                    params["resnet_unit"] = {"depth_output": unit_depth_output, "depth_bottle": unit_depth_bottle,
                        "stride": unit_stride, "rate": 1}
                    tensor_out = resnet_unit3(tensor_out, j, params, mtrain)
                    current_stride *= unit_stride # update bookkeeping after the unit runs
        out_list.append(bn_relu1(tensor_out, 0, params, mtrain))
    if output_stride is not None and current_stride != output_stride:
        raise ValueError('The target output_stride cannot be reached.')
    return out_list
def pyramid1(tensor_in=None, layer=0, params=None, mtrain=None):
    """FPN-style top-down feature pyramid: each level adds a lateral 1x1
    conv of the backbone feature to the bilinearly-upsampled coarser map,
    then every level is smoothed by a 3x3 conv.  Returned fine-to-coarse,
    matching the order of the input feature list.
    """
    depth = params["pyramid"]["depth"]
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    num_layers = len(tensor_in)
    levels = []
    with tf.variable_scope("pyramid1_" + str(layer)) as scope:
        params["conv"] = {"number": depth, "shape": [1, 1], "rate": 1, "stride": [1, 1], "padding": "SAME"}
        # Seed the pyramid with the coarsest input map.
        levels.append(conv_bn_relu1(tensor_in[-1], 0, params, mtrain))
        # Top-down pass: upsample the previous level, add the lateral conv.
        for j, i in enumerate(range(num_layers - 2, -1, -1)):
            coarse, lateral = levels[j], tensor_in[i]
            lateral_shape = tf.shape(lateral)
            coarse = tf.image.resize_bilinear(coarse, [lateral_shape[1], lateral_shape[2]])
            params["conv"]["shape"] = [1, 1]
            lateral = conv_bn_relu1(lateral, 1 + j, params, mtrain)
            levels.append(tf.add(coarse, lateral))
        # Smooth every level with a 3x3 conv.
        for i in range(num_layers):
            params["conv"]["shape"] = [3, 3]
            levels[i] = conv_bn_relu1(levels[i], num_layers + i, params, mtrain)
        levels.reverse()
    return levels
def pyramid2(tensor_in=None, layer=0, params=None, mtrain=None):
    """Top-down decoder: the coarser map is upsampled, concatenated with
    the next finer input, and fused with a 1x1 then 3x3 conv.  Only the
    finest map is returned, after one more 3x3 conv.
    """
    depth = params["pyramid"]["depth"]
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    num_layers = len(tensor_in)
    levels = []
    with tf.variable_scope("pyramid2_" + str(layer)) as scope:
        levels.append(tensor_in[-1])
        params["conv"] = {"rate": 1, "stride": [1, 1], "padding": "SAME"}
        for j, i in enumerate(range(num_layers - 2, -1, -1)):
            coarse, fine = levels[j], tensor_in[i]
            fine_shape = tf.shape(fine)
            merged = tf.image.resize_bilinear(coarse, [fine_shape[1], fine_shape[2]])
            merged = tf.concat([merged, fine], axis=-1)
            params["conv"] = {"shape": [1, 1], "number": depth[j], "rate": 1, "stride": [1, 1], "padding": "SAME"}
            merged = conv_bn_relu1(merged, 2 * j, params, mtrain)
            params["conv"] = {"shape": [3, 3], "number": depth[j], "rate": 1, "stride": [1, 1], "padding": "SAME"}
            merged = conv_bn_relu1(merged, 1 + 2 * j, params, mtrain)
            levels.append(merged)
        # Final 3x3 conv over the finest (last appended) level.
        params["conv"] = {"shape": [3, 3], "number": depth[-1], "rate": 1, "stride": [1, 1], "padding": "SAME"}
        tensor_out = conv_bn_relu1(levels[-1], 2 * (num_layers - 1), params, mtrain)
    return tensor_out
def pyramid3(tensor_in=None, layer=0, params=None, mtrain=None):
    """
    Pyramid variant 3 (DSSD-style deconvolution module).

    Walks the backbone features from coarse to fine: the running pyramid
    map is deconvolved up to the next finer feature's size, both paths go
    through conv + batch-norm, and they are fused by elementwise
    multiplication (DSSD) followed by a relu.  Returns the levels
    reordered fine-to-coarse to match the input list.
    """
    depth = params["pyramid"]["depth"]
    if isinstance(tensor_in, tuple):
        tensor_in = tensor_in[0]
    num_layers = len(tensor_in)
    pyramid = []
    with tf.variable_scope("pyramid3_"+str(layer)) as scope:
        params["conv"] = {"number": depth, "shape": [3, 3], "rate": 1, "stride": [1, 1], "padding": "SAME"}
        # "out_shape" here is a placeholder; it is overwritten per level below.
        params["deconv"] = {"number": depth, "shape": [2, 2], "rate": 1, "stride": [2, 2], "padding": "SAME", "out_shape":[28, 28]}
        p = tensor_in[-1]
        pyramid.append(p)
        for i in range(num_layers-2, -1, -1):
            j = num_layers - 2 - i
            p, c = pyramid[j], tensor_in[i]
            c_shape = c.get_shape()
            params["deconv"]["out_shape"] = [int(c_shape[1]), int(c_shape[2])]
            p = deconv1(p, j, params, mtrain)
            p = conv_bn1(p, 0+2*j, params, mtrain)
            c = conv_bn_relu1(c, j, params, mtrain)
            c = conv_bn1(c, 1+2*j, params, mtrain)
            # DSSD fuses by elementwise product (addition kept for reference).
            p = tf.multiply(p, c)
            #p = tf.add(p, c)
            p = relu1(p, 0, params, mtrain)
            pyramid.append(p)
        pyramid = pyramid[::-1]
    return pyramid
def pyramid4(tensor_in=None, layer=0, params=None, mtrain=None):
"""
第四种类型的pyramid #DSSD
"""
depth = params["pyramid"]["depth"]
if isinstance(tensor_in, tuple):
tensor_in = tensor_in[0]
num_layers = len(tensor_in)
pyramid = []
with tf.variable_scope("pyramid4_"+str(layer)) as scope:
params["conv"] = {"number": depth, "shape": [3, 3], "rate": 1, "stride": [1, 1], "padding": "SAME"}
params["deconv"] = {"number": depth, "shape": [2, 2], "rate": 1, "stride": [2, 2], "padding": "SAME", "out_shape":[28, 28]}
p = tensor_in[-1]
p = conv_bn_relu1(p, 0, params, mtrain)
p = conv_bn_relu1(p, 1, params, mtrain)
pyramid.append(p)
for i in range(num_layers-2, -1, -1):
j = num_layers - 2 - i
p, c = pyramid[j], tensor_in[i]
c_shape = c.get_shape()
params["deconv"]["out_shape"] = [int(c_shape[1]), int(c_shape[2])]
p = deconv_bn1(p, j, params, mtrain)
c = conv_bn_relu1(c, 2+2*j, params, mtrain)
c = conv_bn1(c, j, params, mtrain)
p = tf.add(p, | |
"""
Peg Solitaire Puzzle Implementation
Author: <NAME>, <NAME>
This program implements the peg solitaire game for 3 different game boards.
Solvability is not guaranteed on at least one...yet.
This program uses code adapted from the following websites.
# Example program to show using an array to back a grid on-screen.
#
# Sample Python/Pygame Programs
# Simpson College Computer Science
# http://programarcadegames.com/
# http://simpson.edu/computer-science/
# Explanation video: http://youtu.be/mdTeqiWyFn
"""
import board_solver as bs
import os
import pygame
import sys
# import termios
import time
import copy
import random
import json
# import tty
from pprint import pprint, pformat
from queue import PriorityQueue
# from termcolor import cprint
from typing import FrozenSet, List, Tuple
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (50, 50, 255)
GOLD = (255, 215, 0)
DKGREEN = (0, 100, 0)
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 50
HEIGHT = 50
# This sets the margin between each cell
MARGIN = 5
# Number of cells per side of the (square) game board.
# NOTE: main() reassigns this global from the first loaded board.
N_SQ = 7
# Cell states used in the board grid.
PEG_NONE = 0  # empty hole
PEG_EXIST = 1  # peg present
PEG_SELECT = 2  # peg currently selected by the player
PEG_WALL = 3  # off-board (wall) square
mouseDown = False
# This class represents a navigation button (forward/back arrow) used to
# switch between game boards; it is drawn with plain Pygame surfaces.
class Button:
    """A clickable arrow button for navigating between puzzle boards."""

    # Arrow polygon vertex offsets, relative to the button's top-left corner.
    _FORWARD_ARROW = ((5, 2), (17, 9), (17, 10), (5, 17))
    _BACKWARD_ARROW = ((17, 2), (5, 9), (5, 10), (17, 17))

    def __init__(self, origin, advanceBoard):
        """Create a button at `origin`; `advanceBoard` picks its direction."""
        # Whether this button advances (True) or retreats (False) the board list.
        self.adv = advanceBoard
        # Fixed pixel dimensions of the button.
        self.width = 20
        self.height = 20
        # Solid-color surface backing the button.
        self.image = pygame.Surface([self.width, self.height])
        self.image.fill(DKGREEN)
        self.rect = self.image.get_rect()
        self.rect.x = origin[0]
        self.rect.y = origin[1]
        # Absolute screen coordinates of the arrow polygon's vertices.
        arrow = self._FORWARD_ARROW if self.adv else self._BACKWARD_ARROW
        self.point_list = [[self.rect.x + dx, self.rect.y + dy]
                           for dx, dy in arrow]

    def update(self, screen, board_list, og_list, idx):
        """Handle a mouse click; returns the (grid, idx) to display next.

        If the click landed on this button and a neighbouring board exists,
        that board is reset to its original layout and returned.
        """
        click_x, click_y = pygame.mouse.get_pos()
        current = board_list[idx]
        center_x = self.rect.x + self.width // 2
        center_y = self.rect.y + self.height // 2
        # The mouse click was not meant for this button (more than 8px away
        # from its center) -> no change.
        if abs(click_x - center_x) > 8 or abs(click_y - center_y) > 8:
            return current, idx
        # Already at the end (or the start) of the board list -> no change.
        if self.adv and idx >= len(board_list) - 1:
            return current, idx
        if not self.adv and idx <= 0:
            return current, idx
        # Step forward or backward through the boards.
        idx = idx + 1 if self.adv else idx - 1
        # Restore the destination board to its pristine layout (reset).
        board_list[idx] = copy.deepcopy(og_list[idx])
        print("Button clicked: " + str(self.adv))
        return board_list[idx], idx

    def drawIcon(self, screen):
        """Render the button background and its arrow polygon."""
        pygame.draw.rect(screen, DKGREEN, self.rect)
        pygame.draw.polygon(screen, GOLD, self.point_list)
def mouseColorSpace(grid, screen):
    """Handle a mouse click on the board.

    Converts the click position to a (row, column) cell and applies the
    selection state machine:
      * empty hole two cells from the selected peg -> attempt a jump;
      * the selected peg itself -> deselect it;
      * another peg while one is selected -> move the selection;
      * a peg while nothing is selected -> select it.

    Returns the (possibly mutated) grid and the screen unchanged.
    """
    # User clicks the mouse. Get the position
    pos = pygame.mouse.get_pos()
    # Convert pixel position to grid cell indices (cells are WIDTH x HEIGHT
    # pixels separated by MARGIN).
    column = pos[0] // (WIDTH + MARGIN)
    row = pos[1] // (HEIGHT + MARGIN)
    coord = list([-1, -1])  # [-1, -1] means "no peg currently selected"
    # Find which peg is considered selected amongst them all.
    for r in range(N_SQ):
        for c in range(N_SQ):
            if grid[r][c] == PEG_SELECT:
                coord = list([r, c])
    # If the mouse is clicked within the board's border, do the following.
    # NOTE(review): `or` lets clicks that are out of range on one axis through;
    # they are no-ops because the inner coordinate match never fires, but
    # `and` was presumably intended -- confirm.
    if column < N_SQ or row < N_SQ:
        # Look through all to see if any further action is needed
        for r in range(N_SQ):
            for c in range(N_SQ):
                # continue loop if we're not looking at the square touched by the user or it's a wall
                if [r, c] != [row, column]:
                    continue
                # We're looking at the same coord touched by mouse
                # Case 1: clicked an empty hole two cells away from the
                # selected peg -> attempt a jump (jumpPeg validates further).
                if (
                    grid[r][c] == PEG_NONE
                    and coord != [-1, -1]
                    and (abs(coord[0] - r) == 2 or abs(coord[1] - c) == 2)
                ):
                    grid = jumpPeg(grid, coord, r, c)
                # Case 2: clicked the currently selected peg -> deselect.
                elif [r, c] == coord:
                    grid[coord[0]][coord[1]] = PEG_EXIST
                # Case 3: clicked a different peg while one is selected ->
                # move the selection to the clicked peg.
                elif (
                    coord != [-1, -1]
                    and coord != [r, c]
                    and (grid[r][c] != PEG_NONE and grid[r][c] != PEG_WALL)
                ):
                    grid[coord[0]][coord[1]] = PEG_EXIST
                    grid[r][c] = PEG_SELECT
                # Case 4: nothing selected yet -> select the clicked peg.
                elif coord == [-1, -1] and (
                    grid[r][c] != PEG_NONE and grid[r][c] != PEG_WALL
                ):
                    grid[r][c] = PEG_SELECT
    print("Click ", pos, "Grid coordinates: ", row, column)
    return grid, screen
def jumpPeg(grid, selCoord, row, col):
    """
    Provides the logic for jumping pegs in the solitaire game
    Pre-condition: 2 pegs and 1 hole in a line, the selected peg is at the end of this
    line, [i.e. (P P H), (H P P)]
    Post-condition: 2 holes and 1 peg in a line, [i.e. (H H P), (P H H)]
    We know that the given selCoord should be a selected peg and the 'row' and 'col' are
    the hole.
    Returns the (possibly mutated) grid.
    """
    # Reject diagonal moves: a jump must stay within one row or one column.
    if row != selCoord[0] and col != selCoord[1]:
        return grid
    # Coordinates of the square being jumped over.
    midRow = (row + selCoord[0]) // 2
    midCol = (col + selCoord[1]) // 2
    # The jumped square must hold a peg. BUG FIX: the original only rejected
    # PEG_NONE, which allowed jumping over PEG_WALL squares on boards with
    # interior walls (e.g. the H board), turning a wall into a hole.
    if grid[midRow][midCol] in (PEG_NONE, PEG_WALL):
        return grid
    # Preconditions hold: place the peg in the hole and empty the two
    # squares behind it.
    grid[row][col] = PEG_EXIST
    grid[midRow][midCol] = PEG_NONE
    grid[selCoord[0]][selCoord[1]] = PEG_NONE
    return grid
def makeTestBoards(board_list):
    """Append the three built-in test boards (cross, square, H) to board_list.

    Each board is an N_SQ x N_SQ grid of cell-state constants: PEG_WALL for
    off-board squares, PEG_EXIST for pegs, and a single starting hole
    (PEG_NONE) at cell (3, 3) -- the center of the default 7x7 board -- since
    peg solitaire needs at least one hole.

    Returns board_list with the three new boards appended.
    """

    def _build_board(is_wall):
        # Build one grid: walls where the predicate says, pegs everywhere
        # else, plus the mandatory starting hole.
        grid = [
            [PEG_WALL if is_wall(row, column) else PEG_EXIST
             for column in range(N_SQ)]
            for row in range(N_SQ)
        ]
        grid[3][3] = PEG_NONE  # was a literal 0; PEG_NONE is the same value
        return grid

    def _cross_wall(row, column):
        # The four 2x2 corner regions are walled off, forming a cross.
        return (row < 2 or row >= 5) and (column < 2 or column >= 5)

    def _h_wall(row, column):
        # Walls above and below the middle of the center column form an "H".
        return (row < 2 or row >= 5) and column == 3

    board_list.append(_build_board(_cross_wall))
    # The square board has no walls at all.
    board_list.append(_build_board(lambda row, column: False))
    board_list.append(_build_board(_h_wall))
    return board_list
def main(frozensets=None):
# Reference the global variable N_SQ
global N_SQ
# Process a file to get frozen sets
gameBoards = []
diff = ["easier", "harder"]
colors = [RED, BLUE]
color_labels = ["RED", "BLUE"]
if frozensets == None:
fzs = bs.process_frozen_sets(sys.argv[1])
else:
fzs = frozensets
for i, fz in enumerate(fzs):
gameBoards.append(bs.PegSolitaire(fz))
# Shuffle the game boards and difficulty labels in the same way
shuffled_temp = list(zip(gameBoards, diff))
random.shuffle(shuffled_temp)
gameBoards, diff = zip(*shuffled_temp)
gameBoards = [b for b in gameBoards]
# TODO - We need to build in Jacob's predetermination of what boards are shown using board colors.
# TODO - Need 'record' keyword for checking which board was considered harder
boardIdx = 0
# Preserve the original board configurations
originals = copy.deepcopy(gameBoards)
# Grab the number of squares in one row of a game board
N_SQ = len(gameBoards[0][0])
# Initialize pygame
pygame.init()
# Set the HEIGHT and WIDTH of the screen
navWinOrigin = [0, N_SQ * (HEIGHT + MARGIN) + MARGIN]
navWinHeight = HEIGHT * 4
windowSize = [
N_SQ * (WIDTH + MARGIN) + MARGIN,
N_SQ * (HEIGHT + MARGIN) + MARGIN + navWinHeight,
]
screen = pygame.display.set_mode(windowSize)
# Set title of screen
pygame.display.set_caption("Peg Solitaire Puzzles")
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
leftButton = Button(
[
windowSize[0] // 2 - (N_SQ * WIDTH // 2 // 2),
navWinOrigin[1] + (N_SQ * HEIGHT // 2 // 2),
],
False,
)
rightButton = Button(
[
windowSize[0] - (N_SQ * WIDTH // 2),
navWinOrigin[1] + (N_SQ * HEIGHT // 2 // 2),
],
True,
)
button_list = [leftButton, rightButton]
grid = gameBoards[boardIdx]
# Loop until | |
from typing import Optional, Tuple
import numpy as np
from . import c_radavg2
def radavg(data: np.ndarray, dataerr: Optional[np.ndarray], mask: Optional[np.ndarray],
           wavelength: float, wavelength_err: float,
           distance: float, distance_err: float,
           pixelsize: float, pixelsize_err: float,
           center_row: float, center_row_err: float,
           center_col: float, center_col_err: float,
           qbins: Optional[np.ndarray],
           errorprop: int = 3, qerrorprop: int = 3,
           sector_direction: Optional[float] = None,
           sector_width: Optional[float] = None,
           sector_symmetric: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Perform radial averaging on a scattering pattern.
    Inputs:
        data (np.ndarray, two dimensions, double dtype): scattering pattern
        dataerr (np.ndarray, two dimensions, double dtype): uncertainties of the scattering
            pattern; if None, an all-ones array of the same shape as `data` is used
        mask (np.ndarray, two dimensions, uint8 dtype): mask matrix
        wavelength (double): X-ray wavelength, in nm
        wavelength_err (double): uncertainty of the X-ray wavelength, in nm
        distance (double): sample-to-detector distance, in mm
        distance_err (double): uncertainty of the sample-to-detector distance, in mm
        pixelsize (double): the length of the edge of a square pixel, in mm
        pixelsize_err (double): uncertainty of the pixel size, in mm
        center_row (double): beam center position, row coordinate, in pixel units
        center_row_err (double): uncertainty of the beam center row coordinate, in pixel units
        center_col (double): beam center position, column coordinate, in pixel units
        center_col_err (double): uncertainty of the beam center column coordinate, in pixel units
        qbins (np.ndarray, one dimension, double dtype): centers of the q-bins, 1/nm;
            if None, the binning is derived automatically (c_radavg2.autoq)
        errorprop (int, 0-3 inclusive): error propagation type for intensities (see below)
        qerrorprop (int, 0-3 inclusive): error propagation type for q (see below)
        sector_direction (double or None): direction of the sector (in degrees)
        sector_width (double or None): full angular width of the sector (in degrees)
        sector_symmetric (bool): if the opposite sector is needed, too
    Returns: q, Intensity, Error, qError, Area, pixel
        (all one-dimensional np.ndarrays, length of `qbins`)
        q (dtype: double): scattering variable
        Intensity (dtype: double): intensity
        Error (dtype: double): propagated uncertainty of the intensity
        qError (dtype: double): propagated uncertainty of q
        Area (dtype: uint32): number of pixels falling into the bins
        pixel (dtype: double): pixel coordinate of the bin (simple mean)
    Requirements:
        - `qbins` must be in ascending order and must not contain NaNs or infs.
        - `data`, `dataerr` and `mask` must be of the same type
        - values of 0 in `mask` indicate invalid pixels. All other values correspond to valid ones.
        - beam center coordinates are expressed in pixels, starting from 0.
    Error propagation types (parameters `errorprop` and `qerrorprop`):
        0: values falling into the same bin are considered as independent samples from the same quantity.
           The bin mean is a weighted mean of the values using 1/sigma^2 as weight. Error is 1/sqrt(sum(sigma^2)).
        1: linear: error is simply the mean of errors
        2: squared: error is the square root of the mean of squared errors
        3: conservative: either `squared` or the RMS of all values, whichever is larger.
    Units:
        - wavelength and qbins must be compatible (nm vs 1/nm or Angström vs. 1/Angström, etc.)
        - beam centers are expected in pixel units
        - distance and pixel size must be expressed in the same units (mm, cm, etc.)
    Binning:
        - Bin centers are given in `qbins`.
        - Bin edges are determined as the mean of two neighbouring bin centers.
        - The left edge of the first bin is the first element in `qbins`.
        - The right edge of the last bin is the last element in `qbins`.
    Sector averaging:
        - if both `sector_direction` and `sector_width` is defined (not None), the averaging is limited to azimuthal
          sectors. Otherwise a full azimuthal averaging is performed.
        - `sector_direction` is the azimuth angle corresponding to the center of the sector. If the pixel
          (row=0, column=0) is top left, then the zero angle points towards right. The positive angular direction is
          counterclockwise.
        - `sector_width` is the full angular width of the sector
        - if `sector_symmetric` is True, the other sector (at angle `sector_direction` + 180°) is also used.
    """
    if qbins is None:
        # Derive the q-binning automatically from the geometry and the mask.
        qbins = c_radavg2.autoq(mask, wavelength, distance, pixelsize, center_row, center_col)
    if sector_width is not None and sector_direction is not None:
        # Restrict the mask to the requested azimuthal sector(s); the C
        # routine expects radians and a half-width.
        mask = c_radavg2.maskforsectors(mask, center_row, center_col,
                                        phicenter=sector_direction / 180 * np.pi,
                                        phihalfwidth=sector_width / 180 * np.pi * 0.5,
                                        symmetric=sector_symmetric)
    q, I, E, qE, A, p = c_radavg2.radavg(data=data,
                                         dataerr=dataerr if dataerr is not None else np.ones_like(data),
                                         mask=mask,
                                         wavelength=wavelength, wavelength_unc=wavelength_err,
                                         distance=distance, distance_unc=distance_err,
                                         pixelsize=pixelsize, pixelsize_unc=pixelsize_err,
                                         center_row=center_row, center_row_unc=center_row_err,
                                         center_col=center_col, center_col_unc=center_col_err,
                                         qbincenters=qbins,
                                         errorprop=errorprop,
                                         qerrorprop=qerrorprop
                                         )
    return q, I, E, qE, A, p
def fastradavg(data: np.ndarray, mask: np.ndarray, center_row: float, center_col: float,
               dmin: float, dmax: float, N: int,
               sector_direction: Optional[float] = None, sector_width: Optional[float] = None,
               sector_symmetric: Optional[bool] = True):
    """
    Fast radial averaging of a scattering pattern.

    A reduced version of the full radial averaging routine, significantly
    faster because it skips several features: no error propagation is done,
    only pixel coordinates are treated (not the "q" scattering variable), and
    the abscissa is fixed to `N` linearly spaced bins between `dmin` and
    `dmax`.

    Inputs:
        data (np.ndarray, two dimensions, dtype: double): scattering pattern
        mask (np.ndarray, two dimensions, dtype: uint8): mask matrix
        center_row (double): row coordinate of the beam center
        center_col (double): column coordinate of the beam center
        dmin (double): smallest pixel for the abscissa
        dmax (double): largest pixel for the abscissa
        N (Py_ssize_t): number of pixels
        sector_direction (double or None): direction of the sector (in degrees)
        sector_width (double or None): full angular width of the sector (in degrees)
        sector_symmetric (bool): if the opposite sector is needed, too

    Outputs: pixel, Intensity, Area (one-dimensional np.ndarrays of length `N`)
        pixel (dtype: double): pixel coordinate of the bin
        Intensity (dtype: double): intensity of the bin
        Area (dtype: uint32): number of pixels in the bin

    Sector averaging: when both `sector_direction` and `sector_width` are
    given (not None), averaging is limited to the azimuthal sector centered
    at `sector_direction` (degrees; zero points right when pixel (0, 0) is
    top left, positive direction counterclockwise) with full width
    `sector_width`. With `sector_symmetric`, the opposite sector
    (`sector_direction` + 180°) is included as well.
    """
    want_sector = (sector_direction is not None) and (sector_width is not None)
    if want_sector:
        # The C routine works with radians and a half-width.
        phi_center = sector_direction / 180 * np.pi
        phi_halfwidth = sector_width / 180 * np.pi * 0.5
        mask = c_radavg2.maskforsectors(mask, center_row, center_col,
                                        phicenter=phi_center,
                                        phihalfwidth=phi_halfwidth,
                                        symmetric=sector_symmetric)
    pixel, intensity, area = c_radavg2.fastradavg(
        data=data, mask=mask,
        center_row=center_row, center_col=center_col,
        dmin=dmin, dmax=dmax, N=N)
    return pixel, intensity, area
def azimavg(data: np.ndarray, dataerr: Optional[np.ndarray], mask: Optional[np.ndarray],
wavelength: float,
distance: float,
pixelsize: float,
center_row: float, center_row_err: float,
center_col: float, center_col_err: float,
N: int,
errorprop: int = 3, phierrorprop: int = 3,
interval: Optional[Tuple[float, float]] = None,
limitsinq: bool = True):
"""
Perform azimuthal averaging on a scattering pattern.
Inputs:
data (np.ndarray, two dimensions, double dtype): scattering pattern
error (np.ndarray, two dimensions, double dtype): uncertainties of the scattering pattern
mask (np.ndarray, two dimensions, uint8 dtype): mask matrix
wavelength (double): X-ray wavelength, in nm
distance (double): sample-to-detector distance, in mm
pixelsize (double): the length of the edge of a square pixel, in mm
center_row (double): beam center position, row coordinate, in pixel units
center_row_unc (double): uncertainty of the beam center row coordinate, in pixel units
center_col (double): beam center position, column coordinate, in pixel units
center_col_unc (double): uncertainty of the beam center column coordinate, in pixel units
N (int): number of bins
errorprop (int, 0-3 inclusive): error propagation type for intensities (see below)
qerrorprop (int, 0-3 inclusive): error propagation type for q (see below)
interval (2-tuple of floats): lower and upper bounds of the annulus for limiting the averaging
limitsinq (bool): the two numbers in `interval` are q values (True) or pixel values (False)
Returns: phi, Intensity, Error, phiError, Area, qmean, qstd
(all one-dimensional np.ndarrays, length of `qbincenters`)
phi (dtype: double): azimuth angle in radians, 0 to 2*pi
Intensity (dtype: double): intensity
Error (dtype: double): propagated uncertainty of the intensity
phiError (dtype: double): propagated uncertainty of phi (radians)
Area (dtype: uint32): number of pixels falling into the bins
qmean (dtype: double): average of q values in | |
# Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tff.aggregator for discretizing input values to the integer grid."""
import collections
import numbers
import tensorflow as tf
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
OUTPUT_TF_TYPE = tf.int32
class DiscretizationFactory(factory.UnweightedAggregationFactory):
  """Aggregation factory for discretizing of floating point tensors.

  The created `tff.templates.AggregationProcess` takes an input tensor structure
  and, for each tensor, scales and rounds the values to the integer grid. The
  scaling factor is the same for all component tensors, and the rounding scheme
  can be one of:
    1. Deterministic rounding: each scaled value is deterministically rounded
       to the nearest integer.
    2. Stochastic rounding: each scaled value is stochastically rounded to
       the neighbouring integers. For example, 42.3 has 0.7 probability to be
       rounded to 42 and 0.3 probability to 43.
    3. Conditionally stochastic rounding: Like stochastic rounding, but the
       rounding procedure is resampled if the norm of the rounded vector
       exceeds a pre-computed threshold determined by a constant in [0, 1)
       that controls the concentration inequality for the probabilistic norm
       bound after stochastic rounding. For more details, see Section 4.1 of
       https://arxiv.org/pdf/2102.06387.pdf.

  The structure of the input is kept, and all values of the component tensors
  are scaled, rounded, and cast to tf.int32.

  This aggregator is intended to be used as part of a uniform quantization
  procedure, which, on top of the discretization procedure carried out by this
  aggregator, involves value clipping and (possibly) value shifting.

  This factory only accepts `value_type` of either `tff.TensorType` or
  `tff.StructWithPythonType` and expects the dtype of component tensors to be
  all real floats, and it will otherwise raise an error.
  """

  def __init__(self,
               inner_agg_factory,
               scale_factor=1.0,
               stochastic=False,
               beta=0.0,
               prior_norm_bound=None):
    """Initializes the DiscretizationFactory.

    Args:
      inner_agg_factory: The inner `UnweightedAggregationFactory` to aggregate
        the values after the input values are discretized to the integer grid.
      scale_factor: A positive scaling constant to be applied to the input
        record before rounding to integers. Generally, the larger the factor,
        the smaller the errors from discretization.
      stochastic: A bool constant denoting whether to round stochastically to
        the nearest integer. Defaults to False (deterministic rounding).
      beta: A float constant in [0, 1) controlling the concentration inequality
        for the probabilistic norm bound after stochastic rounding. Ignored if
        `stochastic` is False. Intuitively, this term controls the bias-variance
        trade-off of stochastic rounding: a beta of 0 means the rounding is
        unbiased, but the resulting norm could be larger (thus larger added
        noise when combined with differential privacy); a larger beta means
        the vector norm grows less but at the expense of some bias. Defaults
        to 0 (unconditional stochastic rounding).
      prior_norm_bound: A float constant denoting the global L2 norm bound of
        the inputs (e.g. as a result of global L2 clipping). This is useful when
        `prior_norm_bound` is larger than the input norm, in which case we can
        allow more leeway during conditional stochastic rounding (`beta` > 0).
        If set to None, no prior L2 norm bound is used. Ignored if `stochastic`
        is False or `beta` is 0.

    Raises:
      TypeError: If `inner_agg_factory` is not an instance of
        `tff.aggregators.UnweightedAggregationFactory`
      ValueError: If `scale_factor` is not a positive number.
      ValueError: If `stochastic` is not a boolean constant.
      ValueError: If `beta` is not in the range of [0, 1).
      ValueError: If `prior_norm_bound` is given but is not a positive number.
    """
    if not isinstance(inner_agg_factory, factory.UnweightedAggregationFactory):
      raise TypeError('`inner_agg_factory` must have type '
                      'UnweightedAggregationFactory. '
                      f'Found {type(inner_agg_factory)}.')
    if not isinstance(scale_factor, numbers.Number) or scale_factor <= 0:
      raise ValueError('`scale_factor` should be a positive number. '
                       f'Found {scale_factor}.')
    if not isinstance(stochastic, bool):
      raise ValueError(f'`stochastic` should be a boolean. Found {stochastic}.')
    if not isinstance(beta, numbers.Number) or not 0 <= beta < 1:
      raise ValueError(f'`beta` should be a number in [0, 1). Found {beta}.')
    if prior_norm_bound is not None and (not isinstance(
        prior_norm_bound, numbers.Number) or prior_norm_bound <= 0):
      raise ValueError('If specified, `prior_norm_bound` should be a positive '
                       f'number. Found {prior_norm_bound}.')
    self._scale_factor = float(scale_factor)
    self._stochastic = stochastic
    self._beta = float(beta)
    # Use value 0 to denote no prior norm bounds for easier typing.
    self._prior_norm_bound = prior_norm_bound or 0.0
    self._inner_agg_factory = inner_agg_factory

  def create(self, value_type):
    """Creates an `AggregationProcess` that discretizes before aggregating.

    Args:
      value_type: A `tff.TensorType`, or a `tff.StructWithPythonType` whose
        leaves are all `tff.TensorType`s; all component dtypes must be real
        floats.

    Returns:
      A `tff.templates.AggregationProcess` whose `next` discretizes client
      values, aggregates them with the inner factory's process, and rescales
      the aggregate back to the original dtypes at the server.

    Raises:
      TypeError: If `value_type` is not supported, or if `prior_norm_bound`
        was specified together with a structured `value_type`.
    """
    # Validate input args and value_type and parse out the TF dtypes.
    if value_type.is_tensor():
      tf_dtype = value_type.dtype
    elif (value_type.is_struct_with_python() and
          type_analysis.is_structure_of_tensors(value_type)):
      # A prior L2 bound only makes sense for a single tensor, so reject it
      # for structured inputs.
      if self._prior_norm_bound:
        raise TypeError('If `prior_norm_bound` is specified, `value_type` must '
                        f'be `TensorType`. Found type: {repr(value_type)}.')
      tf_dtype = type_conversions.structure_from_tensor_type_tree(
          lambda x: x.dtype, value_type)
    else:
      raise TypeError('Expected `value_type` to be `TensorType` or '
                      '`StructWithPythonType` containing only `TensorType`. '
                      f'Found type: {repr(value_type)}')
    # Check that all values are floats.
    if not type_analysis.is_structure_of_floats(value_type):
      raise TypeError('Component dtypes of `value_type` must all be floats. '
                      f'Found {repr(value_type)}.')

    discretize_fn = _build_discretize_fn(value_type, self._stochastic,
                                         self._beta)

    @computations.tf_computation(discretize_fn.type_signature.result,
                                 tf.float32)
    def undiscretize_fn(value, scale_factor):
      # Inverse of `discretize_fn`: rescale and restore the original dtypes.
      return _undiscretize_struct(value, scale_factor, tf_dtype)

    # The inner aggregation operates on the discretized (integer) values.
    inner_value_type = discretize_fn.type_signature.result
    inner_agg_process = self._inner_agg_factory.create(inner_value_type)

    @computations.federated_computation()
    def init_fn():
      # Server state: the static scaling parameters plus the state of the
      # inner aggregation process.
      state = collections.OrderedDict(
          scale_factor=intrinsics.federated_value(self._scale_factor,
                                                  placements.SERVER),
          prior_norm_bound=intrinsics.federated_value(self._prior_norm_bound,
                                                      placements.SERVER),
          inner_agg_process=inner_agg_process.initialize())
      return intrinsics.federated_zip(state)

    @computations.federated_computation(init_fn.type_signature.result,
                                        computation_types.at_clients(value_type)
                                       )
    def next_fn(state, value):
      # Broadcast the scaling parameters so clients can discretize locally.
      server_scale_factor = state['scale_factor']
      client_scale_factor = intrinsics.federated_broadcast(server_scale_factor)
      server_prior_norm_bound = state['prior_norm_bound']
      prior_norm_bound = intrinsics.federated_broadcast(server_prior_norm_bound)
      discretized_value = intrinsics.federated_map(
          discretize_fn, (value, client_scale_factor, prior_norm_bound))
      # Delegate aggregation of the integer values to the inner process.
      inner_state = state['inner_agg_process']
      inner_agg_output = inner_agg_process.next(inner_state, discretized_value)
      # Undo the scaling on the aggregate at the server.
      undiscretized_agg_value = intrinsics.federated_map(
          undiscretize_fn, (inner_agg_output.result, server_scale_factor))
      new_state = collections.OrderedDict(
          scale_factor=server_scale_factor,
          prior_norm_bound=server_prior_norm_bound,
          inner_agg_process=inner_agg_output.state)
      measurements = collections.OrderedDict(
          discretize=inner_agg_output.measurements)
      return measured_process.MeasuredProcessOutput(
          state=intrinsics.federated_zip(new_state),
          result=undiscretized_agg_value,
          measurements=intrinsics.federated_zip(measurements))

    return aggregation_process.AggregationProcess(init_fn, next_fn)
def _build_discretize_fn(value_type, stochastic, beta):
  """Builds a `tff.tf_computation` for discretization.

  Args:
    value_type: The `tff.Type` of the values to be discretized; becomes the
      type of the computation's first argument.
    stochastic: Whether to round stochastically (True) or deterministically
      (False). Captured as a Python constant in the traced computation.
    beta: Concentration constant in [0, 1) for conditional stochastic
      rounding; ignored when `stochastic` is False.

  Returns:
    A `tff.tf_computation` mapping (value, scale_factor, prior_norm_bound)
    to the discretized value structure.
  """
  @computations.tf_computation(value_type, tf.float32, tf.float32)
  def discretize_fn(value, scale_factor, prior_norm_bound):
    return _discretize_struct(value, scale_factor, stochastic, beta,
                              prior_norm_bound)
  return discretize_fn
def _discretize_struct(struct, scale_factor, stochastic, beta,
                       prior_norm_bound):
  """Maps every tensor of the structure onto the scaled integer grid.

  Each leaf tensor is cast to float32, multiplied by `scale_factor`, rounded
  (deterministically or stochastically) and cast to `OUTPUT_TF_TYPE`.
  """
  def _to_grid(tensor):
    # Work in float32 so scaling behaves uniformly for all real float inputs.
    as_float = tf.cast(tensor, tf.float32)
    scaled = as_float * scale_factor
    if not stochastic:
      on_grid = tf.round(scaled)
    else:
      # `prior_norm_bound` is 0.0 when no prior bound was supplied, so the
      # scaled bound is 0 in that case as well.
      on_grid = _stochastic_rounding(scaled, prior_norm_bound * scale_factor,
                                     scale_factor, beta)
    return tf.cast(on_grid, OUTPUT_TF_TYPE)

  return tf.nest.map_structure(_to_grid, struct)
def _undiscretize_struct(struct, scale_factor, tf_dtype_struct):
  """Rescales an integer structure back and restores the original dtypes."""
  def _restore(tensor, original_dtype):
    # Invert the scaling in float32, then cast to the caller's dtype.
    rescaled = tf.cast(tensor, tf.float32) / scale_factor
    return tf.cast(rescaled, original_dtype)

  return tf.nest.map_structure(_restore, struct, tf_dtype_struct)
def inflated_l2_norm_bound(l2_norm_bound, gamma, beta, dim):
"""Computes the probabilistic L2 norm bound after stochastic quantization.
The procedure of stochastic quantization can increase the norm of the vector.
This function computes a probabilistic L2 norm bound after the quantization
procedure. See Theorem 1 and Sec 4.1 of https://arxiv.org/pdf/2102.06387.pdf
for more details.
Args:
l2_norm_bound: The L2 norm bound of the vector whose coordinates are to be
stochastically rounded to the specified rounding granularity gamma.
gamma: The rounding granularity. A value of 1 is equivalent to rounding to
the integer grid. Equivalent to the multiplicative inverse of the scale
factor used during the quantization procedure.
beta: A float constant in [0, 1]. See the initializer docstring of the
aggregator for more details.
dim: The dimension of the vector to be rounded.
Returns:
The inflated L2 norm bound after stochastically (possibly conditionally
according to beta) rounding the coordinates to grid specified by the
rounding granularity.
"""
l2_norm_bound = tf.convert_to_tensor(l2_norm_bound)
norm = tf.cast(l2_norm_bound, tf.float32)
gamma = tf.cast(gamma, tf.float32)
gamma_f64 = tf.cast(gamma, tf.float64)
# Use float64 for `dim` as float32 can only represent ints up to 2^24 (~16M).
dim = tf.cast(dim, tf.float64)
beta = tf.cast(beta, tf.float32)
gamma_sqrt_dim = tf.cast(tf.sqrt(dim) * gamma_f64, tf.float32)
beta_term = tf.sqrt(2. * tf.math.log(1. / beta))
bound_1 = norm + gamma_sqrt_dim
squared_bound_2 = tf.square(norm) + 0.25 * tf.square(gamma_sqrt_dim)
squared_bound_2 += beta_term * gamma * (norm + 0.5 * gamma_sqrt_dim)
bound_2 = tf.sqrt(squared_bound_2)
# Fall back to bound_1 if beta == 0.
bound_2 = tf.cond(tf.equal(beta, 0), lambda: bound_1, | |
"""pysemver: Semantic Version comparing for Python.
Provides comparison of semantic versions via SemVer objects using rich comparisons, plus the
possibility to match a selector string against versions. Interesting for version dependencies.
Versions look like: "1.7.12+b.133"
Selectors look like: ">1.7.0 || 1.6.9+b.111 - 1.6.9+b.113"
Example usages:
>>> SemVer(1, 2, 3, build=13)
SemVer("1.2.3+13")
>>> SemVer.valid("1.2.3.4")
False
>>> SemVer.clean("this is unimportant text 1.2.3-2 and will be stripped")
"1.2.3-2"
>>> SemVer("1.7.12+b.133").satisfies(">1.7.0 || 1.6.9+b.111 - 1.6.9+b.113")
True
>>> SemSel(">1.7.0 || 1.6.9+b.111 - 1.6.9+b.113").matches(SemVer("1.7.12+b.133"),
... SemVer("1.6.9+b.112"), SemVer("1.6.10"))
[SemVer("1.7.12+b.133"), SemVer("1.6.9+b.112")]
>>> min(_)
SemVer("1.6.9+b.112")
>>> _.patch
9
Exported classes:
* SemVer(collections.namedtuple())
Parses semantic versions and defines methods for them. Supports rich comparisons.
* SemSel(tuple)
Parses semantic version selector strings and defines methods for them.
* SelParseError(Exception)
An error among others raised when parsing a semantic version selector failed.
Other classes:
* SemComparator(object)
* SemSelAndChunk(list)
* SemSelOrChunk(list)
Functions/Variables/Constants:
none
Copyright (c) 2013 <NAME>, FichteFoll
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above copyright notice and this
permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
import sys
from collections import namedtuple # Python >=2.6
__all__ = ('SemVer', 'SemSel', 'SelParseError')
if sys.version_info[0] == 3:
    # Python 3 compatibility shims: `basestring` and the builtin `cmp`
    # were removed in Python 3, so provide equivalents.
    basestring = str

    def cmp(a, b):
        """Three-way compare: -1 if a < b, 0 if equal, 1 if a > b."""
        greater = a > b
        lesser = a < b
        return greater - lesser
# @functools.total_ordering would be nice here but was added in 2.7, __cmp__ is not Py3
class SemVer(namedtuple("_SemVer", 'major, minor, patch, prerelease, build')):
"""Semantic Version, consists of 3 to 5 components defining the version's adicity.
See http://semver.org/ (2.0.0-rc.1) for the standard mainly used for this implementation, few
changes have been made.
Information on this particular class and their instances:
- Immutable and hashable.
- Subclasses `collections.namedtuple`.
- Always `True` in boolean context.
- len() returns an int between 3 and 5; 4 when a pre-release is set and 5 when a build is
set. Note: Still returns 5 when build is set but not pre-release.
- Parts of the semantic version can be accessed by integer indexing, key (string) indexing,
slicing and getting an attribute. Returned slices are tuple. Leading '-' and '+' of
optional components are not stripped. Supported keys/attributes:
major, minor, patch, prerelease, build.
Examples:
s = SemVer("1.2.3-4.5+6")
s[2] == 3
s[:3] == (1, 2, 3)
s['build'] == '-4.5'
s.major == 1
Short information on semantic version structure:
Semantic versions consist of:
* a major component (numeric)
* a minor component (numeric)
* a patch component (numeric)
* a pre-release component [optional]
* a build component [optional]
The pre-release component is indicated by a hyphen '-' and followed by alphanumeric[1] sequences
separated by dots '.'. Sequences are compared numerically if applicable (both sequences of two
versions are numeric) or lexicographically. May also include hyphens. The existence of a
pre-release component lowers the actual version; the shorter pre-release component is considered
lower. An 'empty' pre-release component is considered to be the least version for this
major-minor-patch combination (e.g. "1.0.0-").
The build component may follow the optional pre-release component and is indicated by a plus '+'
followed by sequences, just as the pre-release component. Comparing works similarly. However the
existence of a build component raises the actual version and may also raise a pre-release. An
'empty' build component is considered to be the highest version for this
major-minor-patch-prerelease combination (e.g. "1.2.3+").
[1]: Regexp for a sequence: r'[0-9A-Za-z-]+'.
"""
# Static class variables
_base_regex = r'''(?x)
(?P<major>[0-9]+)
\.(?P<minor>[0-9]+)
\.(?P<patch>[0-9]+)
(?:\-(?P<prerelease>(?:[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?))?
(?:\+(?P<build>(?:[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?))?'''
_search_regex = re.compile(_base_regex)
_match_regex = re.compile('^%s$' % _base_regex) # required because of $ anchor
# "Constructor"
    def __new__(cls, *args, **kwargs):
        """There are two different constructor styles that are allowed:
        - Option 1 allows specification of a semantic version as a string and the option to "clean"
          the string before parsing it.
        - Option 2 allows specification of each component separately as one parameter.

        Note that all the parameters specified in the following sections can be passed either as
        positional or as named parameters while considering the usual Python rules for this. As
        such, `SemVer(1, 2, minor=1)` will result in an exception and not in `SemVer("1.1.2")`.

        Option 1:
            Constructor examples:
                SemVer("1.0.1")
                SemVer("this version 1.0.1-pre.1 here", True)
                SemVer(ver="0.0.9-pre-alpha+34", clean=False)

            Parameters:
                * ver (str)
                    The string containing the version.
                * clean = `False` (bool; optional)
                    If this is true in boolean context, `SemVer.clean(ver)` is called before
                    parsing.

        Option 2:
            Constructor examples:
                SemVer(1, 0, 1)
                SemVer(1, '0', prerelease='pre-alpha', patch=1, build=34)
                SemVer(**dict(minor=2, major=1, patch=3))

            Parameters:
                * major (int, str, float ...)
                * minor (...)
                * patch (...)
                    Major to patch components must be an integer or convertable to an int (e.g. a
                    string or another number type).

                * prerelease = `None` (str, int, float ...; optional)
                * build = `None` (...; optional)
                    Pre-release and build components should be a string (or number) type.
                    Will be passed to `str()` if not already a string but the final string must
                    match '^[0-9A-Za-z.-]*$'

        Raises:
            * TypeError
                Invalid parameter type(s) or combination (e.g. option 1 and 2).
            * ValueError
                Invalid semantic version or option 2 parameters unconvertable.
        """
        # ver/clean belong to option 1, comps (component list) to option 2.
        ver, clean, comps = None, False, None
        # Work on a copy of kwargs so leftovers can be detected; l = total arg count.
        kw, l = kwargs.copy(), len(args) + len(kwargs)

        def inv():
            # Shared "bad call" error for every invalid parameter combination.
            raise TypeError("Invalid parameter combination: args=%s; kwargs=%s" % (args, kwargs))

        # Do validation and parse the parameters
        if l == 0 or l > 5:
            raise TypeError("SemVer accepts at least 1 and at most 5 arguments (%d given)" % l)

        elif l < 3:
            # Option 1: version string (+ optional clean flag).
            if len(args) == 2:
                ver, clean = args
            else:
                ver = args[0] if args else kw.pop('ver', None)
                clean = kw.pop('clean', clean)
                if kw:
                    # Unconsumed keyword arguments -> invalid combination.
                    inv()
        else:
            # Option 2: separate components; fill missing trailing components
            # from kwargs by field name (major, minor, patch, prerelease, build).
            comps = list(args) + [kw.pop(cls._fields[k], None) for k in range(len(args), 5)]
            if kw or any(comps[i] is None for i in range(3)):
                # Leftover kwargs, or a required numeric component missing.
                inv()

            typecheck = (int,) * 3 + (basestring,) * 2
            for i, (v, t) in enumerate(zip(comps, typecheck)):
                if v is None:
                    # Optional component not given -> leave as None.
                    continue
                elif not isinstance(v, t):
                    try:
                        if i < 3:
                            v = typecheck[i](v)
                        else:  # The real `basestring` can not be instantiated (Py2)
                            v = str(v)
                    except ValueError as e:
                        # Modify the exception message. I can't believe this actually works
                        e.args = ("Parameter #%d must be of type %s or convertable"
                                  % (i, t.__name__),)
                        raise
                    else:
                        comps[i] = v

                if t is basestring and not re.match(r"^[0-9A-Za-z.-]*$", v):
                    raise ValueError("Build and pre-release strings must match '^[0-9A-Za-z.-]*$'")

        # Final adjustments
        if not comps:
            # Option 1 path: optionally clean, then parse the version string.
            if ver is None or clean is None:
                inv()
            ver = clean and cls.clean(ver) or ver
            comps = cls._parse(ver)

        # Create the obj
        return super(SemVer, cls).__new__(cls, *comps)
# Magic methods
def __str__(self):
return ('.'.join(map(str, self[:3]))
+ ('-' + self.prerelease if self.prerelease is not None else '')
+ ('+' + self.build if self.build is not None else ''))
def __repr__(self):
# Use the shortest representation - what would you prefer?
return 'SemVer("%s")' % str(self)
# return 'SemVer(%s)' % ', '.join('%s=%r' % (k, getattr(self, k)) for k in self._fields)
def __len__(self):
return 3 + (self.build is not None and 2 or self.prerelease is not None)
# Magic rich comparing methods
def __gt__(self, other):
return self._compare(other) == 1 if isinstance(other, SemVer) else NotImplemented
def __eq__(self, other):
return self._compare(other) == 0 if isinstance(other, SemVer) else NotImplemented
def __lt__(self, other):
return not (self > other or self == other)
def __ge__(self, other):
return not | |
from itertools import combinations
from typing import List
import numpy as np
from extremitypathfinder.helper_classes import AngleRepresentation, PolygonVertex
# TODO numba precompilation of some parts possible?! do line speed profiling first! speed impact
def inside_polygon(x, y, coords, border_value):
    """Point-in-polygon test via ray casting (crossing number parity).

    :param x: x coordinate of the query point
    :param y: y coordinate of the query point
    :param coords: 2D numpy array with one (x, y) vertex per row
    :param border_value: value to return when the point lies on the boundary
    :return: ``border_value`` for boundary points, otherwise True/False
    """
    # should return the border value for point equal to any polygon vertex
    # TODO overflow possible with large values when comparing slopes, change procedure
    for c in coords[:]:
        if np.all(c == [x, y]):
            return border_value

    # and if the point p lies on any polygon edge
    # NOTE(review): a difference of exactly 2.0 in AngleRepresentation values is
    # presumably "opposite directions" (p between the two edge endpoints) — the
    # exact encoding lives in AngleRepresentation; confirm there.
    p = np.array([x, y])
    p1 = coords[-1, :]
    for p2 in coords[:]:
        if abs((AngleRepresentation(p1 - p).value - AngleRepresentation(p2 - p).value)) == 2.0:
            return border_value
        p1 = p2

    # parity flag: flipped once per counted edge crossing of the horizontal ray
    contained = False
    # the edge from the last to the first point is checked first
    i = -1
    y1 = coords[-1, 1]
    y_gt_y1 = y > y1
    for y2 in coords[:, 1]:
        y_gt_y2 = y > y2
        if y_gt_y1:
            if not y_gt_y2:
                # edge crosses the horizontal line through the point (one endpoint
                # below, one not below) — decide if the crossing is to the right
                x1 = coords[i, 0]
                x2 = coords[i + 1, 0]
                # only crossings "right" of the point should be counted
                x1GEx = x <= x1
                x2GEx = x <= x2
                # compare the slope of the line [p1-p2] and [p-p2]
                # depending on the position of p2 this determines whether the polygon edge is right or left of the point
                # to avoid expensive division the divisors (of the slope dy/dx) are brought to the other side
                # ( dy/dx > a == dy > a * dx )
                if (x1GEx and x2GEx) or ((x1GEx or x2GEx) and (y2 - y) * (x2 - x1) <= (y2 - y1) * (x2 - x)):
                    contained = not contained
        else:
            if y_gt_y2:
                # same crossing test with the edge traversed in the opposite
                # vertical direction, hence the mirrored slope comparison (>=)
                x1 = coords[i, 0]
                x2 = coords[i + 1, 0]
                # only crossings "right" of the point should be counted
                x1GEx = x <= x1
                x2GEx = x <= x2
                if (x1GEx and x2GEx) or ((x1GEx or x2GEx) and (y2 - y) * (x2 - x1) >= (y2 - y1) * (x2 - x)):
                    contained = not contained

        y1 = y2
        y_gt_y1 = y_gt_y2
        i += 1

    return contained
def no_identical_consequent_vertices(coords):
    """Return True iff no two consecutive vertices coincide.

    The polygon is treated as closed: the last vertex is also compared
    against the first one.
    """
    previous = coords[-1]
    for current in coords:
        # TODO adjust allowed difference: rtol, atol
        if np.allclose(previous, current):
            return False
        previous = current
    return True
def get_intersection_status(p1, p2, q1, q2):
    """Classify the intersection of the segments (p1, p2) and (q1, q2).

    Solves (p2-p1)*lambda + p1 == (q2-q1)*mu + q1 for (lambda, mu),
    written in matrix form [(p2-p1) (q1-q2)] (lambda, mu)' = (q1-p1).

    :return: 1 if the segments properly cross in the interior of both,
        0 otherwise (touching only at an endpoint counts as no
        intersection, as do parallel segments).
    """
    # TODO 4 different possibilities (intersections directly in p1/p2 or q1/q2)
    coefficient_matrix = np.array([p2 - p1, q1 - q2]).T
    rhs = np.array(q1 - p1)
    try:
        solution = np.linalg.solve(coefficient_matrix, rhs)
    except np.linalg.LinAlgError:
        # line segments are parallel (singular matrix, no unique solution)
        return 0

    # an intersection exactly at an endpoint (parameter 0.0 or 1.0) is ok
    lam, mu = solution
    if 0.0 < lam < 1.0 and 0.0 < mu < 1.0:
        return 1
    return 0
# special case of has_intersection()
def lies_behind(p1, p2, v):
    """Return True iff vertex v lies strictly behind the edge (p1, p2).

    Solves (p2-p1)*lambda + p1 == v*mu; the scaling factor mu of v at the
    crossing point decides: mu < 1.0 means the edge is reached before v,
    i.e. v lies behind the edge. Vertices exactly on the edge (mu == 1.0)
    are considered visible (strict <).

    The system is assumed solvable: the caller guarantees v lies within
    the angle range between the two edge vertices, so the matrix is regular.
    """
    lin_system = np.array([p1 - p2, v]).T
    solution = np.linalg.solve(lin_system, np.array(p1))
    return solution[1] < 1.0
def no_self_intersection(coords):
    """Return True iff the polygon given by `coords` has no self intersection.

    Checks every pair of non-adjacent edges for a proper (interior) crossing.

    NOTE: `itertools.combinations` always yields index_p1 < index_q1, so the
    original `index_p1 == index_q1 + 1` comparison could never be true (dead
    code); only `index_q1 - index_p1 == 1` marks adjacent edges. The pair of
    the first and the last edge is adjacent as well (they share the first
    vertex); skipping it does not change the result, because intersections
    exactly at an endpoint are not counted by get_intersection_status() and
    any further intersection would require collinearity (singular -> 0).
    """
    polygon_length = len(coords)
    # again_check = []
    for index_p1, index_q1 in combinations(range(polygon_length), 2):
        # always: index_p1 < index_q1
        if index_q1 - index_p1 == 1 or (index_p1 == 0 and index_q1 == polygon_length - 1):
            # neighbouring edges never have a (proper) intersection
            continue
        p1, p2 = coords[index_p1], coords[(index_p1 + 1) % polygon_length]
        q1, q2 = coords[index_q1], coords[(index_q1 + 1) % polygon_length]
        if get_intersection_status(p1, p2, q1, q2) == 1:
            return False

    # if intersect_status == 2:
    # TODO 4 different options. check which side the other edge lies on.
    # if edge changes sides this is a an intersection
    # again_check.append((p1, p2, q1, q2))
    # print(p1, p2, q1, q2)

    # TODO check for intersections across 2 edges! use computed intersection
    return True
def has_clockwise_numbering(coords):
    """ tests if a polygon has clockwise vertex numbering
    approach: Sum over the edges, (x2 − x1)(y2 + y1). If the result is positive the curve is clockwise.
    from:
    https://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order
    :param coords: the list of (x,y) coordinates representing the polygon to be tested
    :return: true if the polygon has been given in clockwise numbering
    """
    # iterate over the closed vertex sequence in consecutive pairs
    closed = [coords[-1]] + list(coords)
    edge_sum = sum(
        (x2 - x1) * (y2 + y1)
        for (x1, y1), (x2, y2) in zip(closed, closed[1:])
    )
    return edge_sum > 0
def check_polygon(polygon):
    """ ensures that all the following conditions on the polygons are fulfilled:
        - must at least contain 3 vertices
        - no consequent vertices with identical coordinates in the polygons! In general might have the same coordinates
        - a polygon must not have self intersections (intersections with other polygons are allowed)

    :param polygon: 2D numpy array of shape (n, 2) holding the vertex coordinates
    :raises TypeError: if the polygon has fewer than 3 vertices or its points are not 2D
    :raises ValueError: if consecutive vertices are identical or the polygon intersects itself
    """
    if not polygon.shape[0] >= 3:
        raise TypeError('Given polygons must at least contain 3 vertices.')
    if not polygon.shape[1] == 2:
        raise TypeError('Each point of a polygon must consist of two values (x,y).')
    if not no_identical_consequent_vertices(polygon):
        # fixed wording: the message previously said "polynomial" instead of "polygon"
        raise ValueError('Consequent vertices of a polygon must not be identical.')
    if not no_self_intersection(polygon):
        raise ValueError('A polygon must not intersect itself.')
    # TODO test
    # todo - polygons must not intersect each other
# TODO test
# todo - polygons must not intersect each other
def check_data_requirements(boundary_coords: np.ndarray, list_hole_coords: List[np.ndarray]):
    """ ensures that all the following conditions on the polygons are fulfilled:
        - basic polygon requirements (s. above)
        - edge numbering has to follow this convention (for easier computations):
            * outer boundary polygon: counter clockwise
            * holes: clockwise

    :param boundary_coords: coordinate array of the outer boundary polygon
    :param list_hole_coords: list of coordinate arrays, one per hole
    :return: None; raises ``TypeError``/``ValueError`` on invalid input
    """
    # the outer boundary: basic checks, then numbering must be counter clockwise
    check_polygon(boundary_coords)
    if has_clockwise_numbering(boundary_coords):
        raise ValueError('Vertex numbering of the boundary polygon must be counter clockwise.')

    # every hole: basic checks, then numbering must be clockwise
    for hole in list_hole_coords:
        check_polygon(hole)
        if not has_clockwise_numbering(hole):
            raise ValueError('Vertex numbering of hole polygon must be clockwise.')

    # TODO data rectification
# TODO data rectification
def find_within_range(repr1, repr2, repr_diff, vertex_set, angle_range_less_180, equal_repr_allowed):
"""
filters out all vertices whose representation lies within the range between
the two given angle representations
which range ('clockwise' or 'counter-clockwise') should be checked is determined by:
- query angle (range) is < 180deg or not (>= 180deg)
:param repr1:
:param repr2:
:param repr_diff: abs(repr1-repr2)
:param vertex_set:
:param angle_range_less_180: whether the angle between repr1 and repr2 is < 180 deg
:param equal_repr_allowed: whether vertices with the same representation should also be returned
:return:
"""
if len(vertex_set) == 0:
return vertex_set
if repr_diff == 0.0:
return set()
min_repr_val = min(repr1, repr2)
max_repr_val = max(repr1, repr2) # = min_angle + angle_diff
def lies_within(vertex):
# vertices with the same representation will not NOT be returned!
return min_repr_val < vertex.get_angle_representation() < max_repr_val
def lies_within_eq(vertex):
# vertices with the same representation will be returned!
return min_repr_val <= vertex.get_angle_representation() <= max_repr_val
# when the range contains the 0.0 value (transition from 3.99... -> 0.0)
# it is easier to check if a representation does NOT lie within this range
# -> filter_fct = not_within
def not_within(vertex):
# vertices with the same representation will NOT be returned!
return not (min_repr_val <= vertex.get_angle_representation() <= max_repr_val)
| |
# This behavior is not seen with a single return token (see testRepeater5 directly below.)
second = pp.matchPreviousExpr(first)
expr = first + bridge.suppress() + second
tst = "aaa ddd 12 aaa ddd"
expected = [["aaa", "ddd"], ["aaa", "ddd"]]
result = expr.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRepeater5(self):
"""a simplified testRepeater4 to examine matchPreviousExpr with a single repeater token"""
if ParserElement._packratEnabled:
print("skipping this test, not compatible with packratting")
return
first = pp.Word(pp.alphas)
bridge = pp.Word(pp.nums)
second = pp.matchPreviousExpr(first)
expr = first + bridge.suppress() + second
tst = "aaa 12 aaa"
expected = tst.replace("12", "").split()
result = expr.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRecursiveCombine(self):
from pyparsing import Forward, Word, alphas, nums, Optional, Combine
testInput = "myc(114)r(11)dd"
stream = Forward()
stream <<= Optional(Word(alphas)) + Optional("(" + Word(nums) + ")" + stream)
expected = ["".join(stream.parseString(testInput))]
print(expected)
stream = Forward()
stream << Combine(
Optional(Word(alphas)) + Optional("(" + Word(nums) + ")" + stream)
)
testVal = stream.parseString(testInput)
print(testVal)
self.assertParseResultsEquals(testVal, expected_list=expected)
    def testInfixNotationBasicArithEval(self):
        """infixNotation with the usual arithmetic precedence levels
        (postfix '!', right-assoc '^', unary sign, '*'/'/', '+'/'-');
        only the parsed nesting structure is checked, not numeric values."""
        from pyparsing import Word, nums, alphas, Literal, oneOf, infixNotation, opAssoc
        import ast

        integer = Word(nums).setParseAction(lambda t: int(t[0]))
        variable = Word(alphas, exact=1)
        operand = integer | variable

        expop = Literal("^")
        signop = oneOf("+ -")
        multop = oneOf("* /")
        plusop = oneOf("+ -")
        factop = Literal("!")

        # precedence list: first entry binds tightest
        expr = infixNotation(
            operand,
            [
                (factop, 1, opAssoc.LEFT),
                (expop, 2, opAssoc.RIGHT),
                (signop, 1, opAssoc.RIGHT),
                (multop, 2, opAssoc.LEFT),
                (plusop, 2, opAssoc.LEFT),
            ],
        )

        test = [
            "9 + 2 + 3",
            "9 + 2 * 3",
            "(9 + 2) * 3",
            "(9 + -2) * 3",
            "(9 + --2) * 3",
            "(9 + -2) * 3^2^2",
            "(9! + -2) * 3^2^2",
            "M*X + B",
            "M*(X + B)",
            "1+2*-3^4*5+-+-6",
            "3!!",
        ]
        # expected token nestings, one Python literal per line (same order as `test`)
        expected = """[[9, '+', 2, '+', 3]]
[[9, '+', [2, '*', 3]]]
[[[9, '+', 2], '*', 3]]
[[[9, '+', ['-', 2]], '*', 3]]
[[[9, '+', ['-', ['-', 2]]], '*', 3]]
[[[9, '+', ['-', 2]], '*', [3, '^', [2, '^', 2]]]]
[[[[9, '!'], '+', ['-', 2]], '*', [3, '^', [2, '^', 2]]]]
[[['M', '*', 'X'], '+', 'B']]
[['M', '*', ['X', '+', 'B']]]
[[1, '+', [2, '*', ['-', [3, '^', 4]], '*', 5], '+', ['-', ['+', ['-', 6]]]]]
[[3, '!', '!']]""".split(
            "\n"
        )
        expected = [ast.literal_eval(x.strip()) for x in expected]
        for test_str, exp_list in zip(test, expected):
            self.assertParseAndCheckList(expr, test_str, exp_list, verbose=True)
    def testInfixNotationEvalBoolExprUsingAstClasses(self):
        """evaluate boolean expressions by attaching AST-node classes as
        parse actions and comparing against Python's own `eval`"""
        from pyparsing import infixNotation, Word, alphas, oneOf, opAssoc

        # variable name -> boolean value, consulted lazily at evaluation time
        boolVars = {"True": True, "False": False}

        class BoolOperand:
            # base node: keeps every other token (the operands; operators skipped)
            reprsymbol = ""

            def __init__(self, t):
                self.args = t[0][0::2]

            def __str__(self):
                sep = " %s " % self.reprsymbol
                return "(" + sep.join(map(str, self.args)) + ")"

        class BoolAnd(BoolOperand):
            reprsymbol = "&"

            def __bool__(self):
                # short-circuit: False as soon as any operand is falsy
                for a in self.args:
                    if isinstance(a, str):
                        v = boolVars[a]
                    else:
                        v = bool(a)
                    if not v:
                        return False
                return True

        class BoolOr(BoolOperand):
            reprsymbol = "|"

            def __bool__(self):
                # short-circuit: True as soon as any operand is truthy
                for a in self.args:
                    if isinstance(a, str):
                        v = boolVars[a]
                    else:
                        v = bool(a)
                    if v:
                        return True
                return False

        class BoolNot(BoolOperand):
            def __init__(self, t):
                # unary prefix operator: token 0 is 'not', token 1 the operand
                self.arg = t[0][1]

            def __str__(self):
                return "~" + str(self.arg)

            def __bool__(self):
                if isinstance(self.arg, str):
                    v = boolVars[self.arg]
                else:
                    v = bool(self.arg)
                return not v

        boolOperand = Word(alphas, max=1) | oneOf("True False")
        boolExpr = infixNotation(
            boolOperand,
            [
                ("not", 1, opAssoc.RIGHT, BoolNot),
                ("and", 2, opAssoc.LEFT, BoolAnd),
                ("or", 2, opAssoc.LEFT, BoolOr),
            ],
        )
        test = [
            "p and not q",
            "not not p",
            "not(p and q)",
            "q or not p and r",
            "q or not p or not r",
            "q or not (p and r)",
            "p or q or r",
            "p or q or r and False",
            "(p or q or r) and False",
        ]

        boolVars["p"] = True
        boolVars["q"] = False
        boolVars["r"] = True
        print("p =", boolVars["p"])
        print("q =", boolVars["q"])
        print("r =", boolVars["r"])
        print()
        for t in test:
            res = boolExpr.parseString(t)
            print(t, "\n", res[0], "=", bool(res[0]), "\n")
            # reference result: Python evaluates the same expression string
            expected = eval(t, {}, boolVars)
            self.assertEqual(
                expected, bool(res[0]), "failed boolean eval test {}".format(t)
            )
    def testInfixNotationMinimalParseActionCalls(self):
        """the operand parse action must fire exactly once per operand —
        infixNotation must not re-evaluate operands while climbing the
        precedence levels"""
        from pyparsing import infixNotation, Word, alphas, oneOf, opAssoc, nums, Literal

        global count
        count = 0

        def evaluate_int(t):
            # counts how often the integer parse action is invoked
            global count
            value = int(t[0])
            print("evaluate_int", value)
            count += 1
            return value

        integer = Word(nums).setParseAction(evaluate_int)
        variable = Word(alphas, exact=1)
        operand = integer | variable

        expop = Literal("^")
        signop = oneOf("+ -")
        multop = oneOf("* /")
        plusop = oneOf("+ -")
        factop = Literal("!")

        expr = infixNotation(
            operand,
            [
                ("!", 1, opAssoc.LEFT),
                ("^", 2, opAssoc.LEFT),
                (signop, 1, opAssoc.RIGHT),
                (multop, 2, opAssoc.LEFT),
                (plusop, 2, opAssoc.LEFT),
            ],
        )

        test = ["9"]
        for t in test:
            count = 0
            print("%r => %s (count=%d)" % (t, expr.parseString(t), count))
            self.assertEqual(1, count, "count evaluated too many times!")
    def testInfixNotationWithParseActions(self):
        """parse actions on suppressed operators rewrite the result into a
        prefix ['op', operands] form; '='/'!=' keep the default infix form"""
        word = pp.Word(pp.alphas)

        def supLiteral(s):
            """Returns the suppressed literal s"""
            return pp.Literal(s).suppress()

        def booleanExpr(atom):
            # '!', '&' and '|' are suppressed and re-inserted as prefix
            # markers by their parse actions
            ops = [
                (supLiteral("!"), 1, pp.opAssoc.RIGHT, lambda s, l, t: ["!", t[0][0]]),
                (pp.oneOf("= !="), 2, pp.opAssoc.LEFT),
                (supLiteral("&"), 2, pp.opAssoc.LEFT, lambda s, l, t: ["&", t[0]]),
                (supLiteral("|"), 2, pp.opAssoc.LEFT, lambda s, l, t: ["|", t[0]]),
            ]
            return pp.infixNotation(atom, ops)

        f = booleanExpr(word) + pp.StringEnd()
        tests = [
            ("bar = foo", [["bar", "=", "foo"]]),
            (
                "bar = foo & baz = fee",
                ["&", [["bar", "=", "foo"], ["baz", "=", "fee"]]],
            ),
        ]
        for test, expected in tests:
            print(test)
            results = f.parseString(test)
            print(results)
            self.assertParseResultsEquals(results, expected_list=expected)
            print()
    def testInfixNotationGrammarTest5(self):
        """numeric evaluation through AST-node parse actions; '**' is declared
        LEFT-associative, but its parse action reverses the token list so that
        ExpOp's left fold (with swapped operands) evaluates right-associatively"""
        expop = pp.Literal("**")
        signop = pp.oneOf("+ -")
        multop = pp.oneOf("* /")
        plusop = pp.oneOf("+ -")

        class ExprNode:
            # base AST node: stores the parsed token group
            def __init__(self, tokens):
                self.tokens = tokens[0]

            def eval(self):
                return None

        class NumberNode(ExprNode):
            def eval(self):
                return self.tokens

        class SignOp(ExprNode):
            def eval(self):
                # token 0 is the sign, token 1 the operand node
                mult = {"+": 1, "-": -1}[self.tokens[0]]
                return mult * self.tokens[1].eval()

        class BinOp(ExprNode):
            def eval(self):
                # left fold over (operator, operand) pairs
                ret = self.tokens[0].eval()
                for op, operand in zip(self.tokens[1::2], self.tokens[2::2]):
                    ret = self.opn_map[op](ret, operand.eval())
                return ret

        class ExpOp(BinOp):
            # operands arrive reversed (see the parse action below), so
            # computing b ** a yields right-associative exponentiation
            opn_map = {"**": lambda a, b: b ** a}

        class MultOp(BinOp):
            import operator

            opn_map = {"*": operator.mul, "/": operator.truediv}

        class AddOp(BinOp):
            import operator

            opn_map = {"+": operator.add, "-": operator.sub}

        operand = ppc.number().setParseAction(NumberNode)
        expr = pp.infixNotation(
            operand,
            [
                # two chained parse actions: reverse the tokens, then wrap in ExpOp
                (expop, 2, pp.opAssoc.LEFT, (lambda pr: [pr[0][::-1]], ExpOp)),
                (signop, 1, pp.opAssoc.RIGHT, SignOp),
                (multop, 2, pp.opAssoc.LEFT, MultOp),
                (plusop, 2, pp.opAssoc.LEFT, AddOp),
            ],
        )

        tests = """\
            2+7
            2**3
            2**3**2
            3**9
            3**3**2
            """

        for t in tests.splitlines():
            t = t.strip()
            if not t:
                continue

            parsed = expr.parseString(t)
            eval_value = parsed[0].eval()
            # reference result: Python's own evaluation of the expression
            self.assertEqual(
                eval(t),
                eval_value,
                "Error evaluating {!r}, expected {!r}, got {!r}".format(
                    t, eval(t), eval_value
                ),
            )
def testInfixNotationExceptions(self):
num = pp.Word(pp.nums)
# arity 3 with None opExpr - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [(None, 3, pp.opAssoc.LEFT),])
# arity 3 with invalid tuple - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [(("+", "-", "*"), 3, pp.opAssoc.LEFT)])
# left arity > 3 - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [("*", 4, pp.opAssoc.LEFT)])
# right arity > 3 - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [("*", 4, pp.opAssoc.RIGHT)])
# assoc not from opAssoc - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [("*", 2, "LEFT")])
def testInfixNotationWithNonOperators(self):
# left arity 2 with None expr
# right arity 2 with None expr
num = pp.Word(pp.nums).addParseAction(pp.tokenMap(int))
ident = ppc.identifier()
for assoc in (pp.opAssoc.LEFT, pp.opAssoc.RIGHT):
expr = pp.infixNotation(
num | ident, [(None, 2, assoc), ("+", 2, pp.opAssoc.LEFT)]
)
self.assertParseAndCheckList(expr, "3x+2", [[[3, "x"], "+", 2]])
def testInfixNotationTernaryOperator(self):
# left arity 3
# right arity 3
num = pp.Word(pp.nums).addParseAction(pp.tokenMap(int))
for assoc in (pp.opAssoc.LEFT, pp.opAssoc.RIGHT):
expr = pp.infixNotation(
num, [("+", 2, pp.opAssoc.LEFT), (("?", ":"), 3, assoc),]
)
self.assertParseAndCheckList(
expr, "3 + 2? 12: 13", [[[3, "+", 2], "?", 12, ":", 13]]
)
    def testParseResultsPickle(self):
        """ParseResults must survive a pickle round trip at every protocol"""
        import pickle

        # test 1
        body = pp.makeHTMLTags("BODY")[0]
        result = body.parseString("<BODY BGCOLOR='#00FFBB' FGCOLOR=black>")
        print(result.dump())

        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            print("Test pickle dump protocol", protocol)
            try:
                pickleString = pickle.dumps(result, protocol)
            except Exception as e:
                # a failed dump still exercises the assertion below (and fails it)
                print("dumps exception:", e)
                newresult = pp.ParseResults()
            else:
                newresult = pickle.loads(pickleString)
                print(newresult.dump())
            # the unpickled results must dump identically to the original
            self.assertEqual(
                result.dump(),
                newresult.dump(),
                "Error pickling ParseResults object (protocol=%d)" % protocol,
            )
def testParseResultsPickle2(self):
import pickle
word = pp.Word(pp.alphas + "'.")
salutation = pp.OneOrMore(word)
comma = pp.Literal(",")
greetee = pp.OneOrMore(word)
endpunc = pp.oneOf("! ?")
greeting = (
salutation("greeting")
+ pp.Suppress(comma)
+ greetee("greetee")
+ endpunc("punc*")[1, ...]
)
string = "Good morning, <NAME>!"
result = greeting.parseString(string)
self.assertParseResultsEquals(
result,
["Good", "morning", | |
"\u0648",
"\u0648\u0633\u0637\u0650",
"\u0648\u0642\u062a\u064a",
"\u0648\u0642\u062a\u06cc\u06a9\u0647",
"\u0648\u0644\u06cc",
"\u0648\u064a",
"\u0648\u06af\u0648",
"\u064a\u0627",
"\u064a\u0627\u0628\u062f",
"\u064a\u0643",
"\u064a\u0643\u062f\u064a\u06af\u0631",
"\u064a\u0643\u064a",
"\u0651\u0647",
"\u067e\u0627\u0639\u06cc\u0646\u0650",
"\u067e\u0633",
"\u067e\u0646\u062c",
"\u067e\u064a\u0634",
"\u067e\u06cc\u0634",
"\u067e\u06cc\u0634\u0650",
"\u0686\u0631\u0627",
"\u0686\u0637\u0648\u0631",
"\u0686\u0646\u062f",
"\u0686\u0646\u062f\u06cc\u0646",
"\u0686\u0646\u064a\u0646",
"\u0686\u0647",
"\u0686\u0647\u0627\u0631",
"\u0686\u0648\u0646",
"\u0686\u064a\u0632\u064a",
"\u0686\u06af\u0648\u0646\u0647",
"\u0686\u06cc\u0632",
"\u0686\u06cc\u0632\u06cc",
"\u0686\u06cc\u0633\u062a",
"\u06a9\u062c\u0627",
"\u06a9\u062c\u0627\u0633\u062a",
"\u06a9\u062f\u0627\u0645",
"\u06a9\u0633",
"\u06a9\u0633\u06cc",
"\u06a9\u0646\u0627\u0631\u0650",
"\u06a9\u0647",
"\u06a9\u064e\u06cc",
"\u06a9\u06cc",
"\u06af\u0630\u0627\u0631\u064a",
"\u06af\u0630\u0627\u0634\u062a\u0647",
"\u06af\u0631\u062f\u062f",
"\u06af\u0631\u0641\u062a",
"\u06af\u0631\u0641\u062a\u0647",
"\u06af\u0631\u0648\u0647\u064a",
"\u06af\u0641\u062a",
"\u06af\u0641\u062a\u0647",
"\u06af\u0648\u064a\u062f",
"\u06af\u0648\u064a\u0646\u062f",
"\u06af\u064a\u0631\u062f",
"\u06af\u064a\u0631\u064a",
"\u06cc\u0627",
"\u06cc\u06a9",
],
"fi": [
"aiemmin",
"aika",
"aikaa",
"aikaan",
"aikaisemmin",
"aikaisin",
"aikajen",
"aikana",
"aikoina",
"aikoo",
"aikovat",
"aina",
"ainakaan",
"ainakin",
"ainoa",
"ainoat",
"aiomme",
"aion",
"aiotte",
"aist",
"aivan",
"ajan",
"alas",
"alemmas",
"alkuisin",
"alkuun",
"alla",
"alle",
"aloitamme",
"aloitan",
"aloitat",
"aloitatte",
"aloitattivat",
"aloitettava",
"aloitettevaksi",
"aloitettu",
"aloitimme",
"aloitin",
"aloitit",
"aloititte",
"aloittaa",
"aloittamatta",
"aloitti",
"aloittivat",
"alta",
"aluksi",
"alussa",
"alusta",
"annettavaksi",
"annetteva",
"annettu",
"ansiosta",
"antaa",
"antamatta",
"antoi",
"aoua",
"apu",
"asia",
"asiaa",
"asian",
"asiasta",
"asiat",
"asioiden",
"asioihin",
"asioita",
"asti",
"avuksi",
"avulla",
"avun",
"avutta",
"edelle",
"edelleen",
"edell\u00e4",
"edelt\u00e4",
"edemm\u00e4s",
"edes",
"edess\u00e4",
"edest\u00e4",
"ehk\u00e4",
"ei",
"eik\u00e4",
"eilen",
"eiv\u00e4t",
"eli",
"ellei",
"elleiv\u00e4t",
"ellemme",
"ellen",
"ellet",
"ellette",
"emme",
"en",
"enemm\u00e4n",
"eniten",
"ennen",
"ensi",
"ensimm\u00e4inen",
"ensimm\u00e4iseksi",
"ensimm\u00e4isen",
"ensimm\u00e4isen\u00e4",
"ensimm\u00e4iset",
"ensimm\u00e4isiksi",
"ensimm\u00e4isin\u00e4",
"ensimm\u00e4isi\u00e4",
"ensimm\u00e4ist\u00e4",
"ensin",
"entinen",
"entisen",
"entisi\u00e4",
"entisten",
"entist\u00e4",
"en\u00e4\u00e4",
"eri",
"eritt\u00e4in",
"erityisesti",
"er\u00e4iden",
"er\u00e4s",
"er\u00e4\u00e4t",
"esi",
"esiin",
"esill\u00e4",
"esimerkiksi",
"et",
"eteen",
"etenkin",
"etessa",
"ette",
"ettei",
"ett\u00e4",
"haikki",
"halua",
"haluaa",
"haluamatta",
"haluamme",
"haluan",
"haluat",
"haluatte",
"haluavat",
"halunnut",
"halusi",
"halusimme",
"halusin",
"halusit",
"halusitte",
"halusivat",
"halutessa",
"haluton",
"he",
"hei",
"heid\u00e4n",
"heihin",
"heille",
"heilt\u00e4",
"heiss\u00e4",
"heist\u00e4",
"heit\u00e4",
"helposti",
"heti",
"hetkell\u00e4",
"hieman",
"hitaasti",
"hoikein",
"huolimatta",
"huomenna",
"hyvien",
"hyviin",
"hyviksi",
"hyville",
"hyvilt\u00e4",
"hyvin",
"hyvin\u00e4",
"hyviss\u00e4",
"hyvist\u00e4",
"hyvi\u00e4",
"hyv\u00e4",
"hyv\u00e4t",
"hyv\u00e4\u00e4",
"h\u00e4n",
"h\u00e4neen",
"h\u00e4nelle",
"h\u00e4nell\u00e4",
"h\u00e4nelt\u00e4",
"h\u00e4nen",
"h\u00e4ness\u00e4",
"h\u00e4nest\u00e4",
"h\u00e4net",
"ihan",
"ilman",
"ilmeisesti",
"itse",
"itsens\u00e4",
"itse\u00e4\u00e4n",
"ja",
"jo",
"johon",
"joiden",
"joihin",
"joiksi",
"joilla",
"joille",
"joilta",
"joissa",
"joista",
"joita",
"joka",
"jokainen",
"jokin",
"joko",
"joku",
"jolla",
"jolle",
"jolloin",
"jolta",
"jompikumpi",
"jonka",
"jonkin",
"jonne",
"joo",
"jopa",
"jos",
"joskus",
"jossa",
"josta",
"jota",
"jotain",
"joten",
"jotenkin",
"jotenkuten",
"jotka",
"jotta",
"jouduimme",
"jouduin",
"jouduit",
"jouduitte",
"joudumme",
"joudun",
"joudutte",
"joukkoon",
"joukossa",
"joukosta",
"joutua",
"joutui",
"joutuivat",
"joutumaan",
"joutuu",
"joutuvat",
"juuri",
"j\u00e4lkeen",
"j\u00e4lleen",
"j\u00e4\u00e4",
"kahdeksan",
"kahdeksannen",
"kahdella",
"kahdelle",
"kahdelta",
"kahden",
"kahdessa",
"kahdesta",
"kahta",
"kahteen",
"kai",
"kaiken",
"kaikille",
"kaikilta",
"kaikkea",
"kaikki",
"kaikkia",
"kaikkiaan",
"kaikkialla",
"kaikkialle",
"kaikkialta",
"kaikkien",
"kaikkin",
"kaksi",
"kannalta",
"kannattaa",
"kanssa",
"kanssaan",
"kanssamme",
"kanssani",
"kanssanne",
"kanssasi",
"kauan",
"kauemmas",
"kaukana",
"kautta",
"kehen",
"keiden",
"keihin",
"keiksi",
"keille",
"keill\u00e4",
"keilt\u00e4",
"kein\u00e4",
"keiss\u00e4",
"keist\u00e4",
"keitten",
"keitt\u00e4",
"keit\u00e4",
"keneen",
"keneksi",
"kenelle",
"kenell\u00e4",
"kenelt\u00e4",
"kenen",
"kenen\u00e4",
"keness\u00e4",
"kenest\u00e4",
"kenet",
"kenett\u00e4",
"kenness\u00e4st\u00e4",
"kenties",
"kerran",
"kerta",
"kertaa",
"keskell\u00e4",
"kesken",
"keskim\u00e4\u00e4rin",
"ketk\u00e4",
"ket\u00e4",
"kiitos",
"kohti",
"koko",
"kokonaan",
"kolmas",
"kolme",
"kolmen",
"kolmesti",
"koska",
"koskaan",
"kovin",
"kuin",
"kuinka",
"kuinkan",
"kuitenkaan",
"kuitenkin",
"kuka",
"kukaan",
"kukin",
"kukka",
"kumpainen",
"kumpainenkaan",
"kumpi",
"kumpikaan",
"kumpikin",
"kun",
"kuten",
"kuuden",
"kuusi",
"kuutta",
"kylliksi",
"kyll\u00e4",
"kymmenen",
"kyse",
"liian",
"liki",
"lis\u00e4ksi",
"lis\u00e4\u00e4",
"lla",
"luo",
"luona",
"l\u00e4hekk\u00e4in",
"l\u00e4helle",
"l\u00e4hell\u00e4",
"l\u00e4helt\u00e4",
"l\u00e4hemm\u00e4s",
"l\u00e4hes",
"l\u00e4hinn\u00e4",
"l\u00e4htien",
"l\u00e4pi",
"mahdollisimman",
"mahdollista",
"me",
"meid\u00e4n",
"meille",
"meill\u00e4",
"melkein",
"melko",
"menee",
"meneet",
"menemme",
"menen",
"menet",
"menette",
"menev\u00e4t",
"meni",
"menimme",
"menin",
"menit",
"meniv\u00e4t",
"menness\u00e4",
"mennyt",
"menossa",
"mihin",
"mikin",
"miksi",
"mik\u00e4",
"mik\u00e4li",
"mik\u00e4\u00e4n",
"milloin",
"milloinkan",
"minne",
"minun",
"minut",
"min\u00e4",
"miss\u00e4",
"mist\u00e4",
"miten",
"mit\u00e4",
"mit\u00e4\u00e4n",
"moi",
"molemmat",
"mones",
"monesti",
"monet",
"moni",
"moniaalla",
"moniaalle",
"moniaalta",
"monta",
"muassa",
"muiden",
"muita",
"muka",
"mukaan",
"mukaansa",
"mukana",
"mutta",
"muu",
"muualla",
"muualle",
"muualta",
"muuanne",
"muulloin",
"muun",
"muut",
"muuta",
"muutama",
"muutaman",
"muuten",
"my\u00f6hemmin",
"my\u00f6s",
"my\u00f6skin",
"my\u00f6sk\u00e4\u00e4n",
"my\u00f6t\u00e4",
"ne",
"nelj\u00e4",
"nelj\u00e4n",
"nelj\u00e4\u00e4",
"niiden",
"niin",
"niist\u00e4",
"niit\u00e4",
"noin",
"nopeammin",
"nopeasti",
"nopeiten",
"nro",
"nuo",
"nyt",
"n\u00e4iden",
"n\u00e4in",
"n\u00e4iss\u00e4",
"n\u00e4iss\u00e4hin",
"n\u00e4iss\u00e4lle",
"n\u00e4iss\u00e4lt\u00e4",
"n\u00e4iss\u00e4st\u00e4",
"n\u00e4it\u00e4",
"n\u00e4m\u00e4",
"ohi",
"oikea",
"oikealla",
"oikein",
"ole",
"olemme",
"olen",
"olet",
"olette",
"oleva",
"olevan",
"olevat",
"oli",
"olimme",
"olin",
"olisi",
"olisimme",
"olisin",
"olisit",
"olisitte",
"olisivat",
"olit",
"olitte",
"olivat",
"olla",
"olleet",
"olli",
"ollut",
"oma",
"omaa",
"omaan",
"omaksi",
"omalle",
"omalta",
"oman",
"omassa",
"omat",
"omia",
"omien",
"omiin",
"omiksi",
"omille",
"omilta",
"omissa",
"omista",
"on",
"onkin",
"onko",
"ovat",
"paikoittain",
"paitsi",
"pakosti",
"paljon",
"paremmin",
"parempi",
"parhaillaan",
"parhaiten",
"perusteella",
"per\u00e4ti",
"pian",
"pieneen",
"pieneksi",
"pienelle",
"pienell\u00e4",
"pienelt\u00e4",
"pienempi",
"pienest\u00e4",
"pieni",
"pienin",
"puolesta",
"puolestaan",
"p\u00e4\u00e4lle",
"runsaasti",
"saakka",
"sadam",
"sama",
"samaa",
"samaan",
"samalla",
"samallalta",
"samallassa",
"samallasta",
"saman",
"samat",
"samoin",
"sata",
"sataa",
"satojen",
"se",
"seitsem\u00e4n",
"sek\u00e4",
"sen",
"seuraavat",
"siell\u00e4",
"sielt\u00e4",
"siihen",
"siin\u00e4",
"siis",
"siit\u00e4",
"sijaan",
"siksi",
"silloin",
"sill\u00e4",
"silti",
"sinne",
"sinua",
"sinulle",
"sinulta",
"sinun",
"sinussa",
"sinusta",
"sinut",
"sin\u00e4",
"sis\u00e4kk\u00e4in",
"sis\u00e4ll\u00e4",
"siten",
"sitten",
"sit\u00e4",
"ssa",
"sta",
"suoraan",
"suuntaan",
"suuren",
"suuret",
"suuri",
"suuria",
"suurin",
"suurten",
"taa",
"taas",
"taemmas",
"tahansa",
"tai",
"takaa",
"takaisin",
"takana",
"takia",
"tapauksessa",
"tarpeeksi",
"tavalla",
"tavoitteena",
"te",
"tietysti",
"todella",
"toinen",
"toisaalla",
"toisaalle",
"toisaalta",
"toiseen",
"toiseksi",
"toisella",
"toiselle",
"toiselta",
"toisemme",
"toisen",
"toisensa",
"toisessa",
"toisesta",
"toista",
"toistaiseksi",
"toki",
"tosin",
"tuhannen",
"tuhat",
"tule",
"tulee",
"tulemme",
"tulen",
"tulet",
"tulette",
"tulevat",
"tulimme",
"tulin",
"tulisi",
"tulisimme",
"tulisin",
"tulisit",
"tulisitte",
"tulisivat",
"tulit",
"tulitte",
"tulivat",
"tulla",
"tulleet",
"tullut",
"tuntuu",
"tuo",
"tuolla",
"tuolloin",
"tuolta",
"tuonne",
"tuskin",
"tyk\u00f6",
"t\u00e4h\u00e4n",
"t\u00e4ll\u00e4",
"t\u00e4ll\u00f6in",
"t\u00e4m\u00e4",
"t\u00e4m\u00e4n",
"t\u00e4nne",
"t\u00e4n\u00e4",
"t\u00e4n\u00e4\u00e4n",
"t\u00e4ss\u00e4",
"t\u00e4st\u00e4",
"t\u00e4ten",
"t\u00e4t\u00e4",
"t\u00e4ysin",
"t\u00e4ytyv\u00e4t",
"t\u00e4ytyy",
"t\u00e4\u00e4ll\u00e4",
"t\u00e4\u00e4lt\u00e4",
"ulkopuolella",
"usea",
"useasti",
"useimmiten",
"usein",
"useita",
"uudeksi",
"uudelleen",
"uuden",
"uudet",
"uusi",
"uusia",
"uusien",
"uusinta",
"uuteen",
"uutta",
"vaan",
"vahemm\u00e4n",
"vai",
"vaiheessa",
"vaikea",
"vaikean",
"vaikeat",
"vaikeilla",
"vaikeille",
"vaikeilta",
"vaikeissa",
"vaikeista",
"vaikka",
"vain",
"varmasti",
"varsin",
"varsinkin",
"varten",
"vasen",
"vasenmalla",
"vasta",
"vastaan",
"vastakkain",
"vastan",
"verran",
"viel\u00e4",
"vierekk\u00e4in",
"vieress\u00e4",
"vieri",
"viiden",
"viime",
"viimeinen",
"viimeisen",
"viimeksi",
"viisi",
"voi",
"voidaan",
"voimme",
"voin",
"voisi",
"voit",
"voitte",
"voivat",
"vuoden",
"vuoksi",
"vuosi",
"vuosien",
"vuosina",
"vuotta",
"v\u00e4hemm\u00e4n",
"v\u00e4hint\u00e4\u00e4n",
"v\u00e4hiten",
"v\u00e4h\u00e4n",
"v\u00e4lill\u00e4",
"yhdeks\u00e4n",
"yhden",
"yhdess\u00e4",
"yhteen",
"yhteens\u00e4",
"yhteydess\u00e4",
"yhteyteen",
"yht\u00e4",
"yht\u00e4\u00e4lle",
"yht\u00e4\u00e4ll\u00e4",
"yht\u00e4\u00e4lt\u00e4",
"yht\u00e4\u00e4n",
"yh\u00e4",
"yksi",
"yksin",
"yksitt\u00e4in",
"yleens\u00e4",
"ylemm\u00e4s",
"yli",
"yl\u00f6s",
"ymp\u00e4ri",
"\u00e4lk\u00f6\u00f6n",
"\u00e4l\u00e4",
],
"fr": [
"a",
"abord",
"absolument",
"afin",
"ah",
"ai",
"aie",
"ailleurs",
"ainsi",
"ait",
"allaient",
"allo",
"allons",
"all\u00f4",
"alors",
"anterieur",
"anterieure",
"anterieures",
"apres",
"apr\u00e8s",
"as",
"assez",
"attendu",
"au",
"aucun",
"aucune",
"aujourd",
"aujourd'hui",
"aupres",
"auquel",
"aura",
"auraient",
"aurait",
"auront",
"aussi",
"autre",
"autrefois",
"autrement",
"autres",
"autrui",
"aux",
"auxquelles",
"auxquels",
"avaient",
"avais",
"avait",
"avant",
"avec",
"avoir",
"avons",
"ayant",
"b",
"bah",
"bas",
"basee",
"bat",
"beau",
"beaucoup",
"bien",
"bigre",
"boum",
"bravo",
"brrr",
"c",
"car",
"ce",
"ceci",
"cela",
"celle",
"celle-ci",
"celle-l\u00e0",
"celles",
"celles-ci",
"celles-l\u00e0",
"celui",
"celui-ci",
"celui-l\u00e0",
"cent",
"cependant",
"certain",
"certaine",
"certaines",
"certains",
"certes",
"ces",
"cet",
"cette",
"ceux",
"ceux-ci",
"ceux-l\u00e0",
"chacun",
"chacune",
"chaque",
"cher",
"chers",
"chez",
"chiche",
"chut",
"ch\u00e8re",
"ch\u00e8res",
"ci",
"cinq",
"cinquantaine",
"cinquante",
"cinquanti\u00e8me",
"cinqui\u00e8me",
"clac",
"clic",
"combien",
"comme",
"comment",
"comparable",
"comparables",
"compris",
"concernant",
"contre",
"couic",
"crac",
"d",
"da",
"dans",
"de",
"debout",
"dedans",
"dehors",
"deja",
"del\u00e0",
"depuis",
"dernier",
"derniere",
"derriere",
"derri\u00e8re",
"des",
"desormais",
"desquelles",
"desquels",
"dessous",
"dessus",
"deux",
"deuxi\u00e8me",
"deuxi\u00e8mement",
"devant",
"devers",
"devra",
"different",
"differentes",
"differents",
"diff\u00e9rent",
"diff\u00e9rente",
"diff\u00e9rentes",
"diff\u00e9rents",
"dire",
"directe",
"directement",
"dit",
"dite",
"dits",
"divers",
"diverse",
"diverses",
"dix",
"dix-huit",
"dix-neuf",
"dix-sept",
"dixi\u00e8me",
"doit",
"doivent",
"donc",
"dont",
"douze",
"douzi\u00e8me",
"dring",
"du",
"duquel",
"durant",
"d\u00e8s",
"d\u00e9sormais",
"e",
"effet",
"egale",
"egalement",
"egales",
"eh",
"elle",
"elle-m\u00eame",
"elles",
"elles-m\u00eames",
"en",
"encore",
"enfin",
"entre",
"envers",
"environ",
"es",
"est",
"et",
"etant",
"etc",
"etre",
"eu",
"euh",
"eux",
"eux-m\u00eames",
"exactement",
"except\u00e9",
"extenso",
"exterieur",
"f",
"fais",
"faisaient",
"faisant",
"fait",
"fa\u00e7on",
"feront",
"fi",
"flac",
"floc",
"font",
"g",
"gens",
"h",
"ha",
"hein",
"hem",
"hep",
"hi",
"ho",
"hol\u00e0",
"hop",
"hormis",
"hors",
"hou",
"houp",
"hue",
"hui",
"huit",
"huiti\u00e8me",
"hum",
"hurrah",
"h\u00e9",
"h\u00e9las",
"i",
"il",
"ils",
"importe",
"j",
"je",
"jusqu",
"jusque",
"juste",
"k",
"l",
"la",
"laisser",
"laquelle",
"las",
"le",
"lequel",
"les",
"lesquelles",
"lesquels",
"leur",
"leurs",
"longtemps",
"lors",
"lorsque",
"lui",
"lui-meme",
"lui-m\u00eame",
"l\u00e0",
"l\u00e8s",
"m",
"ma",
"maint",
"maintenant",
"mais",
"malgre",
"malgr\u00e9",
"maximale",
"me",
"meme",
"memes",
"merci",
"mes",
"mien",
"mienne",
"miennes",
"miens",
"mille",
"mince",
"minimale",
"moi",
"moi-meme",
"moi-m\u00eame",
"moindres",
"moins",
"mon",
"moyennant",
"multiple",
"multiples",
"m\u00eame",
"m\u00eames",
"n",
"na",
"naturel",
"naturelle",
"naturelles",
"ne",
"neanmoins",
"necessaire",
"necessairement",
"neuf",
"neuvi\u00e8me",
"ni",
"nombreuses",
"nombreux",
"non",
"nos",
"notamment",
"notre",
"nous",
"nous-m\u00eames",
"nouveau",
"nul",
"n\u00e9anmoins",
"n\u00f4tre",
"n\u00f4tres",
"o",
"oh",
"oh\u00e9",
"oll\u00e9",
"ol\u00e9",
"on",
"ont",
"onze",
"onzi\u00e8me",
"ore",
"ou",
"ouf",
"ouias",
"oust",
"ouste",
"outre",
"ouvert",
"ouverte",
"ouverts",
"o|",
"o\u00f9",
"p",
"paf",
"pan",
"par",
"parce",
"parfois",
"parle",
"parlent",
"parler",
"parmi",
"parseme",
"partant",
"particulier",
"particuli\u00e8re",
"particuli\u00e8rement",
"pas",
"pass\u00e9",
"pendant",
"pense",
"permet",
"personne",
"peu",
"peut",
"peuvent",
"peux",
"pff",
"pfft",
"pfut",
"pif",
"pire",
"plein",
"plouf",
"plus",
"plusieurs",
"plut\u00f4t",
"possessif",
"possessifs",
"possible",
"possibles",
"pouah",
"pour",
"pourquoi",
"pourrais",
"pourrait",
"pouvait",
"prealable",
"precisement",
"premier",
"premi\u00e8re",
"premi\u00e8rement",
"pres",
"probable",
"probante",
"procedant",
"proche",
"pr\u00e8s",
"psitt",
"pu",
"puis",
"puisque",
"pur",
"pure",
"q",
"qu",
"quand",
"quant",
"quant-\u00e0-soi",
"quanta",
"quarante",
"quatorze",
"quatre",
"quatre-vingt",
"quatri\u00e8me",
"quatri\u00e8mement",
"que",
"quel",
"quelconque",
"quelle",
"quelles",
"quelqu'un",
"quelque",
"quelques",
"quels",
"qui",
"quiconque",
"quinze",
"quoi",
"quoique",
"r",
"rare",
"rarement",
"rares",
"relative",
"relativement",
"remarquable",
"rend",
"rendre",
"restant",
"reste",
| |
####################
#
# Copyright (c) 2018 Fox-IT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####################
import logging
import queue
import threading
import calendar
from bloodhound.ad.utils import ADUtils, AceResolver
from bloodhound.ad.computer import ADComputer
from bloodhound.ad.structures import LDAP_SID
from bloodhound.enumeration.acls import AclEnumerator, parse_binary_acl
from bloodhound.enumeration.outputworker import OutputWorker
class MembershipEnumerator(object):
"""
Class to enumerate memberships in the domain.
    Contains the dumping functions, which build on helper
    methods from the bloodhound.ad module.
"""
def __init__(self, addomain, addc, collect, disable_pooling):
"""
Membership enumeration. Enumerates all groups/users/other memberships.
"""
self.addomain = addomain
self.addc = addc
# Store collection methods specified
self.collect = collect
self.disable_pooling = disable_pooling
self.aclenumerator = AclEnumerator(addomain, addc, collect)
self.aceresolver = AceResolver(addomain, addomain.objectresolver)
self.result_q = None
def get_membership(self, member):
"""
Attempt to resolve the membership (DN) of a group to an object
"""
# First assume it is a user
try:
resolved_entry = self.addomain.users[member]
except KeyError:
# Try if it is a group
try:
resolved_entry = self.addomain.groups[member]
except KeyError:
# Try if it is a computer
try:
entry = self.addomain.computers[member]
# Computers are stored as raw entries
resolved_entry = ADUtils.resolve_ad_entry(entry)
except KeyError:
use_gc = ADUtils.ldap2domain(member) != self.addomain.domain
qobject = self.addomain.objectresolver.resolve_distinguishedname(member, use_gc=use_gc)
if qobject is None:
return None
resolved_entry = ADUtils.resolve_ad_entry(qobject)
# Store it in the cache
if resolved_entry['type'] == 'User':
self.addomain.users[member] = resolved_entry
if resolved_entry['type'] == 'Group':
self.addomain.groups[member] = resolved_entry
# Computers are stored as raw entries
if resolved_entry['type'] == 'Computer':
self.addomain.computers[member] = qobject
return {
"ObjectIdentifier": resolved_entry['objectid'],
"ObjectType": resolved_entry['type'].capitalize()
}
@staticmethod
def get_primary_membership(entry):
"""
Construct primary membership from RID to SID (BloodHound 3.0 only)
"""
try:
primarygroupid = int(entry['attributes']['primaryGroupID'])
except (TypeError, KeyError):
# Doesn't have a primarygroupid, means it is probably a Group instead of a user
return None
return '%s-%d' % ('-'.join(entry['attributes']['objectSid'].split('-')[:-1]), primarygroupid)
    @staticmethod
    def add_user_properties(user, entry):
        """
        Fill the 'Properties' dict of a user output object from its LDAP entry.

        Translates raw LDAP attributes (Windows timestamps, userAccountControl
        bits, SPNs, SID history, password-related fields) into BloodHound
        property names. Mutates `user` in place; returns None.
        """
        props = user['Properties']
        # Is user enabled? Checked by seeing if the UAC flag 2 (ACCOUNT_DISABLED) is not set
        props['enabled'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 2 == 0
        # Windows timestamps are converted to unix epoch seconds
        props['lastlogon'] = ADUtils.win_timestamp_to_unix(
            ADUtils.get_entry_property(entry, 'lastLogon', default=0, raw=True)
        )
        props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix(
            ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True)
        )
        # 0 means "never logged on"; encoded as -1 in the output
        if props['lastlogontimestamp'] == 0:
            props['lastlogontimestamp'] = -1
        props['pwdlastset'] = ADUtils.win_timestamp_to_unix(
            ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True)
        )
        # userAccountControl bits: 0x00400000 = DONT_REQ_PREAUTH,
        # 0x00010000 = DONT_EXPIRE_PASSWORD, 0x00100000 = NOT_DELEGATED ("sensitive")
        props['dontreqpreauth'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00400000 == 0x00400000
        props['pwdneverexpires'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00010000 == 0x00010000
        props['sensitive'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00100000 == 0x00100000
        props['serviceprincipalnames'] = ADUtils.get_entry_property(entry, 'servicePrincipalName', [])
        props['hasspn'] = len(props['serviceprincipalnames']) > 0
        props['displayname'] = ADUtils.get_entry_property(entry, 'displayName')
        props['email'] = ADUtils.get_entry_property(entry, 'mail')
        props['title'] = ADUtils.get_entry_property(entry, 'title')
        props['homedirectory'] = ADUtils.get_entry_property(entry, 'homeDirectory')
        props['description'] = ADUtils.get_entry_property(entry, 'description')
        props['userpassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'userPassword'))
        props['admincount'] = ADUtils.get_entry_property(entry, 'adminCount', 0) == 1
        # Only emit allowedtodelegate when constrained-delegation targets exist
        if len(ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', [])) > 0:
            props['allowedtodelegate'] = ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', [])
        # sIDHistory holds raw binary SIDs; render them in canonical string form
        props['sidhistory'] = [LDAP_SID(bsid).formatCanonical() for bsid in ADUtils.get_entry_property(entry, 'sIDHistory', [])]
        # v4 props
        # whencreated may come back as an int or a datetime-like object
        whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0)
        if isinstance(whencreated, int):
            props['whencreated'] = whencreated
        else:
            props['whencreated'] = calendar.timegm(whencreated.timetuple())
        props['unixpassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'unixuserpassword'))
        props['unicodepassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'unicodepwd'))
        # Non-default schema?
        # props['sfupassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'msSFU30Password'))
        props['sfupassword'] = None
    def enumerate_users(self, timestamp=""):
        """
        Enumerate all users in the domain and write them to <timestamp>users.json.

        Entries are streamed from LDAP via a generator; output is written from a
        dedicated worker thread reading self.result_q. When 'acl' collection is
        enabled, ACL parsing is offloaded to a process pool (unless pooling is
        disabled for debugging).
        """
        filename = timestamp + 'users.json'
        # Should we include extra properties in the query?
        with_properties = 'objectprops' in self.collect
        acl = 'acl' in self.collect
        entries = self.addc.get_users(include_properties=with_properties, acl=acl)
        logging.debug('Writing users to file: %s', filename)
        # Use a separate queue for processing the results
        self.result_q = queue.Queue()
        results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'users', filename))
        results_worker.daemon = True
        results_worker.start()
        if acl and not self.disable_pooling:
            self.aclenumerator.init_pool()
        # This loops over a generator, results are fetched from LDAP on the go
        for entry in entries:
            resolved_entry = ADUtils.resolve_ad_entry(entry)
            # Skip trust objects
            if resolved_entry['type'] == 'trustaccount':
                continue
            user = {
                "AllowedToDelegate": [],
                "ObjectIdentifier": ADUtils.get_entry_property(entry, 'objectSid'),
                "PrimaryGroupSID": MembershipEnumerator.get_primary_membership(entry),
                "Properties": {
                    "name": resolved_entry['principal'],
                    "domain": self.addomain.domain.upper(),
                    "domainsid": self.addomain.domain_object.sid,
                    "distinguishedname":ADUtils.get_entry_property(entry, 'distinguishedName').upper(),
                    # userAccountControl bits: 0x00080000 = TRUSTED_FOR_DELEGATION,
                    # 0x01000000 = TRUSTED_TO_AUTH_FOR_DELEGATION, 0x00000020 = PASSWD_NOTREQD
                    "unconstraineddelegation": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000,
                    "trustedtoauth": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x01000000 == 0x01000000,
                    "passwordnotreqd": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00000020 == 0x00000020
                },
                "Aces": [],
                "SPNTargets": [],
                "HasSIDHistory": [],
                "IsDeleted": ADUtils.get_entry_property(entry, 'isDeleted', default=False)
            }
            if with_properties:
                MembershipEnumerator.add_user_properties(user, entry)
                # Resolve constrained-delegation targets (host part of the SPN)
                # to computer SIDs where possible
                if 'allowedtodelegate' in user['Properties']:
                    for host in user['Properties']['allowedtodelegate']:
                        try:
                            target = host.split('/')[1]
                        except IndexError:
                            logging.warning('Invalid delegation target: %s', host)
                            continue
                        try:
                            sid = self.addomain.computersidcache.get(target.lower())
                            user['AllowedToDelegate'].append(sid)
                        except KeyError:
                            # Not in the SID cache; fall back to the FQDN when it looks like one
                            if '.' in target:
                                user['AllowedToDelegate'].append(target.upper())
                # Parse SID history
                if len(user['Properties']['sidhistory']) > 0:
                    for historysid in user['Properties']['sidhistory']:
                        user['HasSIDHistory'].append(self.aceresolver.resolve_sid(historysid))
            # If this is a GMSA, process its ACL. We don't bother with threads/processes here
            # since these accounts shouldn't be that common and neither should they have very complex
            # DACLs which control who can read their password
            if ADUtils.get_entry_property(entry, 'msDS-GroupMSAMembership', default=b'', raw=True) != b'':
                self.parse_gmsa(user, entry)
            self.addomain.users[entry['dn']] = resolved_entry
            # If we are enumerating ACLs, we break out of the loop here
            # this is because parsing ACLs is computationally heavy and therefore is done in subprocesses
            if acl:
                if self.disable_pooling:
                    # Debug mode, don't run this pooled since it hides exceptions
                    self.process_acldata(parse_binary_acl(user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map))
                else:
                    # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file
                    self.aclenumerator.pool.apply_async(parse_binary_acl, args=(user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata)
            else:
                # Write it to the queue -> write to file in separate thread
                # this is solely for consistency with acl parsing, the performance improvement is probably minimal
                self.result_q.put(user)
        self.write_default_users()
        # If we are parsing ACLs, close the parsing pool first
        # then close the result queue and join it
        if acl and not self.disable_pooling:
            self.aclenumerator.pool.close()
            self.aclenumerator.pool.join()
            self.result_q.put(None)
        else:
            self.result_q.put(None)
        self.result_q.join()
        logging.debug('Finished writing users')
def enumerate_groups(self, timestamp=""):
highvalue = ["S-1-5-32-544", "S-1-5-32-550", "S-1-5-32-549", "S-1-5-32-551", "S-1-5-32-548"]
def is_highvalue(sid):
if sid.endswith("-512") or sid.endswith("-516") or sid.endswith("-519") or sid.endswith("-520"):
return True
if sid in highvalue:
return True
return False
# Should we include extra properties in the query?
with_properties = 'objectprops' in self.collect
acl = 'acl' in self.collect
filename = timestamp + 'groups.json'
entries = self.addc.get_groups(include_properties=with_properties, acl=acl)
logging.debug('Writing groups to file: %s', filename)
# Use a separate queue for processing the results
self.result_q = queue.Queue()
results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'groups', filename))
results_worker.daemon = True
results_worker.start()
if acl and not self.disable_pooling:
self.aclenumerator.init_pool()
for entry in entries:
resolved_entry = ADUtils.resolve_ad_entry(entry)
self.addomain.groups[entry['dn']] = resolved_entry
try:
sid = entry['attributes']['objectSid']
except KeyError:
#Somehow we found a group without a sid?
logging.warning('Could not determine SID for group %s', entry['attributes']['distinguishedName'])
continue
group = {
"ObjectIdentifier": sid,
"Properties": {
"domain": self.addomain.domain.upper(),
"domainsid": self.addomain.domain_object.sid,
"name": resolved_entry['principal'],
"distinguishedname": ADUtils.get_entry_property(entry, 'distinguishedName').upper()
},
"Members": [],
"Aces": [],
"IsDeleted": ADUtils.get_entry_property(entry, 'isDeleted', default=False)
}
if sid in ADUtils.WELLKNOWN_SIDS:
# Prefix it with the domain
group['ObjectIdentifier'] = '%s-%s' % (self.addomain.domain.upper(), sid)
if with_properties:
group['Properties']['admincount'] = ADUtils.get_entry_property(entry, 'adminCount', default=0) == 1
group['Properties']['description'] = ADUtils.get_entry_property(entry, 'description')
whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0)
group['Properties']['whencreated'] = calendar.timegm(whencreated.timetuple())
for member in entry['attributes']['member']:
resolved_member = self.get_membership(member)
if resolved_member:
group['Members'].append(resolved_member)
# If we are enumerating ACLs, we break out of the loop here
# this is because parsing ACLs is computationally heavy and therefor is done in subprocesses
if acl:
if self.disable_pooling:
# Debug mode, don't run this pooled since it hides exceptions
self.process_acldata(parse_binary_acl(group, 'group', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map))
else:
# Process ACLs in separate processes, then call the processing function to resolve entries and write them to file
self.aclenumerator.pool.apply_async(parse_binary_acl, args=(group, | |
# -*- coding: utf-8 -*-
import logging
import numpy
import itertools
from interact import __module__
from interact.core.helpers import set_contact_type
from interact.core.geometry import plane_fit, distance, angle, projection, vector_angle
from interact.interactions.utils import is_pication
logger = logging.getLogger(__module__)
def eval_saltbridge(contact_frame, topology, max_charge_dist=0.55):
    """
    Evaluate contacts between centers of positive and negative charge.
    Physiological relevant pH is assumed.

    Algorithm:
    1) Primary selection is between all source and target atoms that are
       max_charge_dist apart according to (Barlow and Thornton, 1983) +
       0.15 nm
    2) Select all residues in previous selection that have a formal
       positive or negative charge according to the sum of partial charges
       in the 'charge' column. The latter charges are Gasteiger partial
       charges by default.
    3) Select all atoms that are likely a part of the charged group in
       the residues from step 2 based on SYBYL atom types following:

       amino-acid type               atom         charge
       --------------------------------------------------------------------
       Arginine - Arg - R            N.pl3        RNHC(NH2)2+   +
       Lysine - Lys - K              N.4          RNH3          +
       Histidine - His - H           N.ar         ND1, NE2      +
       Aspartic acid - Asp - D       O.co2        RCOO-         -
       Glutamic acid - Glu - E       O.co2        RCOO-         -

       Ligands type                  atom         charge
       --------------------------------------------------------------------
       quaterny ammonium             N.4                        +
       tertiary amines               N.am                       +
       sulfonium groups              S.3                        +
       guanidine groups              C.cat                      +
       phosphate                     O.co2        PO4           -
       sulfonate                     S.3          RSO2O-        -
       sulfonic acid                 S.O2                       -
       carboxylate                   O.co2                      -
    4) Select neighbouring atoms not of element type 'C' or 'H' in the
       selection from step 3 to define all atoms part of the charged group
    5) Evaluate salt-bridges by the distance between the geometrical centers
       of two charged groups of opposite sign being smaller or equal to
       max_charge_dist.

    Although multiple atoms of both charged groups take part in the salt-bridge
    only the pair with the shortest atom-atom distance is reported using the
    labels:
    - 'sb-pn': for a positive charged source to negative target contact.
    - 'sb-np': for a negative charged source to positive target contact.

    Because salt-bridges are composed of hydrogen-bonded and charged
    interactions, the reported atom pair often was reported before as taking
    part in a hydrogen-bonded interactions when the `eval_hbonds` function was
    used. The salt-bridge label will be added to the contact column maintaining
    the hydrogen bond label.

    :param contact_frame: contact DataFrame
    :type contact_frame:  :py:DataFrame
    :param topology:      Pandas DataFrame representing the structure
    :type topology:       :interact:TopologyDataFrame
    :param max_charge_dist: maximum distance cutoff between charge centers
    :type max_charge_dist:  :py:float

    :return: Adds the labels 'sb-np' or 'sb-pn' to the
             'contact' column of the input contact frame.
    :rtype:  :pandas:DataFrame
    """
    # Preselect all contacts below max_charge_dist
    chdist = contact_frame[contact_frame['target', 'distance'] <= max_charge_dist]
    # Select all charged source and target residues.
    # Each entry is a ('n'|'p', atom-group) tuple: sign plus the group's atoms.
    charged_groups = {'source': [], 'target': []}
    for group in charged_groups.keys():
        for charge_group in topology[topology['serial'].isin(chdist[group, 'serial'])].find_charged_centers():
            if charge_group[1] <= -1:
                charged_groups[group].append(('n', charge_group[0]))
            else:
                charged_groups[group].append(('p', charge_group[0]))
    if not len(charged_groups['source']) or not len(charged_groups['target']):
        logger.info('Not running salt-bridge detection. Charged groups in source: {0}, target: {1}'.format(
            len(charged_groups['source']), len(charged_groups['target'])))
        return contact_frame
    logger.info(
        "Run salt-bridge detection on {0} source and {1} target charged groups using: max_charge_dist={2}".format(
            len(charged_groups['source']), len(charged_groups['target']), max_charge_dist))
    # Loop over combinations of source and target charged groups
    for s, l in itertools.product(charged_groups['source'], charged_groups['target']):
        # Only groups of opposite sign can form a salt-bridge
        if s[0] != l[0]:
            center_distance = distance(s[1].center(), l[1].center())
            sb_type = 'sb-{0}{1}'.format(s[0], l[0])
            source_center = repr(list(s[1]['serial'])).strip('[]')
            target_center = repr(list(l[1]['serial'])).strip('[]')
            if center_distance <= max_charge_dist:
                logger.info('{0} between {1}-{2} and {3}-{4}. D: {5:.3f} nm between groups {6} and {7}'.format(
                    sb_type, s[1]['resSeq'].unique()[0], s[1]['resName'].unique()[0], l[1]['resSeq'].unique()[0],
                    l[1]['resName'].unique()[0], center_distance, source_center, target_center))
                # Report salt-bridges: label only the closest atom pair of the two groups
                sb_selection = contact_frame[(contact_frame['source', 'serial'].isin(s[1]['serial'])) &
                                             contact_frame['target', 'serial'].isin(l[1]['serial'])]
                report_to = sb_selection.sort_values(by=('target', 'distance')).head(n=1)
                contact_frame.loc[report_to.index, 'contact'] = set_contact_type(
                    contact_frame.loc[report_to.index, 'contact'], sb_type)
    return contact_frame
def eval_heme_coordination(contact_frame, topology, rings=None, heme_dist_prefilter=0.55, heme_dist_max=0.35,
heme_dist_min=0, min_heme_coor_angle=105, max_heme_coor_angle=160, fe_ox_dist=0.16,
exclude=('H', 'O.3', 'O.2', 'O.co2', 'O.spc', 'O.t3p', 'C.cat', 'S.o2')):
"""
Evaluate heme coordination of ligand atoms
"""
rings = rings or []
# Select all atoms within heme_dist_prefilter distance from Fe excluding atoms in exclude list
fedist = contact_frame[(contact_frame['target', 'name'] == 'FE') &
(~contact_frame['source', 'attype'].isin(exclude)) &
(contact_frame['target', 'distance'] < heme_dist_prefilter)]
if fedist.empty:
return contact_frame
# Get Fe atom
fe = topology[(topology['resName'] == 'HEM') & (topology['name'] == 'FE')]
if fe.empty:
logger.warn("Unable to asses heme coordination. Fe atom not found")
return contact_frame
# Get four nitrogen atoms coordinating the Fe atom
fe_neigh = fe.neighbours(cutoff=0.3)
fe_coordinating = fe_neigh[(fe_neigh['resName'] == 'HEM') & (fe_neigh['element'] == 'N')].sort_values(by='name')
if len(fe_coordinating) != 4:
logger.warn("Unable to asses heme coordination. Found {0} nitrogen atoms coordinating Fe. Expected 4".format(
len(fe_coordinating)))
return contact_frame
logger.debug("Run heme coordination detection on {0} possible contacts using: heme_dist_prefilter={1:.2f}, "
"heme_dist_min={2:.2f}, heme_dist_max={3:.2f}, min_heme_coor_angle={4:.2f}, max_heme_coor_angle={5:.2f}, "
"fe_ox_dist={6:.2f}".format(fedist.shape[0], heme_dist_prefilter, heme_dist_min, heme_dist_max,
min_heme_coor_angle, max_heme_coor_angle, fe_ox_dist))
# Calculate normals between Nitrogens -> Fe vectors
fe_coor = fe.coord
n_coor = fe_coordinating.coord - fe_coor
m1 = numpy.cross(n_coor[0], n_coor[1])
m2 = numpy.cross(n_coor[1], n_coor[2])
m3 = numpy.cross(n_coor[2], n_coor[3])
m4 = numpy.cross(n_coor[3], n_coor[0])
# Is there an Oxygen above the heme (complex I) or do we need to place a dummy
close_fe_neigh = fe.neighbours(cutoff=0.2)
dummyox = close_fe_neigh[(close_fe_neigh['resName'] == 'HEM') & (close_fe_neigh['element'] == 'O')]
mv = numpy.mean(numpy.vstack((m1, m2, m3, m4)), axis=0)
if len(dummyox) == 1:
dummyox = dummyox.coord
logger.info('Oxygen atom bonded to Fe (complex I)')
else:
# Calculate dummy O atom from the average of the four normals
# Normalize normal mean, change vector size to 1.6 A and set point
dummyox = ((mv / numpy.linalg.norm(mv)) * fe_ox_dist) + fe_coor
logger.info("Reconstructed oxygen atom placed {0}nm above Heme Fe at position {1}".format(fe_ox_dist, ' '.join(
['{0:.3f}'.format(c) for c in dummyox])))
# Check the coordination of the Fe atom by the SG atom of the Cys below Heme
sg = fe_neigh[(fe_neigh['resName'] == 'CYS') & (fe_neigh['name'] == 'SG')]
if not sg.empty:
sg_angle = angle(dummyox, fe_coor, sg.coord)
if not 160 < sg_angle < 200:
logger.warn("Angle between reconstructed oxygen -> Fe -> Cys SG has unusual value {0:.3f}".format(sg_angle))
else:
logger.warn("No CYS SG atom in a distance of 0.3nm of the Heme Fe atom")
# Check if there are rings with there center of mass below heme_dist_prefilter from heme FE.
# Calculate ring normals
ring_normals = []
for aromatic in rings:
aromatic_center = aromatic.center()
aromatic_fe_dist = distance(fe_coor, aromatic_center)
if aromatic_fe_dist < heme_dist_prefilter:
aromatic_norm = plane_fit(aromatic.coord, center=aromatic_center)
aromatic_norm_angle = vector_angle(aromatic_norm, mv, deg=True)
aromatic_norm_angle = min(aromatic_norm_angle,
180 - aromatic_norm_angle if not 180 - aromatic_norm_angle < 0 else
aromatic_norm_angle)
ring = aromatic.index.tolist()
ring_normals.append((aromatic_center, aromatic_norm, aromatic_norm_angle, ring))
logger.info("Ring {0} close to heme Fe: distance center-Fe {1:.2f}nm, normal angle heme plane-ring:"
"{2:.2f} deg.".format(ring, aromatic_fe_dist, aromatic_norm_angle))
# Get ligand atoms coordinated
for idx, n in fedist.iterrows():
source = topology[topology.index == n['source', 'index']]
source_atom_type = n['source', 'attype']
z = source.coord
# Check for heme coordination by aromatic nitrogens. label as 'hc'
if source_atom_type in ('N.ar', 'N.2', 'N.3'):
ar_norm_angle = 90
for ring in ring_normals:
if n['source', 'index'] in ring[-1]:
ar_norm_angle = ring[2]
break
fe_dist = distance(z, fe_coor)
fe_offset = distance(projection(mv, fe_coor, z), fe_coor)
if 45 < ar_norm_angle < 95 and fe_dist < 0.35 and fe_offset < 0.1:
contact_frame.loc[idx, 'contact'] = set_contact_type(contact_frame.loc[idx, 'contact'], 'hc')
contact_frame.loc[idx, ('target', 'angle')] = ar_norm_angle
logger.info(
"Heme Fe coordination with {0} {1}. Distance: {2:.2f} A. offset: {3:.2f} A plane normal angle: {4:.2f}".format(
n['source', 'serial'],
n['source', 'name'], fe_dist, fe_offset, ar_norm_angle))
# Check for possible sites of metabolism and label as 'hm'.
# Filter on covalent neighbours and apply knowledge based rules.
if source_atom_type in ('C.2', 'C.3', 'C.ar', 'N.1', 'N.2', 'N.4', 'N.pl3', 'S.3'):
cutoff = 0.16
if source_atom_type == 'S.3': cutoff = 0.18
neigh = source.neighbours(cutoff=cutoff)
neigh_atom_types = set(neigh['attype'])
# If ligand atom is of type C.3 or C.ar it should contain at least one covalently bonded atom
# of type ['H','Cl','I','Br','F','Hal']
if source_atom_type in ('C.3', 'C.ar') and len(
neigh_atom_types.intersection({'H', 'Cl', 'I', 'Br', 'F', 'Hal'})) == 0:
logger.debug(
"Ligand target atom {0}-{1} excluded. Atom type {2} not covalently bonded to: H,Cl,I,Br,F or Hal".format(
n['source', 'serial'], n['source', 'name'], source_atom_type))
continue
# If ligand atom is of type N.4 it should contain at least one covalently bonded atom of type H
if source_atom_type == 'N.4' and not 'H' in neigh_atom_types:
logger.debug(
"Ligand target atom {0}-{1} excluded. Atom type N.4 not covalently bonded to hydrogen".format(
n['source', 'serial'], n['source', 'name']))
continue
# | |
<filename>jks/bks.py<gh_stars>10-100
# vim: set et ai ts=4 sts=4 sw=4:
import struct
import hashlib
from pyasn1.codec.ber import decoder
from pyasn1_modules import rfc5208, rfc2459
from Cryptodome.Hash import HMAC, SHA
from .util import *
from .jks import KeyStore, TrustedCertEntry
from . import rfc7292
# Tags identifying the top-level entry types that can appear in a BKS store.
ENTRY_TYPE_CERTIFICATE = 1  # trusted certificate entry
ENTRY_TYPE_KEY = 2 # plaintext key entry as would otherwise be stored inside a sealed entry (type 4); no longer supported at the time of writing (BC 1.54)
ENTRY_TYPE_SECRET = 3 # for keys that were added to the store in already-protected form; can be arbitrary data
ENTRY_TYPE_SEALED = 4 # for keys that were protected by the BC keystore implementation upon adding
# Tags identifying the kind of key held inside a (sealed) key entry.
KEY_TYPE_PRIVATE = 0 #: Type indicator for private keys in :class:`BksKeyEntry`.
KEY_TYPE_PUBLIC = 1 #: Type indicator for public keys in :class:`BksKeyEntry`.
KEY_TYPE_SECRET = 2 #: Type indicator for secret keys in :class:`BksKeyEntry`. Indicates a key for use with a symmetric encryption algorithm.
class AbstractBksEntry(AbstractKeystoreEntry):
    """Common base class for the entry types found in BKS-format keystores."""
    def __init__(self, **kwargs):
        super(AbstractBksEntry, self).__init__(**kwargs)
        # Every BKS entry type may carry an associated certificate chain
        # (unlike JKS, where only key entries do).
        self.cert_chain = kwargs.get("cert_chain", [])
        # Raw protected payload, if any; subclasses consume/clear this.
        self._encrypted = kwargs.get("encrypted")
class BksTrustedCertEntry(TrustedCertEntry):
    """Trusted certificate entry in a BKS or UBER keystore.

    Behaviourally identical to the JKS :class:`TrustedCertEntry`; the distinct
    subclass allows BKS entries to be told apart via ``isinstance`` checks.
    """
    pass
class BksKeyEntry(AbstractBksEntry):
    """
    Represents a non-encrypted cryptographic key (public, private or secret) stored in a BKS keystore.
    May exceptionally appear as a top-level entry type in (very) old keystores, but you are most likely
    to encounter these as the nested object inside a :class:`BksSealedKeyEntry` once decrypted.

    :raises UnexpectedKeyEncodingException: if ``format`` does not match the
        expected encoding for the given ``type``, or ``type`` is unknown.
    """
    def __init__(self, type, format, algorithm, encoded, **kwargs):
        super(BksKeyEntry, self).__init__(**kwargs)
        self.type = type
        """An integer indicating the type of key: one of :const:`KEY_TYPE_PRIVATE`, :const:`KEY_TYPE_PUBLIC`, :const:`KEY_TYPE_SECRET`."""
        self.format = format
        """A string indicating the format or encoding in which the key is stored. One of: ``PKCS8``, ``PKCS#8``, ``X.509``, ``X509``, ``RAW``."""
        self.algorithm = algorithm
        """A string indicating the algorithm for which the key is valid."""
        self.encoded = encoded
        """A byte string containing the key, formatted as indicated by the :attr:`format` attribute."""
        if self.type == KEY_TYPE_PRIVATE:
            if self.format not in ["PKCS8", "PKCS#8"]:
                raise UnexpectedKeyEncodingException("Unexpected encoding for private key entry: '%s'" % self.format)
            # self.encoded is a PKCS#8 PrivateKeyInfo; pre-parse the pieces
            # callers typically need (raw key octets + algorithm OID).
            private_key_info = decoder.decode(self.encoded, asn1Spec=rfc5208.PrivateKeyInfo())[0]
            self.pkey_pkcs8 = self.encoded
            self.pkey = private_key_info['privateKey'].asOctets()
            self.algorithm_oid = private_key_info['privateKeyAlgorithm']['algorithm'].asTuple()
        elif self.type == KEY_TYPE_PUBLIC:
            if self.format not in ["X.509", "X509"]:
                raise UnexpectedKeyEncodingException("Unexpected encoding for public key entry: '%s'" % self.format)
            # self.encoded is an X.509 SubjectPublicKeyInfo
            spki = decoder.decode(self.encoded, asn1Spec=rfc2459.SubjectPublicKeyInfo())[0]
            self.public_key_info = self.encoded
            self.public_key = bitstring_to_bytes(spki['subjectPublicKey'])
            self.algorithm_oid = spki['algorithm']['algorithm'].asTuple()
        elif self.type == KEY_TYPE_SECRET:
            if self.format != "RAW":
                raise UnexpectedKeyEncodingException("Unexpected encoding for raw key entry: '%s'" % self.format)
            # self.encoded is an unwrapped/raw cryptographic key
            self.key = encoded
            self.key_size = len(encoded)*8
        else:
            # BUGFIX: this branch is reached when the *type* tag is unrecognized,
            # not the format (which may be perfectly valid), so report the type
            # value instead of mislabelling the format.
            raise UnexpectedKeyEncodingException("Key type %r not recognized" % (self.type,))
    def is_decrypted(self):
        """Always returns ``True`` for this entry type."""
        return True
    def decrypt(self, key_password):
        """Does nothing for this entry type; these entries are stored in non-encrypted form."""
        pass
    @classmethod
    def type2str(cls, t):
        """
        Returns a string representation of the given key type. Returns one of ``PRIVATE``, ``PUBLIC`` or ``SECRET``, or ``None``
        if no such key type is known.
        :param int t: Key type constant. One of :const:`KEY_TYPE_PRIVATE`, :const:`KEY_TYPE_PUBLIC`, :const:`KEY_TYPE_SECRET`.
        """
        # Dict dispatch instead of an if/elif chain; .get() yields None for
        # unknown types, matching the documented contract.
        return {KEY_TYPE_PRIVATE: "PRIVATE",
                KEY_TYPE_PUBLIC: "PUBLIC",
                KEY_TYPE_SECRET: "SECRET"}.get(t)
class BksSecretKeyEntry(AbstractBksEntry):  # TODO: consider renaming this to SecretValueEntry, since it's arbitrary secret data
    """
    Holds an arbitrary user-supplied byte string from a BKS/UBER keystore.

    Conceptually similar to, but not the same as, :class:`BksKeyEntry` objects
    of type :const:`KEY_TYPE_SECRET`:

    - :class:`BksSecretKeyEntry` objects hold opaque user-provided byte[]s
      which, per the Java Keystore SPI, the keystore must assume were already
      protected by the user in some unspecified way; no password is supplied
      when adding them, so the bytes are stored as-is. Produced by
      ``KeyStore.setKeyEntry(String alias, byte[] key, Certificate[] chain)``.
      The bouncycastle project abandoned these entry types well over a decade
      ago; recent BC versions can no longer retrieve them via the Java APIs.
    - :class:`BksKeyEntry` objects of type :const:`KEY_TYPE_SECRET` hold the
      getEncoded() result of proper Java SecretKey objects. Produced by
      ``KeyStore.setKeyEntry(String alias, Key key, char[] password, Certificate[] chain)``;
      here the keystore implementation receives a real (Secret)Key object and
      decides itself how to encode and protect it with the given password.
    """
    def __init__(self, **kwargs):
        super(BksSecretKeyEntry, self).__init__(**kwargs)
        # The store keeps these bytes verbatim; expose them directly.
        self.key = self._encrypted
        """A byte string containing the secret key/value."""
    def is_decrypted(self):
        """Always returns ``True`` for this entry type."""
        return True
    def decrypt(self, key_password):
        """No-op; these entries hold arbitrary user-supplied data and it is unknown whether/how it was encrypted."""
        pass
class BksSealedKeyEntry(AbstractBksEntry):
    """
    PBEWithSHAAnd3-KeyTripleDES-CBC-encrypted wrapper around a :class:`BksKeyEntry`. The contained key type is unknown until decrypted.
    Once decrypted, objects of this type can be used in the same way as :class:`BksKeyEntry`: attribute accesses are forwarded
    to the wrapped :class:`BksKeyEntry` object.
    """
    def __init__(self, **kwargs):
        super(BksSealedKeyEntry, self).__init__(**kwargs)
        self._nested = None # nested BksKeyEntry once decrypted
    def __getattr__(self, name):
        # Only called for attributes NOT found through normal lookup, so this
        # transparently forwards unknown attributes to the decrypted entry.
        if not self.is_decrypted():
            raise NotYetDecryptedException("Cannot access attribute '%s'; entry not yet decrypted, call decrypt() with the correct password first" % name)
        # if it's an attribute that exists here, return it; otherwise forward the request to the nested entry
        if "_"+name in self.__dict__:
            return self.__dict__["_"+name]
        else:
            return getattr(self._nested, name)
    def is_decrypted(self):
        return (not self._encrypted)
    def decrypt(self, key_password):
        if self.is_decrypted():
            return
        pos = 0
        data = self._encrypted
        # Payload layout: [salt][4-byte iteration count][encrypted blob]
        salt, pos = BksKeyStore._read_data(data, pos)
        iteration_count = b4.unpack_from(data, pos)[0]; pos += 4
        encrypted_blob = data[pos:]
        # The intention of the BKS entry decryption routine in BcKeyStoreSpi.StoreEntry.getObject(char[] password) appears to be:
        #  - try to decrypt with "PBEWithSHAAnd3-KeyTripleDES-CBC" first;
        #  - if that fails, try again with "BrokenPBEWithSHAAnd3-KeyTripleDES-CBC";
        #  - if that still fails, try again with "OldPBEWithSHAAnd3-KeyTripleDES-CBC";
        #  - give up with an UnrecoverableKeyException.
        #
        # However, at the time of writing (bcprov-jdk15on-1.53 and 1.54), the second and third cases can never successfully execute
        # because their implementation requests non-existent SecretKeyFactory objects for the Broken/Old algorithm names.
        # Inquiry through the BC developer mailing list tells us that this is indeed old functionality that has been retired long
        # ago and is not expected to be operational anymore. So in practice the real behaviour is:
        #  - try to decrypt with "PBEWithSHAAnd3-KeyTripleDES-CBC";
        #  - give up with an UnrecoverableKeyException.
        try:
            decrypted = rfc7292.decrypt_PBEWithSHAAnd3KeyTripleDESCBC(encrypted_blob, key_password, salt, iteration_count)
        except BadDataLengthException as e:
            # BUGFIX: 'e' was previously unbound here (bare 'except' clause
            # without 'as e'), turning this path into a NameError.
            raise BadKeystoreFormatException("Bad BKS entry format: %s" % str(e))
        except BadPaddingException:
            raise DecryptionFailureException("Failed to decrypt data for key '%s'; wrong password?" % self.alias)
        # the plaintext content of a SealedEntry is a KeyEntry; parse it and
        # adopt this wrapper's metadata onto the nested entry.
        key_entry, dummy = BksKeyStore._read_bks_key(decrypted, 0, self.store_type)
        key_entry.store_type = self.store_type
        key_entry.cert_chain = self.cert_chain
        key_entry.alias = self.alias
        key_entry.timestamp = self.timestamp
        self._nested = key_entry
        self._encrypted = None
    decrypt.__doc__ = AbstractBksEntry.decrypt.__doc__
    is_decrypted.__doc__ = AbstractBksEntry.is_decrypted.__doc__
class BksKeyStore(AbstractKeystore):
"""
Bouncycastle "BKS" keystore parser. Supports both the current V2 and old V1 formats.
"""
def __init__(self, store_type, entries, version=2):
super(BksKeyStore, self).__init__(store_type, entries)
self.version = version
"""Version of the keystore format, if loaded."""
@property
def certs(self):
"""A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`BksTrustedCertEntry`."""
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, BksTrustedCertEntry)])
@property
def secret_keys(self):
"""A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`BksSecretKeyEntry`."""
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, BksSecretKeyEntry)])
@property
def sealed_keys(self):
"""A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`BksSealedKeyEntry`."""
return dict([(a, e) for a, e in self.entries.items()
| |
<reponame>alexander-bauer/awx
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the legacy job/project/inventory-source schema (South migration).

        Deletes the old ``JobTemplate``, ``InventorySource``, ``Project``,
        ``ProjectUpdate``, ``InventoryUpdate`` and ``Job`` tables, every column
        and M2M table referencing them, and repoints the surviving ``new_job``
        foreign keys at ``main.JobNew``.  Ordering matters: the unique
        constraint on JobHostSummary is removed first because it covers the
        ``job_id`` column that is dropped further down.  Auto-generated by
        South; do not hand-edit the operation order.
        """
        # Removing unique constraint on 'JobHostSummary', fields ['job', 'host']
        db.delete_unique(u'main_jobhostsummary', ['job_id', 'host_id'])
        # Deleting model 'JobTemplate'
        db.delete_table(u'main_jobtemplate')
        # Deleting model 'InventorySource'
        db.delete_table(u'main_inventorysource')
        # Deleting model 'Project'
        db.delete_table(u'main_project')
        # Deleting model 'ProjectUpdate'
        db.delete_table(u'main_projectupdate')
        # Deleting model 'InventoryUpdate'
        db.delete_table(u'main_inventoryupdate')
        # Deleting model 'Job'
        db.delete_table(u'main_job')
        # Deleting field 'Host.last_job'
        db.delete_column(u'main_host', 'last_job_id')
        # Removing M2M table for field inventory_sources on 'Host'
        db.delete_table(db.shorten_name(u'main_host_inventory_sources'))
        # Removing M2M table for field projects on 'Organization'
        db.delete_table(db.shorten_name(u'main_organization_projects'))
        # Removing M2M table for field projects on 'Team'
        db.delete_table(db.shorten_name(u'main_team_projects'))
        # Deleting field 'Permission.project'
        db.delete_column(u'main_permission', 'project_id')
        # Deleting field 'JobHostSummary.job'
        db.delete_column(u'main_jobhostsummary', 'job_id')
        # Changing field 'JobHostSummary.new_job'
        db.alter_column(u'main_jobhostsummary', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['main.JobNew']))
        # Removing M2M table for field inventory_sources on 'Group'
        db.delete_table(db.shorten_name(u'main_group_inventory_sources'))
        # Deleting field 'JobEvent.job'
        db.delete_column(u'main_jobevent', 'job_id')
        # Changing field 'JobEvent.new_job'
        db.alter_column(u'main_jobevent', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['main.JobNew']))
        # Removing M2M table for field inventory_update on 'ActivityStream'
        db.delete_table(db.shorten_name(u'main_activitystream_inventory_update'))
        # Removing M2M table for field project_update on 'ActivityStream'
        db.delete_table(db.shorten_name(u'main_activitystream_project_update'))
        # Removing M2M table for field inventory_source on 'ActivityStream'
        db.delete_table(db.shorten_name(u'main_activitystream_inventory_source'))
        # Removing M2M table for field job_template on 'ActivityStream'
        db.delete_table(db.shorten_name(u'main_activitystream_job_template'))
        # Removing M2M table for field job on 'ActivityStream'
        db.delete_table(db.shorten_name(u'main_activitystream_job'))
        # Removing M2M table for field project on 'ActivityStream'
        db.delete_table(db.shorten_name(u'main_activitystream_project'))
def backwards(self, orm):
# Adding model 'JobTemplate'
db.create_table(u'main_jobtemplate', (
('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobtemplates', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'jobtemplate', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('extra_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('verbosity', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
('job_tags', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('job_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'jobtemplate', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='job_templates', null=True, on_delete=models.SET_NULL, to=orm['main.Project'])),
('host_config_key', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('limit', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('inventory', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobtemplates', null=True, on_delete=models.SET_NULL, to=orm['main.Inventory'])),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('forks', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
('playbook', self.gf('django.db.models.fields.CharField')(default='', max_length=1024)),
('cloud_credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobtemplates_as_cloud_credential+', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=512, unique=True)),
))
db.send_create_signal('main', ['JobTemplate'])
# Adding model 'InventorySource'
db.create_table(u'main_inventorysource', (
('last_updated', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
('source_regions', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('current_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='inventory_source_as_current_update+', null=True, on_delete=models.SET_NULL, to=orm['main.InventoryUpdate'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('overwrite', self.gf('django.db.models.fields.BooleanField')(default=False)),
('source_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('group', self.gf('awx.main.fields.AutoOneToOneField')(default=None, related_name='inventory_source', unique=True, null=True, to=orm['main.Group'])),
('last_update_failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventorysource', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('last_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='inventory_source_as_last_update+', null=True, on_delete=models.SET_NULL, to=orm['main.InventoryUpdate'])),
('source', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
('inventory', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='inventory_sources', null=True, to=orm['main.Inventory'])),
('update_cache_timeout', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('status', self.gf('django.db.models.fields.CharField')(default='none', max_length=32)),
('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='inventorysources', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('overwrite_vars', self.gf('django.db.models.fields.BooleanField')(default=False)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventorysource', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('update_on_launch', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('source_path', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
))
db.send_create_signal('main', ['InventorySource'])
# Adding model 'Project'
db.create_table(u'main_project', (
('scm_branch', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True)),
('scm_update_cache_timeout', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('scm_clean', self.gf('django.db.models.fields.BooleanField')(default=False)),
('scm_delete_on_update', self.gf('django.db.models.fields.BooleanField')(default=False)),
('current_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='project_as_current_update+', null=True, on_delete=models.SET_NULL, to=orm['main.ProjectUpdate'])),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'project', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('last_update_failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'project', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('last_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='project_as_last_update+', null=True, on_delete=models.SET_NULL, to=orm['main.ProjectUpdate'])),
('local_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('scm_delete_on_next_update', self.gf('django.db.models.fields.BooleanField')(default=False)),
('status', self.gf('django.db.models.fields.CharField')(default='ok', max_length=32, null=True)),
('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projects', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('scm_type', self.gf('django.db.models.fields.CharField')(default='', max_length=8, blank=True)),
('scm_update_on_launch', self.gf('django.db.models.fields.BooleanField')(default=False)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=512, unique=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('scm_url', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
))
db.send_create_signal('main', ['Project'])
# Adding model 'ProjectUpdate'
db.create_table(u'main_projectupdate', (
('cancel_flag', self.gf('django.db.models.fields.BooleanField')(default=False)),
('scm_branch', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True)),
('scm_clean', self.gf('django.db.models.fields.BooleanField')(default=False)),
('scm_delete_on_update', self.gf('django.db.models.fields.BooleanField')(default=False)),
('start_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('celery_task_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'projectupdate', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('job_cwd', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'projectupdate', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('local_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=20)),
('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projectupdates', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('result_traceback', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('scm_type', self.gf('django.db.models.fields.CharField')(default='', max_length=8, blank=True)),
('job_env', self.gf('jsonfield.fields.JSONField')(default={}, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('result_stdout_file', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('job_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('scm_url', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='project_updates', to=orm['main.Project'])),
('_result_stdout', self.gf('django.db.models.fields.TextField')(default='', db_column='result_stdout', blank=True)),
))
db.send_create_signal('main', ['ProjectUpdate'])
# Adding model 'InventoryUpdate'
db.create_table(u'main_inventoryupdate', (
('cancel_flag', self.gf('django.db.models.fields.BooleanField')(default=False)),
('source_regions', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('license_error', self.gf('django.db.models.fields.BooleanField')(default=False)),
('start_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('celery_task_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('overwrite', self.gf('django.db.models.fields.BooleanField')(default=False)),
('source_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventoryupdate', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('job_cwd', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('source', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventoryupdate', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=20)),
('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='inventoryupdates', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('overwrite_vars', self.gf('django.db.models.fields.BooleanField')(default=False)),
('result_traceback', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('job_env', self.gf('jsonfield.fields.JSONField')(default={}, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('result_stdout_file', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('inventory_source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='inventory_updates', to=orm['main.InventorySource'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('job_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('source_path', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('_result_stdout', self.gf('django.db.models.fields.TextField')(default='', db_column='result_stdout', blank=True)),
))
db.send_create_signal('main', ['InventoryUpdate'])
# Adding model 'Job'
db.create_table(u'main_job', (
('cancel_flag', self.gf('django.db.models.fields.BooleanField')(default=False)),
('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('result_traceback', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('job_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
('start_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('job_tags', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('celery_task_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
('playbook', self.gf('django.db.models.fields.CharField')(default='', max_length=1024)),
('job_env', self.gf('jsonfield.fields.JSONField')(default={}, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('result_stdout_file', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=20)),
('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'job', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('job_cwd', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('job_template', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', on_delete=models.SET_NULL, default=None, to=orm['main.JobTemplate'], blank=True, null=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('extra_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('verbosity', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
('job_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'job', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', null=True, on_delete=models.SET_NULL, to=orm['main.Project'])),
('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('inventory', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', null=True, on_delete=models.SET_NULL, to=orm['main.Inventory'])),
('_result_stdout', self.gf('django.db.models.fields.TextField')(default='', db_column='result_stdout', blank=True)),
('limit', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
('forks', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
('cloud_credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs_as_cloud_credential+', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
('launch_type', self.gf('django.db.models.fields.CharField')(default='manual', max_length=20)),
))
db.send_create_signal('main', ['Job'])
# Adding field 'Host.last_job'
db.add_column(u'main_host', 'last_job',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='hosts_as_last_job+', null=True, on_delete=models.SET_NULL, to=orm['main.Job']),
keep_default=False)
# Adding M2M table for field inventory_sources on 'Host'
m2m_table_name = db.shorten_name(u'main_host_inventory_sources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('host', models.ForeignKey(orm['main.host'], null=False)),
('inventorysource', models.ForeignKey(orm['main.inventorysource'], null=False))
))
db.create_unique(m2m_table_name, ['host_id', 'inventorysource_id'])
# Adding M2M table for field projects on 'Organization'
m2m_table_name = db.shorten_name(u'main_organization_projects')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('organization', models.ForeignKey(orm['main.organization'], null=False)),
('project', models.ForeignKey(orm['main.project'], null=False))
))
db.create_unique(m2m_table_name, ['organization_id', 'project_id'])
# Adding M2M table for field projects on 'Team'
m2m_table_name = db.shorten_name(u'main_team_projects')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('team', models.ForeignKey(orm['main.team'], null=False)),
('project', models.ForeignKey(orm['main.project'], null=False))
))
db.create_unique(m2m_table_name, ['team_id', 'project_id'])
# Adding field 'Permission.project'
db.add_column(u'main_permission', 'project',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='permissions', null=True, to=orm['main.Project'], on_delete=models.SET_NULL, blank=True),
keep_default=False)
# Adding field 'JobHostSummary.job'
db.add_column(u'main_jobhostsummary', 'job',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='job_host_summaries', null=True, to=orm['main.Job']),
keep_default=False)
# Changing field 'JobHostSummary.new_job'
db.alter_column(u'main_jobhostsummary', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['main.JobNew']))
# Adding unique constraint on 'JobHostSummary', fields ['job', 'host']
db.create_unique(u'main_jobhostsummary', ['job_id', 'host_id'])
# Adding M2M table for field inventory_sources on 'Group'
m2m_table_name = db.shorten_name(u'main_group_inventory_sources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('group', models.ForeignKey(orm['main.group'], null=False)),
('inventorysource', models.ForeignKey(orm['main.inventorysource'], null=False))
))
db.create_unique(m2m_table_name, ['group_id', 'inventorysource_id'])
# Adding field 'JobEvent.job'
db.add_column(u'main_jobevent', 'job',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='job_events', null=True, to=orm['main.Job']),
keep_default=False)
# Changing field 'JobEvent.new_job'
db.alter_column(u'main_jobevent', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['main.JobNew']))
# Adding M2M table for field inventory_update on 'ActivityStream'
m2m_table_name = db.shorten_name(u'main_activitystream_inventory_update')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
('inventoryupdate', models.ForeignKey(orm['main.inventoryupdate'], null=False))
))
db.create_unique(m2m_table_name, ['activitystream_id', 'inventoryupdate_id'])
# Adding M2M table for field project_update on 'ActivityStream'
m2m_table_name = db.shorten_name(u'main_activitystream_project_update')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
('projectupdate', models.ForeignKey(orm['main.projectupdate'], null=False))
))
db.create_unique(m2m_table_name, ['activitystream_id', 'projectupdate_id'])
# Adding M2M table for field inventory_source on 'ActivityStream'
m2m_table_name = db.shorten_name(u'main_activitystream_inventory_source')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
('inventorysource', models.ForeignKey(orm['main.inventorysource'], null=False))
))
db.create_unique(m2m_table_name, ['activitystream_id', 'inventorysource_id'])
# Adding M2M table for field job_template on 'ActivityStream'
m2m_table_name = db.shorten_name(u'main_activitystream_job_template')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
('jobtemplate', models.ForeignKey(orm['main.jobtemplate'], null=False))
))
db.create_unique(m2m_table_name, ['activitystream_id', 'jobtemplate_id'])
# Adding M2M table for field job on 'ActivityStream'
m2m_table_name = db.shorten_name(u'main_activitystream_job')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
('job', models.ForeignKey(orm['main.job'], null=False))
))
db.create_unique(m2m_table_name, ['activitystream_id', 'job_id'])
# Adding M2M table for field project on 'ActivityStream'
m2m_table_name = db.shorten_name(u'main_activitystream_project')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
('project', models.ForeignKey(orm['main.project'], null=False))
))
db.create_unique(m2m_table_name, ['activitystream_id', 'project_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),