code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from fastapi import APIRouter, Depends, HTTPException, Response, Request
from schemas import py_models as pm
from typing import List
import app_services.services as serv
import sqlalchemy.orm as orm
from sqlalchemy import distinct
from models import sql_models as sql
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
import numpy as np
from charts import combined_trace as ct
from fastapi.templating import Jinja2Templates
# Router for report endpoints; mounted by the main application elsewhere.
app = APIRouter()
'''
Configuring templates and staticfiles via Jinja2 and aiofilies respectively
'''
# Jinja2 templates are loaded from the local "templates" directory.
templates = Jinja2Templates(directory="templates")
@app.get('/reports', include_in_schema=False)
async def get_report(request: Request, db: orm.Session=Depends(serv.get_db)):
    '''
    Render a graphical report page for the librarian.

    Collects, per distinct member, the member name and total spend, and
    per distinct book, the title and number of times it was issued, then
    hands the four arrays to the chart builder (which writes the Plotly
    figures to a file) and renders the chart template. Any failure is
    returned to the client as a plain-text response.
    '''
    try:
        distinct_book_ids = db.query(sql.Transactions.book_id.distinct())
        distinct_member_ids = db.query(sql.Transactions.member_id.distinct())

        # Per-member name and net spend
        member_names = []
        member_spends = []
        for (member_id,) in distinct_member_ids.all():
            member = db.query(sql.Members).filter(sql.Members.id == member_id).first()
            member_names.append(member.name)
            member_spends.append(member.total_spend)

        # Per-book title and issue count
        book_titles = []
        book_issue_counts = []
        for (book_id,) in distinct_book_ids.all():
            book = db.query(sql.Books).filter(sql.Books.bookID == book_id).first()
            book_titles.append(book.title)
            book_issue_counts.append(book.net_issue)

        ct.final_plots(np.array(member_names), np.array(member_spends),
                       np.array(book_titles), np.array(book_issue_counts))
        return templates.TemplateResponse("chart.html", {"request": request})
    except Exception as e:
        return Response(content=str(e))
| [
"models.sql_models.Transactions.member_id.distinct",
"charts.combined_trace.final_plots",
"fastapi.templating.Jinja2Templates",
"numpy.array",
"fastapi.APIRouter",
"models.sql_models.Transactions.book_id.distinct",
"fastapi.Depends"
] | [((465, 476), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (474, 476), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, Request\n'), ((575, 613), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (590, 613), False, 'from fastapi.templating import Jinja2Templates\n'), ((716, 736), 'fastapi.Depends', 'Depends', (['serv.get_db'], {}), '(serv.get_db)\n', (723, 736), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, Request\n'), ((2068, 2090), 'numpy.array', 'np.array', (['members_name'], {}), '(members_name)\n', (2076, 2090), True, 'import numpy as np\n'), ((2118, 2144), 'numpy.array', 'np.array', (['members_spending'], {}), '(members_spending)\n', (2126, 2144), True, 'import numpy as np\n'), ((2167, 2188), 'numpy.array', 'np.array', (['books_names'], {}), '(books_names)\n', (2175, 2188), True, 'import numpy as np\n'), ((2212, 2234), 'numpy.array', 'np.array', (['books_issues'], {}), '(books_issues)\n', (2220, 2234), True, 'import numpy as np\n'), ((2252, 2325), 'charts.combined_trace.final_plots', 'ct.final_plots', (['members_name', 'members_spending', 'books_names', 'books_issues'], {}), '(members_name, members_spending, books_names, books_issues)\n', (2266, 2325), True, 'from charts import combined_trace as ct\n'), ((999, 1034), 'models.sql_models.Transactions.book_id.distinct', 'sql.Transactions.book_id.distinct', ([], {}), '()\n', (1032, 1034), True, 'from models import sql_models as sql\n'), ((1071, 1108), 'models.sql_models.Transactions.member_id.distinct', 'sql.Transactions.member_id.distinct', ([], {}), '()\n', (1106, 1108), True, 'from models import sql_models as sql\n')] |
from online_inference import SecondBackend, build_network, inference
import numpy as np
import csv
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Hex colours used when drawing detection boxes, keyed by class name.
BOX_COLOUR_SCHEME = {
    'Car': '#00FF00',           # Green
    'Pedestrian': '#00FFFF',    # Teal
    'Cyclist': '#FFFF00'        # Yellow
}
# Figure size in inches for all matplotlib visualizations below.
fig_size = (16, 9)
# Classes that get a class-specific colour; other classes fall back to defaults.
gt_classes = ['Car', 'Pedestrian']
class ObjectLabel:
    """Kitti-style object label.

    Fields mirror the Kitti label file layout:
      type        object class string ('Car', 'Pedestrian', ...)
      truncation  float in [0, 1]; fraction of the object outside the image
      occlusion   int 0-3 occlusion state (0 = fully visible, 3 = unknown)
      alpha       observation angle of the object, in [-pi, pi]
      x1, y1, x2, y2  2D box pixel coords (left, top, right, bottom)
      h, w, l     3D box dimensions (height, width, length) in meters
      t           3D location (x, y, z) in camera coordinates, meters
      ry          rotation around the camera Y axis, in [-pi, pi]
      score       detection confidence (results only; higher is better)
    """

    def __init__(self):
        self.type = ""  # Object class string
        self.truncation = 0.
        self.occlusion = 0.
        self.alpha = 0.
        # 2D bounding box corners
        self.x1 = 0.
        self.y1 = 0.
        self.x2 = 0.
        self.y2 = 0.
        # 3D dimensions
        self.h = 0.
        self.w = 0.
        self.l = 0.
        self.t = (0., 0., 0.)
        self.ry = 0.
        self.score = 0.

    def __eq__(self, other):
        """Two labels compare equal when every field matches.

        :param other: object to compare against this instance
        :return: True when other is an ObjectLabel with identical fields
        """
        return isinstance(other, ObjectLabel) and self.__dict__ == other.__dict__
def visualization(image, display=True):
    """Create a two-row figure showing the same image in both subplots.

    :param image: image array to display
    :param display: when True, show the figure without blocking
    :return: (figure, top axes, bottom axes) tuple
    """
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=fig_size, sharex=True)
    # Remove margins so the images fill the canvas
    fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, hspace=0.0)
    for axes in (ax1, ax2):
        axes.imshow(image)
        # Clamp the plot to the image extent; the y axis runs downward
        axes.set_xlim(0, image.shape[1])
        axes.set_ylim(image.shape[0], 0)
    if display:
        plt.show(block=False)
    return fig, ax1, ax2
def visualization_single_plot(image, display=True):
    """Build a single full-bleed figure displaying the given image.

    :param image: image array to display
    :param display: when True, show the figure without blocking
    :return: (figure, axes) tuple
    """
    fig, ax = plt.subplots(1, figsize=fig_size, facecolor='black')
    # Remove every margin so the image covers the whole canvas
    fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0,
                        hspace=0.0, wspace=0.0)
    ax.set_axis_off()
    # Clamp the plot to the image extent; the y axis runs downward
    ax.set_xlim(0, image.shape[1])
    ax.set_ylim(image.shape[0], 0)
    ax.imshow(image)
    if display:
        plt.show(block=False)
    return fig, ax
def project_to_image(point_cloud, p):
    """Project a 3D point cloud into 2D image coordinates.

    :param point_cloud: 3D point cloud of shape (3, N)
    :param p: camera projection matrix of shape (3, 4)
    :return: image coordinates of the points, shape (2, N)
    """
    n_pts = point_cloud.shape[1]
    # Homogeneous coordinates: append a row of ones
    homogeneous = np.vstack((point_cloud, np.ones((1, n_pts))))
    projected = p.dot(homogeneous)
    # Normalize x and y by depth, then drop the depth row
    projected[0, :] /= projected[2, :]
    projected[1, :] /= projected[2, :]
    return projected[:2]
def compute_box_corners_3d(object_label):
    """Return the eight 3D corner positions of an ObjectLabel's box.

    :param object_label: label providing ry, l, w, h and t
    :return: numpy array of shape (3, 8) with corners in camera coordinates
    """
    c, s = np.cos(object_label.ry), np.sin(object_label.ry)
    # Rotation about the camera Y axis
    rot = np.array([[c, 0.0, s],
                    [0.0, 1.0, 0.0],
                    [-s, 0.0, c]])

    half_l = object_label.l / 2
    half_w = object_label.w / 2
    height = object_label.h

    # Bottom face first (y = 0), then top face (y = -h)
    x_corners = np.array([half_l, half_l, -half_l, -half_l] * 2)
    y_corners = np.array([0.0] * 4 + [-height] * 4)
    z_corners = np.array([half_w, -half_w, -half_w, half_w] * 2)

    corners_3d = rot.dot(np.array([x_corners, y_corners, z_corners]))
    # Translate the rotated box to the object's location
    for axis in range(3):
        corners_3d[axis, :] += object_label.t[axis]
    return corners_3d
def project_box3d_to_image(corners_3d, p):
    """Project a 3D bounding box into image space.

    :param corners_3d: (3, 8) array of 3D box corners
    :param p: camera projection matrix (3, 4)
    :return: tuple of (projected (2, 8) corner array,
             (4, 4) array of corner indices, one row per vertical face)
    """
    # Corner indices of the four vertical faces: front, left, back, right
    face_idx = np.array([[0, 1, 5, 4],
                         [1, 2, 6, 5],
                         [2, 3, 7, 6],
                         [3, 0, 4, 7]])
    return project_to_image(corners_3d, p), face_idx
def compute_orientation_3d(obj, p):
    """Project the object's orientation line into image space.

    The line runs from the object's centre along its length axis.
    :param obj: object label providing ry, l and t
    :param p: camera projection matrix
    :return: projected (2, 2) endpoints, or None when any endpoint lies
             closer than 0.1 m in front of the camera
    """
    c, s = np.cos(obj.ry), np.sin(obj.ry)
    # Rotation about the camera Y axis
    rot = np.array([[c, 0.0, s],
                    [0.0, 1.0, 0.0],
                    [-s, 0.0, c]])
    # Endpoints (0, 0, 0) and (l, 0, 0) in the object frame
    orientation3d = rot.dot(np.array([[0.0, obj.l],
                                        [0.0, 0.0],
                                        [0.0, 0.0]]))
    for axis in range(3):
        orientation3d[axis, :] += obj.t[axis]
    # Only draw for boxes that are in front of the camera
    if np.any(orientation3d[2, :] < 0.1):
        return None
    return project_to_image(orientation3d, p)
def draw_box_2d(ax, obj, color_tm='g'):
    """Draw an object's 2D bounding box on the given axes.

    :param ax: subplot handle
    :param obj: object label supplying x1, y1, x2, y2
    :param color_tm: edge colour for the rectangle
    """
    width = obj.x2 - obj.x1
    height = obj.y2 - obj.y1
    # Unfilled rectangle; no class label is drawn here
    box = patches.Rectangle((obj.x1, obj.y1), width, height,
                            linewidth=2,
                            edgecolor=color_tm,
                            facecolor='none')
    ax.add_patch(box)
def draw_box_3d(ax, obj, p, show_orientation=True,
                color_table=None, line_width=3, double_line=True,
                box_color=None):
    """Draws a 3D box on the given subplot from an object label and a
    frame transformation matrix.

    :param ax: subplot handle
    :param obj: object label providing the box geometry
    :param p: stereo frame transformation matrix
    :param show_orientation: optional, draw a line showing orientation
    :param color_table: optional, a custom table for coloring the boxes;
        must have 4 values (indexed by the occlusion value). This color
        scheme is used to display boxes colored based on difficulty.
    :param line_width: optional, custom line width to draw the box
    :param double_line: optional, overlays a thinner line inside the box lines
    :param box_color: optional, use a custom color for the box (instead of
        the default color_table)
    """
    corners3d = compute_box_corners_3d(obj)
    corners, face_idx = project_box3d_to_image(corners3d, p)
    # define colors
    if color_table:
        if len(color_table) != 4:
            raise ValueError('Invalid color table length, must be 4')
    else:
        color_table = ["#00cc00", 'y', 'r', 'w']
    # Truncated objects (> 0.1) are drawn with a dashed outline
    trun_style = ['solid', 'dashed']
    trc = int(obj.truncation > 0.1)
    if len(corners) > 0:
        for i in range(4):
            # Append the first corner of the face again to close the polygon
            x = np.append(corners[0, face_idx[i, ]],
                          corners[0, face_idx[i, 0]])
            y = np.append(corners[1, face_idx[i, ]],
                          corners[1, face_idx[i, 0]])
            # Draw the boxes; the occlusion level selects the color
            if box_color is None:
                box_color = color_table[int(obj.occlusion)]
            ax.plot(x, y, linewidth=line_width,
                    color=box_color,
                    linestyle=trun_style[trc])
            # Draw a thinner second line inside
            if double_line:
                ax.plot(x, y, linewidth=line_width / 3.0, color='b')
    if show_orientation:
        # Compute orientation 3D; None means the box is behind the camera
        orientation = compute_orientation_3d(obj, p)
        if orientation is not None:
            x = np.append(orientation[0, ], orientation[0, ])
            y = np.append(orientation[1, ], orientation[1, ])
            # Draw the orientation line: white base with a black overlay
            ax.plot(x, y, linewidth=4, color='w')
            ax.plot(x, y, linewidth=2, color='k')
def main(BACKEND, image, points, calib, idx):
    """Run SECOND inference on one sample and save a visualization image.

    Converts each prediction into an ObjectLabel, draws the 3D boxes over
    the camera image, and writes the figure to
    /notebooks/second_output/images/<idx>.png.

    :param BACKEND: initialized SecondBackend used for inference
    :param image: camera image as a numpy array
    :param points: lidar point cloud, one row per point
    :param calib: calibration dict holding at least the 'P2' matrix
    :param idx: integer sample index, used to name the output file
    """
    # NOTE(review): assumes inference() returns dict keys 'labels', 'alpha',
    # 'bbox', 'dims', 'locs' and 'rots' -- confirm against online_inference.
    annos = inference(BACKEND, points, calib, image.shape[:2], idx)
    pred_objects = [ObjectLabel() for prediction in annos["labels"]]
    # Fail if only one object?
    for i in range(len(pred_objects)):
        obj = pred_objects[i]
        obj.type = annos["labels"][i]
        obj.truncation = 0  # Not needed
        obj.occlusion = 0  # Not needed
        obj.alpha = annos["alpha"][i]  # Not needed
        obj.x1, obj.y1, obj.x2, obj.y2 = annos["bbox"][i]
        #obj.w, obj.l, obj.h = annos["dims"][i] # Different order from Kitti
        obj.l, obj.h, obj.w = annos["dims"][i]
        obj.t = annos["locs"][i]
        #loc = annos["locs"][i]
        #obj.t = (-loc[1], -loc[2] + obj.h/2, loc[0]) # Seems to be in lidar format initially with centroid not on ground plane
        obj.ry = -annos["rots"][i][2]  # Only value not 0, negative seems more correct
    #prop_fig, prop_2d_axes, prop_3d_axes = visualization(image, display=False)
    prop_fig, prop_3d_axes = visualization_single_plot(image, display=False)
    # Only the first three rows of the homogeneous P2 matrix are needed
    draw_predictions(pred_objects, None, prop_3d_axes, calib["P2"][:3])
    out_name = "/notebooks/second_output/images/%06d.png" % idx
    plt.savefig(out_name)
    plt.close(prop_fig)
def draw_predictions(objects, prop_2d_axes, prop_3d_axes, p_matrix):
    """Overlay predicted 3D boxes on the 3D axes.

    :param objects: iterable of ObjectLabel predictions
    :param prop_2d_axes: unused (2D drawing is currently disabled)
    :param prop_3d_axes: axes that receive the 3D box overlays
    :param p_matrix: (3, 4) camera projection matrix
    """
    for obj in objects:
        # Known classes get their scheme colour; others use the default table
        colour = BOX_COLOUR_SCHEME[obj.type] if obj.type in gt_classes else None
        #draw_box_2d(prop_2d_axes, obj, color_tm='r')
        draw_box_3d(prop_3d_axes, obj, p_matrix,
                    show_orientation=False,
                    color_table=['r', 'y', 'r', 'w'],
                    line_width=2,
                    double_line=False,
                    box_color=colour)
def read_calibration(path):
    """Reads in a calibration file from the Kitti dataset.

    Keyword Arguments:
    ------------------
    path : Str
        Path to the space-delimited calibration text file. Rows 0-3 are the
        camera projection matrices P0-P3 (label + 12 floats), row 4 is the
        rectification matrix (label + 9 floats) and row 5 the
        velodyne-to-camera transform (label + 12 floats).

    Returns:
    --------
    calib : dict
        'P2', 'R0_rect' and 'Tr_velo_to_cam', each padded to a 4x4
        homogeneous numpy matrix.
    """
    calib = dict()
    # Context manager guarantees the file handle is closed even on a
    # malformed file (the original left it open when parsing raised).
    with open(path, 'r') as data_file:
        data = list(csv.reader(data_file, delimiter=' '))

    # Camera projection matrices P0-P3; only P2 (left color cam) is kept
    p_all = []
    for row in data[:4]:
        p = np.reshape([float(v) for v in row[1:]], (3, 4))
        p_all.append(np.vstack((p, [0, 0, 0, 1])))
    calib["P2"] = p_all[2]

    # Rectification matrix, padded with a zero column then a homogeneous row
    tr_rect = np.reshape([float(v) for v in data[4][1:]], (3, 3))
    tr_rect = np.insert(tr_rect, 3, 0, axis=1)
    calib["R0_rect"] = np.vstack((tr_rect, [0, 0, 0, 1]))

    # Velodyne-to-camera transform, padded to 4x4 homogeneous form
    tr_v2c = np.reshape([float(v) for v in data[5][1:]], (3, 4))
    calib['Tr_velo_to_cam'] = np.vstack((tr_v2c, [0, 0, 0, 1]))
    return calib
if __name__ == '__main__':
    # Load the trained SECOND model once, then sweep the test split
    BACKEND = SecondBackend()
    BACKEND.checkpoint_path = "/notebooks/second_models/carla_carped_finetune_v2/voxelnet-72000.tckpt"
    BACKEND.config_path = "/notebooks/second_models/carla_carped_finetune_v2/pipeline.config"
    build_network(BACKEND)
    # Process every 20th frame of the test split
    for idx in range(0, 5709, 20):
        #for idx in range(980, 981):
        filename = "%06d" % idx
        dataset = "Arctic"
        # Loop below on more images
        image = np.array(Image.open("/notebooks/DATA/" + dataset + "/object/testing/image_2/" + filename + ".png"), dtype=np.uint8)
        v_path = "/notebooks/DATA/" + dataset + "/object/testing/velodyne/" + filename + ".bin"
        # Velodyne binary format: float32 rows of (x, y, z, reflectance)
        num_features = 4
        points = np.fromfile(str(v_path), dtype=np.float32, count=-1).reshape([-1, num_features])
        calib = read_calibration("/notebooks/DATA/" + dataset + "/object/testing/calib/" + filename + ".txt")
        main(BACKEND, image, points, calib, idx)
        print(idx, '/', 5708)
| [
"online_inference.build_network",
"online_inference.SecondBackend",
"numpy.array",
"numpy.sin",
"numpy.arange",
"numpy.reshape",
"numpy.delete",
"matplotlib.pyplot.close",
"numpy.dot",
"numpy.vstack",
"csv.reader",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.cos",
"matplotlib.pyplo... | [((2935, 2984), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': 'fig_size', 'sharex': '(True)'}), '(2, 1, figsize=fig_size, sharex=True)\n', (2947, 2984), True, 'import matplotlib.pyplot as plt\n'), ((3740, 3792), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'fig_size', 'facecolor': '"""black"""'}), "(1, figsize=fig_size, facecolor='black')\n", (3752, 3792), True, 'import matplotlib.pyplot as plt\n'), ((4668, 4691), 'numpy.delete', 'np.delete', (['pts_2d', '(2)', '(0)'], {}), '(pts_2d, 2, 0)\n', (4677, 4691), True, 'import numpy as np\n'), ((5340, 5410), 'numpy.array', 'np.array', (['[l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]'], {}), '([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2])\n', (5348, 5410), True, 'import numpy as np\n'), ((5436, 5474), 'numpy.array', 'np.array', (['[0, 0, 0, 0, -h, -h, -h, -h]'], {}), '([0, 0, 0, 0, -h, -h, -h, -h])\n', (5444, 5474), True, 'import numpy as np\n'), ((5491, 5561), 'numpy.array', 'np.array', (['[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]'], {}), '([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2])\n', (5499, 5561), True, 'import numpy as np\n'), ((7045, 7071), 'numpy.dot', 'np.dot', (['rot', 'orientation3d'], {}), '(rot, orientation3d)\n', (7051, 7071), True, 'import numpy as np\n'), ((7318, 7351), 'numpy.arange', 'np.arange', (['orientation3d.shape[1]'], {}), '(orientation3d.shape[1])\n', (7327, 7351), True, 'import numpy as np\n'), ((7767, 7891), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(obj.x1, obj.y1)', '(obj.x2 - obj.x1)', '(obj.y2 - obj.y1)'], {'linewidth': '(2)', 'edgecolor': 'color_tm', 'facecolor': '"""none"""'}), "((obj.x1, obj.y1), obj.x2 - obj.x1, obj.y2 - obj.y1,\n linewidth=2, edgecolor=color_tm, facecolor='none')\n", (7784, 7891), True, 'import matplotlib.patches as patches\n'), ((11104, 11159), 'online_inference.inference', 'inference', (['BACKEND', 'points', 
'calib', 'image.shape[:2]', 'idx'], {}), '(BACKEND, points, calib, image.shape[:2], idx)\n', (11113, 11159), False, 'from online_inference import SecondBackend, build_network, inference\n'), ((12266, 12287), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_name'], {}), '(out_name)\n', (12277, 12287), True, 'import matplotlib.pyplot as plt\n'), ((12292, 12311), 'matplotlib.pyplot.close', 'plt.close', (['prop_fig'], {}), '(prop_fig)\n', (12301, 12311), True, 'import matplotlib.pyplot as plt\n'), ((13442, 13478), 'csv.reader', 'csv.reader', (['data_file'], {'delimiter': '""" """'}), "(data_file, delimiter=' ')\n", (13452, 13478), False, 'import csv\n'), ((13985, 14012), 'numpy.reshape', 'np.reshape', (['tr_rect', '(3, 3)'], {}), '(tr_rect, (3, 3))\n', (13995, 14012), True, 'import numpy as np\n'), ((14027, 14059), 'numpy.insert', 'np.insert', (['tr_rect', '(3)', '(0)'], {'axis': '(1)'}), '(tr_rect, 3, 0, axis=1)\n', (14036, 14059), True, 'import numpy as np\n'), ((14083, 14117), 'numpy.vstack', 'np.vstack', (['(tr_rect, [0, 0, 0, 1])'], {}), '((tr_rect, [0, 0, 0, 1]))\n', (14092, 14117), True, 'import numpy as np\n'), ((14274, 14300), 'numpy.reshape', 'np.reshape', (['tr_v2c', '(3, 4)'], {}), '(tr_v2c, (3, 4))\n', (14284, 14300), True, 'import numpy as np\n'), ((14331, 14364), 'numpy.vstack', 'np.vstack', (['(tr_v2c, [0, 0, 0, 1])'], {}), '((tr_v2c, [0, 0, 0, 1]))\n', (14340, 14364), True, 'import numpy as np\n'), ((14426, 14441), 'online_inference.SecondBackend', 'SecondBackend', ([], {}), '()\n', (14439, 14441), False, 'from online_inference import SecondBackend, build_network, inference\n'), ((14643, 14665), 'online_inference.build_network', 'build_network', (['BACKEND'], {}), '(BACKEND)\n', (14656, 14665), False, 'from online_inference import SecondBackend, build_network, inference\n'), ((3216, 3237), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3224, 3237), True, 'import matplotlib.pyplot as plt\n'), ((4089, 4110), 
'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4097, 4110), True, 'import matplotlib.pyplot as plt\n'), ((5601, 5644), 'numpy.array', 'np.array', (['[x_corners, y_corners, z_corners]'], {}), '([x_corners, y_corners, z_corners])\n', (5609, 5644), True, 'import numpy as np\n'), ((13709, 13730), 'numpy.reshape', 'np.reshape', (['p', '(3, 4)'], {}), '(p, (3, 4))\n', (13719, 13730), True, 'import numpy as np\n'), ((13743, 13771), 'numpy.vstack', 'np.vstack', (['(p, [0, 0, 0, 1])'], {}), '((p, [0, 0, 0, 1]))\n', (13752, 13771), True, 'import numpy as np\n'), ((4482, 4516), 'numpy.ones', 'np.ones', (['(1, point_cloud.shape[1])'], {}), '((1, point_cloud.shape[1]))\n', (4489, 4516), True, 'import numpy as np\n'), ((6304, 6362), 'numpy.array', 'np.array', (['[0, 1, 5, 4, 1, 2, 6, 5, 2, 3, 7, 6, 3, 0, 4, 7]'], {}), '([0, 1, 5, 4, 1, 2, 6, 5, 2, 3, 7, 6, 3, 0, 4, 7])\n', (6312, 6362), True, 'import numpy as np\n'), ((6968, 7010), 'numpy.array', 'np.array', (['[0.0, obj.l, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, obj.l, 0.0, 0.0, 0.0, 0.0])\n', (6976, 7010), True, 'import numpy as np\n'), ((9475, 9538), 'numpy.append', 'np.append', (['corners[0, face_idx[i,]]', 'corners[0, face_idx[i, 0]]'], {}), '(corners[0, face_idx[i,]], corners[0, face_idx[i, 0]])\n', (9484, 9538), True, 'import numpy as np\n'), ((9582, 9645), 'numpy.append', 'np.append', (['corners[1, face_idx[i,]]', 'corners[1, face_idx[i, 0]]'], {}), '(corners[1, face_idx[i,]], corners[1, face_idx[i, 0]])\n', (9591, 9645), True, 'import numpy as np\n'), ((10241, 10284), 'numpy.append', 'np.append', (['orientation[0,]', 'orientation[0,]'], {}), '(orientation[0,], orientation[0,])\n', (10250, 10284), True, 'import numpy as np\n'), ((10303, 10346), 'numpy.append', 'np.append', (['orientation[1,]', 'orientation[1,]'], {}), '(orientation[1,], orientation[1,])\n', (10312, 10346), True, 'import numpy as np\n'), ((14856, 14949), 'PIL.Image.open', 'Image.open', (["('/notebooks/DATA/' + dataset + 
'/object/testing/image_2/' + filename + '.png')"], {}), "('/notebooks/DATA/' + dataset + '/object/testing/image_2/' +\n filename + '.png')\n", (14866, 14949), False, 'from PIL import Image\n'), ((5069, 5092), 'numpy.cos', 'np.cos', (['object_label.ry'], {}), '(object_label.ry)\n', (5075, 5092), True, 'import numpy as np\n'), ((5098, 5121), 'numpy.sin', 'np.sin', (['object_label.ry'], {}), '(object_label.ry)\n', (5104, 5121), True, 'import numpy as np\n'), ((5177, 5200), 'numpy.sin', 'np.sin', (['object_label.ry'], {}), '(object_label.ry)\n', (5183, 5200), True, 'import numpy as np\n'), ((5206, 5229), 'numpy.cos', 'np.cos', (['object_label.ry'], {}), '(object_label.ry)\n', (5212, 5229), True, 'import numpy as np\n'), ((6819, 6833), 'numpy.cos', 'np.cos', (['obj.ry'], {}), '(obj.ry)\n', (6825, 6833), True, 'import numpy as np\n'), ((6839, 6853), 'numpy.sin', 'np.sin', (['obj.ry'], {}), '(obj.ry)\n', (6845, 6853), True, 'import numpy as np\n'), ((6909, 6923), 'numpy.sin', 'np.sin', (['obj.ry'], {}), '(obj.ry)\n', (6915, 6923), True, 'import numpy as np\n'), ((6929, 6943), 'numpy.cos', 'np.cos', (['obj.ry'], {}), '(obj.ry)\n', (6935, 6943), True, 'import numpy as np\n')] |
import random
import logging
import numpy as np
import math
from baseline.utils import export
# Public API names; populated by the @exporter decorator applied below.
__all__ = []
# export() returns a decorator that registers each decorated name in __all__.
exporter = export(__all__)
logger = logging.getLogger('baseline')
@exporter
class DataFeed(object):
    """Iterable collection that yields one epoch of batched data.

    Iterating produces ``steps`` batches, one per step, optionally in a
    shuffled order when ``shuffle`` is set; otherwise batches come out in
    dataset order.
    """

    def __init__(self):
        self.steps = 0
        self.shuffle = False

    def _batch(self, i):
        # Subclasses produce the batch for step i
        pass

    def __getitem__(self, i):
        return self._batch(i)

    def __iter__(self):
        # Fix the visiting order up front so one epoch is consistent
        order = np.arange(self.steps)
        if self.shuffle:
            order = np.random.permutation(order)
        for step in order:
            yield self._batch(step)

    def __len__(self):
        return self.steps
@exporter
class ExampleDataFeed(DataFeed):
    """Data feed backed by a list-like examples container."""

    def __init__(self, examples, batchsz, **kwargs):
        """Constructor from an examples container.

        :param examples: A container of examples exposing ``batch()`` and ``len()``
        :param batchsz: Batch size per step
        :param kwargs: See below

        :Keyword Arguments:
            * *shuffle* -- Shuffle the data per epoch? Defaults to `False`
            * *trim* -- Trim batches to the maximum length seen in the batch
              (defaults to `False`). This can lead to batches being shorter
              than the maximum length provided to the system. Not supported
              in all frameworks.
            * *truncate* -- bool, if true the datastream is cut short when a
              full batch cannot be made; otherwise the final batch is smaller
              than normal batches.
        """
        super(ExampleDataFeed, self).__init__()
        self.examples = examples
        self.batchsz = batchsz
        self.shuffle = bool(kwargs.get('shuffle', False))
        self.truncate = bool(kwargs.get('truncate', False))
        self.trim = bool(kwargs.get('trim', False))
        n = len(self.examples)
        if self.truncate:
            # Drop the final ragged batch entirely
            self.steps = n // batchsz
        else:
            # Round up so the last, smaller batch is still served
            self.steps = (n + batchsz - 1) // batchsz

    def _batch(self, i):
        """Get the batch of data at step ``i``.

        :param i: (``int``) step index
        :return: whatever the examples container's ``batch()`` produces
        """
        return self.examples.batch(i, self.batchsz, trim=self.trim)
@exporter
class DictExamples(object):
    """Holds a list of example dictionaries and knows how to shuffle, sort and batch them."""

    def __init__(self, example_list, do_shuffle=True, sort_key=None):
        """Constructor

        :param example_list: A list of example dicts sharing the same keys
        :param do_shuffle: (``bool``) Shuffle the data in place? Defaults to `True`
        :param sort_key: (optional) key to sort the examples by; also used
            as the length field when trimming batches
        """
        self.example_list = example_list
        if do_shuffle:
            random.shuffle(self.example_list)
        if sort_key is not None:
            self.example_list = sorted(self.example_list, key=lambda x: x[sort_key])
        self.sort_key = sort_key

    def __getitem__(self, i):
        """Get a single example
        :param i: (``int``) simple index
        :return: an example
        """
        return self.example_list[i]

    def __len__(self):
        """Number of examples
        :return: (``int``) length of data
        """
        return len(self.example_list)

    def _trim_batch(self, batch, keys, max_src_len):
        # Trim every key's tensor to the longest observed length;
        # a zero length means nothing was recorded, so leave the batch alone.
        if max_src_len == 0:
            return batch
        for k in keys:
            # Rank-3 tensors keep their final (feature) axis intact
            if len(batch[k].shape) == 3:
                batch[k] = batch[k][:, 0:max_src_len, :]
            elif len(batch[k].shape) == 2:
                batch[k] = batch[k][:, :max_src_len]
        return batch

    def batch(self, start, batchsz, trim=False):
        """Get a batch of data
        :param start: (``int``) The step index
        :param batchsz: (``int``) The batch size
        :param trim: (``bool``) Trim to maximum length in a batch
        :return: batched dictionary of stacked arrays
        """
        ex = self.example_list[start]
        keys = ex.keys()
        batch = {}
        for k in keys:
            batch[k] = []
        sz = len(self.example_list)
        idx = start * batchsz
        max_src_len = 0
        for i in range(batchsz):
            if idx >= sz:
                # Final partial batch: stop early instead of wrapping around
                break
            ex = self.example_list[idx]
            for k in keys:
                batch[k].append(ex[k])
            # Trim all batches along the sort_key if it exists
            if trim and self.sort_key is not None:
                max_src_len = max(max_src_len, ex[self.sort_key])
            idx += 1
        for k in keys:
            batch[k] = np.stack(batch[k])
        return self._trim_batch(batch, keys, max_src_len) if trim else batch
@exporter
class Seq2SeqExamples(object):
    """Paired (source, target) training examples with optional shuffle and sort."""

    def __init__(self, example_list, do_shuffle=True, src_sort_key=None):
        """Constructor

        :param example_list: Training pair examples
        :param do_shuffle: Shuffle the data in place (defaults to `True`)
        :param src_sort_key: (optional) key to sort examples by on the source
            side; also used as the source length field when trimming
        """
        self.example_list = example_list
        if do_shuffle:
            random.shuffle(self.example_list)
        if src_sort_key is not None:
            self.example_list = sorted(self.example_list, key=lambda x: x[src_sort_key])
        self.src_sort_key = src_sort_key

    def __getitem__(self, i):
        """Get `ith` example
        :param i: (``int``) index of example
        :return: example dict
        """
        return self.example_list[i]

    def __len__(self):
        return len(self.example_list)

    def _trim_batch(self, batch, max_src_len, max_tgt_len):
        # Targets trim to the target length; every other key uses source length
        for k in batch.keys():
            max_len = max_src_len
            if k == 'tgt':
                max_len = max_tgt_len
            if max_len == 0:
                continue
            if len(batch[k].shape) == 3:
                batch[k] = batch[k][:, 0:max_len, :]
            elif len(batch[k].shape) == 2:
                batch[k] = batch[k][:, :max_len]
        return batch

    def batch(self, start, batchsz, trim=False):
        """Get a batch of data
        :param start: (``int``) The step index
        :param batchsz: (``int``) The batch size
        :param trim: (``bool``) Trim to maximum length in a batch
        :return: batched dictionary of stacked arrays
        """
        ex = self.example_list[start]
        keys = ex.keys()
        batch = {}
        for k in keys:
            batch[k] = []
        sz = len(self.example_list)
        idx = start * batchsz
        max_src_len = 0
        max_tgt_len = 0
        for i in range(batchsz):
            if idx >= sz:
                # Final partial batch: stop early instead of wrapping around
                break
            ex = self.example_list[idx]
            for k in keys:
                batch[k].append(ex[k])
            # Trim all batches along the sort_key if it exists
            if trim and self.src_sort_key is not None:
                max_src_len = max(max_src_len, ex[self.src_sort_key])
            if trim:
                max_tgt_len = max(max_tgt_len, ex['tgt_lengths'])
            idx += 1
        for k in keys:
            batch[k] = np.stack(batch[k])
        return self._trim_batch(batch, max_src_len, max_tgt_len) if trim else batch
# This one is a little different at the moment
@exporter
class SeqWordCharDataFeed(DataFeed):
    """Data feed to return language modeling training data
    """

    def __init__(self, examples, nctx, batchsz, tgt_key=None):
        """Constructor
        :param examples: dict of flat tensors plus their `<key>_dims` shapes
        :param nctx: Number of steps of BPTT
        :param batchsz: Batch size
        :param tgt_key: Which field to treat as the target key (this will share an embedding vocab with the source)
        """
        super(SeqWordCharDataFeed, self).__init__()
        self.examples = dict()
        # This identifies which vector to use for targets
        self.tgt_key = 'x' if tgt_key is None else tgt_key
        # NOTE(review): this uses the raw tgt_key argument (possibly None),
        # not self.tgt_key -- a None tgt_key would look up 'None_dims'
        # and raise KeyError; confirm callers always pass tgt_key.
        num_examples = examples['{}_dims'.format(tgt_key)][0]
        rest = num_examples // batchsz
        self.steps = rest // nctx
        # if num_examples is divisible by batchsz * nctx (equivalent to rest is divisible by nctx), we
        # have a problem. reduce rest in that case.
        # NOTE(review): self.steps above was computed from the pre-decrement
        # value of rest -- verify that interaction is intended.
        if rest % nctx == 0:
            rest = rest-1
        for k in examples.keys():
            if k.endswith('_dims'):
                continue
            dim_key = '{}_dims'.format(k)
            shp = examples[dim_key]
            # width is the per-step feature width (1 for flat word streams)
            if len(shp) == 2:
                width = shp[1]
            else:
                width = 1
            # Truncate so the stream reshapes evenly into (batchsz, rest * width)
            trunc = batchsz * rest * width
            vec = examples[k].reshape(-1)[:trunc]
            self.examples[k] = vec.reshape((batchsz, rest * width))
            logger.info('Truncating %s from %d to %d', k, num_examples, trunc)
            # NOTE(review): flatten() returns a new array and the result is
            # discarded, so this line appears to be a no-op -- confirm intent.
            self.examples[k].flatten()
            self.examples[dim_key] = shp
        self.nctx = nctx
        self.batchsz = batchsz

    def _batch(self, i):
        """Slice out BPTT step ``i`` for every key, plus shifted 'y' targets."""
        example = {}
        for k in self.examples.keys():
            if k.endswith('_dims'):
                continue
            x = self.examples[k]
            dims = self.examples['{}_dims'.format(k)]
            if len(dims) == 1:
                width = 1
            else:
                width = dims[1]
            example[k] = x[:, i*self.nctx * width:(i + 1) * self.nctx * width]
            if len(dims) == 1:
                reshape_dims = (self.batchsz, self.nctx)
            else:
                reshape_dims = (self.batchsz, self.nctx, width)
            example[k] = example[k].reshape(reshape_dims)
            if self.tgt_key == k:
                # Targets are the same stream shifted one position ahead
                example['y'] = x[:, i*self.nctx * width + 1:(i + 1) * self.nctx * width + 1].reshape(reshape_dims)
        return example
        #return {
        #    'x': self.x[:, i*self.nbptt:(i+1)*self.nbptt].reshape((self.batchsz, self.nbptt)),
        #    'xch': self.xch[:, i*self.stride_ch:(i+1)*self.stride_ch].reshape((self.batchsz, self.nbptt, self.wsz)),
        #    'y': self.x[:, i*self.nbptt+1:(i+1)*self.nbptt+1].reshape((self.batchsz, self.nbptt))
        #}
| [
"logging.getLogger",
"random.shuffle",
"baseline.utils.export",
"numpy.stack",
"numpy.arange"
] | [((119, 134), 'baseline.utils.export', 'export', (['__all__'], {}), '(__all__)\n', (125, 134), False, 'from baseline.utils import export\n'), ((144, 173), 'logging.getLogger', 'logging.getLogger', (['"""baseline"""'], {}), "('baseline')\n", (161, 173), False, 'import logging\n'), ((806, 827), 'numpy.arange', 'np.arange', (['self.steps'], {}), '(self.steps)\n', (815, 827), True, 'import numpy as np\n'), ((3571, 3604), 'random.shuffle', 'random.shuffle', (['self.example_list'], {}), '(self.example_list)\n', (3585, 3604), False, 'import random\n'), ((5384, 5402), 'numpy.stack', 'np.stack', (['batch[k]'], {}), '(batch[k])\n', (5392, 5402), True, 'import numpy as np\n'), ((5926, 5959), 'random.shuffle', 'random.shuffle', (['self.example_list'], {}), '(self.example_list)\n', (5940, 5959), False, 'import random\n'), ((8066, 8084), 'numpy.stack', 'np.stack', (['batch[k]'], {}), '(batch[k])\n', (8074, 8084), True, 'import numpy as np\n'), ((762, 783), 'numpy.arange', 'np.arange', (['self.steps'], {}), '(self.steps)\n', (771, 783), True, 'import numpy as np\n')] |
import sys
sys.path.append("../")
import pickle
import api.leap as leap
import api.register.user.registration as user_reg
import api.leap_fn as leap_fn
import api.codes as codes
import api.local.functions as leap_functions
import random
import argparse
import numpy as np

if __name__ == "__main__":
    # Command line: number of federated sites participating in training.
    parser = argparse.ArgumentParser()
    parser.add_argument('num_sites', metavar='n', type=int)
    args = parser.parse_args()

    # Build the federated-learning function: ResNet model pieces are supplied
    # as callables so each site can instantiate them locally.
    leap_fed_learn = leap_fn.FedLearnFunction()
    selector = {
        "type": codes.DEFAULT,
        "useLocalData": True
    }
    leap_fed_learn.selector = selector
    module = leap_functions.resnet
    leap_fed_learn.get_model = module.get_model
    leap_fed_learn.get_optimizer = module.get_optimizer
    leap_fed_learn.get_criterion = module.get_criterion
    leap_fed_learn.get_dataloader = module.get_dataloader

    # Deterministic 8000/2000 train/val split over ids 1..10000.
    random.seed(1)
    ids = list(range(1, 10001))
    random_ids = random.sample(ids, 10000)
    train_ids = random_ids[:8000]
    val_ids = random_ids[8000:]
    sites = np.arange(args.num_sites)
    hyperparams = {
        "lr": 1e-4,
        "d_x": 224,  # input dimension
        "d_y": 2,  # output dimension
        "batch_size": 16,
        "max_iters": 25,
        "iters_per_epoch": 10,
        "train_ids": train_ids,
        "val_ids": val_ids,
        "num_sites": len(sites)
    }
    leap_fed_learn.hyperparams = hyperparams

    # BUG FIX: the original opened each cert file and never closed it (three
    # leaked handles); use context managers instead.
    with open("../certs/myCA.crt", "rb") as fd:
        root_cert = fd.read()
    with open("../certs/cloudalgo.key", "rb") as fd:
        priv_key = fd.read()
    with open("../certs/cloudalgo.crt", "rb") as fd:
        cert_chain = fd.read()

    # user_reg.register_user("TestUser", "123456", "10.0.0.6:50000", True, priv_key, cert_chain, root_cert, "Coord")
    auth_res = user_reg.authenticate_user("TestUser", "123456", "10.0.0.6:50000",
                                          True, priv_key, cert_chain, root_cert, "Coord")
    # Renamed from `leap` so the instance no longer shadows the imported module.
    dist_leap = leap.DistributedLeap(leap_fed_learn, "10.0.0.6:50000", auth_res.token,
                                     True, root_cert, priv_key, cert_chain)
    result = dist_leap.get_result(sites)
    print(result)
| [
"random.sample",
"argparse.ArgumentParser",
"api.leap.DistributedLeap",
"api.leap.get_result",
"random.seed",
"api.leap_fn.FedLearnFunction",
"api.register.user.registration.authenticate_user",
"sys.path.append",
"numpy.arange"
] | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((313, 338), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (336, 338), False, 'import argparse\n'), ((452, 478), 'api.leap_fn.FedLearnFunction', 'leap_fn.FedLearnFunction', ([], {}), '()\n', (476, 478), True, 'import api.leap_fn as leap_fn\n'), ((874, 888), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (885, 888), False, 'import random\n'), ((937, 962), 'random.sample', 'random.sample', (['ids', '(10000)'], {}), '(ids, 10000)\n', (950, 962), False, 'import random\n'), ((1046, 1071), 'numpy.arange', 'np.arange', (['args.num_sites'], {}), '(args.num_sites)\n', (1055, 1071), True, 'import numpy as np\n'), ((1755, 1873), 'api.register.user.registration.authenticate_user', 'user_reg.authenticate_user', (['"""TestUser"""', '"""123456"""', '"""10.0.0.6:50000"""', '(True)', 'priv_key', 'cert_chain', 'root_cert', '"""Coord"""'], {}), "('TestUser', '123456', '10.0.0.6:50000', True,\n priv_key, cert_chain, root_cert, 'Coord')\n", (1781, 1873), True, 'import api.register.user.registration as user_reg\n'), ((1923, 2036), 'api.leap.DistributedLeap', 'leap.DistributedLeap', (['leap_fed_learn', '"""10.0.0.6:50000"""', 'auth_res.token', '(True)', 'root_cert', 'priv_key', 'cert_chain'], {}), "(leap_fed_learn, '10.0.0.6:50000', auth_res.token, True,\n root_cert, priv_key, cert_chain)\n", (1943, 2036), True, 'import api.leap as leap\n'), ((2085, 2107), 'api.leap.get_result', 'leap.get_result', (['sites'], {}), '(sites)\n', (2100, 2107), True, 'import api.leap as leap\n')] |
import socket
import struct
import pickle
import numpy as np
import os
import gym
from stable_baselines3 import DQN
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3.common.monitor import Monitor
class Connection:
    """Length-prefixed pickle framing over a socket-like object.

    Wire format: a 4-byte little-endian unsigned length header followed by a
    pickle (protocol 2) of the payload.  SECURITY NOTE: pickle.loads on data
    from the wire executes arbitrary code; only use with a trusted peer.
    """

    def __init__(self, s):
        self._socket = s
        self._buffer = bytearray()

    def receive_object(self):
        """Read one framed object; return None if the peer closed the stream."""
        # Keep reading until both the 4-byte header and the full body are buffered.
        while len(self._buffer) < 4 or len(self._buffer) < struct.unpack("<L", self._buffer[:4])[0] + 4:
            new_bytes = self._socket.recv(16)
            if len(new_bytes) == 0:
                # Peer closed the connection mid-frame (or cleanly).
                return None
            self._buffer += new_bytes
        length = struct.unpack("<L", self._buffer[:4])[0]
        body = self._buffer[4:length + 4]
        obj = pickle.loads(body)
        # Drop the consumed frame; any extra bytes belong to the next frame.
        self._buffer = self._buffer[length + 4:]
        return obj

    def send_object(self, d):
        """Serialize `d` and transmit it as one length-prefixed frame."""
        body = pickle.dumps(d, protocol=2)
        header = struct.pack("<L", len(body))
        # BUG FIX: socket.send may transmit only part of the message; sendall
        # retries until every byte is on the wire.
        self._socket.sendall(header + body)
class Env(gym.Env):
    """Gym environment proxied over TCP to an external simulator process.

    The constructor blocks in accept() until the simulator connects.
    """

    def __init__(self, addr):
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind(addr)
        listener.listen(1)
        clientsocket, address = listener.accept()
        # BUG FIX: only one client is ever served, but the original kept the
        # listening socket open (leaking it and the port) for the process
        # lifetime; close it once the client is connected.
        listener.close()
        self._socket = clientsocket
        self._conn = Connection(clientsocket)
        # Placeholder spaces; reset() overwrites them with the simulator's spec.
        # When you define a new action, change the action_space; when you define
        # a new observation, change low/high and shape accordingly.
        self.action_space = gym.spaces.Discrete(6)
        self.observation_space = gym.spaces.Box(low=-2., high=2., shape=(37,))

    def reset(self):
        """Ask the simulator to reset; adopt its advertised spaces."""
        self._conn.send_object("reset")
        msg = self._conn.receive_object()
        # SECURITY: eval() on strings received from the socket executes arbitrary
        # code; acceptable only because the peer is a trusted local process.
        self.action_space = eval(msg["info"]["action_space"])
        self.observation_space = eval(msg["info"]["observation_space"])
        return msg["observation"]

    def step(self, action):
        """Send a (numpy) action, receive the standard gym 4-tuple."""
        self._conn.send_object(action.tolist())
        msg = self._conn.receive_object()
        obs = msg["observation"]
        rwd = msg["reward"]
        done = msg["done"]
        info = msg["info"]
        return obs, rwd, done, info

    def close(self):
        """Tell the simulator to shut down and close the client socket."""
        self._conn.send_object("close")
        self._socket.close()
addr = ("127.0.0.1", 50710)
env = Monitor(Env(addr))
obs = env.reset()


def run_experiments(env, tag, total_timesteps, exploration_fraction, num_experiments=0):
    """Train `num_experiments` DQN agents on `env` under the given settings.

    :param env: the (Monitor-wrapped) environment to train on
    :param tag: experiment name prefix; full log dir is '<tag>_seed<seed>__<i>'
        (the experiment folders appear in the current working directory)
    :param total_timesteps: training budget per run
    :param exploration_fraction: DQN exploration schedule fraction
    :param num_experiments: how many runs to launch (0 = skip this batch)
    :return: the last trained model, or None if no run was launched
    """
    model = None
    for i in range(num_experiments):
        seed = np.random.randint(0, 1000)
        log_dir = tag + '_seed' + str(seed) + '__' + str(i)
        os.makedirs(log_dir)
        checkpoint_callback = CheckpointCallback(save_freq=100, save_path='./' + str(log_dir),
                                                 name_prefix='model')
        # Hyperparameters can be adjusted in different projects.
        model = DQN('MlpPolicy', env, learning_rate=3e-3, gamma=0.99, learning_starts=100,
                    train_freq=1, target_update_interval=100, seed=seed, batch_size=64,
                    verbose=1, exploration_fraction=exploration_fraction,
                    tensorboard_log="./" + str(log_dir))
        model.learn(total_timesteps=total_timesteps, log_interval=1, callback=checkpoint_callback)
    env.reset()
    return model


# Give a number of experiments per batch (all 0 in the original script, so no
# training runs are launched unless this is edited).
num_experiments = 0
model = None
# (tag, total_timesteps, exploration_fraction) for each of the six original
# copy-pasted experiment batches.
for tag, steps, ef in [
    ('DQN-2D short curve open_100_3000', 3000, 0.2),
    ('DQN-2D long curve open_100_6000', 6000, 0.2),
    ('DQN-3D short curve open_100_3000', 3000, 0.2),
    ('DQN-3D long curve open_100_9000', 9000, 0.1),
    ('DQN-3D short curve closed_100_6000', 6000, 0.1),
    ('DQN-3D long curve closed_100_12000', 12000, 0.2),
]:
    trained = run_experiments(env, tag, steps, ef, num_experiments=num_experiments)
    if trained is not None:
        model = trained

# Evaluate the last trained model, if any.  BUG FIX: the original always ran
# this loop and crashed with NameError when num_experiments was 0 (no `model`
# ever defined); it also never accumulated `reward` into cum_rwd, so the
# printed return was always 0.
if model is not None:
    cum_rwd = 0
    obs = env.reset()
    for i in range(300):
        action, _states = model.predict(obs, deterministic=True)
        obs, reward, done, info = env.step(action)
        cum_rwd += reward
        print(i, action, reward, done, info)
        if done:
            obs = env.reset()
            print("Return = ", cum_rwd)
            cum_rwd = 0
env.close()
| [
"socket.socket",
"os.makedirs",
"pickle.dumps",
"gym.spaces.Discrete",
"gym.spaces.Box",
"numpy.random.randint",
"struct.unpack",
"pickle.loads"
] | [((2396, 2422), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (2413, 2422), True, 'import numpy as np\n'), ((2584, 2604), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2595, 2604), False, 'import os\n'), ((3244, 3270), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (3261, 3270), True, 'import numpy as np\n'), ((3431, 3451), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3442, 3451), False, 'import os\n'), ((4091, 4117), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (4108, 4117), True, 'import numpy as np\n'), ((4279, 4299), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (4290, 4299), False, 'import os\n'), ((4939, 4965), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (4956, 4965), True, 'import numpy as np\n'), ((5126, 5146), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (5137, 5146), False, 'import os\n'), ((5789, 5815), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (5806, 5815), True, 'import numpy as np\n'), ((5979, 5999), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (5990, 5999), False, 'import os\n'), ((6639, 6665), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (6656, 6665), True, 'import numpy as np\n'), ((6829, 6849), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (6840, 6849), False, 'import os\n'), ((766, 784), 'pickle.loads', 'pickle.loads', (['body'], {}), '(body)\n', (778, 784), False, 'import pickle\n'), ((899, 926), 'pickle.dumps', 'pickle.dumps', (['d'], {'protocol': '(2)'}), '(d, protocol=2)\n', (911, 926), False, 'import pickle\n'), ((1095, 1144), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1108, 1144), False, 'import socket\n'), 
((1341, 1363), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(6)'], {}), '(6)\n', (1360, 1363), False, 'import gym\n'), ((1456, 1503), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-2.0)', 'high': '(2.0)', 'shape': '(37,)'}), '(low=-2.0, high=2.0, shape=(37,))\n', (1470, 1503), False, 'import gym\n'), ((643, 680), 'struct.unpack', 'struct.unpack', (['"""<L"""', 'self._buffer[:4]'], {}), "('<L', self._buffer[:4])\n", (656, 680), False, 'import struct\n'), ((432, 469), 'struct.unpack', 'struct.unpack', (['"""<L"""', 'self._buffer[:4]'], {}), "('<L', self._buffer[:4])\n", (445, 469), False, 'import struct\n')] |
import sys
from PyQt5.QtWidgets import (QWidget, QApplication, QComboBox, QHBoxLayout,
QLabel, QPushButton, QTextEdit,
QVBoxLayout, QSlider, QDesktopWidget, QMainWindow)
from PyQt5.QtCore import QTimer, QTime, QCoreApplication, Qt
from PyQt5.QtGui import QFont
from davisinteractive.metrics import batched_jaccard, batched_f_measure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import csv
import random
from datetime import datetime
from davisinteractive.utils.visualization import overlay_mask, _pascal_color_map
from libs import utils_custom
from PIL import Image, ImageFont, ImageDraw
class App(QWidget):
def __init__(self, DIE, model, root, video_indices, save_imgs=False):
    """Interactive VOS demo window (CVPR2021 GIS-RAmap).

    :param DIE: evaluation helper; provides `videos` and `write_in_csv`
    :param model: interactive segmentation model (init_with_new_video,
        Run_interaction, Run_propagation, Get_mask, ...)
    :param root: DAVIS dataset root containing JPEGImages/ and Annotations/
    :param video_indices: sequence index to open (int) or None for the first
    :param save_imgs: if True, final index masks are saved on 'Satisfied'
    """
    super().__init__()
    self.DIE = DIE
    self.model = model
    self.root = root
    sequence_list = DIE.videos
    self.sequence_list = sequence_list
    self.video_indices = video_indices
    self.video_idx = 0
    if self.video_indices is not None:
        self.video_idx = self.video_indices
    self.sequence = sequence_list[self.video_idx]
    print(str(str(self.video_idx) + self.sequence))
    # Load frames / ground truths; precompute GT overlays once per sequence.
    self.frames = utils_custom.load_frames(os.path.join(root, 'JPEGImages', '480p', self.sequence))  # f h w 3
    self.num_frames, self.height, self.width = self.frames.shape[:3]
    self.vis_frames = self.frames.copy()
    self.gts_overlayed = self.frames.copy()
    self.gts = utils_custom.load_gts_multi(os.path.join(root, 'Annotations', '480p', self.sequence))
    for fr in range(self.num_frames):
        self.gts_overlayed[fr] = overlay_mask(self.frames[fr], self.gts[fr], alpha=0.4, contour_thickness=2)
    self.cmap = _pascal_color_map()
    self.n_obj = self.gts.max()
    # Standard DAVIS palette, borrowed from a reference annotation PNG.
    self._palette = Image.open('etc/00000.png').getpalette()
    # font declare
    font_helveltica = 'etc/fonts/helvetica.ttf'
    self.selected_font = ImageFont.truetype(font_helveltica, size=20)
    # init model
    self.model.init_with_new_video(self.frames, self.n_obj)
    self.current_object = 1
    # Other variables (round bookkeeping and interaction timing)
    self.first_scr = None
    self.current_round = 0
    self.scribble_timesteps = []
    self.operate_timesteps = []
    self.finding_timesteps = []
    self.VOS_once_executed_bool = False
    self.not_started = True
    self.after_candidates_decided = True
    self.candidate_frames = []
    self.text_print = ''
    self.save_imgs = save_imgs
    # window settings (centered on the available screen)
    self.setWindowTitle('Demo: CVPR2021_GIS-RAmap')
    self.setGeometry(100, 100, int(self.width*1.2)+800, (int(self.height*1.2)+200))
    qr = self.frameGeometry()
    cp = QDesktopWidget().availableGeometry().center()
    qr.moveCenter(cp)
    self.move(qr.topLeft())
    self.show()
    # object buttons (one per annotatable object, colored like the palette)
    self.obj1_button = QPushButton('\nAnnotate \nobject 1 [1]\n')
    self.obj1_button.clicked.connect(self.obj1_pressed)
    self.obj1_button.setMaximumHeight(80)
    self.obj1_button.setStyleSheet("background-color: red")
    self.obj1_button.setCheckable(True)
    self.obj1_button.setShortcut('1')
    self.obj2_button = QPushButton('\nAnnotate \nobject 2 [2]\n')
    self.obj2_button.clicked.connect(self.obj2_pressed)
    self.obj2_button.setMaximumHeight(80)
    self.obj2_button.setStyleSheet("background-color: green")
    self.obj2_button.setCheckable(True)
    self.obj3_button = QPushButton('\nAnnotate \nobject 3 [3]\n')
    self.obj3_button.clicked.connect(self.obj3_pressed)
    self.obj3_button.setMaximumHeight(80)
    self.obj3_button.setStyleSheet("background-color: yellow")
    self.obj3_button.setCheckable(True)
    self.obj4_button = QPushButton('\nAnnotate \nobject 4 [4]\n')
    self.obj4_button.clicked.connect(self.obj4_pressed)
    self.obj4_button.setMaximumHeight(80)
    self.obj4_button.setStyleSheet("background-color: blue")
    self.obj4_button.setCheckable(True)
    self.obj5_button = QPushButton('\nAnnotate \nobject 5 [5]\n')
    self.obj5_button.clicked.connect(self.obj5_pressed)
    self.obj5_button.setMaximumHeight(80)
    self.obj5_button.setStyleSheet("background-color: purple")
    self.obj5_button.setCheckable(True)
    # Keyboard shortcuts only for objects that exist in this sequence.
    if self.n_obj>=2:
        self.obj2_button.setShortcut('2')
    if self.n_obj>=3:
        self.obj3_button.setShortcut('3')
    if self.n_obj>=4:
        self.obj4_button.setShortcut('4')
    if self.n_obj>=5:
        self.obj5_button.setShortcut('5')
    # buttons (navigation and round control)
    self.prev_button = QPushButton('Prev [<-]')
    self.prev_button.clicked.connect(self.on_prev)
    self.prev_button.setShortcut(Qt.Key_Left)
    self.next_button = QPushButton('Next [->]')
    self.next_button.clicked.connect(self.on_next)
    self.next_button.setShortcut(Qt.Key_Right)
    self.play_button = QPushButton('Play [P]')
    self.play_button.clicked.connect(self.on_play)
    self.play_button.setShortcut('P')
    self.restart_button = QPushButton('Restart the video')
    self.restart_button.clicked.connect(self.restart_video)
    self.run_button = QPushButton('Run VOS [R]')
    # `pressed` fires before `clicked`, so the status text updates first.
    self.run_button.pressed.connect(self.on_run_dschange)
    self.run_button.clicked.connect(self.on_run)
    self.run_button.setShortcut('R')
    self.end_button = QPushButton('Satisfied [S]')
    self.end_button.clicked.connect(self.on_end)
    self.end_button.setShortcut('S')
    self.cand1_button = QPushButton('Candidate A [A]')
    self.cand1_button.clicked.connect(self.on_candidateA)
    self.cand1_button.setShortcut('A')
    self.cand2_button = QPushButton('Candidate B [B]')
    self.cand2_button.clicked.connect(self.on_candidateB)
    self.cand2_button.setShortcut('B')
    self.cand3_button = QPushButton('Candidate C [C]')
    self.cand3_button.clicked.connect(self.on_candidateC)
    self.cand3_button.setShortcut('C')
    self.cand4_button = QPushButton('Candidate D [D]')
    self.cand4_button.clicked.connect(self.on_candidateD)
    self.cand4_button.setShortcut('D')
    # LCD (frame counter)
    self.lcd1 = QTextEdit()
    self.lcd1.setReadOnly(True)
    self.lcd1.setMaximumHeight(28)
    self.lcd1.setMaximumWidth(100)
    self.lcd1.setText('{: 3d} / {: 3d}'.format(0, self.num_frames-1))
    # LCD#2 (current round display)
    self.lcd2 = QTextEdit()
    self.lcd2.setReadOnly(True)
    self.lcd2.setMaximumHeight(28)
    self.lcd2.setMaximumWidth(self.width)
    self.lcd2.setText('Current round : {:02d}'.format(self.current_round+1))
    # LCD#3 (scrolling status log)
    self.lcd3 = QTextEdit()
    self.lcd3.setReadOnly(True)
    self.lcd3.setMaximumHeight(600)
    self.lcd3.setMaximumWidth(600)
    self.text_print += 'Round [{:02d}]\n'.format(self.current_round+1)
    self.lcd3.setText(self.text_print)
    # slide (frame scrubber)
    self.slider = QSlider(Qt.Horizontal)
    self.slider.setMinimum(0)
    self.slider.setMaximum(self.num_frames-1)
    self.slider.setValue(0)
    self.slider.setTickPosition(QSlider.TicksBelow)
    self.slider.setTickInterval(1)
    self.slider.valueChanged.connect(self.slide)
    # main figure (interactive canvas: scribbles are drawn here)
    self.fig1 = plt.Figure()
    self.ax1 = plt.Axes(self.fig1, [0., 0., 1., 1.])
    self.ax1.set_axis_off()
    self.fig1.add_axes(self.ax1)
    self.canvas1 = FigureCanvas(self.fig1)
    self.cidpress = self.fig1.canvas.mpl_connect('button_press_event', self.on_press)
    self.cidrelease = self.fig1.canvas.mpl_connect('button_release_event', self.on_release)
    self.cidmotion = self.fig1.canvas.mpl_connect('motion_notify_event', self.on_motion)
    # sub figure (ground-truth reference view)
    self.fig2 = plt.Figure()
    self.ax2 = plt.Axes(self.fig2, [0., 0., 1., 1.])
    self.ax2.set_axis_off()
    self.fig2.add_axes(self.ax2)
    self.canvas2 = FigureCanvas(self.fig2)
    self.canvas2.setMaximumHeight(320)
    self.canvas2.setMaximumWidth(600)
    self.label1 = QLabel('Ground-Truth', self)
    self.label1.setAlignment(Qt.AlignCenter)
    font1 = self.label1.font()
    font1.setPointSize(20)
    # object buttons column
    obj_buttons = QVBoxLayout()
    obj_buttons.addSpacing(20)
    obj_buttons.addWidget(self.obj1_button)
    obj_buttons.addWidget(self.obj2_button)
    obj_buttons.addWidget(self.obj3_button)
    obj_buttons.addWidget(self.obj4_button)
    obj_buttons.addWidget(self.obj5_button)
    obj_buttons.addSpacing(20)
    # navigator for layout
    navi = QHBoxLayout()
    navi.addWidget(self.lcd1)
    navi.addWidget(self.prev_button)
    navi.addWidget(self.play_button)
    navi.addWidget(self.next_button)
    navi.addStretch(1)
    navi.addStretch(1)
    navi.addWidget(self.restart_button)
    navi.addWidget(self.run_button)
    navi.addWidget(self.end_button)
    navi2 = QHBoxLayout()
    navi2.addWidget(self.cand1_button)
    navi2.addWidget(self.cand2_button)
    navi3 = QHBoxLayout()
    navi3.addWidget(self.cand3_button)
    navi3.addWidget(self.cand4_button)
    # main layout
    layout_main = QVBoxLayout()
    layout_main.addWidget(self.canvas1)
    layout_main.addWidget(self.slider)
    layout_main.addWidget(self.lcd2)
    layout_main.addLayout(navi)
    layout_main.addLayout(navi2)
    layout_main.addLayout(navi3)
    layout_main.setStretchFactor(navi, 1)
    layout_main.setStretchFactor(self.canvas1, 0)
    # sub layout
    layout_sub = QVBoxLayout()
    layout_sub.addWidget(self.canvas2)
    layout_sub.addWidget(self.label1)
    layout_sub.addWidget(self.lcd3)
    # demo
    final_demo = QHBoxLayout()
    final_demo.addLayout(obj_buttons)
    final_demo.addSpacing(30)
    final_demo.addLayout(layout_main)
    final_demo.addLayout(layout_sub)
    self.setLayout(final_demo)
    # timer (drives playback via on_time)
    self.timer = QTimer()
    self.timer.setSingleShot(False)
    self.timer.timeout.connect(self.on_time)
    # initialize visualize
    self.current_mask = np.zeros((self.num_frames, self.height, self.width), dtype=np.uint8)
    self.cursur = 0
    self.on_showing = None
    self.on_showing2 = None
    self.show_current()
    # initialize action
    self.reset_scribbles()
    self.pressed = False
    self.on_drawing = None
    self.drawn_strokes = []
    self.obj1_button.setChecked(True)
    self.show()
def restart_video(self):
    """Re-initialize the demo on the same sequence, discarding all progress.

    BUG FIX: forward save_imgs too — the original re-init silently reset it
    to the default False, so a restarted session stopped saving masks.
    """
    self.__init__(self.DIE, self.model, self.root, self.video_idx, save_imgs=self.save_imgs)
def show_candidates(self):
    """Pick the 4 worst-scoring frames and display them as a labeled 2x2 mosaic."""
    # Rank frames by the model's per-frame quality score (ascending: worst first).
    sorted_score_idx = np.argsort(self.model.scores_nf)
    exclude_range = self.num_frames/10
    excluded_next_candidates = []
    self.candidate_frames = []
    # Greedily pick candidates, enforcing a temporal gap of ~num_frames/10
    # between picks so the four frames are spread across the video.
    for i in range(self.num_frames):
        if not sorted_score_idx[i] in excluded_next_candidates:
            self.candidate_frames.append(sorted_score_idx[i])
            excluded_next_candidates += list(range(
                int(sorted_score_idx[i]-(exclude_range/2)+0.5), int(sorted_score_idx[i]+(exclude_range/2)+0.5)))
            if len(self.candidate_frames)==4:
                break
    self.candidate_frames = sorted(self.candidate_frames)
    # Compose the mosaic: each candidate downscaled into one quadrant
    # (A top-left, B top-right, C bottom-left, D bottom-right).
    canvasImg = Image.new('RGB', (self.width, self.height))
    draw = ImageDraw.Draw(canvasImg)
    cand_Img = Image.fromarray(self.vis_frames[self.candidate_frames[0]]).resize((self.width//2-4, self.height//2-2))
    canvasImg.paste(cand_Img, (2, 1))
    cand_Img = Image.fromarray(self.vis_frames[self.candidate_frames[1]]).resize((self.width//2-4, self.height//2-2))
    canvasImg.paste(cand_Img, (self.width//2 + 2, 1))
    cand_Img = Image.fromarray(self.vis_frames[self.candidate_frames[2]]).resize((self.width//2-4, self.height//2-2))
    canvasImg.paste(cand_Img, (2, self.height//2 + 1))
    cand_Img = Image.fromarray(self.vis_frames[self.candidate_frames[3]]).resize((self.width//2-4, self.height//2-2))
    canvasImg.paste(cand_Img, (self.width//2 + 2, self.height//2 + 1))
    # Label each quadrant with its candidate letter and frame number.
    draw.multiline_text((5, 5), 'Candidate A: Fr{:03d}'.format(self.candidate_frames[0]),
                        fill=(255, 255, 255, 255), font=self.selected_font, spacing=1.5, align="right")
    draw.multiline_text((5 + self.width//2, 5), 'Candidate B: Fr{:03d}'.format(self.candidate_frames[1]),
                        fill=(255, 255, 255, 255), font=self.selected_font, spacing=1.5, align="right")
    draw.multiline_text((5, 5 + self.height//2), 'Candidate C: Fr{:03d}'.format(self.candidate_frames[2]),
                        fill=(255, 255, 255, 255), font=self.selected_font, spacing=1.5, align="right")
    draw.multiline_text((5 + self.width//2, 5 + self.height//2), 'Candidate D: Fr{:03d}'.format(self.candidate_frames[3]),
                        fill=(255, 255, 255, 255), font=self.selected_font, spacing=1.5, align="right")
    vis_candidates = np.array(canvasImg)
    if self.on_showing:
        self.on_showing.remove()
        self.on_showing2.remove()
    self.on_showing = self.ax1.imshow(vis_candidates)
def show_candidates_gt(self):
    """Render the GT overlays of the four candidate frames as a 2x2 mosaic on ax2.

    Refactor: the original repeated the fromarray/resize/paste sequence four
    times; quadrant order matches show_candidates (A top-left, B top-right,
    C bottom-left, D bottom-right).
    """
    canvas_img = Image.new('RGB', (self.width, self.height))
    tile_size = (self.width // 2 - 4, self.height // 2 - 2)
    offsets = [(2, 1),
               (self.width // 2 + 2, 1),
               (2, self.height // 2 + 1),
               (self.width // 2 + 2, self.height // 2 + 1)]
    for frame_idx, offset in zip(self.candidate_frames, offsets):
        tile = Image.fromarray(self.gts_overlayed[frame_idx]).resize(tile_size)
        canvas_img.paste(tile, offset)
    vis_candidates_gt = np.array(canvas_img)
    self.on_showing2 = self.ax2.imshow(vis_candidates_gt)
def show_current(self):
    """Show the current frame (main view) and its GT overlay (sub view),
    then sync the frame counter and the slider position."""
    if self.on_showing:
        self.on_showing.remove()
        self.on_showing2.remove()
    self.on_showing = self.ax1.imshow(self.vis_frames[self.cursur])
    self.on_showing2 = self.ax2.imshow(self.gts_overlayed[self.cursur])
    for canvas in (self.canvas1, self.canvas2):
        canvas.draw()
    self.lcd1.setText('{: 3d} / {: 3d}'.format(self.cursur, self.num_frames-1))
    self.slider.setValue(self.cursur)
def show_current_anno(self):
    """Show the current frame blended with the predicted mask, plus the GT view."""
    blended = overlay_mask(self.frames[self.cursur], self.current_mask[self.cursur],
                          alpha=0.5, contour_thickness=2)
    if self.on_showing:
        self.on_showing.remove()
        self.on_showing2.remove()
    self.on_showing = self.ax1.imshow(blended)
    self.on_showing2 = self.ax2.imshow(self.gts_overlayed[self.cursur])
    for canvas in (self.canvas1, self.canvas2):
        canvas.draw()
    self.lcd1.setText('{: 3d} / {: 3d}'.format(self.cursur, self.num_frames - 1))
    self.slider.setValue(self.cursur)
def reset_scribbles(self):
    """Start a fresh DAVIS-style scribble dict: one empty stroke list per frame."""
    self.scribbles = {
        'scribbles': [[] for _ in range(self.num_frames)],
        'sequence': self.sequence,
    }
def clear_strokes(self):
    """Remove every scribble line artist from the canvas and redraw both views."""
    for line in self.drawn_strokes:
        if line is not None:
            line.pop(0).remove()
    self.drawn_strokes = []
    self.canvas1.draw()
    self.canvas2.draw()
def slide(self):
    """Slider callback: drop any pending scribbles and jump to the chosen frame."""
    self.reset_scribbles()
    self.clear_strokes()
    self.cursur = self.slider.value()
    self.show_current()
def on_candidateA(self):
    """Jump to candidate frame A and record the frame-finding time."""
    if not self.candidate_frames:
        return
    self.finding_timesteps.append(time.time() - self.time_init)
    self.cursur = self.candidate_frames[0]
    self.after_candidates_decided = True
    self.show_current()
def on_candidateB(self):
    """Jump to candidate frame B and record the frame-finding time."""
    if not self.candidate_frames:
        return
    self.finding_timesteps.append(time.time() - self.time_init)
    self.cursur = self.candidate_frames[1]
    self.after_candidates_decided = True
    self.show_current()
def on_candidateC(self):
    """Jump to candidate frame C and record the frame-finding time."""
    if not self.candidate_frames:
        return
    self.finding_timesteps.append(time.time() - self.time_init)
    self.cursur = self.candidate_frames[2]
    self.after_candidates_decided = True
    self.show_current()
def on_candidateD(self):
    """Jump to candidate frame D and record the frame-finding time."""
    if not self.candidate_frames:
        return
    self.finding_timesteps.append(time.time() - self.time_init)
    self.cursur = self.candidate_frames[3]
    self.after_candidates_decided = True
    self.show_current()
def on_run_dschange(self):
    """Pressed-signal hook: show 'Running VOS...' before the heavy run starts."""
    if not self.scribbles['scribbles'][self.cursur]:
        return
    self.text_print += 'Running VOS...\n'
    self.lcd3.setText(self.text_print)
def on_run(self):
    """Propagate the scribbled annotation through the whole video (one round)."""
    # Only act when the current frame actually carries at least one scribble.
    if len(self.scribbles['scribbles'][self.cursur])>=1:
        # Time from round start to the VOS run = scribbling time.
        self.scribble_timesteps.append(time.time()-self.time_init)
        self.VOS_once_executed_bool = True
        self.model.Run_propagation(self.cursur)
        self.current_mask = self.model.Get_mask()
        self.current_round +=1
        print('[Overlaying segmentations...]')
        for fr in range(self.num_frames):
            self.vis_frames[fr] = overlay_mask(self.frames[fr], self.current_mask[fr], alpha=0.5, contour_thickness=2)
        print('[Overlaying Done.] \n')
        # clear scribble and reset
        self.show_candidates()
        self.show_candidates_gt()
        # Block further annotation until a candidate frame is chosen.
        self.after_candidates_decided = False
        self.reset_scribbles()
        self.clear_strokes()
        self.lcd2.setText('Current round : {:02d}'.format(self.current_round + 1))
        self.text_print += '\nRound [{:02d}]\n'.format(self.current_round+1)
        self.operate_timesteps.append(time.time() - self.time_init)
        self.slider.setDisabled(True)
        self.text_print += 'Finding a unsatisfying frame...\n'
        self.lcd3.setText(self.text_print)
def on_end(self):
    """'Satisfied' handler: score the final masks, log results, optionally save PNGs."""
    # Require at least one VOS run and no un-submitted scribbles on this frame.
    if self.VOS_once_executed_bool and (len(self.scribbles['scribbles'][self.cursur])==0):
        # Close the pending frame-finding interval if one is still open.
        if len(self.finding_timesteps) == (len(self.operate_timesteps)-1):
            self.finding_timesteps.append(time.time()-self.time_init)
        final_mask = self.model.Get_mask()
        # Per-object J (region) and F (boundary) metrics averaged over frames.
        final_J = np.average(batched_jaccard(self.gts, final_mask, average_over_objects=False), axis=0) # n_obj
        final_F = np.average(batched_f_measure(self.gts, final_mask, average_over_objects=False), axis=0) # n_obj
        self.DIE.write_in_csv(self.sequence, self.n_obj, final_J, final_F, self.scribble_timesteps, self.operate_timesteps, self.finding_timesteps)
        if self.save_imgs:
            save_dir = os.path.join('result_video', 'Alg[{}]_{}'.format(self.DIE.algorithm_name,self.DIE.current_time), '{}'.format(self.sequence))
            utils_custom.mkdir(save_dir)
            # Save indexed ('P' mode) masks with the standard DAVIS palette.
            for fr_idx in range(self.num_frames):
                savefname = os.path.join(save_dir,'{:05d}.png'.format(fr_idx))
                tmpPIL = Image.fromarray(final_mask[fr_idx].astype(np.uint8), 'P')
                tmpPIL.putpalette(self._palette)
                tmpPIL.save(savefname)
        # In scripted (non-interactive) mode, quit the Qt loop when done.
        if self.video_indices is not None:
            QCoreApplication.instance().quit()
def on_prev(self):
    """Step one frame back (clamped at 0), clearing any pending scribbles."""
    self.clear_strokes()
    self.reset_scribbles()
    if self.cursur > 0:
        self.cursur -= 1
    self.show_current()
def on_next(self):
self.clear_strokes()
self.reset_scribbles()
self.cursur = min(self.cursur+1, self.num_frames-1)
self.show_current()
# print('next ')
def on_time(self):
self.clear_strokes()
self.reset_scribbles()
self.cursur += 1
if self.cursur > self.num_frames-1:
self.cursur = 0
self.show_current()
def on_play(self):
if self.timer.isActive():
self.timer.stop()
else:
self.timer.start(100 / 10)
def on_press(self, event):
if (len(self.finding_timesteps)-len(self.operate_timesteps)==0) and self.after_candidates_decided:
self.slider.setDisabled(True)
if self.not_started:
self.text_print += 'Providing scribble...\n'
self.lcd3.setText(self.text_print)
self.time_init = time.time()
self.not_started = False
if event.xdata and event.ydata:
self.pressed = True
self.stroke = {}
self.stroke['path'] = []
self.stroke['path'].append([event.xdata/self.width, event.ydata/self.height])
if event.button == 1:
self.stroke['object_id'] = self.current_object
else:
self.stroke['object_id'] = 0
self.stroke['start_time'] = time.time()
self.visualize_annotation(event)
def on_motion(self, event):
if (len(self.finding_timesteps)-len(self.operate_timesteps)==0) and self.after_candidates_decided:
self.visualize_annotation(event)
def on_release(self, event):
if (len(self.finding_timesteps)-len(self.operate_timesteps)==0) and self.after_candidates_decided:
self.pressed = False
if event.xdata and event.ydata:
self.stroke['path'].append([event.xdata/self.width, event.ydata/self.height])
self.stroke['end_time'] = time.time()
self.scribbles['annotated_frame'] = self.cursur
self.scribbles['scribbles'][self.cursur].append(self.stroke)
self.drawn_strokes.append(self.on_drawing)
self.on_drawing = None
self.model.Run_interaction(self.scribbles)
self.current_mask[self.cursur] = self.model.Get_mask_index(self.cursur)
self.show_current_anno()
def visualize_annotation(self, event):
if self.pressed and event.xdata and event.ydata:
self.stroke['path'].append([event.xdata/self.width, event.ydata/self.height])
x = [p[0]*self.width for p in self.stroke['path']]
y = [p[1]*self.height for p in self.stroke['path']]
if self.on_drawing:
self.on_drawing.pop(0).remove()
if self.stroke['object_id'] == 0:
self.on_drawing = self.ax1.plot(x,y, marker='o', markersize=4, linewidth=5, color=[0,0,0])
if self.stroke['object_id'] == self.current_object:
self.on_drawing = self.ax1.plot(x,y, marker='o', markersize=4, linewidth=5, color=(self.cmap[self.current_object])/320 +0.2)
self.canvas1.draw()
def obj1_pressed(self):
if self.pressed: self.obj1_button.toggle()
else:
self.current_object = 1
self.obj1_button.setChecked(True), self.obj2_button.setChecked(False), self.obj3_button.setChecked(False)
self.obj4_button.setChecked(False), self.obj5_button.setChecked(False)
def obj2_pressed(self):
if self.pressed: self.obj2_button.toggle()
else:
if self.n_obj>=2:
self.current_object = 2
self.obj1_button.setChecked(False), self.obj2_button.setChecked(True), self.obj3_button.setChecked(False)
self.obj4_button.setChecked(False), self.obj5_button.setChecked(False)
def obj3_pressed(self):
if self.pressed: self.obj3_button.toggle()
else:
if self.n_obj>=3:
self.current_object = 3
self.obj1_button.setChecked(False), self.obj2_button.setChecked(False), self.obj3_button.setChecked(True)
self.obj4_button.setChecked(False), self.obj5_button.setChecked(False)
def obj4_pressed(self):
if self.pressed: self.obj4_button.toggle()
else:
if self.n_obj>=4:
self.current_object = 4
self.obj1_button.setChecked(False), self.obj2_button.setChecked(False), self.obj3_button.setChecked(False)
self.obj4_button.setChecked(True), self.obj5_button.setChecked(False)
def obj5_pressed(self):
if self.pressed: self.obj5_button.toggle()
else:
if self.n_obj>=5:
self.current_object = 5
self.obj1_button.setChecked(False), self.obj2_button.setChecked(False), self.obj3_button.setChecked(False)
self.obj4_button.setChecked(False), self.obj5_button.setChecked(True)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.close() | [
"davisinteractive.metrics.batched_jaccard",
"PIL.Image.new",
"PyQt5.QtCore.QCoreApplication.instance",
"matplotlib.pyplot.Figure",
"libs.utils_custom.mkdir",
"numpy.argsort",
"numpy.array",
"PIL.ImageDraw.Draw",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QTextEdit",
"PIL.ImageFont.truetype"... | [((1867, 1886), 'davisinteractive.utils.visualization._pascal_color_map', '_pascal_color_map', ([], {}), '()\n', (1884, 1886), False, 'from davisinteractive.utils.visualization import overlay_mask, _pascal_color_map\n'), ((2093, 2137), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_helveltica'], {'size': '(20)'}), '(font_helveltica, size=20)\n', (2111, 2137), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((3069, 3112), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""\nAnnotate \nobject 1 [1]\n"""'], {}), '("""\nAnnotate \nobject 1 [1]\n""")\n', (3080, 3112), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((3396, 3439), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""\nAnnotate \nobject 2 [2]\n"""'], {}), '("""\nAnnotate \nobject 2 [2]\n""")\n', (3407, 3439), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((3683, 3726), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""\nAnnotate \nobject 3 [3]\n"""'], {}), '("""\nAnnotate \nobject 3 [3]\n""")\n', (3694, 3726), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((3971, 4014), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""\nAnnotate \nobject 4 [4]\n"""'], {}), '("""\nAnnotate \nobject 4 [4]\n""")\n', (3982, 4014), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((4257, 4300), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""\nAnnotate \nobject 5 [5]\n"""'], {}), '("""\nAnnotate \nobject 5 [5]\n""")\n', (4268, 4300), False, 'from PyQt5.QtWidgets import QWidget, 
QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((4900, 4924), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Prev [<-]"""'], {}), "('Prev [<-]')\n", (4911, 4924), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((5057, 5081), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Next [->]"""'], {}), "('Next [->]')\n", (5068, 5081), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((5215, 5238), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Play [P]"""'], {}), "('Play [P]')\n", (5226, 5238), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((5366, 5398), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Restart the video"""'], {}), "('Restart the video')\n", (5377, 5398), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((5489, 5515), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Run VOS [R]"""'], {}), "('Run VOS [R]')\n", (5500, 5515), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((5698, 5726), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Satisfied [S]"""'], {}), "('Satisfied [S]')\n", (5709, 5726), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((5850, 5880), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Candidate A [A]"""'], {}), 
"('Candidate A [A]')\n", (5861, 5880), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((6014, 6044), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Candidate B [B]"""'], {}), "('Candidate B [B]')\n", (6025, 6044), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((6178, 6208), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Candidate C [C]"""'], {}), "('Candidate C [C]')\n", (6189, 6208), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((6342, 6372), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Candidate D [D]"""'], {}), "('Candidate D [D]')\n", (6353, 6372), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((6513, 6524), 'PyQt5.QtWidgets.QTextEdit', 'QTextEdit', ([], {}), '()\n', (6522, 6524), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((6750, 6761), 'PyQt5.QtWidgets.QTextEdit', 'QTextEdit', ([], {}), '()\n', (6759, 6761), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((7001, 7012), 'PyQt5.QtWidgets.QTextEdit', 'QTextEdit', ([], {}), '()\n', (7010, 7012), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((7285, 7307), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', 
(7292, 7307), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((7615, 7627), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (7625, 7627), True, 'import matplotlib.pyplot as plt\n'), ((7647, 7688), 'matplotlib.pyplot.Axes', 'plt.Axes', (['self.fig1', '[0.0, 0.0, 1.0, 1.0]'], {}), '(self.fig1, [0.0, 0.0, 1.0, 1.0])\n', (7655, 7688), True, 'import matplotlib.pyplot as plt\n'), ((7777, 7800), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig1'], {}), '(self.fig1)\n', (7789, 7800), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((8123, 8135), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (8133, 8135), True, 'import matplotlib.pyplot as plt\n'), ((8155, 8196), 'matplotlib.pyplot.Axes', 'plt.Axes', (['self.fig2', '[0.0, 0.0, 1.0, 1.0]'], {}), '(self.fig2, [0.0, 0.0, 1.0, 1.0])\n', (8163, 8196), True, 'import matplotlib.pyplot as plt\n'), ((8285, 8308), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig2'], {}), '(self.fig2)\n', (8297, 8308), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((8416, 8444), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Ground-Truth"""', 'self'], {}), "('Ground-Truth', self)\n", (8422, 8444), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((8609, 8622), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (8620, 8622), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((8981, 8994), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (8992, 8994), False, 'from PyQt5.QtWidgets import 
QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((9347, 9360), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (9358, 9360), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((9463, 9476), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (9474, 9476), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((9608, 9621), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (9619, 9621), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((10003, 10016), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (10014, 10016), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((10179, 10192), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (10190, 10192), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((10425, 10433), 'PyQt5.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (10431, 10433), False, 'from PyQt5.QtCore import QTimer, QTime, QCoreApplication, Qt\n'), ((10583, 10651), 'numpy.zeros', 'np.zeros', (['(self.num_frames, self.height, self.width)'], {'dtype': 'np.uint8'}), '((self.num_frames, self.height, self.width), dtype=np.uint8)\n', (10591, 10651), True, 'import numpy as np\n'), ((11142, 11174), 'numpy.argsort', 'np.argsort', (['self.model.scores_nf'], {}), '(self.model.scores_nf)\n', (11152, 11174), True, 'import numpy as np\n'), 
((11789, 11832), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(self.width, self.height)'], {}), "('RGB', (self.width, self.height))\n", (11798, 11832), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((11848, 11873), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['canvasImg'], {}), '(canvasImg)\n', (11862, 11873), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((13495, 13514), 'numpy.array', 'np.array', (['canvasImg'], {}), '(canvasImg)\n', (13503, 13514), True, 'import numpy as np\n'), ((13732, 13775), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(self.width, self.height)'], {}), "('RGB', (self.width, self.height))\n", (13741, 13775), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((14542, 14562), 'numpy.array', 'np.array', (['canvasImg2'], {}), '(canvasImg2)\n', (14550, 14562), True, 'import numpy as np\n'), ((15137, 15243), 'davisinteractive.utils.visualization.overlay_mask', 'overlay_mask', (['self.frames[self.cursur]', 'self.current_mask[self.cursur]'], {'alpha': '(0.5)', 'contour_thickness': '(2)'}), '(self.frames[self.cursur], self.current_mask[self.cursur],\n alpha=0.5, contour_thickness=2)\n', (15149, 15243), False, 'from davisinteractive.utils.visualization import overlay_mask, _pascal_color_map\n'), ((1353, 1408), 'os.path.join', 'os.path.join', (['root', '"""JPEGImages"""', '"""480p"""', 'self.sequence'], {}), "(root, 'JPEGImages', '480p', self.sequence)\n", (1365, 1408), False, 'import os\n'), ((1634, 1690), 'os.path.join', 'os.path.join', (['root', '"""Annotations"""', '"""480p"""', 'self.sequence'], {}), "(root, 'Annotations', '480p', self.sequence)\n", (1646, 1690), False, 'import os\n'), ((1771, 1846), 'davisinteractive.utils.visualization.overlay_mask', 'overlay_mask', (['self.frames[fr]', 'self.gts[fr]'], {'alpha': '(0.4)', 'contour_thickness': '(2)'}), '(self.frames[fr], self.gts[fr], alpha=0.4, contour_thickness=2)\n', (1783, 1846), False, 'from davisinteractive.utils.visualization import overlay_mask, 
_pascal_color_map\n'), ((22427, 22438), 'time.time', 'time.time', ([], {}), '()\n', (22436, 22438), False, 'import time\n'), ((1947, 1974), 'PIL.Image.open', 'Image.open', (['"""etc/00000.png"""'], {}), "('etc/00000.png')\n", (1957, 1974), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((11893, 11951), 'PIL.Image.fromarray', 'Image.fromarray', (['self.vis_frames[self.candidate_frames[0]]'], {}), '(self.vis_frames[self.candidate_frames[0]])\n', (11908, 11951), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((12057, 12115), 'PIL.Image.fromarray', 'Image.fromarray', (['self.vis_frames[self.candidate_frames[1]]'], {}), '(self.vis_frames[self.candidate_frames[1]])\n', (12072, 12115), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((12237, 12295), 'PIL.Image.fromarray', 'Image.fromarray', (['self.vis_frames[self.candidate_frames[2]]'], {}), '(self.vis_frames[self.candidate_frames[2]])\n', (12252, 12295), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((12418, 12476), 'PIL.Image.fromarray', 'Image.fromarray', (['self.vis_frames[self.candidate_frames[3]]'], {}), '(self.vis_frames[self.candidate_frames[3]])\n', (12433, 12476), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((13795, 13856), 'PIL.Image.fromarray', 'Image.fromarray', (['self.gts_overlayed[self.candidate_frames[0]]'], {}), '(self.gts_overlayed[self.candidate_frames[0]])\n', (13810, 13856), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((13963, 14024), 'PIL.Image.fromarray', 'Image.fromarray', (['self.gts_overlayed[self.candidate_frames[1]]'], {}), '(self.gts_overlayed[self.candidate_frames[1]])\n', (13978, 14024), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((14147, 14208), 'PIL.Image.fromarray', 'Image.fromarray', (['self.gts_overlayed[self.candidate_frames[2]]'], {}), '(self.gts_overlayed[self.candidate_frames[2]])\n', (14162, 14208), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((14332, 14393), 'PIL.Image.fromarray', 
'Image.fromarray', (['self.gts_overlayed[self.candidate_frames[3]]'], {}), '(self.gts_overlayed[self.candidate_frames[3]])\n', (14347, 14393), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((18108, 18196), 'davisinteractive.utils.visualization.overlay_mask', 'overlay_mask', (['self.frames[fr]', 'self.current_mask[fr]'], {'alpha': '(0.5)', 'contour_thickness': '(2)'}), '(self.frames[fr], self.current_mask[fr], alpha=0.5,\n contour_thickness=2)\n', (18120, 18196), False, 'from davisinteractive.utils.visualization import overlay_mask, _pascal_color_map\n'), ((19216, 19281), 'davisinteractive.metrics.batched_jaccard', 'batched_jaccard', (['self.gts', 'final_mask'], {'average_over_objects': '(False)'}), '(self.gts, final_mask, average_over_objects=False)\n', (19231, 19281), False, 'from davisinteractive.metrics import batched_jaccard, batched_f_measure\n'), ((19333, 19400), 'davisinteractive.metrics.batched_f_measure', 'batched_f_measure', (['self.gts', 'final_mask'], {'average_over_objects': '(False)'}), '(self.gts, final_mask, average_over_objects=False)\n', (19350, 19400), False, 'from davisinteractive.metrics import batched_jaccard, batched_f_measure\n'), ((19772, 19800), 'libs.utils_custom.mkdir', 'utils_custom.mkdir', (['save_dir'], {}), '(save_dir)\n', (19790, 19800), False, 'from libs import utils_custom\n'), ((21308, 21319), 'time.time', 'time.time', ([], {}), '()\n', (21317, 21319), False, 'import time\n'), ((21830, 21841), 'time.time', 'time.time', ([], {}), '()\n', (21839, 21841), False, 'import time\n'), ((16452, 16463), 'time.time', 'time.time', ([], {}), '()\n', (16461, 16463), False, 'import time\n'), ((16727, 16738), 'time.time', 'time.time', ([], {}), '()\n', (16736, 16738), False, 'import time\n'), ((17002, 17013), 'time.time', 'time.time', ([], {}), '()\n', (17011, 17013), False, 'import time\n'), ((17277, 17288), 'time.time', 'time.time', ([], {}), '()\n', (17286, 17288), False, 'import time\n'), ((17755, 17766), 'time.time', 'time.time', 
([], {}), '()\n', (17764, 17766), False, 'import time\n'), ((18679, 18690), 'time.time', 'time.time', ([], {}), '()\n', (18688, 18690), False, 'import time\n'), ((2892, 2908), 'PyQt5.QtWidgets.QDesktopWidget', 'QDesktopWidget', ([], {}), '()\n', (2906, 2908), False, 'from PyQt5.QtWidgets import QWidget, QApplication, QComboBox, QHBoxLayout, QLabel, QPushButton, QTextEdit, QVBoxLayout, QSlider, QDesktopWidget, QMainWindow\n'), ((19108, 19119), 'time.time', 'time.time', ([], {}), '()\n', (19117, 19119), False, 'import time\n'), ((20187, 20214), 'PyQt5.QtCore.QCoreApplication.instance', 'QCoreApplication.instance', ([], {}), '()\n', (20212, 20214), False, 'from PyQt5.QtCore import QTimer, QTime, QCoreApplication, Qt\n')] |
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import numpy as np
from detectron.core.config import cfg
from detectron.modeling.generate_anchors import generate_anchors
from detectron.utils.c2 import const_fill
from detectron.utils.c2 import gauss_fill
from detectron.utils.net import get_group_gn
import detectron.modeling.ResNet as ResNet
import detectron.utils.blob as blob_utils
import detectron.utils.boxes as box_utils
# Lowest and highest pyramid levels in the backbone network. For FPN, we assume
# that all networks have 5 spatial reductions, each by a factor of 2. Level 1
# would correspond to the input image, hence it does not make sense to use it.
# NOTE(review): "BACKONE" is a typo for "BACKBONE"; kept as-is because these
# names are referenced throughout this module.
LOWEST_BACKONE_LVL = 2  # E.g., "conv2"-like level
HIGHEST_BACKONE_LVL = 5  # E.g., "conv5"-like level
# FPN with ResNet
def add_fpn_ResNet50_conv5_body(model):
    """Build a ResNet-50 conv5 backbone and attach FPN levels to it."""
    return add_fpn_onto_conv_body(
        model,
        ResNet.add_ResNet50_conv5_body,
        fpn_level_info_ResNet50_conv5,
        P2only=False
    )
def add_fpn_ResNet50_conv5_P2only_body(model):
    """Build a ResNet-50 conv5 backbone with FPN, exposing only the P2 level."""
    return add_fpn_onto_conv_body(
        model, ResNet.add_ResNet50_conv5_body,
        fpn_level_info_ResNet50_conv5, P2only=True
    )
def add_fpn_ResNet101_conv5_P2only_body(model):
    """Build a ResNet-101 conv5 backbone with FPN, exposing only the P2 level."""
    return add_fpn_onto_conv_body(
        model, ResNet.add_ResNet101_conv5_body,
        fpn_level_info_ResNet101_conv5, P2only=True
    )
def add_fpn_ResNet152_conv5_body(model):
    """Build a ResNet-152 conv5 backbone and attach FPN levels to it."""
    return add_fpn_onto_conv_body(
        model,
        ResNet.add_ResNet152_conv5_body,
        fpn_level_info_ResNet152_conv5,
        P2only=False
    )
def add_fpn_ResNet152_conv5_P2only_body(model):
    """Build a ResNet-152 conv5 backbone with FPN, exposing only the P2 level."""
    return add_fpn_onto_conv_body(
        model, ResNet.add_ResNet152_conv5_body,
        fpn_level_info_ResNet152_conv5, P2only=True
    )
# Functions for bolting FPN onto a backone architectures
def add_fpn_onto_conv_body(
    model, conv_body_func, fpn_level_info_func, P2only=False
):
    """Add the specified conv body to the model and then add FPN levels to it.

    Returns (blobs, dim, spatial_scales); with P2only=True only the single
    finest level (P2) blob and scale are returned.
    """
    # Note: blobs_fpn is in reversed order: [fpn5, fpn4, fpn3, fpn2];
    # similarly for dims_conv: [2048, 1024, 512, 256]
    # and for spatial_scales_fpn: [1/32, 1/16, 1/8, 1/4].
    conv_body_func(model)
    fpn_blobs, fpn_dim, fpn_scales = add_fpn(model, fpn_level_info_func())
    if not P2only:
        return fpn_blobs, fpn_dim, fpn_scales
    # The finest-resolution level (P2) is last in the reversed ordering.
    return fpn_blobs[-1], fpn_dim, fpn_scales[-1]
def add_fpn(model, fpn_level_info):
    """Add FPN connections based on the model described in the FPN paper.

    Builds the top-down pathway with lateral connections on top of the
    backbone stages described by `fpn_level_info` and returns
    (blobs_fpn, fpn_dim, spatial_scales), each ordered coarsest-first.
    """
    # FPN levels are built starting from the highest/coarsest level of the
    # backbone (usually "conv5"). First we build down, recursively constructing
    # lower/finer resolution FPN levels. Then we build up, constructing levels
    # that are even higher/coarser than the starting level.
    fpn_dim = cfg.FPN.DIM
    min_level, max_level = get_min_max_levels()
    # Number of backbone stages that receive lateral connections
    num_backbone_stages = (
        len(fpn_level_info.blobs) - (min_level - LOWEST_BACKONE_LVL)
    )
    lateral_input_blobs = fpn_level_info.blobs[:num_backbone_stages]
    output_blobs = [
        'fpn_inner_{}'.format(s)
        for s in fpn_level_info.blobs[:num_backbone_stages]
    ]
    fpn_dim_lateral = fpn_level_info.dims
    # Bug fix: this was the single string 'XavierFill, {}'; weight_init
    # expects an (initializer_name, kwargs) pair.
    xavier_fill = ('XavierFill', {})

    # For the coarsest backbone level: 1x1 conv only seeds recursion
    if cfg.FPN.USE_GN:
        # use GroupNorm (bug fix: was `model.model.ConGN`, a non-existent
        # attribute; the GroupNorm conv helper is `model.ConvGN`)
        c = model.ConvGN(
            lateral_input_blobs[0],
            output_blobs[0],  # note: this is a prefix
            dim_in=fpn_dim_lateral[0],
            dim_out=fpn_dim,
            group_gn=get_group_gn(fpn_dim),
            kernel=1,
            pad=0,
            stride=1,
            weight_init=xavier_fill,
            bias_init=const_fill(0.0)
        )
        output_blobs[0] = c  # rename it
    else:
        model.Conv(
            lateral_input_blobs[0],
            output_blobs[0],
            dim_in=fpn_dim_lateral[0],
            dim_out=fpn_dim,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=xavier_fill,
            bias_init=const_fill(0.0)
        )

    #
    # Step 1: recursively build down starting from the coarsest backbone level
    #
    # For other levels add top-down and lateral connections
    for i in range(num_backbone_stages - 1):
        add_topdown_lateral_module(
            model,
            output_blobs[i],             # top-down blob
            lateral_input_blobs[i + 1],  # lateral blob
            output_blobs[i + 1],         # next output blob
            fpn_dim,
            fpn_dim_lateral[i + 1]
        )

    # Post-hoc scale-specific 3x3 convs
    blobs_fpn = []
    spatial_scales = []
    for i in range(num_backbone_stages):
        if cfg.FPN.USE_GN:
            # use GroupNorm
            fpn_blob = model.ConvGN(
                output_blobs[i],
                'fpn_{}'.format(fpn_level_info.blobs[i]),
                dim_in=fpn_dim,
                dim_out=fpn_dim,
                group_gn=get_group_gn(fpn_dim),
                kernel=3,
                pad=1,
                stride=1,
                weight_init=xavier_fill,
                bias_init=const_fill(0.0)
            )
        else:
            # Bug fix: the output blob name was misspelled 'fon_{}', breaking
            # the 'fpn_<stage>' naming convention used by consumers.
            fpn_blob = model.Conv(
                output_blobs[i],
                'fpn_{}'.format(fpn_level_info.blobs[i]),
                dim_in=fpn_dim,
                dim_out=fpn_dim,
                kernel=3,
                pad=1,
                stride=1,
                weight_init=xavier_fill,
                bias_init=const_fill(0.0)
            )
        # Bug fix: was `blobs_fpn += {fpn_blob}` (a one-element set); use a
        # list so the intent (append) is explicit and ordering is guaranteed.
        blobs_fpn += [fpn_blob]
        spatial_scales += [fpn_level_info.spatial_scales[i]]

    #
    # Step 2: build up starting from the coarsest backbone level
    #
    # Check if we need the P6 feature map
    if not cfg.FPN.EXTRA_CONV_LEVELS and max_level == HIGHEST_BACKONE_LVL + 1:
        P6_blob_in = blobs_fpn[0]
        P6_name = P6_blob_in + '_subsampled_2x'
        # Use max pooling to simulate stride 2 subsampling
        P6_blob = model.MaxPool(P6_blob_in, P6_name, kernel=1, pad=0, stride=2)
        blobs_fpn.insert(0, P6_blob)
        spatial_scales.insert(0, spatial_scales[0] * 0.5)

    # Coarser FPN levels introduced for RetinaNet
    if cfg.FPN.EXTRA_CONV_LEVELS and max_level > HIGHEST_BACKONE_LVL:
        fpn_blob = fpn_level_info.blobs[0]
        dim_in = fpn_level_info.dims[0]
        for i in range(HIGHEST_BACKONE_LVL + 1, max_level + 1):
            fpn_blob_in = fpn_blob
            if i > HIGHEST_BACKONE_LVL + 1:
                fpn_blob_in = model.Relu(fpn_blob, fpn_blob + '_relu')
            fpn_blob = model.Conv(
                fpn_blob_in,
                'fpn_' + str(i),
                dim_in=dim_in,
                dim_out=fpn_dim,
                kernel=3,
                pad=1,
                stride=2,
                weight_init=xavier_fill,
                bias_init=const_fill(0.0)
            )
            dim_in = fpn_dim
            blobs_fpn.insert(0, fpn_blob)
            spatial_scales.insert(0, spatial_scales[0] * 0.5)

    return blobs_fpn, fpn_dim, spatial_scales
def add_topdown_lateral_module(model, fpn_top, fpn_lateral, fpn_bottom, dim_top, dim_lateral):
    """Add a top-down lateral module: a lateral 1x1 conv plus a 2x upsampled
    top-down path, summed into `fpn_bottom`."""
    lateral_name = fpn_bottom + '_lateral'
    # Lateral weights may be zero-initialized (cfg.FPN.ZERO_INIT_LATERAL);
    # otherwise Xavier initialization is used.
    lateral_weight_init = (
        const_fill(0.0) if cfg.FPN.ZERO_INIT_LATERAL else ('XavierFill', {})
    )
    if cfg.FPN.USE_GN:
        # Lateral 1x1 conv with GroupNorm
        lat = model.ConvGN(
            fpn_lateral,
            lateral_name,
            dim_in=dim_lateral,
            dim_out=dim_top,
            group_gn=get_group_gn(dim_top),
            kernel=1,
            pad=0,
            stride=1,
            weight_init=lateral_weight_init,
            bias_init=const_fill(0.0)
        )
    else:
        # Plain lateral 1x1 conv
        lat = model.Conv(
            fpn_lateral,
            lateral_name,
            dim_in=dim_lateral,
            dim_out=dim_top,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=lateral_weight_init,
            bias_init=const_fill(0.0)
        )
    # Top-down 2x upsampling
    td = model.net.UpsampleNearest(fpn_top, fpn_bottom + '_topdown', scale=2)
    # Sum lateral and top-down paths
    model.net.Sum([lat, td], fpn_bottom)
def get_min_max_levels():
    """The min and max FPN levels required for supporting RPN and/or RoI
    transform operations on multiple FPN levels."""
    # Default to the full backbone range; narrow it when only one of the two
    # multilevel features is enabled.
    min_level, max_level = LOWEST_BACKONE_LVL, HIGHEST_BACKONE_LVL
    multilevel_rpn = cfg.FPN.MULTILEVEL_RPN
    multilevel_rois = cfg.FPN.MULTILEVEL_ROIS
    if multilevel_rpn and not multilevel_rois:
        min_level = cfg.FPN.RPN_MIN_LEVEL
        max_level = cfg.FPN.RPN_MAX_LEVEL
    if multilevel_rois and not multilevel_rpn:
        min_level = cfg.FPN.ROI_MIN_LEVEL
        max_level = cfg.FPN.ROI_MAX_LEVEL
    return min_level, max_level
# RPN with an FPN backbone
def add_fpn_rpn_outputs(model, blobs_in, dim_in, spatial_scales):
    """Add RPN outputs (objectness logits and bbox deltas) on an FPN backbone.

    One RPN head is applied per FPN level; the conv weights and biases are
    created at the finest level (k_min) and shared by all other levels.
    """
    num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)
    dim_out = dim_in
    k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of the pyramid
    k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of the pyramid
    assert len(blobs_in) == k_max - k_min + 1
    for lvl in range(k_min, k_max + 1):
        bl_in = blobs_in[k_max - lvl]  # blobs_in is in reversed order
        sc = spatial_scales[k_max - lvl]  # in reversed order as well
        slvl = str(lvl)
        if lvl == k_min:
            # Create conv ops with randomly initialized weights and
            # zeroed biases for the first FPN level; these will be shared by
            # all other FPN levels
            # RPN hidden representation
            conv_rpn_fpn = model.Conv(
                bl_in,
                'conv_rpn_fpn' + slvl,
                dim_in,
                dim_out,
                kernel=3,
                pad=1,
                stride=1,
                weight_init=gauss_fill(0.01),
                bias_init=const_fill(0.0)
            )
            model.Relu(conv_rpn_fpn, conv_rpn_fpn)
            # Proposal classification scores
            rpn_cls_logits_fpn = model.Conv(
                conv_rpn_fpn,
                'rpn_cls_logits_fpn' + slvl,
                dim_in,
                num_anchors,
                kernel=1,
                pad=0,
                stride=1,
                weight_init=gauss_fill(0.01),
                bias_init=const_fill(0.0)
            )
            # Proposal bbox regression deltas
            rpn_bbox_pred_fpn = model.Conv(
                conv_rpn_fpn,
                'rpn_bbox_pred_fpn' + slvl,
                dim_in,
                4 * num_anchors,
                kernel=1,
                pad=0,
                stride=1,
                weight_init=gauss_fill(0.01),
                bias_init=const_fill(0.0)
            )
        else:
            # Share weights and biases
            sk_min = str(k_min)
            # RPN hidden representation
            conv_rpn_fpn = model.ConvShared(
                bl_in,
                'conv_rpn_fpn' + slvl,
                dim_in,
                dim_out,
                kernel=3,
                pad=1,
                stride=1,
                weight='conv_rpn_fpn' + sk_min + '_w',
                bias='conv_rpn_fpn' + sk_min + '_b'
            )
            model.Relu(conv_rpn_fpn, conv_rpn_fpn)
            # Proposal classification scores
            rpn_cls_logits_fpn = model.ConvShared(
                conv_rpn_fpn,
                'rpn_cls_logits_fpn' + slvl,
                dim_in,
                num_anchors,
                kernel=1,
                pad=0,
                stride=1,
                weight='rpn_cls_logits_fpn' + sk_min + '_w',
                # Bug fix: the shared bias blob name used a hyphen
                # ('rpn_cls_logits-fpn...'), which does not match the
                # 'rpn_cls_logits_fpn<k_min>_b' blob created above, so weight
                # sharing would reference a non-existent blob.
                bias='rpn_cls_logits_fpn' + sk_min + '_b'
            )
            # Proposal bbox regression deltas
            rpn_bbox_pred_fpn = model.ConvShared(
                conv_rpn_fpn,
                'rpn_bbox_pred_fpn' + slvl,
                dim_in,
                4 * num_anchors,
                kernel=1,
                pad=0,
                stride=1,
                weight='rpn_bbox_pred_fpn' + sk_min + '_w',
                bias='rpn_bbox_pred_fpn' + sk_min + '_b'
            )
        if not model.train or cfg.MODEL.FASTER_RCNN:
            # Proposals are needed during:
            # 1) inference (== not model.train) for RPN only and Faster R-CNN
            # OR
            # 2) training for Faster R-CNN
            # Otherwise (== training for RPN only), proposals are not needed.
            lvl_anchors = generate_anchors(
                stride=2. ** lvl,
                sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2. ** (lvl - k_min), ),
                aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS
            )
            rpn_cls_probs_fpn = model.net.Sigmoid(
                rpn_cls_logits_fpn, 'rpn_cls_probs_fpn' + slvl
            )
            # NOTE(review): upstream Detectron names this helper
            # GenerateProposals and passes spatial_scale= (singular); confirm
            # this model's helper name/keyword before relying on it.
            model.GenerateProposal(
                [rpn_cls_probs_fpn, rpn_bbox_pred_fpn, 'im_info'],
                ['rpn_rois_fpn' + slvl, 'rpn_roi_probs_fpn' + slvl],
                anchors=lvl_anchors,
                spatial_scales=sc
            )
def add_fpn_rpn_losses(model):
    """Add RPN-on-FPN specific losses (per-level classification and bbox
    regression), returning the accumulated loss gradients dict."""
    loss_gradients = {}
    for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
        slvl = str(lvl)
        # Spatially narrow the full-sized RPN label arrays to match the
        # feature map shape
        model.net.SpatialNarrowAs(
            ['rpn_labels_int32_wide_fpn' + slvl, 'rpn_cls_logits_fpn' + slvl],
            'rpn_labels_int32_fpn' + slvl
        )
        for key in ('targets', 'inside_weights', 'outside_weights'):
            model.net.SpatialNarrowAs(
                [
                    'rpn_bbox_' + key + '_wide_fpn' + slvl,
                    'rpn_bbox_pred_fpn' + slvl
                ],
                'rpn_bbox_' + key + '_fpn' + slvl
            )
        # Bug fix: the logits input blob was misspelled 'ron_cls_logits_fpn';
        # the blob produced by add_fpn_rpn_outputs is 'rpn_cls_logits_fpn'.
        loss_rpn_cls_fpn = model.net.SigmoidCrossEntropyLoss(
            ['rpn_cls_logits_fpn' + slvl, 'rpn_labels_int32_fpn' + slvl],
            'loss_rpn_cls_fpn' + slvl,
            normalize=0,
            scale=(
                model.GetLossScale() / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
                cfg.TRAIN.IMS_PER_BATCH
            )
        )
        # Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is
        # handled by (1) setting bbox outside weights and (2) SmoothL1Loss
        # normalizes by IMS_PER_BATCH
        loss_rpn_bbox_fpn = model.net.SmoothL1Loss(
            [
                'rpn_bbox_pred_fpn' + slvl, 'rpn_bbox_targets_fpn' + slvl,
                'rpn_bbox_inside_weights_fpn' + slvl,
                'rpn_bbox_outside_weights_fpn' + slvl
            ],
            'loss_rpn_bbox_fpn' + slvl,
            beta=1. / 9.,
            scale=model.GetLossScale()
        )
        loss_gradients.update(
            blob_utils.get_loss_gradients(
                model, [loss_rpn_cls_fpn, loss_rpn_bbox_fpn]
            )
        )
        model.AddLosses(['loss_rpn_cls_fpn' + slvl, 'loss_rpn_bbox_fpn' + slvl])
    return loss_gradients
# Helper functions for working with multilevel FPN RoIs
def map_rois_to_fpn_levels(rois, k_min, k_max):
    """Determine which FPN level each RoI should map to.

    Implements the area-based heuristic (Eqn. 1) of the FPN paper:
    target level = floor(lvl0 + log2(sqrt(area) / s0)), clipped to
    [k_min, k_max].

    Args:
        rois: 2D array of RoI boxes, one box per row (format assumed to
            match box_utils.boxes_area — verify against callers).
        k_min: finest (lowest) pyramid level id.
        k_max: coarsest (highest) pyramid level id.

    Returns:
        Array of per-RoI target level ids, clipped to [k_min, k_max].
    """
    # Compute level ids
    s = np.sqrt(box_utils.boxes_area(rois))
    s0 = cfg.FPN.ROI_CANONICAL_SCALE
    # Bug fix: the config key is ROI_CANONICAL_LEVEL (all caps);
    # 'ROI_CANONICAL_level' is not defined and would raise AttributeError.
    lvl0 = cfg.FPN.ROI_CANONICAL_LEVEL
    # Small epsilon guards log2(0) for degenerate (zero-area) boxes.
    target_lvls = np.floor(lvl0 + np.log2(s / s0 + 1e-6))
    target_lvls = np.clip(target_lvls, k_min, k_max)
    return target_lvls
def add_multilevel_roi_blobs(
    blobs, blob_prefix, rois, target_lvls, lvl_min, lvl_max):
    """Add RoI blobs for multiple FPN levels to the blobs dict.

    Writes one '<blob_prefix>_fpn<lvl>' entry per level plus a
    '<blob_prefix>_idx_restore_int32' permutation that restores the
    original RoI order from the per-level concatenation.
    """
    order_parts = []
    stacked_parts = [np.zeros((0, 5), dtype=np.float32)]
    for level in range(lvl_min, lvl_max + 1):
        key = blob_prefix + '_fpn' + str(level)
        on_level = np.where(target_lvls == level)[0]
        blobs[key] = rois[on_level, :]
        order_parts.append(on_level)
        stacked_parts.append(blobs[key])
    # Seed with an empty float array so the concatenated dtype matches the
    # original accumulator-based implementation.
    rois_idx_order = np.concatenate([np.empty((0, ))] + order_parts)
    rois_idx_restore = np.argsort(rois_idx_order).astype(np.int32, copy=False)
    blobs[blob_prefix + '_idx_restore_int32'] = rois_idx_restore
    # Sanity check that restore order is correct
    assert (np.vstack(stacked_parts)[rois_idx_restore] == rois).all()
# FPN level info for stages 5, 4, 3, 2 for select models ()
# Lightweight record holding, per backbone, the stage blob names, their
# channel dims, and their spatial scales relative to the input image.
FpnLevelInfo = collections.namedtuple(
    'FpnLevelInfo',
    ['blobs', 'dims', 'spatial_scales'],
)
def fpn_level_info_ResNet50_conv5():
    """FPN level info (blob names, dims, spatial scales) for ResNet-50-conv5.

    Entries are ordered from the coarsest stage (res5) to the finest (res2).
    """
    return FpnLevelInfo(
        # Bug fix: the res3 sum blob is 'res3_3_sum' ('res3_s3_sum' does not
        # exist — compare the ResNet-101/152 helpers below); spatial_scales
        # is a tuple for consistency with those helpers.
        blobs=('res5_2_sum', 'res4_5_sum', 'res3_3_sum', 'res2_2_sum'),
        dims=(2048, 1024, 512, 256),
        spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)
    )
def fpn_level_info_ResNet101_conv5():
    """FPN level info (blob names, dims, spatial scales) for ResNet-101-conv5."""
    # Stages ordered coarsest (res5) to finest (res2).
    info = FpnLevelInfo(
        blobs=('res5_2_sum', 'res4_22_sum', 'res3_3_sum', 'res2_2_sum'),
        dims=(2048, 1024, 512, 256),
        spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)
    )
    return info
def fpn_level_info_ResNet152_conv5():
    """FPN level info (blob names, dims, spatial scales) for ResNet-152-conv5."""
    return FpnLevelInfo(
        blobs=('res5_2_sum', 'res4_35_sum', 'res3_7_sum', 'res2_2_sum'),
        dims=(2048, 1024, 512, 256),
        spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)
    ) | [
"numpy.clip",
"collections.namedtuple",
"detectron.utils.boxes.boxes_area",
"numpy.where",
"detectron.utils.net.get_group_gn",
"detectron.utils.c2.gauss_fill",
"numpy.argsort",
"numpy.zeros",
"numpy.empty",
"detectron.modeling.generate_anchors.generate_anchors",
"numpy.concatenate",
"detectron... | [((16407, 16482), 'collections.namedtuple', 'collections.namedtuple', (['"""FpnLevelInfo"""', "['blobs', 'dims', 'spatial_scales']"], {}), "('FpnLevelInfo', ['blobs', 'dims', 'spatial_scales'])\n", (16429, 16482), False, 'import collections\n'), ((15427, 15461), 'numpy.clip', 'np.clip', (['target_lvls', 'k_min', 'k_max'], {}), '(target_lvls, k_min, k_max)\n', (15434, 15461), True, 'import numpy as np\n'), ((15671, 15685), 'numpy.empty', 'np.empty', (['(0,)'], {}), '((0,))\n', (15679, 15685), True, 'import numpy as np\n'), ((15706, 15740), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {'dtype': 'np.float32'}), '((0, 5), dtype=np.float32)\n', (15714, 15740), True, 'import numpy as np\n'), ((15240, 15266), 'detectron.utils.boxes.boxes_area', 'box_utils.boxes_area', (['rois'], {}), '(rois)\n', (15260, 15266), True, 'import detectron.utils.boxes as box_utils\n'), ((15926, 15967), 'numpy.concatenate', 'np.concatenate', (['(rois_idx_order, idx_lvl)'], {}), '((rois_idx_order, idx_lvl))\n', (15940, 15967), True, 'import numpy as np\n'), ((12574, 12718), 'detectron.modeling.generate_anchors.generate_anchors', 'generate_anchors', ([], {'stride': '(2.0 ** lvl)', 'sizes': '(cfg.FPN.RPN_ANCHOR_START_SIZE * 2.0 ** (lvl - k_min),)', 'aspect_ratios': 'cfg.FPN.RPN_ASPECT_RATIOS'}), '(stride=2.0 ** lvl, sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * \n 2.0 ** (lvl - k_min),), aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS)\n', (12590, 12718), False, 'from detectron.modeling.generate_anchors import generate_anchors\n'), ((14900, 14975), 'detectron.utils.blob.get_loss_gradients', 'blob_utils.get_loss_gradients', (['model', '[loss_rpn_cls_fpn, loss_rpn_bbox_fpn]'], {}), '(model, [loss_rpn_cls_fpn, loss_rpn_bbox_fpn])\n', (14929, 14975), True, 'import detectron.utils.blob as blob_utils\n'), ((15386, 15409), 'numpy.log2', 'np.log2', (['(s / s0 + 1e-06)'], {}), '(s / s0 + 1e-06)\n', (15393, 15409), True, 'import numpy as np\n'), ((15803, 15831), 'numpy.where', 'np.where', (['(target_lvls == 
lvl)'], {}), '(target_lvls == lvl)\n', (15811, 15831), True, 'import numpy as np\n'), ((16102, 16128), 'numpy.argsort', 'np.argsort', (['rois_idx_order'], {}), '(rois_idx_order)\n', (16112, 16128), True, 'import numpy as np\n'), ((3705, 3726), 'detectron.utils.net.get_group_gn', 'get_group_gn', (['fpn_dim'], {}), '(fpn_dim)\n', (3717, 3726), False, 'from detectron.utils.net import get_group_gn\n'), ((3850, 3865), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (3860, 3865), False, 'from detectron.utils.c2 import const_fill\n'), ((4191, 4206), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (4201, 4206), False, 'from detectron.utils.c2 import const_fill\n'), ((7521, 7542), 'detectron.utils.net.get_group_gn', 'get_group_gn', (['dim_top'], {}), '(dim_top)\n', (7533, 7542), False, 'from detectron.utils.net import get_group_gn\n'), ((7750, 7765), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (7760, 7765), False, 'from detectron.utils.c2 import const_fill\n'), ((8162, 8177), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (8172, 8177), False, 'from detectron.utils.c2 import const_fill\n'), ((5034, 5055), 'detectron.utils.net.get_group_gn', 'get_group_gn', (['fpn_dim'], {}), '(fpn_dim)\n', (5046, 5055), False, 'from detectron.utils.net import get_group_gn\n'), ((5199, 5214), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (5209, 5214), False, 'from detectron.utils.c2 import const_fill\n'), ((5576, 5591), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (5586, 5591), False, 'from detectron.utils.c2 import const_fill\n'), ((6935, 6950), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (6945, 6950), False, 'from detectron.utils.c2 import const_fill\n'), ((7632, 7647), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (7642, 7647), False, 'from detectron.utils.c2 
import const_fill\n'), ((8040, 8055), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (8050, 8055), False, 'from detectron.utils.c2 import const_fill\n'), ((9924, 9940), 'detectron.utils.c2.gauss_fill', 'gauss_fill', (['(0.01)'], {}), '(0.01)\n', (9934, 9940), False, 'from detectron.utils.c2 import gauss_fill\n'), ((9968, 9983), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (9978, 9983), False, 'from detectron.utils.c2 import const_fill\n'), ((10370, 10386), 'detectron.utils.c2.gauss_fill', 'gauss_fill', (['(0.01)'], {}), '(0.01)\n', (10380, 10386), False, 'from detectron.utils.c2 import gauss_fill\n'), ((10414, 10429), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (10424, 10429), False, 'from detectron.utils.c2 import const_fill\n'), ((10768, 10784), 'detectron.utils.c2.gauss_fill', 'gauss_fill', (['(0.01)'], {}), '(0.01)\n', (10778, 10784), False, 'from detectron.utils.c2 import gauss_fill\n'), ((10812, 10827), 'detectron.utils.c2.const_fill', 'const_fill', (['(0.0)'], {}), '(0.0)\n', (10822, 10827), False, 'from detectron.utils.c2 import const_fill\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Write supervised training tasks to TFRecord dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import sys
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer.tasks.robust_fill import sample_random
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
sys.path.append('../../../../')
gfile = tf.io.gfile
FLAGS = flags.FLAGS

flags.DEFINE_integer('num_work_units', 1, 'Total number of work units.')
flags.DEFINE_integer('seed', 42, 'Fixed random seed.')
flags.DEFINE_integer('num_tasks', 100000, 'Number of tasks to write.')
flags.DEFINE_integer('num_strings_per_task', 4,
                     'Number of input/output strings per task.')
flags.DEFINE_integer('max_expressions', 10,
                     'Maximum number of expressions in program.')
# Bug fix: help text said 'Maximum' (copy-paste from the flag above).
flags.DEFINE_integer('min_expressions', 1,
                     'Minimum number of expressions in program.')
flags.DEFINE_integer('max_input_length', 20,
                     'Maximum number of characters in input strings.')
flags.DEFINE_string('save_dir', None, 'Directory to save results to.')
# Typo fix in help text: 'parial' -> 'partial'.
flags.DEFINE_boolean('split_program', False,
                     'Whether to split program by partial program.')
flags.DEFINE_boolean('split_outputs', False,
                     'Whether to split outputs by partial program.')
def _bytes_feature(value):
  """Wraps a single bytes value in a tf.train.Feature (bytes_list)."""
  bytes_list = tf.train.BytesList(value=[value])
  return tf.train.Feature(bytes_list=bytes_list)
def serialize_example(task,
                      token_id_table):
  """Serializes a single task into a tf.Example proto string.

  The I/O examples are flattened into one string of the form
  "in1<out1>in2<out2>" (the trailing '>' is dropped).  With
  FLAGS.split_outputs, each output is further split into '|'-separated
  per-expression pieces.  The program is encoded as space-separated
  token ids; with FLAGS.split_program the per-expression encodings are
  '|'-separated.

  Args:
    task: sampled task exposing `inputs`, `outputs` and `program`.
    token_id_table: mapping from DSL tokens to integer ids.

  Returns:
    The serialized `tf.train.Example` bytes.
  """
  if FLAGS.split_outputs:
    segments = []
    for inp in task.inputs:
      piece = inp + '<'
      for expr in task.program.expressions:
        piece += expr(inp) + '|'
      # Drop the trailing separator and close the segment with '>'.
      segments.append(piece[:-1] + '>')
    io_string = ''.join(segments)[:-1]
  else:
    io_string = ''.join(
        inp + '<' + out + '>' for inp, out in zip(task.inputs, task.outputs)
    )[:-1]

  if FLAGS.split_program:
    per_expr = [
        ' '.join(map(str, expr.encode(token_id_table)))
        for expr in task.program.expressions
    ]
    program_string = '|'.join(per_expr)
  else:
    # Encode the whole program, dropping the final token (presumably an
    # end-of-sequence marker — confirm against the tokenizer).
    program_string = ' '.join(
        map(str, task.program.encode(token_id_table)[:-1]))

  feature = {
      'i/o': _bytes_feature(str.encode(io_string)),
      'program_encoding': _bytes_feature(str.encode(program_string)),
  }
  # Create a Features message using tf.train.Example.
  example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
  return example_proto.SerializeToString()
def main(_):
  """Samples random robust-fill tasks and writes them to one TFRecord shard."""
  tf.enable_v2_behavior()

  # Seed every RNG in play so dataset generation is reproducible.
  tf.random.set_seed(FLAGS.seed)
  np.random.seed(FLAGS.seed)
  random.seed(FLAGS.seed)

  _, token_id_table = dsl_tokens.build_token_tables()

  if not gfile.isdir(FLAGS.save_dir):
    gfile.mkdir(FLAGS.save_dir)
  record_path = os.path.join(FLAGS.save_dir,
                             'program_tasks.tf_records-00000-of-00001')

  # Write the `tf.Example` observations to the file.
  with tf.io.TFRecordWriter(record_path) as writer:
    for _ in range(FLAGS.num_tasks):
      task = sample_random.random_task(
          max_expressions=FLAGS.max_expressions,
          min_expressions=FLAGS.min_expressions,
          max_k=3,
          max_input_tokens=5,
          max_input_length=FLAGS.max_input_length,
          max_output_length=FLAGS.max_input_length * FLAGS.max_expressions,
          num_examples=FLAGS.num_strings_per_task,
      )
      writer.write(serialize_example(task, token_id_table))


if __name__ == '__main__':
  app.run(main)
| [
"tensorflow.compat.v2.train.Features",
"latent_programmer.tasks.robust_fill.sample_random.random_task",
"absl.flags.DEFINE_integer",
"tensorflow.compat.v2.random.set_seed",
"latent_programmer.tasks.robust_fill.tokens.build_token_tables",
"os.path.join",
"absl.flags.DEFINE_boolean",
"random.seed",
"a... | [((1056, 1087), 'sys.path.append', 'sys.path.append', (['"""../../../../"""'], {}), "('../../../../')\n", (1071, 1087), False, 'import sys\n'), ((1130, 1202), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_work_units"""', '(1)', '"""Total number of work units."""'], {}), "('num_work_units', 1, 'Total number of work units.')\n", (1150, 1202), False, 'from absl import flags\n'), ((1203, 1257), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""', '(42)', '"""Fixed random seed."""'], {}), "('seed', 42, 'Fixed random seed.')\n", (1223, 1257), False, 'from absl import flags\n'), ((1259, 1329), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_tasks"""', '(100000)', '"""Number of tasks to write."""'], {}), "('num_tasks', 100000, 'Number of tasks to write.')\n", (1279, 1329), False, 'from absl import flags\n'), ((1330, 1425), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_strings_per_task"""', '(4)', '"""Number of input/output strings per task."""'], {}), "('num_strings_per_task', 4,\n 'Number of input/output strings per task.')\n", (1350, 1425), False, 'from absl import flags\n'), ((1443, 1535), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_expressions"""', '(10)', '"""Maximum number of expressions in program."""'], {}), "('max_expressions', 10,\n 'Maximum number of expressions in program.')\n", (1463, 1535), False, 'from absl import flags\n'), ((1553, 1644), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""min_expressions"""', '(1)', '"""Maximum number of expressions in program."""'], {}), "('min_expressions', 1,\n 'Maximum number of expressions in program.')\n", (1573, 1644), False, 'from absl import flags\n'), ((1662, 1760), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_input_length"""', '(20)', '"""Maximum number of characters in input strings."""'], {}), "('max_input_length', 20,\n 'Maximum number of characters in input strings.')\n", (1682, 1760), False, 
'from absl import flags\n'), ((1779, 1849), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_dir"""', 'None', '"""Directory to save results to."""'], {}), "('save_dir', None, 'Directory to save results to.')\n", (1798, 1849), False, 'from absl import flags\n'), ((1851, 1946), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""split_program"""', '(False)', '"""Whether to split program by parial program."""'], {}), "('split_program', False,\n 'Whether to split program by parial program.')\n", (1871, 1946), False, 'from absl import flags\n'), ((1964, 2060), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""split_outputs"""', '(False)', '"""Whether to split outputs by partial program."""'], {}), "('split_outputs', False,\n 'Whether to split outputs by partial program.')\n", (1984, 2060), False, 'from absl import flags\n'), ((3504, 3527), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (3525, 3527), True, 'import tensorflow.compat.v2 as tf\n'), ((3531, 3561), 'tensorflow.compat.v2.random.set_seed', 'tf.random.set_seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (3549, 3561), True, 'import tensorflow.compat.v2 as tf\n'), ((3564, 3590), 'numpy.random.seed', 'np.random.seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (3578, 3590), True, 'import numpy as np\n'), ((3593, 3616), 'random.seed', 'random.seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (3604, 3616), False, 'import random\n'), ((3640, 3671), 'latent_programmer.tasks.robust_fill.tokens.build_token_tables', 'dsl_tokens.build_token_tables', ([], {}), '()\n', (3669, 3671), True, 'from latent_programmer.tasks.robust_fill import tokens as dsl_tokens\n'), ((3761, 3832), 'os.path.join', 'os.path.join', (['FLAGS.save_dir', '"""program_tasks.tf_records-00000-of-00001"""'], {}), "(FLAGS.save_dir, 'program_tasks.tf_records-00000-of-00001')\n", (3773, 3832), False, 'import os\n'), ((4495, 4508), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (4502, 4508), 
False, 'from absl import app\n'), ((3924, 3958), 'tensorflow.compat.v2.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['worker_fname'], {}), '(worker_fname)\n', (3944, 3958), True, 'import tensorflow.compat.v2 as tf\n'), ((2195, 2228), 'tensorflow.compat.v2.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (2213, 2228), True, 'import tensorflow.compat.v2 as tf\n'), ((3408, 3442), 'tensorflow.compat.v2.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (3425, 3442), True, 'import tensorflow.compat.v2 as tf\n'), ((4020, 4318), 'latent_programmer.tasks.robust_fill.sample_random.random_task', 'sample_random.random_task', ([], {'max_expressions': 'FLAGS.max_expressions', 'min_expressions': 'FLAGS.min_expressions', 'max_k': '(3)', 'max_input_tokens': '(5)', 'max_input_length': 'FLAGS.max_input_length', 'max_output_length': '(FLAGS.max_input_length * FLAGS.max_expressions)', 'num_examples': 'FLAGS.num_strings_per_task'}), '(max_expressions=FLAGS.max_expressions,\n min_expressions=FLAGS.min_expressions, max_k=3, max_input_tokens=5,\n max_input_length=FLAGS.max_input_length, max_output_length=FLAGS.\n max_input_length * FLAGS.max_expressions, num_examples=FLAGS.\n num_strings_per_task)\n', (4045, 4318), False, 'from latent_programmer.tasks.robust_fill import sample_random\n')] |
# -*- coding: utf-8 -*-
# Visualizzazione dell'andamento della funzione di errore quadratico nella regressione
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
# +
# Global matplotlib styling shared by every figure in this script.
plt.style.use('fivethirtyeight')
plt.rcParams.update({
    'font.family': 'sans-serif',
    'font.serif': 'Ubuntu',
    'font.monospace': 'Ubuntu Mono',
    'font.size': 10,
    'axes.labelsize': 10,
    'axes.labelweight': 'bold',
    'axes.titlesize': 10,
    'xtick.labelsize': 8,
    'ytick.labelsize': 8,
    'legend.fontsize': 10,
    'figure.titlesize': 12,
    'image.cmap': 'jet',
    'image.interpolation': 'none',
    'figure.figsize': (16, 8),
    'lines.linewidth': 2,
    'lines.markersize': 8,
})
# xkcd color names (note: this list is overwritten by a seaborn palette
# a few lines below, before it is ever used).
colors = ['xkcd:pale orange', 'xkcd:sea blue', 'xkcd:pale red', 'xkcd:sage green', 'xkcd:terra cotta', 'xkcd:dull purple', 'xkcd:teal', 'xkcd:goldenrod', 'xkcd:cadet blue',
          'xkcd:scarlet']
# -
# define a vector of colors
colors = sns.color_palette("husl", 4)
# declare some graphical properties of the figure
sns.set(style="darkgrid", context='paper', palette=colors, rc={"figure.figsize": (16, 8),'image.cmap': 'jet', 'lines.linewidth':.7})
# read the data into a pandas dataframe
data = pd.read_csv("../dataset/cars.csv", delimiter=',', header=0, names=['X','y'])
# compute the number of data points
n = len(data)
# visualize the data with a scatter plot
fig = plt.figure()
fig.patch.set_facecolor('white')
ax = fig.gca()
ax.scatter(data['X'], data['y'], s=40,c='r', marker='o', alpha=.5)
plt.xlabel(u'Velocità in mph', fontsize=14)
plt.ylabel('Distanza di arresto in ft', fontsize=14)
plt.show()
# extract the feature array X from the dataframe and prepend a column of ones
X=np.array(data['X']).reshape(-1,1)
X = np.column_stack((np.ones(n), X))
# extract the target-value array t from the dataframe
t=np.array(data['y']).reshape(-1,1)
# show the distribution of the mean squared error as the coefficients vary
# set of values considered for the coefficients
w0_list = np.linspace(-100, 100, 100)
w1_list = np.linspace(-100, 100, 100)
# create a grid of coefficient-value pairs
w0, w1 = np.meshgrid(w0_list, w1_list)
# define the function to evaluate at each grid point
def error(v1, v2):
    """Mean squared error of the linear model with coefficients (v1, v2).

    Relies on the module-level design matrix X, target vector t and
    sample count n defined above.
    """
    coeffs = np.array((v1, v2)).reshape(-1, 1)
    residuals = np.dot(X, coeffs) - t
    return np.dot(residuals.T, residuals)[0, 0] / (2 * n)
# Evaluate the error function on every (w0, w1) grid point;
# np.vectorize broadcasts the scalar function over the two grid arrays.
v_error=np.vectorize(error)
e=v_error(w0,w1).T
# 3D surface plot of the mean squared error over the coefficient grid.
fig = plt.figure()
fig.patch.set_facecolor('white')
ax = fig.gca(projection='3d')
surf=ax.plot_surface(w0, w1, e, rstride=1, cstride=1, cmap=plt.cm.jet , linewidth=0, antialiased=True)
ax.tick_params(axis='x', labelsize=8)
ax.tick_params(axis='y', labelsize=8)
ax.tick_params(axis='z', labelsize=8)
# Title (Italian): "mean squared error as the coefficients w0, w1 vary".
plt.xlabel(r"$w_0$", fontsize=12)
plt.ylabel(r"$w_1$", fontsize=12)
plt.title(r"Errore quadratico medio al variare dei coefficienti $w_0,w_1$", fontsize=12)
fig.colorbar(surf, shrink=0.5, aspect=7, cmap=plt.cm.jet)
plt.show()
# 2D heatmap view of the same error surface.
fig = plt.figure(figsize=(12,12))
fig.patch.set_facecolor('white')
ax = fig.gca()
im = plt.imshow(e, origin='lower', extent=(w0_list.min(),w0_list.max(),w1_list.min(), w1_list.max()), aspect='auto',alpha=.8)
#plt.contour(w0, w1, e,color='r', lw=0.7)
ax.tick_params(axis='x', labelsize=8)
ax.tick_params(axis='y', labelsize=8)
# Title (Italian): "mean squared error as the coefficients w0, w1 vary".
plt.xlabel(r"$w_0$", fontsize=12)
plt.ylabel(r"$w_1$", fontsize=12)
plt.title(r"Errore quadratico medio al variare dei coefficienti $w_0,w_1$", fontsize=12)
fig.colorbar(im, shrink=0.5, aspect=7, cmap=plt.cm.jet)
plt.show()
| [
"seaborn.set",
"matplotlib.pyplot.title",
"numpy.ones",
"seaborn.color_palette",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.dot",
"numpy.meshgrid",
"numpy.vect... | [((249, 281), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (262, 281), True, 'import matplotlib.pyplot as plt\n'), ((1145, 1173), 'seaborn.color_palette', 'sns.color_palette', (['"""husl"""', '(4)'], {}), "('husl', 4)\n", (1162, 1173), True, 'import seaborn as sns\n'), ((1224, 1364), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""', 'context': '"""paper"""', 'palette': 'colors', 'rc': "{'figure.figsize': (16, 8), 'image.cmap': 'jet', 'lines.linewidth': 0.7}"}), "(style='darkgrid', context='paper', palette=colors, rc={\n 'figure.figsize': (16, 8), 'image.cmap': 'jet', 'lines.linewidth': 0.7})\n", (1231, 1364), True, 'import seaborn as sns\n'), ((1400, 1477), 'pandas.read_csv', 'pd.read_csv', (['"""../dataset/cars.csv"""'], {'delimiter': '""","""', 'header': '(0)', 'names': "['X', 'y']"}), "('../dataset/cars.csv', delimiter=',', header=0, names=['X', 'y'])\n", (1411, 1477), True, 'import pandas as pd\n'), ((1564, 1576), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1574, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1735), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['u"""Velocità in mph"""'], {'fontsize': '(14)'}), "(u'Velocità in mph', fontsize=14)\n", (1702, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1788), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distanza di arresto in ft"""'], {'fontsize': '(14)'}), "('Distanza di arresto in ft', fontsize=14)\n", (1746, 1788), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1797, 1799), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2217), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', '(100)'], {}), '(-100, 100, 100)\n', (2201, 2217), True, 'import numpy as np\n'), ((2228, 2255), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', '(100)'], {}), '(-100, 100, 100)\n', (2239, 2255), True, 'import numpy as np\n'), 
((2306, 2335), 'numpy.meshgrid', 'np.meshgrid', (['w0_list', 'w1_list'], {}), '(w0_list, w1_list)\n', (2317, 2335), True, 'import numpy as np\n'), ((2538, 2557), 'numpy.vectorize', 'np.vectorize', (['error'], {}), '(error)\n', (2550, 2557), True, 'import numpy as np\n'), ((2585, 2597), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2595, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2878, 2910), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$w_0$"""'], {'fontsize': '(12)'}), "('$w_0$', fontsize=12)\n", (2888, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$w_1$"""'], {'fontsize': '(12)'}), "('$w_1$', fontsize=12)\n", (2922, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2946, 3037), 'matplotlib.pyplot.title', 'plt.title', (['"""Errore quadratico medio al variare dei coefficienti $w_0,w_1$"""'], {'fontsize': '(12)'}), "('Errore quadratico medio al variare dei coefficienti $w_0,w_1$',\n fontsize=12)\n", (2955, 3037), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3103), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3101, 3103), True, 'import matplotlib.pyplot as plt\n'), ((3111, 3139), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (3121, 3139), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3463), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$w_0$"""'], {'fontsize': '(12)'}), "('$w_0$', fontsize=12)\n", (3441, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3465, 3497), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$w_1$"""'], {'fontsize': '(12)'}), "('$w_1$', fontsize=12)\n", (3475, 3497), True, 'import matplotlib.pyplot as plt\n'), ((3499, 3590), 'matplotlib.pyplot.title', 'plt.title', (['"""Errore quadratico medio al variare dei coefficienti $w_0,w_1$"""'], {'fontsize': '(12)'}), "('Errore quadratico medio al variare dei coefficienti $w_0,w_1$',\n fontsize=12)\n", (3508, 3590), True, 
'import matplotlib.pyplot as plt\n'), ((3644, 3654), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3652, 3654), True, 'import matplotlib.pyplot as plt\n'), ((1887, 1906), 'numpy.array', 'np.array', (["data['X']"], {}), "(data['X'])\n", (1895, 1906), True, 'import numpy as np\n'), ((1942, 1952), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1949, 1952), True, 'import numpy as np\n'), ((2012, 2031), 'numpy.array', 'np.array', (["data['y']"], {}), "(data['y'])\n", (2020, 2031), True, 'import numpy as np\n'), ((2474, 2490), 'numpy.dot', 'np.dot', (['X', 'theta'], {}), '(X, theta)\n', (2480, 2490), True, 'import numpy as np\n'), ((2433, 2451), 'numpy.array', 'np.array', (['(v1, v2)'], {}), '((v1, v2))\n', (2441, 2451), True, 'import numpy as np\n'), ((2504, 2518), 'numpy.dot', 'np.dot', (['e.T', 'e'], {}), '(e.T, e)\n', (2510, 2518), True, 'import numpy as np\n')] |
import numpy as np
import numpy.testing as npt
from stumpy import stamp, core
import pytest
import naive
# Pairs of (T_A, T_B) time series used to parametrize the tests below:
# one small fixed pair and one random pair of differing lengths.
test_data = [
    (
        np.array([9, 8100, -60, 7], dtype=np.float64),
        np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
    ),
    (
        np.random.uniform(-1000, 1000, [8]).astype(np.float64),
        np.random.uniform(-1000, 1000, [64]).astype(np.float64),
    ),
]

# Values and index patterns used to corrupt inputs in the NaN/inf tests.
substitution_values = [np.nan, np.inf]
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stamp_mass_PI(T_A, T_B):
    """Compare stamp._mass_PI (plain/left/right) against the naive reference."""
    m = 3
    trivial_idx = 2
    excl_zone = int(np.ceil(m / 2))
    query = T_B[trivial_idx : trivial_idx + m]
    M_T, Σ_T = core.compute_mean_std(T_B, m)

    ref_P, ref_I, ref_left_I, ref_right_I = naive.mass(
        query, T_B, m, trivial_idx=trivial_idx, excl_zone=excl_zone,
        ignore_trivial=True
    )

    comp_P, comp_I = stamp._mass_PI(
        query, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=excl_zone
    )
    npt.assert_almost_equal(ref_P, comp_P)
    npt.assert_almost_equal(ref_I, comp_I)

    _, comp_left_I = stamp._mass_PI(
        query, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=excl_zone,
        left=True
    )
    npt.assert_almost_equal(ref_left_I, comp_left_I)

    _, comp_right_I = stamp._mass_PI(
        query, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=excl_zone,
        right=True
    )
    npt.assert_almost_equal(ref_right_I, comp_right_I)
def test_stamp_int_input():
    """stamp.stamp must reject integer (non-float) input with a TypeError."""
    with pytest.raises(TypeError):
        T = np.arange(10)
        # Bug fix: `stamp` here is the *module* stumpy.stamp (see the
        # `from stumpy import stamp` import), so calling it directly raised
        # "module object is not callable" and the test passed for the wrong
        # reason.  Call the actual stamp.stamp function so the TypeError
        # comes from the input-dtype check.
        stamp.stamp(T, T, 5, ignore_trivial=True)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stamp_self_join(T_A, T_B):
    """Self-join matrix profile matches the naive reference."""
    m = 3
    excl_zone = int(np.ceil(m / 2))
    expected = naive.stamp(T_B, m, exclusion_zone=excl_zone)
    actual = stamp.stamp(T_B, T_B, m, ignore_trivial=True)
    naive.replace_inf(expected)
    naive.replace_inf(actual)
    npt.assert_almost_equal(expected[:, :2], actual)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stamp_A_B_join(T_A, T_B):
    """A-B join matrix profile matches the naive reference."""
    m = 3
    expected = naive.stamp(T_A, m, T_B=T_B)
    actual = stamp.stamp(T_A, T_B, m)
    naive.replace_inf(expected)
    naive.replace_inf(actual)
    npt.assert_almost_equal(expected[:, :2], actual)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_stamp_nan_inf_self_join(T_A, T_B, substitute_B, substitution_locations):
    """Self-join stays correct when NaN/inf values corrupt the series."""
    m = 3
    excl_zone = int(np.ceil(m / 2))
    T_B_sub = T_B.copy()

    for loc in substitution_locations:
        # Reset the series, then inject the bad value at this location.
        T_B_sub[:] = T_B[:]
        T_B_sub[loc] = substitute_B

        expected = naive.stamp(T_B_sub, m, exclusion_zone=excl_zone)
        actual = stamp.stamp(T_B_sub, T_B_sub, m, ignore_trivial=True)
        naive.replace_inf(expected)
        naive.replace_inf(actual)
        npt.assert_almost_equal(expected[:, :2], actual)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_A", substitution_values)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_stamp_nan_inf_A_B_join(
    T_A, T_B, substitute_A, substitute_B, substitution_locations
):
    """A-B join stays correct when NaN/inf values corrupt either series."""
    m = 3
    T_A_sub = T_A.copy()
    T_B_sub = T_B.copy()

    for loc_B in substitution_locations:
        for loc_A in substitution_locations:
            # Reset both series, then inject the bad values.
            T_A_sub[:] = T_A[:]
            T_B_sub[:] = T_B[:]
            T_A_sub[loc_A] = substitute_A
            T_B_sub[loc_B] = substitute_B

            expected = naive.stamp(T_A_sub, m, T_B=T_B_sub)
            actual = stamp.stamp(T_A_sub, T_B_sub, m)
            naive.replace_inf(expected)
            naive.replace_inf(actual)
            npt.assert_almost_equal(expected[:, :2], actual)
def test_stamp_nan_zero_mean_self_join():
    """Self-join on a series containing inf within a zero-mean neighborhood."""
    T = np.array([-1, 0, 1, np.inf, 1, 0, -1])
    m = 3
    excl_zone = int(np.ceil(m / 2))
    expected = naive.stamp(T, m, exclusion_zone=excl_zone)
    actual = stamp.stamp(T, T, m, ignore_trivial=True)
    naive.replace_inf(expected)
    naive.replace_inf(actual)
    npt.assert_almost_equal(expected[:, :2], actual)
| [
"naive.mass",
"numpy.ceil",
"stumpy.core.compute_mean_std",
"naive.stamp",
"pytest.mark.parametrize",
"numpy.testing.assert_almost_equal",
"stumpy.stamp._mass_PI",
"naive.replace_inf",
"numpy.array",
"pytest.raises",
"stumpy.stamp.stamp",
"stumpy.stamp",
"numpy.random.uniform",
"numpy.aran... | [((514, 560), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T_A, T_B"""', 'test_data'], {}), "('T_A, T_B', test_data)\n", (537, 560), False, 'import pytest\n'), ((1595, 1641), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T_A, T_B"""', 'test_data'], {}), "('T_A, T_B', test_data)\n", (1618, 1641), False, 'import pytest\n'), ((1949, 1995), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T_A, T_B"""', 'test_data'], {}), "('T_A, T_B', test_data)\n", (1972, 1995), False, 'import pytest\n'), ((2238, 2284), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T_A, T_B"""', 'test_data'], {}), "('T_A, T_B', test_data)\n", (2261, 2284), False, 'import pytest\n'), ((2286, 2346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitute_B"""', 'substitution_values'], {}), "('substitute_B', substitution_values)\n", (2309, 2346), False, 'import pytest\n'), ((2348, 2421), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_locations"""', 'substitution_locations'], {}), "('substitution_locations', substitution_locations)\n", (2371, 2421), False, 'import pytest\n'), ((2982, 3028), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T_A, T_B"""', 'test_data'], {}), "('T_A, T_B', test_data)\n", (3005, 3028), False, 'import pytest\n'), ((3030, 3090), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitute_A"""', 'substitution_values'], {}), "('substitute_A', substitution_values)\n", (3053, 3090), False, 'import pytest\n'), ((3092, 3152), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitute_B"""', 'substitution_values'], {}), "('substitute_B', substitution_values)\n", (3115, 3152), False, 'import pytest\n'), ((3154, 3227), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_locations"""', 'substitution_locations'], {}), "('substitution_locations', substitution_locations)\n", (3177, 3227), False, 'import pytest\n'), ((715, 744), 
'stumpy.core.compute_mean_std', 'core.compute_mean_std', (['T_B', 'm'], {}), '(T_B, m)\n', (736, 744), False, 'from stumpy import stamp, core\n'), ((788, 875), 'naive.mass', 'naive.mass', (['Q', 'T_B', 'm'], {'trivial_idx': 'trivial_idx', 'excl_zone': 'zone', 'ignore_trivial': '(True)'}), '(Q, T_B, m, trivial_idx=trivial_idx, excl_zone=zone,\n ignore_trivial=True)\n', (798, 875), False, 'import naive\n'), ((907, 980), 'stumpy.stamp._mass_PI', 'stamp._mass_PI', (['Q', 'T_B', 'M_T', 'Σ_T'], {'trivial_idx': 'trivial_idx', 'excl_zone': 'zone'}), '(Q, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=zone)\n', (921, 980), False, 'from stumpy import stamp, core\n'), ((1000, 1038), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (1023, 1038), True, 'import numpy.testing as npt\n'), ((1043, 1081), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (1066, 1081), True, 'import numpy.testing as npt\n'), ((1114, 1202), 'stumpy.stamp._mass_PI', 'stamp._mass_PI', (['Q', 'T_B', 'M_T', 'Σ_T'], {'trivial_idx': 'trivial_idx', 'excl_zone': 'zone', 'left': '(True)'}), '(Q, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=zone,\n left=True)\n', (1128, 1202), False, 'from stumpy import stamp, core\n'), ((1218, 1266), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_left_I', 'comp_left_I'], {}), '(ref_left_I, comp_left_I)\n', (1241, 1266), True, 'import numpy.testing as npt\n'), ((1301, 1390), 'stumpy.stamp._mass_PI', 'stamp._mass_PI', (['Q', 'T_B', 'M_T', 'Σ_T'], {'trivial_idx': 'trivial_idx', 'excl_zone': 'zone', 'right': '(True)'}), '(Q, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=zone,\n right=True)\n', (1315, 1390), False, 'from stumpy import stamp, core\n'), ((1406, 1456), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_right_I', 'comp_right_I'], {}), '(ref_right_I, comp_right_I)\n', (1429, 1456), True, 
'import numpy.testing as npt\n'), ((1732, 1772), 'naive.stamp', 'naive.stamp', (['T_B', 'm'], {'exclusion_zone': 'zone'}), '(T_B, m, exclusion_zone=zone)\n', (1743, 1772), False, 'import naive\n'), ((1787, 1832), 'stumpy.stamp.stamp', 'stamp.stamp', (['T_B', 'T_B', 'm'], {'ignore_trivial': '(True)'}), '(T_B, T_B, m, ignore_trivial=True)\n', (1798, 1832), False, 'from stumpy import stamp, core\n'), ((1837, 1862), 'naive.replace_inf', 'naive.replace_inf', (['ref_mp'], {}), '(ref_mp)\n', (1854, 1862), False, 'import naive\n'), ((1867, 1893), 'naive.replace_inf', 'naive.replace_inf', (['comp_mp'], {}), '(comp_mp)\n', (1884, 1893), False, 'import naive\n'), ((1898, 1945), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_mp[:, :2]', 'comp_mp'], {}), '(ref_mp[:, :2], comp_mp)\n', (1921, 1945), True, 'import numpy.testing as npt\n'), ((2054, 2082), 'naive.stamp', 'naive.stamp', (['T_A', 'm'], {'T_B': 'T_B'}), '(T_A, m, T_B=T_B)\n', (2065, 2082), False, 'import naive\n'), ((2097, 2121), 'stumpy.stamp.stamp', 'stamp.stamp', (['T_A', 'T_B', 'm'], {}), '(T_A, T_B, m)\n', (2108, 2121), False, 'from stumpy import stamp, core\n'), ((2126, 2151), 'naive.replace_inf', 'naive.replace_inf', (['ref_mp'], {}), '(ref_mp)\n', (2143, 2151), False, 'import naive\n'), ((2156, 2182), 'naive.replace_inf', 'naive.replace_inf', (['comp_mp'], {}), '(comp_mp)\n', (2173, 2182), False, 'import naive\n'), ((2187, 2234), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_mp[:, :2]', 'comp_mp'], {}), '(ref_mp[:, :2], comp_mp)\n', (2210, 2234), True, 'import numpy.testing as npt\n'), ((4000, 4038), 'numpy.array', 'np.array', (['[-1, 0, 1, np.inf, 1, 0, -1]'], {}), '([-1, 0, 1, np.inf, 1, 0, -1])\n', (4008, 4038), True, 'import numpy as np\n'), ((4094, 4132), 'naive.stamp', 'naive.stamp', (['T', 'm'], {'exclusion_zone': 'zone'}), '(T, m, exclusion_zone=zone)\n', (4105, 4132), False, 'import naive\n'), ((4147, 4188), 'stumpy.stamp.stamp', 'stamp.stamp', (['T', 
'T', 'm'], {'ignore_trivial': '(True)'}), '(T, T, m, ignore_trivial=True)\n', (4158, 4188), False, 'from stumpy import stamp, core\n'), ((4194, 4219), 'naive.replace_inf', 'naive.replace_inf', (['ref_mp'], {}), '(ref_mp)\n', (4211, 4219), False, 'import naive\n'), ((4224, 4250), 'naive.replace_inf', 'naive.replace_inf', (['comp_mp'], {}), '(comp_mp)\n', (4241, 4250), False, 'import naive\n'), ((4255, 4302), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_mp[:, :2]', 'comp_mp'], {}), '(ref_mp[:, :2], comp_mp)\n', (4278, 4302), True, 'import numpy.testing as npt\n'), ((134, 179), 'numpy.array', 'np.array', (['[9, 8100, -60, 7]'], {'dtype': 'np.float64'}), '([9, 8100, -60, 7], dtype=np.float64)\n', (142, 179), True, 'import numpy as np\n'), ((189, 249), 'numpy.array', 'np.array', (['[584, -11, 23, 79, 1001, 0, -19]'], {'dtype': 'np.float64'}), '([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64)\n', (197, 249), True, 'import numpy as np\n'), ((640, 654), 'numpy.ceil', 'np.ceil', (['(m / 2)'], {}), '(m / 2)\n', (647, 654), True, 'import numpy as np\n'), ((1496, 1520), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1509, 1520), False, 'import pytest\n'), ((1534, 1547), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1543, 1547), True, 'import numpy as np\n'), ((1556, 1591), 'stumpy.stamp', 'stamp', (['T', 'T', '(5)'], {'ignore_trivial': '(True)'}), '(T, T, 5, ignore_trivial=True)\n', (1561, 1591), False, 'from stumpy import stamp, core\n'), ((1703, 1717), 'numpy.ceil', 'np.ceil', (['(m / 2)'], {}), '(m / 2)\n', (1710, 1717), True, 'import numpy as np\n'), ((2737, 2781), 'naive.stamp', 'naive.stamp', (['T_B_sub', 'm'], {'exclusion_zone': 'zone'}), '(T_B_sub, m, exclusion_zone=zone)\n', (2748, 2781), False, 'import naive\n'), ((2800, 2853), 'stumpy.stamp.stamp', 'stamp.stamp', (['T_B_sub', 'T_B_sub', 'm'], {'ignore_trivial': '(True)'}), '(T_B_sub, T_B_sub, m, ignore_trivial=True)\n', (2811, 2853), False, 'from 
stumpy import stamp, core\n'), ((2862, 2887), 'naive.replace_inf', 'naive.replace_inf', (['ref_mp'], {}), '(ref_mp)\n', (2879, 2887), False, 'import naive\n'), ((2896, 2922), 'naive.replace_inf', 'naive.replace_inf', (['comp_mp'], {}), '(comp_mp)\n', (2913, 2922), False, 'import naive\n'), ((2931, 2978), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_mp[:, :2]', 'comp_mp'], {}), '(ref_mp[:, :2], comp_mp)\n', (2954, 2978), True, 'import numpy.testing as npt\n'), ((4065, 4079), 'numpy.ceil', 'np.ceil', (['(m / 2)'], {}), '(m / 2)\n', (4072, 4079), True, 'import numpy as np\n'), ((2704, 2718), 'numpy.ceil', 'np.ceil', (['(m / 2)'], {}), '(m / 2)\n', (2711, 2718), True, 'import numpy as np\n'), ((3719, 3755), 'naive.stamp', 'naive.stamp', (['T_A_sub', 'm'], {'T_B': 'T_B_sub'}), '(T_A_sub, m, T_B=T_B_sub)\n', (3730, 3755), False, 'import naive\n'), ((3778, 3810), 'stumpy.stamp.stamp', 'stamp.stamp', (['T_A_sub', 'T_B_sub', 'm'], {}), '(T_A_sub, T_B_sub, m)\n', (3789, 3810), False, 'from stumpy import stamp, core\n'), ((3823, 3848), 'naive.replace_inf', 'naive.replace_inf', (['ref_mp'], {}), '(ref_mp)\n', (3840, 3848), False, 'import naive\n'), ((3861, 3887), 'naive.replace_inf', 'naive.replace_inf', (['comp_mp'], {}), '(comp_mp)\n', (3878, 3887), False, 'import naive\n'), ((3900, 3947), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_mp[:, :2]', 'comp_mp'], {}), '(ref_mp[:, :2], comp_mp)\n', (3923, 3947), True, 'import numpy.testing as npt\n'), ((272, 307), 'numpy.random.uniform', 'np.random.uniform', (['(-1000)', '(1000)', '[8]'], {}), '(-1000, 1000, [8])\n', (289, 307), True, 'import numpy as np\n'), ((336, 372), 'numpy.random.uniform', 'np.random.uniform', (['(-1000)', '(1000)', '[64]'], {}), '(-1000, 1000, [64])\n', (353, 372), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
from rdkit import Chem
from rdkit import rdBase
from rdkit.Chem import AllChem
from rdkit import DataStructs
import rdkit.Chem.QED as QED
import scripts.sascorer as sascorer
import os
import pickle
import torch
from chemprop.train import predict
from chemprop.data import MoleculeDataset, MoleculeDataLoader
from chemprop.data.utils import get_data, get_data_from_smiles
from chemprop.utils import load_args, load_checkpoint, load_scalers
rdBase.DisableLog('rdApp.error')
class gsk3_model():
    """Scores based on an ECFP classifier for activity."""
    kwargs = ["clf_path"]
    clf_path = 'data/gsk3/gsk3.pkl'

    def __init__(self):
        # Load the pre-trained sklearn classifier from disk.
        with open(self.clf_path, "rb") as f:
            self.clf = pickle.load(f)

    def __call__(self, smiles_list):
        """Return activity probabilities for each SMILES; invalid SMILES score 0."""
        fingerprints = []
        valid_flags = []
        for smiles in smiles_list:
            mol = Chem.MolFromSmiles(smiles)
            valid_flags.append(0 if mol is None else 1)
            if mol is None:
                fingerprints.append(np.zeros((1, 2048)))
            else:
                fingerprints.append(gsk3_model.fingerprints_from_mol(mol))
        stacked = np.concatenate(fingerprints, axis=0)
        # Probability of the positive (active) class, zeroed for invalid input.
        proba = self.clf.predict_proba(stacked)[:, 1]
        return np.float32(proba * np.array(valid_flags))

    @classmethod
    def fingerprints_from_mol(cls, mol):  # use ECFP4
        """Morgan fingerprint (radius 2, 2048 bits) as a (1, 2048) numpy array."""
        bit_vect = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)
        arr = np.zeros((1,))
        DataStructs.ConvertToNumpyArray(bit_vect, arr)
        return arr.reshape(1, -1)
class jnk3_model():
    """Scores based on an ECFP classifier for activity."""
    kwargs = ["clf_path"]
    clf_path = 'data/jnk3/jnk3.pkl'

    def __init__(self):
        # Deserialize the pre-trained sklearn classifier.
        with open(self.clf_path, "rb") as f:
            self.clf = pickle.load(f)

    def __call__(self, smiles_list):
        """Return activity probabilities for each SMILES; invalid SMILES score 0."""
        mols = [Chem.MolFromSmiles(s) for s in smiles_list]
        mask = np.array([int(m is not None) for m in mols])
        fps = np.concatenate(
            [jnk3_model.fingerprints_from_mol(m) if m else np.zeros((1, 2048))
             for m in mols],
            axis=0,
        )
        # Positive-class probability, zeroed for invalid SMILES via the mask.
        scores = self.clf.predict_proba(fps)[:, 1] * mask
        return np.float32(scores)

    @classmethod
    def fingerprints_from_mol(cls, mol):  # use ECFP4
        """Morgan fingerprint (radius 2, 2048 bits) as a (1, 2048) numpy array."""
        fv = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)
        out = np.zeros((1,))
        DataStructs.ConvertToNumpyArray(fv, out)
        return out.reshape(1, -1)
class qed_func():
    """Callable returning the QED drug-likeness score per SMILES."""

    def __call__(self, smiles_list):
        """QED score for each SMILES; unparsable SMILES get 0."""
        values = []
        for smi in smiles_list:
            mol = Chem.MolFromSmiles(smi)
            values.append(0 if mol is None else QED.qed(mol))
        return np.float32(values)
class sa_func():
    """Callable returning the synthetic-accessibility score per SMILES."""

    def __call__(self, smiles_list):
        """SA score for each SMILES; unparsable SMILES get 100 (worst)."""
        values = []
        for smi in smiles_list:
            mol = Chem.MolFromSmiles(smi)
            values.append(100 if mol is None else sascorer.calculateScore(mol))
        return np.float32(values)
class chemprop_model():
    """Ensemble of chemprop checkpoints used as a property scorer."""

    def __init__(self, checkpoint_dir, features_generator=None):
        """Load every *.pt checkpoint under checkpoint_dir with its scalers."""
        self.features_generator = features_generator
        self.checkpoints, self.scalers, self.features_scalers = [], [], []
        for root, _, files in os.walk(checkpoint_dir):
            for fname in files:
                if not fname.endswith('.pt'):
                    continue
                path = os.path.join(root, fname)
                scaler, features_scaler, _, _ = load_scalers(path)
                self.scalers.append(scaler)
                self.features_scalers.append(features_scaler)
                self.checkpoints.append(
                    load_checkpoint(path, device=torch.device('cpu')))

    def __call__(self, smiles, batch_size=500):
        """Average ensemble predictions over `smiles`; invalid SMILES get 0.0."""
        test_data = get_data_from_smiles(
            smiles=[[s] for s in smiles],
            skip_invalid_smiles=False,
            features_generator=self.features_generator
        )
        valid_indices = [i for i in range(len(test_data))
                         if test_data[i].mol[0] is not None]
        full_data = test_data
        test_data = MoleculeDataset([test_data[i] for i in valid_indices])
        test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=batch_size)
        sum_preds = np.zeros((len(test_data), 1))
        for model, scaler, features_scaler in zip(
                self.checkpoints, self.scalers, self.features_scalers):
            # Undo any per-model normalization from the previous iteration.
            test_data.reset_features_and_targets()
            if features_scaler is not None:
                test_data.normalize_features(features_scaler)
            sum_preds += np.array(predict(
                model=model,
                data_loader=test_data_loader,
                scaler=scaler
            ))
        # Ensemble average over all checkpoints.
        avg_preds = (sum_preds / len(self.checkpoints)).squeeze(-1).tolist()
        # Re-expand to the original ordering; invalid entries stay 0.0.
        full_preds = [0.0] * len(full_data)
        for out_idx, src_idx in enumerate(valid_indices):
            full_preds[src_idx] = avg_preds[out_idx]
        return np.array(full_preds, dtype=np.float32)
def get_scoring_function(prop_name, features_generator=None):
    """Function that initializes and returns a scoring function by name"""
    # Known property names map to their scorer classes; anything else is
    # treated as a chemprop checkpoint directory path.
    builtin = {
        'jnk3': jnk3_model,
        'gsk3': gsk3_model,
        'qed': qed_func,
        'sa': sa_func,
    }
    if prop_name in builtin:
        return builtin[prop_name]()
    return chemprop_model(prop_name, features_generator)
if __name__ == "__main__":
    import sys
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('--prop', required=True)
    parser.add_argument('--features_generator', default=None, nargs='*')
    args = parser.parse_args()

    # One scoring function per comma-separated property name.
    funcs = [get_scoring_function(name, args.features_generator)
             for name in args.prop.split(',')]
    # Each stdin line: "<id> <smiles> ..."; only the first two columns are kept.
    data = [line.split()[:2] for line in sys.stdin]
    all_x, all_y = zip(*data)
    props = [func(all_y) for func in funcs]
    col_list = [all_x, all_y] + props
    for row in zip(*col_list):
        print(*row)
| [
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"numpy.array",
"scripts.sascorer.calculateScore",
"os.walk",
"argparse.ArgumentParser",
"chemprop.data.MoleculeDataset",
"chemprop.train.predict",
"numpy.concatenate",
"rdkit.rdBase.DisableLog",
"rdkit.Chem.QED.qed",
"chemprop.data.MoleculeData... | [((530, 562), 'rdkit.rdBase.DisableLog', 'rdBase.DisableLog', (['"""rdApp.error"""'], {}), "('rdApp.error')\n", (547, 562), False, 'from rdkit import rdBase\n'), ((5949, 5965), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5963, 5965), False, 'from argparse import ArgumentParser\n'), ((1158, 1185), 'numpy.concatenate', 'np.concatenate', (['fps'], {'axis': '(0)'}), '(fps, axis=0)\n', (1172, 1185), True, 'import numpy as np\n'), ((1293, 1311), 'numpy.float32', 'np.float32', (['scores'], {}), '(scores)\n', (1303, 1311), True, 'import numpy as np\n'), ((1407, 1464), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['mol', '(2)'], {'nBits': '(2048)'}), '(mol, 2, nBits=2048)\n', (1444, 1464), False, 'from rdkit.Chem import AllChem\n'), ((1484, 1498), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (1492, 1498), True, 'import numpy as np\n'), ((1507, 1562), 'rdkit.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['features_vec', 'features'], {}), '(features_vec, features)\n', (1538, 1562), False, 'from rdkit import DataStructs\n'), ((2201, 2228), 'numpy.concatenate', 'np.concatenate', (['fps'], {'axis': '(0)'}), '(fps, axis=0)\n', (2215, 2228), True, 'import numpy as np\n'), ((2336, 2354), 'numpy.float32', 'np.float32', (['scores'], {}), '(scores)\n', (2346, 2354), True, 'import numpy as np\n'), ((2450, 2507), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['mol', '(2)'], {'nBits': '(2048)'}), '(mol, 2, nBits=2048)\n', (2487, 2507), False, 'from rdkit.Chem import AllChem\n'), ((2527, 2541), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (2535, 2541), True, 'import numpy as np\n'), ((2550, 2605), 'rdkit.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['features_vec', 'features'], {}), '(features_vec, features)\n', (2581, 2605), False, 'from rdkit import DataStructs\n'), 
((2941, 2959), 'numpy.float32', 'np.float32', (['scores'], {}), '(scores)\n', (2951, 2959), True, 'import numpy as np\n'), ((3273, 3291), 'numpy.float32', 'np.float32', (['scores'], {}), '(scores)\n', (3283, 3291), True, 'import numpy as np\n'), ((3545, 3568), 'os.walk', 'os.walk', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3552, 3568), False, 'import os\n'), ((4085, 4211), 'chemprop.data.utils.get_data_from_smiles', 'get_data_from_smiles', ([], {'smiles': '[[s] for s in smiles]', 'skip_invalid_smiles': '(False)', 'features_generator': 'self.features_generator'}), '(smiles=[[s] for s in smiles], skip_invalid_smiles=\n False, features_generator=self.features_generator)\n', (4105, 4211), False, 'from chemprop.data.utils import get_data, get_data_from_smiles\n'), ((4397, 4451), 'chemprop.data.MoleculeDataset', 'MoleculeDataset', (['[test_data[i] for i in valid_indices]'], {}), '([test_data[i] for i in valid_indices])\n', (4412, 4451), False, 'from chemprop.data import MoleculeDataset, MoleculeDataLoader\n'), ((4479, 4539), 'chemprop.data.MoleculeDataLoader', 'MoleculeDataLoader', ([], {'dataset': 'test_data', 'batch_size': 'batch_size'}), '(dataset=test_data, batch_size=batch_size)\n', (4497, 4539), False, 'from chemprop.data import MoleculeDataset, MoleculeDataLoader\n'), ((5381, 5419), 'numpy.array', 'np.array', (['full_preds'], {'dtype': 'np.float32'}), '(full_preds, dtype=np.float32)\n', (5389, 5419), True, 'import numpy as np\n'), ((800, 814), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (811, 814), False, 'import pickle\n'), ((954, 980), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (972, 980), False, 'from rdkit import Chem\n'), ((1263, 1277), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (1271, 1277), True, 'import numpy as np\n'), ((1843, 1857), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1854, 1857), False, 'import pickle\n'), ((1997, 2023), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', 
(['smiles'], {}), '(smiles)\n', (2015, 2023), False, 'from rdkit import Chem\n'), ((2306, 2320), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (2314, 2320), True, 'import numpy as np\n'), ((2776, 2802), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (2794, 2802), False, 'from rdkit import Chem\n'), ((3090, 3116), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (3108, 3116), False, 'from rdkit import Chem\n'), ((4881, 4946), 'chemprop.train.predict', 'predict', ([], {'model': 'model', 'data_loader': 'test_data_loader', 'scaler': 'scaler'}), '(model=model, data_loader=test_data_loader, scaler=scaler)\n', (4888, 4946), False, 'from chemprop.train import predict\n'), ((5034, 5055), 'numpy.array', 'np.array', (['model_preds'], {}), '(model_preds)\n', (5042, 5055), True, 'import numpy as np\n'), ((1096, 1115), 'numpy.zeros', 'np.zeros', (['(1, 2048)'], {}), '((1, 2048))\n', (1104, 1115), True, 'import numpy as np\n'), ((2139, 2158), 'numpy.zeros', 'np.zeros', (['(1, 2048)'], {}), '((1, 2048))\n', (2147, 2158), True, 'import numpy as np\n'), ((2912, 2924), 'rdkit.Chem.QED.qed', 'QED.qed', (['mol'], {}), '(mol)\n', (2919, 2924), True, 'import rdkit.Chem.QED as QED\n'), ((3228, 3256), 'scripts.sascorer.calculateScore', 'sascorer.calculateScore', (['mol'], {}), '(mol)\n', (3251, 3256), True, 'import scripts.sascorer as sascorer\n'), ((3672, 3697), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (3684, 3697), False, 'import os\n'), ((3751, 3770), 'chemprop.utils.load_scalers', 'load_scalers', (['fname'], {}), '(fname)\n', (3763, 3770), False, 'from chemprop.utils import load_args, load_checkpoint, load_scalers\n'), ((3944, 3963), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3956, 3963), False, 'import torch\n')] |
try: import cPickle as pickle
except: import pickle
import os
if os.name == 'posix' and 'DISPLAY' not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
import itertools
from matplotlib import rc
import random
import seaborn
import numpy as np
import pandas as pd
import pdb
from argparse import ArgumentParser
# Global matplotlib/seaborn styling shared by every plot in this module.
font = {'family': 'serif', 'serif': ['computer modern roman']}
rc('text', usetex=False)
rc('font', weight='bold')
rc('font', size=20)
rc('lines', markersize=10)
rc('xtick', labelsize=12)
rc('ytick', labelsize=12)
rc('axes', labelsize='x-large')
rc('axes', labelweight='bold')
rc('axes', titlesize='x-large')
rc('axes', linewidth=3)
plt.rc('font', **font)
seaborn.set_style("darkgrid")
# Figure size keyed by the number of subplot columns (used in plot_hyp_data2).
figsize_d = {2: (5, 2),
             4: (9, 2)}
# Method identifier -> display name used in plot legends.
m_name_l = {"dynAE": "DynAE",
            "dynRNN": "DynRNN",
            "rand": "RandDynamic",
            }
# Experiment code -> primary metric column name in the result dataframes.
expMap = {"gr": "GR MAP", "lp": "LP MAP",
          "nc": "NC F1 score"}
# Experiment code -> secondary metric column name (P@100 for link prediction).
expMap2 = {"gr": "GR MAP", "lp": "LP P@100",
           "nc": "NC F1 score"}
def get_node_color(node_community):
    """Map each community index to a matplotlib named color."""
    palette = [name for name, _ in matplotlib.colors.cnames.items()]
    return [palette[c] for c in node_community]
def plot(x_s, y_s, fig_n, x_lab, y_lab,
         file_save_path, title, legendLabels=None, show=False):
    """Draw one line per (x, y) series on figure `fig_n` and save it to disk.

    x_s/y_s are parallel lists of series; an optional legend and interactive
    display are controlled by `legendLabels` and `show`.
    """
    plt.rcParams.update({'font.size': 16, 'font.weight': 'bold'})
    markers = ['o', '*', 'v', 'D', '<', 's', '+', '^', '>']
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    plt.figure(fig_n)
    series = []
    for idx in range(len(x_s)):
        handle = plt.plot(x_s[idx], y_s[idx], color=colors[idx],
                          linewidth=2, marker=markers[idx],
                          markersize=8)
        series.append(handle)
    plt.xlabel(x_lab, fontsize=16, fontweight='bold')
    plt.ylabel(y_lab, fontsize=16, fontweight='bold')
    plt.title(title, fontsize=16, fontweight='bold')
    if legendLabels:
        # plt.plot returns a list of Line2D; legend wants the line itself.
        plt.legend([s[0] for s in series], legendLabels)
    plt.savefig(file_save_path)
    if show:
        plt.show()
def plot_ts(ts_df, plot_title, eventDates,
            eventLabels=None, save_file_name=None,
            xLabel=None, yLabel=None, show=False):
    """Plot a time-series dataframe, marking event dates with vertical lines.

    With `eventLabels`, each event gets its own color and a legend entry;
    otherwise all events are drawn as plain red dashed lines.
    """
    ax = ts_df.plot(title=plot_title, marker='*',
                    markerfacecolor='red', markersize=10,
                    linestyle='solid')
    colors = ['r', 'g', 'c', 'm', 'y', 'b', 'k']
    if eventLabels:
        for idx, eventDate in enumerate(eventDates):
            ax.axvline(eventDate, color=colors[idx],
                       linestyle='--', lw=2, label=eventLabels[idx])
        ax.legend()
    else:
        for eventDate in eventDates:
            # Show event as a red vertical line
            ax.axvline(eventDate, color='r', linestyle='--', lw=2)
    if xLabel:
        ax.set_xlabel(xLabel, fontweight='bold')
    if yLabel:
        ax.set_ylabel(yLabel, fontweight='bold')
    fig = ax.get_figure()
    if save_file_name:
        fig.savefig(save_file_name, bbox_inches='tight')
    if show:
        fig.show()
def turn_latex(key_str):
    """Wrap a hyper-parameter name for LaTeX rendering.

    Greek-letter names get a backslash prefix (e.g. 'mu' -> '$\\mu$');
    anything else is upper-cased (e.g. 'd' -> '$D$').
    """
    greek = ('mu', 'rho', 'beta', 'alpha', 'gamma')
    if key_str in greek:
        return '$\\%s$' % key_str
    return '$%s$' % key_str.upper()
def plot_hyp_data2(hyp_keys, exp_param,
                   meths, data,
                   s_sch="u_rand",
                   dim=2):
    """Compare methods' hyper-parameter sensitivity on one data set.

    For each experiment in `exp_param`, loads per-method tuning results from
    intermediate/<data>_<meth>_<exp>_<s_sch>_dim_<dim>_data_hyp.h5 and, for
    every hyper-parameter in `hyp_keys` with at least 3 distinct values, draws
    a row of subplots (one per combination of the remaining hyper-parameters).
    Two figure files are written under plots/data_hyp/: one for the primary
    metric (expMap) and one for the secondary metric (expMap2, e.g. P@100).
    """
    # Compact LaTeX styling for the dense multi-panel figures.
    font = {'family': 'serif', 'serif': ['computer modern roman']}
    rc('text', usetex=True)
    rc('font', weight='bold')
    rc('font', size=8)
    rc('lines', markersize=2.5)
    rc('lines', linewidth=0.5)
    rc('xtick', labelsize=6)
    rc('ytick', labelsize=6)
    rc('axes', labelsize='small')
    rc('axes', labelweight='bold')
    rc('axes', titlesize='small')
    rc('axes', linewidth=1)
    plt.rc('font', **font)
    seaborn.set_style("darkgrid")
    for exp in exp_param:
        df_all = pd.DataFrame()
        n_meths = 0
        for meth in meths:
            try:
                df = pd.read_hdf(
                    "intermediate/%s_%s_%s_%s_dim_%d_data_hyp.h5" % (data, meth, exp, s_sch, dim),
                    "df"
                )
                n_meths += 1
            except Exception:
                # Best effort: missing/unreadable result files are skipped.
                print('%s_%s_%s_%s_dim_%d_data_hyp.h5 not found. Ignoring data set' % (data, meth, exp, s_sch, dim))
                continue
            # Check if experiment is in the dataframe
            if expMap[exp] not in df:
                continue
            df["Method"] = m_name_l[meth]
            df_all = df_all.append(df).reset_index()
            df_all = df_all.drop(['index'], axis=1)
        if df_all.empty:
            continue
        df = df_all
        # Escape underscores so column names render correctly with usetex.
        col_names = df.columns
        col_rename_d = {}
        for col_name in col_names:
            col_rename_d[col_name] = col_name.replace('_', '\ ')
        df.rename(columns=col_rename_d, inplace=True)
        for hyp_key in hyp_keys:
            df_trun = df[hyp_keys + ["Round Id", expMap[exp], expMap2[exp], "Method"]]
            df_grouped = df_trun
            rem_hyp_keys = list(set(hyp_keys) - {hyp_key})
            # One subplot per combination of the remaining hyper-parameters.
            val_lists = [df_grouped[r_k].unique() for r_k in rem_hyp_keys]
            n_cols = len(list(itertools.product(*val_lists)))
            if len(df_grouped[hyp_key].unique()) < 3:
                continue
            plot_shape = (1, n_cols)
            fin1, axarray1 = plt.subplots(1, n_cols, figsize=figsize_d[n_cols])
            fin2, axarray2 = plt.subplots(1, n_cols, figsize=figsize_d[n_cols])
            for plt_idx, hyp_vals in enumerate(itertools.product(*val_lists)):
                plot_idx = np.unravel_index(plt_idx, plot_shape)
                hyp_dict = dict(zip(rem_hyp_keys, hyp_vals))
                # BUG FIX: dict.iteritems() is Python 2 only; .items() works
                # on both Python 2 and 3.
                hyp_str = ', '.join(
                    "%s:%r" % (turn_latex(key), val)
                    for (key, val) in hyp_dict.items()
                    if len(df_grouped[key].unique()) > 1
                )
                df_temp = df_grouped
                for hyp_idx, hyp_val in enumerate(hyp_vals):
                    df_temp = df_temp[df_temp[rem_hyp_keys[hyp_idx]] == hyp_val]
                if len(df_temp[hyp_key].unique()) < 3:
                    continue
                print('Plotting %s: %s' % (exp, hyp_key))
                try:
                    ax = seaborn.tsplot(time=hyp_key, value=expMap[exp],
                                        unit="Round Id", condition="Method",
                                        data=df_temp,
                                        ax=axarray1[plot_idx[0], plot_idx[1]])
                    if plot_idx[1]:
                        ax.set_ylabel('')
                    if not plot_idx[0]:
                        ax.set_xlabel('')
                except IndexError:
                    # plt.subplots(1, n) yields a 1-D axes array, so the 2-D
                    # indexing above raises IndexError; index it flat instead.
                    # BUG FIX: removed a leftover `pdb.set_trace()` debugger
                    # hook here -- a plotting failure now propagates normally.
                    ax = seaborn.tsplot(time=hyp_key, value=expMap[exp],
                                        unit="Round Id", condition="Method",
                                        data=df_temp,
                                        ax=axarray1[plt_idx])
                    if plt_idx:
                        ax.set_ylabel('')
                ax.set_title(hyp_str)
                hyp_values = df_grouped[hyp_key].unique()
                # Heuristic: geometrically spaced values => log-scaled x-axis.
                l_diff = hyp_values[-1] - hyp_values[-2]
                f_diff = hyp_values[1] - hyp_values[0]
                l_f_diff_r = l_diff / f_diff
                if l_f_diff_r > 1:
                    log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
                    ax.set_xscale('log', basex=round(log_base))
                marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
                for line_i in range(len(ax.lines)):
                    ax.lines[line_i].set_marker(marker[line_i])
                ax.legend_.remove()
                # Second figure: same curves for the secondary metric.
                try:
                    ax = seaborn.tsplot(time=hyp_key, value=expMap2[exp],
                                        unit="Round Id", condition="Method",
                                        data=df_temp,
                                        ax=axarray2[plot_idx[0], plot_idx[1]])
                    if plot_idx[1]:
                        ax.set_ylabel('')
                    if not plot_idx[0]:
                        ax.set_xlabel('')
                except IndexError:
                    ax = seaborn.tsplot(time=hyp_key, value=expMap2[exp],
                                        unit="Round Id", condition="Method",
                                        data=df_temp,
                                        ax=axarray2[plt_idx])
                    if plt_idx:
                        ax.set_ylabel('')
                ax.set_title(hyp_str)
                if l_f_diff_r > 1:
                    log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
                    ax.set_xscale('log', basex=round(log_base))
                marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
                for line_i in range(len(ax.lines)):
                    ax.lines[line_i].set_marker(marker[line_i])
                ax.legend_.remove()
            # Shrink each subplot to leave room for the shared legend below.
            for col_idx in range(axarray1.shape[0]):
                box = axarray1[col_idx].get_position()
                axarray1[col_idx].set_position(
                    [box.x0,
                     box.y0 + box.height * 0.1,
                     box.width,
                     box.height * 0.9]
                )
                box = axarray2[col_idx].get_position()
                axarray2[col_idx].set_position(
                    [box.x0,
                     box.y0 + box.height * 0.1,
                     box.width,
                     box.height * 0.9]
                )
            fin1.legend(loc='lower center', bbox_to_anchor=(0.45, -0.01),
                        ncol=n_meths, fancybox=True, shadow=True)
            fin2.legend(loc='lower center', bbox_to_anchor=(0.45, -0.01),
                        ncol=n_meths, fancybox=True, shadow=True)
            fin1.savefig(
                'plots/data_hyp/%s_%s_%s_%d_%s.pdf' % (data, exp, s_sch, dim, hyp_key),
                dpi=300, format='pdf', bbox_inches='tight'
            )
            fin2.savefig(
                'plots/data_hyp/%s_%s_%s_%d_%s_p100.pdf' % (data, exp, s_sch, dim, hyp_key),
                dpi=300, format='pdf', bbox_inches='tight'
            )
            fin1.clf()
            fin2.clf()
def plot_hyp_data(hyp_keys, exp_param,
                  meths, data,
                  s_sch="u_rand",
                  dim=2):
    """Plot per-hyper-parameter sensitivity curves comparing methods.

    For each experiment in `exp_param`, loads per-method tuning results from
    intermediate/<data>_<meth>_<exp>_<s_sch>_dim_<dim>_data_hyp.h5 and, for
    every hyper-parameter in `hyp_keys`, saves one figure per combination of
    the remaining hyper-parameters (primary metric and secondary-metric
    variants) under plots/data_hyp/.
    """
    for exp in exp_param:
        df_all = pd.DataFrame()
        for meth in meths:
            try:
                df = pd.read_hdf(
                    "intermediate/%s_%s_%s_%s_dim_%d_data_hyp.h5" % (data, meth, exp, s_sch, dim),
                    "df"
                )
            except Exception:
                # Best effort: missing/unreadable result files are skipped.
                print('%s_%s_%s_%s_dim_%d_data_hyp.h5 not found. Ignoring data set' % (data, meth, exp, s_sch, dim))
                continue
            # Check if experiment is in the dataframe
            if expMap[exp] not in df:
                continue
            df["Method"] = m_name_l[meth]
            df_all = df_all.append(df).reset_index()
            df_all = df_all.drop(['index'], axis=1)
        if df_all.empty:
            continue
        df = df_all
        # Escape underscores so column names render correctly in LaTeX labels.
        col_names = df.columns
        col_rename_d = {}
        for col_name in col_names:
            col_rename_d[col_name] = col_name.replace('_', '\ ')
        df.rename(columns=col_rename_d, inplace=True)
        for hyp_key in hyp_keys:
            df_trun = df[hyp_keys + ["Round Id", expMap[exp], expMap2[exp], "Method"]]
            df_grouped = df_trun
            rem_hyp_keys = list(set(hyp_keys) - {hyp_key})
            val_lists = [df_grouped[r_k].unique() for r_k in rem_hyp_keys]
            # One figure per combination of the remaining hyper-parameters.
            for hyp_vals in itertools.product(*val_lists):
                hyp_dict = dict(zip(rem_hyp_keys, hyp_vals))
                # BUG FIX: dict.iteritems() is Python 2 only; .items() works
                # on both Python 2 and 3.
                hyp_str = '_'.join("%s=%r" % (key, val)
                                   for (key, val) in hyp_dict.items())
                df_temp = df_grouped
                for hyp_idx, hyp_val in enumerate(hyp_vals):
                    df_temp = df_temp[df_temp[rem_hyp_keys[hyp_idx]] == hyp_val]
                if len(df_temp[hyp_key].unique()) < 3:
                    continue
                print('Plotting %s: %s' % (exp, hyp_key))
                ax = seaborn.tsplot(time=hyp_key, value=expMap[exp],
                                    unit="Round Id", condition="Method",
                                    data=df_temp)
                hyp_values = df_grouped[hyp_key].unique()
                # Heuristic: geometrically spaced values => log-scaled x-axis.
                l_diff = hyp_values[-1] - hyp_values[-2]
                f_diff = hyp_values[1] - hyp_values[0]
                l_f_diff_r = l_diff / f_diff
                if l_f_diff_r > 1:
                    log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
                    ax.set_xscale('log', basex=round(log_base))
                marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
                for line_i in range(len(ax.lines)):
                    ax.lines[line_i].set_marker(marker[line_i])
                ax.legend()
                plt.savefig(
                    'plots/data_hyp/%s_%s_%s_%d_%s.pdf' % (data, exp, s_sch, dim, hyp_str),
                    dpi=300, format='pdf', bbox_inches='tight'
                )
                plt.clf()
                # Repeat for the secondary metric (e.g. P@100).
                ax = seaborn.tsplot(time=hyp_key, value=expMap2[exp],
                                    unit="Round Id", condition="Method",
                                    data=df_temp)
                hyp_values = df_grouped[hyp_key].unique()
                l_diff = hyp_values[-1] - hyp_values[-2]
                f_diff = hyp_values[1] - hyp_values[0]
                l_f_diff_r = l_diff / f_diff
                if l_f_diff_r > 1:
                    log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
                    ax.set_xscale('log', basex=round(log_base))
                marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
                for line_i in range(len(ax.lines)):
                    ax.lines[line_i].set_marker(marker[line_i])
                ax.legend()
                plt.savefig(
                    'plots/data_hyp/%s_%s_%s_%d_%s_p_100.pdf' % (data, exp, s_sch, dim, hyp_str),
                    dpi=300, format='pdf', bbox_inches='tight'
                )
                plt.clf()
def plot_hyp(hyp_keys, exp_param, meth, data,
             s_sch="u_rand"):
    """Plot single-method hyper-parameter sensitivity curves.

    For each experiment in `exp_param`, loads tuning results from
    intermediate/<data>_<meth>_<exp>_<s_sch>_hyp.h5 and, for every
    hyper-parameter in `hyp_keys` with at least 3 distinct values, plots the
    experiment metric against that hyper-parameter. Figures are saved under
    plots/hyp/.
    """
    for exp in exp_param:
        df = pd.read_hdf(
            "intermediate/%s_%s_%s_%s_hyp.h5" % (data, meth, exp, s_sch),
            "df"
        )
        # Escape underscores so column names render in LaTeX-styled labels.
        col_names = df.columns
        col_rename_d = {}
        for col_name in col_names:
            col_rename_d[col_name] = col_name.replace('_', '\ ')
        df.rename(columns=col_rename_d, inplace=True)
        for hyp_key in hyp_keys:
            hyp_key_ren = hyp_key.replace('_', '\ ')
            df_trun = df[[hyp_key_ren, "Round Id", expMap[exp]]]
            try:
                # Best metric per (hyper-parameter value, round).
                df_grouped = df_trun.groupby([hyp_key_ren, "Round Id"]).max().reset_index()
            except TypeError:
                # Unhashable/mixed-type values: stringify the column and retry.
                df_trun[hyp_key_ren + "2"] = \
                    df_trun[hyp_key_ren].apply(lambda x: str(x))
                df_trun[hyp_key_ren] = df_trun[hyp_key_ren + "2"].copy()
                df_trun = df_trun.drop([hyp_key_ren + "2"], axis=1)
                df_grouped = df_trun.groupby([hyp_key_ren, "Round Id"]).max().reset_index()
            # Need at least 3 distinct values for a meaningful curve.
            if len(df_grouped[hyp_key_ren].unique()) < 3:
                continue
            try:
                print('Plotting %s: %s' % (exp, hyp_key))
                ax = seaborn.tsplot(time=hyp_key_ren, value=expMap[exp],
                                    unit="Round Id", data=df_grouped)
                hyp_values = df_grouped[hyp_key_ren].unique()
                # Heuristic: geometrically spaced values => log-scaled x-axis.
                l_diff = hyp_values[-1] - hyp_values[-2]
                f_diff = hyp_values[1] - hyp_values[0]
                l_f_diff_r = l_diff / f_diff
                if l_f_diff_r > 1:
                    log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
                    # NOTE(review): `basex` was removed in matplotlib 3.5
                    # (now `base`) -- confirm the pinned matplotlib version.
                    ax.set_xscale('log', basex=round(log_base))
                marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
                for line_i in range(len(ax.lines)):
                    ax.lines[line_i].set_marker(marker[line_i])
                # ax.grid()
                ax.legend()
            except ValueError:
                # Non-numeric hyper-parameter values: fall back to a bar plot.
                ax = seaborn.barplot(x=hyp_key_ren, y=expMap[exp], data=df_grouped)
            except ZeroDivisionError:
                print( 'Only 2 points provided to plot hyperparameters')
                continue
            plt.savefig(
                'plots/hyp/%s_%s_%s_%s_%s.pdf' % (data, meth, exp, s_sch, hyp_key),
                dpi=300, format='pdf', bbox_inches='tight'
            )
            plt.clf()
def plot_hyp_all(hyp_keys, exp_param, meth, data_sets,
                 s_sch="u_rand"):
    """Plot one method's hyper-parameter sensitivity across several data sets.

    For each experiment in `exp_param`, concatenates tuning results from
    intermediate/<data>_<meth>_<exp>_<s_sch>_hyp.h5 over all `data_sets`,
    then for every hyper-parameter in `hyp_keys` plots the metric vs. the
    hyper-parameter with one curve per data set. Figures go to plots/hyp/.
    """
    for exp in exp_param:
        df_all = pd.DataFrame()
        for data in data_sets:
            try:
                df = pd.read_hdf(
                    "intermediate/%s_%s_%s_%s_hyp.h5" % (data, meth, exp, s_sch),
                    "df"
                )
            except:
                # Best effort: missing result files are skipped.
                print( '%s_%s_%s_%s_hyp.h5 not found. Ignoring data set' % (data, meth, exp, s_sch))
                continue
            # Check if experiment is in the dataframe
            if expMap[exp] not in df:
                continue
            df["Data"] = data
            # pdb.set_trace()
            df_all = df_all.append(df).reset_index()
            df_all = df_all.drop(['index'], axis=1)
        if df_all.empty:
            continue
        # Escape underscores so column names render in LaTeX-styled labels.
        col_names = df_all.columns
        col_rename_d = {}
        for col_name in col_names:
            col_rename_d[col_name] = col_name.replace('_', '\ ')
        df_all.rename(columns=col_rename_d, inplace=True)
        for hyp_key in hyp_keys:
            hyp_key_ren = hyp_key.replace('_', '\ ')
            df_trun = df_all[[hyp_key_ren, "Round Id", expMap[exp], "Data"]]
            try:
                # Best metric per (hyper-parameter value, round, data set).
                df_grouped = \
                    df_trun.groupby([hyp_key_ren, "Round Id", "Data"]).max().reset_index()
            except TypeError:
                # Unhashable/mixed-type values: stringify the column and retry.
                df_trun[hyp_key_ren + "2"] = \
                    df_trun[hyp_key_ren].apply(lambda x: str(x))
                df_trun[hyp_key_ren] = df_trun[hyp_key_ren + "2"].copy()
                df_trun = df_trun.drop([hyp_key_ren + "2"], axis=1)
                df_grouped = df_trun.groupby([hyp_key_ren, "Round Id", "Data"]).max().reset_index()
            # Need >= 3 distinct values (checked on the first data set).
            if len(df_grouped[df_grouped['Data'] == data_sets[0]][hyp_key_ren].unique()) < 3:
                continue
            try:
                print ('Plotting %s: %s' % (exp, hyp_key))
                # node2vec-style aliases: 'inout p' -> q, 'ret p' -> p.
                if hyp_key_ren == 'inout\ p':
                    hyp_key_ren = 'q'
                elif hyp_key_ren == 'ret\ p':
                    hyp_key_ren = 'p'
                df_grouped.rename(columns={expMap[exp]: m_name_l[meth]}, inplace=True)
                try:
                    df_grouped.rename(columns={'inout\ p': 'q'}, inplace=True)
                except:
                    pass
                try:
                    df_grouped.rename(columns={'ret\ p': 'p'}, inplace=True)
                except:
                    pass
                ax = seaborn.tsplot(time=hyp_key_ren, value=m_name_l[meth],
                                    unit="Round Id", condition="Data",
                                    data=df_grouped)
                hyp_values = df_grouped[hyp_key_ren].unique()
                # Heuristic: geometrically spaced values => log-scaled x-axis.
                l_diff = hyp_values[-1] - hyp_values[-2]
                f_diff = hyp_values[1] - hyp_values[0]
                l_f_diff_r = l_diff / f_diff
                if l_f_diff_r > 1:
                    log_base = pow(l_f_diff_r, 1.0 / (len(hyp_values) - 2))
                    # NOTE(review): `basex` was removed in matplotlib 3.5
                    # (now `base`) -- confirm the pinned matplotlib version.
                    ax.set_xscale('log', basex=round(log_base))
                marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
                for line_i in range(len(ax.lines)):
                    ax.lines[line_i].set_marker(marker[line_i])
                # ax.grid()
                ax.legend()
            except ValueError:
                # Non-numeric hyper-parameter values: fall back to a bar plot.
                ax = seaborn.barplot(x="Data", y=m_name_l[meth],
                                    hue=hyp_key_ren, data=df_grouped)
            except ZeroDivisionError:
                print('Only 2 points provided to plot hyperparameters')
                continue
            except:
                # NOTE(review): leftover interactive debugger hook -- this
                # drops into pdb on any unexpected error; consider re-raising.
                pdb.set_trace()
            plt.savefig(
                'plots/hyp/%s_%s_%s_%s.pdf' % (meth, exp, s_sch, hyp_key),
                dpi=300, format='pdf', bbox_inches='tight'
            )
            plt.clf()
def plot_p_at_k(res_pre, res_suffix, exp_type, m_names_f,
                m_names, d_arr, n_rounds, save_fig_name, T_pred, length, testData, nm, epochs,
                K=27089, plot_d=False, s_sch="u_rand"): #k=32768
    """Load pickled evaluation results and plot precision@k, MAP and P@100.

    For every embedding dimension in ``d_arr`` and every method in
    ``m_names_f`` this reads a pickle named
    ``<res_pre>/<testData>/<method>/nm<nm>_l<length>_emb<d_arr[0]>_<s_sch><res_suffix>``
    and accumulates three long-format DataFrames (MAP over time, P@100 over
    time, precision@k), which are rendered with ``seaborn.tsplot`` and saved
    as PDF/PNG files whose names are derived from ``save_fig_name``.

    Args:
        res_pre: root directory of the pickled results.
        res_suffix: file-name suffix, e.g. '.lp' or '.gr'.
        exp_type: 'gr' pickles hold a 6-element list, anything else a
            2-element list [MAP, prec_curv].
        m_names_f: method names as used in the result file paths.
        m_names: method names shown in plot legends (parallel list).
        d_arr: embedding dimensions to iterate over.
        n_rounds: maximum number of evaluation rounds to plot.
        save_fig_name: prefix of every figure written to disk.
        T_pred: number of prediction time steps; T_pred - 1 are plotted.
        length, testData, nm, epochs: experiment parameters used to build
            the result file name (``epochs`` is not referenced in this body).
        K: precision curves are cut off after the first K entries.
        plot_d: if True and len(d_arr) > 1, also plot MAP/P@100 vs. d.
        s_sch: sampling-scheme tag embedded in the file name.

    Returns:
        MAP: nested list indexed [d_idx][m_idx] with the loaded MAP values
        (entries stay None for files that could not be read).
    """
    # Precision is sampled at k = 8, 16, ..., up to K (powers of two).
    log_K = int(np.log2(K)) + 1
    num_k = log_K - 3
    df_map = pd.DataFrame(
        np.zeros((n_rounds * len(m_names) * len(d_arr) * T_pred, 5)),
        columns=['d', 'Method', 'Round id', 'MAP', 't']
    )
    df_p_100 = pd.DataFrame(
        np.zeros((n_rounds * len(m_names) * len(d_arr) * T_pred, 5)),
        columns=['d', 'Method', 'Round id', 'P@100', 't']
    )
    df_p_100_idx = 0
    df_map_idx = 0
    MAP = [None] * len(d_arr)
    for d_idx, d in enumerate(d_arr):
        d = int(d)
        df_prec = pd.DataFrame(
            np.zeros((n_rounds * len(m_names) * num_k, 4)),
            columns=['k', 'Method', 'Round id', 'precision@k']
        )
        df_idx = 0
        MAP[d_idx] = [None] * len(m_names_f)
        # k values at which precision is sampled, and their 0-based indices
        # into the stored precision curve.
        k_range = [2**i for i in range(3, log_K)]
        p_at_k_ind = [2**i - 1 for i in range(3, log_K)]
        for m_idx, method in enumerate(m_names_f):
            try:
                # NOTE(review): the file name always uses d_arr[0], not the
                # current d — verify this is intended when len(d_arr) > 1.
                f = open('%s/%s/%s/nm%d_l%d_emb%d_%s%s' % (res_pre, testData, method, nm, length, d_arr[0],s_sch, res_suffix), 'rb')
            except IOError:
                print('file not found :%s/%s/%s/nm%d_l%d_emb%d_%s%s' % (res_pre, testData, method, nm, length, d_arr[0],s_sch, res_suffix))
                continue
            # 'gr' results carry extra fields; only MAP and the precision
            # curves are used here.
            if exp_type == 'gr':
                [_, _, MAP[d_idx][m_idx], prec_curv, _, _] = \
                    pickle.load(f)
            else:
                [MAP[d_idx][m_idx], prec_curv] = pickle.load(f)
            try:
                assert len(MAP[d_idx][m_idx]) >= T_pred-1
            except:
                # Drop into the debugger when the pickle holds fewer than
                # T_pred - 1 time steps.
                # T_pred=len(MAP[d_idx][m_idx])
                pdb.set_trace()
                # continue
            for rid in range(min(n_rounds, len(prec_curv[0]))):
                for t in range(T_pred-1):
                    try:
                        p_at_k = np.array(prec_curv[t][rid][:K])
                    except:
                        pdb.set_trace()
                    if p_at_k.shape[0] == 0:
                        print('%s_%s_%d%s: Encountered missing precision curve' \
                            % (res_pre, method, d, res_suffix))
                        continue
                    # One MAP row per (d, method, round, time step).
                    df_map.loc[df_map_idx, 'd'] = d
                    df_map.loc[df_map_idx, 't'] = t
                    df_map.loc[df_map_idx, 'MAP'] = MAP[d_idx][m_idx][t][rid]
                    df_map.loc[df_map_idx, 'Method'] = m_names[m_idx]
                    df_map.loc[df_map_idx, 'Round id'] = rid
                    df_map_idx += 1
                    # Precision at rank 100 for the same (d, method, round, t).
                    df_p_100.loc[df_p_100_idx, 'd'] = d
                    df_p_100.loc[df_p_100_idx, 't'] = t
                    df_p_100.loc[df_p_100_idx, 'P@100'] = p_at_k[100]
                    df_p_100.loc[df_p_100_idx, 'Method'] = m_names[m_idx]
                    df_p_100.loc[df_p_100_idx, 'Round id'] = rid
                    df_p_100_idx += 1
                    # num_k precision@k rows sampled at the power-of-two ranks.
                    df_prec.loc[df_idx:df_idx + num_k - 1, 'k'] = k_range
                    df_prec.loc[df_idx:df_idx + num_k - 1, 'precision@k'] = \
                        p_at_k[p_at_k_ind]
                    df_prec.loc[df_idx:df_idx + num_k - 1, 'Method'] = \
                        m_names[m_idx]
                    df_prec.loc[df_idx:df_idx + num_k - 1, 'Round id'] = \
                        rid
                    df_idx += num_k
            f.close()
        # The precision@k curve is only drawn for embedding dimension 32.
        if d == 32:
            df_prec = df_prec[:df_idx]
            # seaborn.FacetGrid.set(xticks=[2**i for i in range(3, log_K)])
            # ax = seaborn.factorplot(x='k', y='precision@k',
            #                         hue='Method', units='Round id',
            #                         data=df_prec)
            # pdb.set_trace()
            plt.figure()
            try:
                ax = seaborn.tsplot(time='k', value='precision@k',
                                    unit='Round id', condition='Method',
                                    data=df_prec)
            except:
                pdb.set_trace()
            # ax.set_xscale('log', basex=2)
            marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
            for line_i in range(len(ax.lines)):
                ax.lines[line_i].set_marker(marker[line_i])
            # ax.grid()
            # ax.legend_.remove()
            plt.show()
            # pdb.set_trace()
            # return
            plt.savefig('%s_d_%d.pdf' % (save_fig_name, d),
                        dpi=300, format='pdf', bbox_inches='tight')
            plt.clf()
        # MAP over time for the current dimension.
        # NOTE(review): df_map/df_p_100 are truncated inside the d loop, so
        # with several dimensions later rows would be dropped — confirm.
        df_map = df_map[:df_map_idx]
        ax = seaborn.tsplot(time='t', value='MAP',
                            unit='Round id', condition='Method',
                            data=df_map)
        # ax = seaborn.barplot(x="Method", y="MAP", data=df_map)
        plt.savefig('%s_d_%d_map.pdf' % (save_fig_name, d),
                    dpi=300, format='pdf', bbox_inches='tight')
        plt.savefig('%s_d_%d_map.png' % (save_fig_name, d),
                    dpi=300, bbox_inches='tight')
        plt.clf()
        # P@100 over time for the current dimension.
        df_p_100 = df_p_100[:df_p_100_idx]
        ax = seaborn.tsplot(time='t', value='P@100',
                            unit='Round id', condition='Method',
                            data=df_p_100)
        # ax = seaborn.barplot(x="Method", y="MAP", data=df_p_100)
        plt.savefig('%s_d_%d_p100.pdf' % (save_fig_name, d),
                    dpi=300, format='pdf', bbox_inches='tight')
        plt.savefig('%s_d_%d_p100.png' % (save_fig_name, d),
                    dpi=300, bbox_inches='tight')
        plt.clf()
    # Optionally plot MAP / P@100 as a function of embedding dimension.
    if plot_d and len(d_arr) > 1:
        df_map = df_map[:df_map_idx]
        ax = seaborn.tsplot(time='d', value='MAP', unit='Round id',
                            condition='Method', data=df_map)
        ax.set_xscale('log', basex=2)
        marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
        for line_i in range(len(ax.lines)):
            ax.lines[line_i].set_marker(marker[line_i])
        # ax.grid()
        # ax.legend_.remove()
        ax.legend()
        plt.savefig('%s_map.pdf' % save_fig_name,
                    dpi=300, format='pdf', bbox_inches='tight')
        plt.savefig('%s_map.png' % save_fig_name,
                    dpi=300, bbox_inches='tight')
        plt.clf()
        df_p_100 = df_p_100[:df_p_100_idx]
        ax = seaborn.tsplot(time='d', value='P@100', unit='Round id',
                            condition='Method', data=df_p_100)
        ax.set_xscale('log', basex=2)
        marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
        for line_i in range(len(ax.lines)):
            ax.lines[line_i].set_marker(marker[line_i])
        # ax.grid()
        # ax.legend_.remove()
        ax.legend()
        plt.savefig('%s_p_100.pdf' % save_fig_name,
                    dpi=300, format='pdf', bbox_inches='tight')
        plt.savefig('%s_p_100.png' % save_fig_name,
                    dpi=300, bbox_inches='tight')
        plt.clf()
    return MAP
def plot_F1(res_pre, res_suffix, exp_type,
            m_names_f, m_names, d_arr, n_rounds,
            save_fig_name, K=1024, plot_d=False):
    """Plot node-classification Micro-/Macro-F1 curves from pickled results.

    For each dimension/method pair this reads
    ``<res_pre>_<method>_<d><res_suffix>``, which pickles
    ``[test_ratio_arr, micro, macro]``, and plots F1 versus train ratio
    (only for d == 32) plus, optionally, F1 versus embedding dimension.

    Args:
        res_pre: prefix of the result files.
        res_suffix: file-name suffix, e.g. '.nc'.
        exp_type: experiment tag; not referenced in this body.
        m_names_f: method names used in the result file names.
        m_names: legend names, parallel to m_names_f.
        d_arr: embedding dimensions to iterate over.
        n_rounds: maximum number of rounds to plot.
        save_fig_name: prefix of the output PDF files.
        K: row capacity per (round, method) in the per-dimension frame.
        plot_d: if True and len(d_arr) > 1, also plot F1 vs. dimension.
    """
    # Global frame: one summary row per (d, method, round), taken at the
    # middle entry of the test-ratio array; feeds the F1-vs-d plots.
    df_f1_glob = pd.DataFrame(
        np.zeros((n_rounds * len(m_names) * len(d_arr), 5)),
        columns=['d', 'Method', 'Round id',
                 'Micro-F1 score', 'Macro-F1 score']
    )
    df_f1_glob_idx = 0
    for d in d_arr:
        d = int(d)
        # Per-dimension frame: one row per (train ratio, method, round).
        df = pd.DataFrame(np.zeros((n_rounds * len(m_names) * K, 5)),
                          columns=['Train ratio', 'Method', 'Round id',
                                   'Micro-F1 score', 'Macro-F1 score'])
        df_idx = 0
        for idx, method in enumerate(m_names_f):
            try:
                with open('%s_%s_%d%s' % (res_pre, method, d, res_suffix), 'rb') as f:
                    [test_ratio_arr, micro, macro] = pickle.load(f)
                    n_xlabels = len(test_ratio_arr)
                    for round_id in range(min(n_rounds, len(micro))):
                        microF1 = micro[round_id]
                        macroF1 = macro[round_id]
                        # Summary scores at the middle test ratio.
                        df_f1_glob.loc[df_f1_glob_idx, 'd'] = d
                        df_f1_glob.loc[df_f1_glob_idx, 'Micro-F1 score'] = \
                            microF1[len(test_ratio_arr) // 2]
                        df_f1_glob.loc[df_f1_glob_idx, 'Macro-F1 score'] = \
                            macroF1[len(test_ratio_arr) // 2]
                        df_f1_glob.loc[df_f1_glob_idx, 'Method'] = m_names[idx]
                        df_f1_glob.loc[df_f1_glob_idx, 'Round id'] = round_id
                        df_f1_glob_idx += 1
                        # Full curve: train ratio is 1 - test ratio.
                        df.loc[df_idx:df_idx + n_xlabels - 1, 'Train ratio'] = \
                            [(1.0 - test_r) for test_r in test_ratio_arr]
                        df.loc[df_idx:df_idx + n_xlabels - 1, 'Micro-F1 score'] = \
                            microF1
                        df.loc[df_idx:df_idx + n_xlabels - 1, 'Macro-F1 score'] = \
                            macroF1
                        df.loc[df_idx:df_idx + n_xlabels - 1, 'Method'] = \
                            m_names[idx]
                        df.loc[df_idx:df_idx + n_xlabels -
                               1, 'Round id'] = round_id
                        df_idx += n_xlabels
            except IOError:
                print('File %s_%s_%d%s not found. Ignoring it for NC plot' \
                    % (res_pre, method, d, res_suffix))
                continue
        # F1 vs. train ratio is only drawn for embedding dimension 32.
        if d == 32:
            df = df[:df_idx]
            ax = seaborn.tsplot(time='Train ratio', value='Micro-F1 score',
                                unit='Round id', condition='Method', data=df)
            marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
            for line_i in range(len(ax.lines)):
                ax.lines[line_i].set_marker(marker[line_i])
            # ax.grid()
            ax.legend_.remove()
            plt.savefig('%s_d_%d_micro.pdf' % (save_fig_name, d),
                        dpi=300, format='pdf', bbox_inches='tight')
            plt.show()
            plt.clf()
            ax = seaborn.tsplot(time='Train ratio', value='Macro-F1 score',
                                unit='Round id', condition='Method', data=df)
            for line_i in range(len(ax.lines)):
                ax.lines[line_i].set_marker(marker[line_i])
            # ax.grid()
            ax.legend_.remove()
            plt.savefig('%s_d_%d_macro.pdf' % (save_fig_name, d),
                        dpi=300, format='pdf', bbox_inches='tight')
            plt.show()
            plt.clf()
    # Optionally plot the middle-ratio F1 scores against dimension.
    if plot_d and len(d_arr) > 1:
        df_f1_glob = df_f1_glob[:df_f1_glob_idx]
        ax = seaborn.tsplot(time='d', value='Micro-F1 score',
                            unit='Round id', condition='Method',
                            data=df_f1_glob)
        ax.set_xscale('log', basex=2)
        marker = ["o", "s", "D", "^", "v", "8", "*", "p", "1", "h"]
        for line_i in range(len(ax.lines)):
            ax.lines[line_i].set_marker(marker[line_i])
        # ax.grid()
        ax.legend_.remove()
        plt.savefig('%s_micro.pdf' % (save_fig_name),
                    dpi=300, format='pdf', bbox_inches='tight')
        plt.clf()
        ax = seaborn.tsplot(time='d', value='Macro-F1 score',
                            unit='Round id', condition='Method',
                            data=df_f1_glob)
        ax.set_xscale('log', basex=2)
        for line_i in range(len(ax.lines)):
            ax.lines[line_i].set_marker(marker[line_i])
        # ax.grid()
        ax.legend_.remove()
        plt.savefig('%s_macro.pdf' % (save_fig_name),
                    dpi=300, format='pdf', bbox_inches='tight')
        plt.clf()
def plotExpRes(res_pre, methods, exp,
               d_arr, save_fig_pre,
               n_rounds, plot_d, T_pred, length,
               testData, nm=None, epochs=None,
               samp_scheme="u_rand", K=1000):
    """Dispatch plotting for the requested experiment types.

    Args:
        res_pre: root directory of the pickled results.
        methods: method names (used both for file paths and legends).
        exp: experiment tags; containing 'gr' (graph reconstruction),
            'lp' (link prediction) or 'nc' (node classification)
            triggers the matching plot.
        d_arr: embedding dimensions to plot.
        save_fig_pre: prefix for the output figure files.
        n_rounds: number of evaluation rounds to plot.
        plot_d: forwarded to the helpers (plot metric vs. dimension).
        T_pred: number of prediction time steps.
        length: length of the graph time series (used in file names).
        testData: dataset name (used in file names).
        nm: number of migrated nodes (used in file names).
        epochs: number of training epochs (forwarded for file naming).
        samp_scheme: sampling-scheme tag used in file names.
        K: precision cut-off forwarded to the helpers.

    Returns:
        (map_gr, map_lp): MAP results of the 'gr' and 'lp' plots; each is
        None when that experiment type was not requested.
    """
    # m_names = [m_name_l[meth] for meth in methods]
    m_names = methods
    map_gr = None
    map_lp = None
    if "gr" in exp:
        # BUG FIX: the original call omitted plot_p_at_k's required
        # positional arguments length/testData/nm/epochs and raised a
        # TypeError whenever a 'gr' experiment was requested.
        map_gr = plot_p_at_k(res_pre, '.gr', 'gr',
                             methods, m_names, d_arr,
                             n_rounds, '%s_gr' % save_fig_pre, T_pred,
                             length, testData, nm, epochs,
                             K=K, plot_d=plot_d,
                             s_sch=samp_scheme)
    if "lp" in exp:
        map_lp = plot_p_at_k(res_pre, '.lp',
                             'lp', methods, m_names, d_arr,
                             n_rounds, '%s_lp' % save_fig_pre, T_pred,
                             length, testData, nm, epochs,
                             K=K, plot_d=plot_d,
                             s_sch=samp_scheme)
    if "nc" in exp:
        plot_F1(res_pre, '.nc', 'nc', methods, m_names,
                d_arr, n_rounds, '%s_nc' % save_fig_pre,
                K=K, plot_d=plot_d)
    return map_gr, map_lp
if __name__=='__main__':
    # Command-line driver: collect experiment settings, then render the
    # link-prediction plots for the chosen embedding methods.
    cli = ArgumentParser(description='Learns node embeddings for a sequence of graph snapshots')
    cli.add_argument('-t', '--testDataType', default='sbm_cd', type=str, help='Type of data to test the code')
    cli.add_argument('-l', '--timelength', default=7, type=int, help='Number of time series graph to generate')
    cli.add_argument('-nm', '--nodemigration', default=10, type=int, help='number of nodes to migrate')
    cli.add_argument('-emb', '--embeddimension', default=128, type=int, help='embedding dimension')
    cli.add_argument('-rd', '--resultdir', type=str, default='./results_link_all', help="result directory name")
    cli.add_argument('-sm', '--samples', default=5000, type=int, help='samples for test data')
    cli.add_argument('-iter', '--epochs', default=250, type=int, help='number of epochs')
    opts = cli.parse_args()

    # SVD/triad/static-autoencoder baselines evaluated in this run.
    svd_methods = ['incrementalSVD', 'rerunSVD', 'optimalSVD', 'dynTRIAD', 'staticAE']
    # Deep dynamic models (declared but not plotted here).
    method_d = ['dynAE', 'dynRNN', 'dynAERNN']
    # Only the SBM community-detection dataset is active.
    testData = ['sbm_cd']  # , 'academic', 'hep', 'AS'
    # Predict the second half of the time series.
    t_pred = opts.timelength - opts.timelength // 2

    plotExpRes(opts.resultdir, svd_methods,
               'lp', [opts.embeddimension],
               'plots/%s' % (opts.testDataType),
               1, True, t_pred, opts.timelength,
               opts.testDataType, opts.nodemigration, opts.epochs, "u_rand")
    # Example:
    # python utils/plot_util.py -t sbm_cd -l 7 -nm 5 -iter 250 -emb 32 \
    #     -rd /media/Data/graph-learning/dynamicgem/results_link_all
| [
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.array",
"matplotlib.rc",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"itertools.product",
"numpy.unravel_index",
"pandas.DataFrame",
"pandas.read_hdf",
"matplotlib.pyplot.savefig",
"matplotlib.use",
... | [((441, 465), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (443, 465), False, 'from matplotlib import rc\n'), ((466, 491), 'matplotlib.rc', 'rc', (['"""font"""'], {'weight': '"""bold"""'}), "('font', weight='bold')\n", (468, 491), False, 'from matplotlib import rc\n'), ((492, 511), 'matplotlib.rc', 'rc', (['"""font"""'], {'size': '(20)'}), "('font', size=20)\n", (494, 511), False, 'from matplotlib import rc\n'), ((512, 538), 'matplotlib.rc', 'rc', (['"""lines"""'], {'markersize': '(10)'}), "('lines', markersize=10)\n", (514, 538), False, 'from matplotlib import rc\n'), ((539, 564), 'matplotlib.rc', 'rc', (['"""xtick"""'], {'labelsize': '(12)'}), "('xtick', labelsize=12)\n", (541, 564), False, 'from matplotlib import rc\n'), ((565, 590), 'matplotlib.rc', 'rc', (['"""ytick"""'], {'labelsize': '(12)'}), "('ytick', labelsize=12)\n", (567, 590), False, 'from matplotlib import rc\n'), ((591, 622), 'matplotlib.rc', 'rc', (['"""axes"""'], {'labelsize': '"""x-large"""'}), "('axes', labelsize='x-large')\n", (593, 622), False, 'from matplotlib import rc\n'), ((623, 653), 'matplotlib.rc', 'rc', (['"""axes"""'], {'labelweight': '"""bold"""'}), "('axes', labelweight='bold')\n", (625, 653), False, 'from matplotlib import rc\n'), ((654, 685), 'matplotlib.rc', 'rc', (['"""axes"""'], {'titlesize': '"""x-large"""'}), "('axes', titlesize='x-large')\n", (656, 685), False, 'from matplotlib import rc\n'), ((686, 709), 'matplotlib.rc', 'rc', (['"""axes"""'], {'linewidth': '(3)'}), "('axes', linewidth=3)\n", (688, 709), False, 'from matplotlib import rc\n'), ((710, 732), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (716, 732), True, 'import matplotlib.pyplot as plt\n'), ((733, 762), 'seaborn.set_style', 'seaborn.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (750, 762), False, 'import seaborn\n'), ((143, 164), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (157, 164), 
False, 'import matplotlib\n'), ((1380, 1441), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16, 'font.weight': 'bold'}"], {}), "({'font.size': 16, 'font.weight': 'bold'})\n", (1399, 1441), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1588), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_n'], {}), '(fig_n)\n', (1581, 1588), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2273), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_save_path'], {}), '(file_save_path)\n', (2257, 2273), True, 'import matplotlib.pyplot as plt\n'), ((3672, 3695), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (3674, 3695), False, 'from matplotlib import rc\n'), ((3700, 3725), 'matplotlib.rc', 'rc', (['"""font"""'], {'weight': '"""bold"""'}), "('font', weight='bold')\n", (3702, 3725), False, 'from matplotlib import rc\n'), ((3730, 3748), 'matplotlib.rc', 'rc', (['"""font"""'], {'size': '(8)'}), "('font', size=8)\n", (3732, 3748), False, 'from matplotlib import rc\n'), ((3753, 3780), 'matplotlib.rc', 'rc', (['"""lines"""'], {'markersize': '(2.5)'}), "('lines', markersize=2.5)\n", (3755, 3780), False, 'from matplotlib import rc\n'), ((3785, 3811), 'matplotlib.rc', 'rc', (['"""lines"""'], {'linewidth': '(0.5)'}), "('lines', linewidth=0.5)\n", (3787, 3811), False, 'from matplotlib import rc\n'), ((3816, 3840), 'matplotlib.rc', 'rc', (['"""xtick"""'], {'labelsize': '(6)'}), "('xtick', labelsize=6)\n", (3818, 3840), False, 'from matplotlib import rc\n'), ((3845, 3869), 'matplotlib.rc', 'rc', (['"""ytick"""'], {'labelsize': '(6)'}), "('ytick', labelsize=6)\n", (3847, 3869), False, 'from matplotlib import rc\n'), ((3874, 3903), 'matplotlib.rc', 'rc', (['"""axes"""'], {'labelsize': '"""small"""'}), "('axes', labelsize='small')\n", (3876, 3903), False, 'from matplotlib import rc\n'), ((3908, 3938), 'matplotlib.rc', 'rc', (['"""axes"""'], {'labelweight': '"""bold"""'}), "('axes', labelweight='bold')\n", (3910, 
3938), False, 'from matplotlib import rc\n'), ((3943, 3972), 'matplotlib.rc', 'rc', (['"""axes"""'], {'titlesize': '"""small"""'}), "('axes', titlesize='small')\n", (3945, 3972), False, 'from matplotlib import rc\n'), ((3977, 4000), 'matplotlib.rc', 'rc', (['"""axes"""'], {'linewidth': '(1)'}), "('axes', linewidth=1)\n", (3979, 4000), False, 'from matplotlib import rc\n'), ((4005, 4027), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (4011, 4027), True, 'import matplotlib.pyplot as plt\n'), ((4032, 4061), 'seaborn.set_style', 'seaborn.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (4049, 4061), False, 'import seaborn\n'), ((34287, 34378), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Learns node embeddings for a sequence of graph snapshots"""'}), "(description=\n 'Learns node embeddings for a sequence of graph snapshots')\n", (34301, 34378), False, 'from argparse import ArgumentParser\n'), ((1999, 2048), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_lab'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "(x_lab, fontsize=16, fontweight='bold')\n", (2009, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2106), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_lab'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "(y_lab, fontsize=16, fontweight='bold')\n", (2067, 2106), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2163), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "(title, fontsize=16, fontweight='bold')\n", (2124, 2163), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2241), 'matplotlib.pyplot.legend', 'plt.legend', (['[s[0] for s in series]', 'legendLabels'], {}), '([s[0] for s in series], legendLabels)\n', (2203, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2303, 2305), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4119), 
'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4117, 4119), True, 'import pandas as pd\n'), ((10962, 10976), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10974, 10976), True, 'import pandas as pd\n'), ((15028, 15107), 'pandas.read_hdf', 'pd.read_hdf', (["('intermediate/%s_%s_%s_%s_hyp.h5' % (data, meth, exp, s_sch))", '"""df"""'], {}), "('intermediate/%s_%s_%s_%s_hyp.h5' % (data, meth, exp, s_sch), 'df')\n", (15039, 15107), True, 'import pandas as pd\n'), ((17520, 17534), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17532, 17534), True, 'import pandas as pd\n'), ((27050, 27141), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""d"""', 'value': '"""MAP"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df_map'}), "(time='d', value='MAP', unit='Round id', condition='Method',\n data=df_map)\n", (27064, 27141), False, 'import seaborn\n'), ((27450, 27539), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_map.pdf' % save_fig_name)"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_map.pdf' % save_fig_name, dpi=300, format='pdf',\n bbox_inches='tight')\n", (27461, 27539), True, 'import matplotlib.pyplot as plt\n'), ((27564, 27635), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_map.png' % save_fig_name)"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('%s_map.png' % save_fig_name, dpi=300, bbox_inches='tight')\n", (27575, 27635), True, 'import matplotlib.pyplot as plt\n'), ((27664, 27673), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (27671, 27673), True, 'import matplotlib.pyplot as plt\n'), ((27730, 27825), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""d"""', 'value': '"""P@100"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df_p_100'}), "(time='d', value='P@100', unit='Round id', condition='Method',\n data=df_p_100)\n", (27744, 27825), False, 'import seaborn\n'), ((28134, 28225), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(["('%s_p_100.pdf' % save_fig_name)"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_p_100.pdf' % save_fig_name, dpi=300, format='pdf',\n bbox_inches='tight')\n", (28145, 28225), True, 'import matplotlib.pyplot as plt\n'), ((28250, 28323), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_p_100.png' % save_fig_name)"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('%s_p_100.png' % save_fig_name, dpi=300, bbox_inches='tight')\n", (28261, 28323), True, 'import matplotlib.pyplot as plt\n'), ((28352, 28361), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (28359, 28361), True, 'import matplotlib.pyplot as plt\n'), ((32070, 32177), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""d"""', 'value': '"""Micro-F1 score"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df_f1_glob'}), "(time='d', value='Micro-F1 score', unit='Round id', condition\n ='Method', data=df_f1_glob)\n", (32084, 32177), False, 'import seaborn\n'), ((32491, 32582), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_micro.pdf' % save_fig_name)"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_micro.pdf' % save_fig_name, dpi=300, format='pdf',\n bbox_inches='tight')\n", (32502, 32582), True, 'import matplotlib.pyplot as plt\n'), ((32609, 32618), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (32616, 32618), True, 'import matplotlib.pyplot as plt\n'), ((32632, 32739), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""d"""', 'value': '"""Macro-F1 score"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df_f1_glob'}), "(time='d', value='Macro-F1 score', unit='Round id', condition\n ='Method', data=df_f1_glob)\n", (32646, 32739), False, 'import seaborn\n'), ((32985, 33076), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_macro.pdf' % save_fig_name)"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_macro.pdf' % save_fig_name, dpi=300, 
format='pdf',\n bbox_inches='tight')\n", (32996, 33076), True, 'import matplotlib.pyplot as plt\n'), ((33103, 33112), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (33110, 33112), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1191), 'matplotlib.colors.cnames.items', 'matplotlib.colors.cnames.items', ([], {}), '()\n', (1189, 1191), False, 'import matplotlib\n'), ((1850, 1927), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'colors[i]', 'linewidth': '(2)', 'marker': 'markers[i]', 'markersize': '(8)'}), '(x, y, color=colors[i], linewidth=2, marker=markers[i], markersize=8)\n', (1858, 1927), True, 'import matplotlib.pyplot as plt\n'), ((5672, 5722), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'n_cols'], {'figsize': 'figsize_d[n_cols]'}), '(1, n_cols, figsize=figsize_d[n_cols])\n', (5684, 5722), True, 'import matplotlib.pyplot as plt\n'), ((5752, 5802), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'n_cols'], {'figsize': 'figsize_d[n_cols]'}), '(1, n_cols, figsize=figsize_d[n_cols])\n', (5764, 5802), True, 'import matplotlib.pyplot as plt\n'), ((12300, 12329), 'itertools.product', 'itertools.product', (['*val_lists'], {}), '(*val_lists)\n', (12317, 12329), False, 'import itertools\n'), ((17194, 17321), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/hyp/%s_%s_%s_%s_%s.pdf' % (data, meth, exp, s_sch, hyp_key))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('plots/hyp/%s_%s_%s_%s_%s.pdf' % (data, meth, exp, s_sch,\n hyp_key), dpi=300, format='pdf', bbox_inches='tight')\n", (17205, 17321), True, 'import matplotlib.pyplot as plt\n'), ((17376, 17385), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (17383, 17385), True, 'import matplotlib.pyplot as plt\n'), ((21074, 21193), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/hyp/%s_%s_%s_%s.pdf' % (meth, exp, s_sch, hyp_key))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('plots/hyp/%s_%s_%s_%s.pdf' % 
(meth, exp, s_sch, hyp_key), dpi=\n 300, format='pdf', bbox_inches='tight')\n", (21085, 21193), True, 'import matplotlib.pyplot as plt\n'), ((21247, 21256), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (21254, 21256), True, 'import matplotlib.pyplot as plt\n'), ((21493, 21503), 'numpy.log2', 'np.log2', (['K'], {}), '(K)\n', (21500, 21503), True, 'import numpy as np\n'), ((25117, 25129), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25127, 25129), True, 'import matplotlib.pyplot as plt\n'), ((25683, 25693), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25691, 25693), True, 'import matplotlib.pyplot as plt\n'), ((25757, 25852), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_d_%d.pdf' % (save_fig_name, d))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_d_%d.pdf' % (save_fig_name, d), dpi=300, format='pdf',\n bbox_inches='tight')\n", (25768, 25852), True, 'import matplotlib.pyplot as plt\n'), ((25885, 25894), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (25892, 25894), True, 'import matplotlib.pyplot as plt\n'), ((25954, 26045), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""t"""', 'value': '"""MAP"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df_map'}), "(time='t', value='MAP', unit='Round id', condition='Method',\n data=df_map)\n", (25968, 26045), False, 'import seaborn\n'), ((26163, 26262), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_d_%d_map.pdf' % (save_fig_name, d))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_d_%d_map.pdf' % (save_fig_name, d), dpi=300, format='pdf',\n bbox_inches='tight')\n", (26174, 26262), True, 'import matplotlib.pyplot as plt\n'), ((26295, 26381), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_d_%d_map.png' % (save_fig_name, d))"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('%s_d_%d_map.png' % (save_fig_name, d), dpi=300, bbox_inches=\n 'tight')\n", (26306, 26381), True, 
'import matplotlib.pyplot as plt\n'), ((26413, 26422), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (26420, 26422), True, 'import matplotlib.pyplot as plt\n'), ((26488, 26583), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""t"""', 'value': '"""P@100"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df_p_100'}), "(time='t', value='P@100', unit='Round id', condition='Method',\n data=df_p_100)\n", (26502, 26583), False, 'import seaborn\n'), ((26703, 26803), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_d_%d_p100.pdf' % (save_fig_name, d))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_d_%d_p100.pdf' % (save_fig_name, d), dpi=300, format='pdf',\n bbox_inches='tight')\n", (26714, 26803), True, 'import matplotlib.pyplot as plt\n'), ((26836, 26923), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_d_%d_p100.png' % (save_fig_name, d))"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('%s_d_%d_p100.png' % (save_fig_name, d), dpi=300, bbox_inches=\n 'tight')\n", (26847, 26923), True, 'import matplotlib.pyplot as plt\n'), ((26955, 26964), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (26962, 26964), True, 'import matplotlib.pyplot as plt\n'), ((30924, 31032), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""Train ratio"""', 'value': '"""Micro-F1 score"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df'}), "(time='Train ratio', value='Micro-F1 score', unit='Round id',\n condition='Method', data=df)\n", (30938, 31032), False, 'import seaborn\n'), ((31309, 31410), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_d_%d_micro.pdf' % (save_fig_name, d))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_d_%d_micro.pdf' % (save_fig_name, d), dpi=300, format='pdf',\n bbox_inches='tight')\n", (31320, 31410), True, 'import matplotlib.pyplot as plt\n'), ((31443, 31453), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31451, 31453), 
True, 'import matplotlib.pyplot as plt\n'), ((31466, 31475), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (31473, 31475), True, 'import matplotlib.pyplot as plt\n'), ((31493, 31601), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""Train ratio"""', 'value': '"""Macro-F1 score"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df'}), "(time='Train ratio', value='Macro-F1 score', unit='Round id',\n condition='Method', data=df)\n", (31507, 31601), False, 'import seaborn\n'), ((31806, 31907), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_d_%d_macro.pdf' % (save_fig_name, d))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('%s_d_%d_macro.pdf' % (save_fig_name, d), dpi=300, format='pdf',\n bbox_inches='tight')\n", (31817, 31907), True, 'import matplotlib.pyplot as plt\n'), ((31940, 31950), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31948, 31950), True, 'import matplotlib.pyplot as plt\n'), ((31963, 31972), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (31970, 31972), True, 'import matplotlib.pyplot as plt\n'), ((4205, 4305), 'pandas.read_hdf', 'pd.read_hdf', (["('intermediate/%s_%s_%s_%s_dim_%d_data_hyp.h5' % (data, meth, exp, s_sch, dim))", '"""df"""'], {}), "('intermediate/%s_%s_%s_%s_dim_%d_data_hyp.h5' % (data, meth,\n exp, s_sch, dim), 'df')\n", (4216, 4305), True, 'import pandas as pd\n'), ((5850, 5879), 'itertools.product', 'itertools.product', (['*val_lists'], {}), '(*val_lists)\n', (5867, 5879), False, 'import itertools\n'), ((5909, 5946), 'numpy.unravel_index', 'np.unravel_index', (['plt_idx', 'plot_shape'], {}), '(plt_idx, plot_shape)\n', (5925, 5946), True, 'import numpy as np\n'), ((11042, 11142), 'pandas.read_hdf', 'pd.read_hdf', (["('intermediate/%s_%s_%s_%s_dim_%d_data_hyp.h5' % (data, meth, exp, s_sch, dim))", '"""df"""'], {}), "('intermediate/%s_%s_%s_%s_dim_%d_data_hyp.h5' % (data, meth,\n exp, s_sch, dim), 'df')\n", (11053, 11142), True, 'import pandas as pd\n'), 
((12841, 12944), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key', 'value': 'expMap[exp]', 'unit': '"""Round Id"""', 'condition': '"""Method"""', 'data': 'df_temp'}), "(time=hyp_key, value=expMap[exp], unit='Round Id', condition=\n 'Method', data=df_temp)\n", (12855, 12944), False, 'import seaborn\n'), ((13666, 13797), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/data_hyp/%s_%s_%s_%d_%s.pdf' % (data, exp, s_sch, dim, hyp_str))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('plots/data_hyp/%s_%s_%s_%d_%s.pdf' % (data, exp, s_sch, dim,\n hyp_str), dpi=300, format='pdf', bbox_inches='tight')\n", (13677, 13797), True, 'import matplotlib.pyplot as plt\n'), ((13868, 13877), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (13875, 13877), True, 'import matplotlib.pyplot as plt\n'), ((13899, 14003), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key', 'value': 'expMap2[exp]', 'unit': '"""Round Id"""', 'condition': '"""Method"""', 'data': 'df_temp'}), "(time=hyp_key, value=expMap2[exp], unit='Round Id', condition\n ='Method', data=df_temp)\n", (13913, 14003), False, 'import seaborn\n'), ((14693, 14830), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/data_hyp/%s_%s_%s_%d_%s_p_100.pdf' % (data, exp, s_sch, dim, hyp_str))"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('plots/data_hyp/%s_%s_%s_%d_%s_p_100.pdf' % (data, exp, s_sch,\n dim, hyp_str), dpi=300, format='pdf', bbox_inches='tight')\n", (14704, 14830), True, 'import matplotlib.pyplot as plt\n'), ((14901, 14910), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (14908, 14910), True, 'import matplotlib.pyplot as plt\n'), ((16167, 16257), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key_ren', 'value': 'expMap[exp]', 'unit': '"""Round Id"""', 'data': 'df_grouped'}), "(time=hyp_key_ren, value=expMap[exp], unit='Round Id', data=\n df_grouped)\n", (16181, 16257), False, 'import seaborn\n'), ((17604, 17683), 
'pandas.read_hdf', 'pd.read_hdf', (["('intermediate/%s_%s_%s_%s_hyp.h5' % (data, meth, exp, s_sch))", '"""df"""'], {}), "('intermediate/%s_%s_%s_%s_hyp.h5' % (data, meth, exp, s_sch), 'df')\n", (17615, 17683), True, 'import pandas as pd\n'), ((19887, 19997), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key_ren', 'value': 'm_name_l[meth]', 'unit': '"""Round Id"""', 'condition': '"""Data"""', 'data': 'df_grouped'}), "(time=hyp_key_ren, value=m_name_l[meth], unit='Round Id',\n condition='Data', data=df_grouped)\n", (19901, 19997), False, 'import seaborn\n'), ((22827, 22841), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (22838, 22841), False, 'import pickle\n'), ((22909, 22923), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (22920, 22923), False, 'import pickle\n'), ((25168, 25269), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': '"""k"""', 'value': '"""precision@k"""', 'unit': '"""Round id"""', 'condition': '"""Method"""', 'data': 'df_prec'}), "(time='k', value='precision@k', unit='Round id', condition=\n 'Method', data=df_prec)\n", (25182, 25269), False, 'import seaborn\n'), ((5495, 5524), 'itertools.product', 'itertools.product', (['*val_lists'], {}), '(*val_lists)\n', (5512, 5524), False, 'import itertools\n'), ((6559, 6701), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key', 'value': 'expMap[exp]', 'unit': '"""Round Id"""', 'condition': '"""Method"""', 'data': 'df_temp', 'ax': 'axarray1[plot_idx[0], plot_idx[1]]'}), "(time=hyp_key, value=expMap[exp], unit='Round Id', condition=\n 'Method', data=df_temp, ax=axarray1[plot_idx[0], plot_idx[1]])\n", (6573, 6701), False, 'import seaborn\n'), ((8227, 8370), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key', 'value': 'expMap2[exp]', 'unit': '"""Round Id"""', 'condition': '"""Method"""', 'data': 'df_temp', 'ax': 'axarray2[plot_idx[0], plot_idx[1]]'}), "(time=hyp_key, value=expMap2[exp], unit='Round Id', condition\n ='Method', data=df_temp, ax=axarray2[plot_idx[0], 
plot_idx[1]])\n", (8241, 8370), False, 'import seaborn\n'), ((16983, 17045), 'seaborn.barplot', 'seaborn.barplot', ([], {'x': 'hyp_key_ren', 'y': 'expMap[exp]', 'data': 'df_grouped'}), '(x=hyp_key_ren, y=expMap[exp], data=df_grouped)\n', (16998, 17045), False, 'import seaborn\n'), ((20760, 20837), 'seaborn.barplot', 'seaborn.barplot', ([], {'x': '"""Data"""', 'y': 'm_name_l[meth]', 'hue': 'hyp_key_ren', 'data': 'df_grouped'}), "(x='Data', y=m_name_l[meth], hue=hyp_key_ren, data=df_grouped)\n", (20775, 20837), False, 'import seaborn\n'), ((21046, 21061), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (21059, 21061), False, 'import pdb\n'), ((25373, 25388), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (25386, 25388), False, 'import pdb\n'), ((29217, 29231), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (29228, 29231), False, 'import pickle\n'), ((8706, 8832), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key', 'value': 'expMap2[exp]', 'unit': '"""Round Id"""', 'condition': '"""Method"""', 'data': 'df_temp', 'ax': 'axarray2[plt_idx]'}), "(time=hyp_key, value=expMap2[exp], unit='Round Id', condition\n ='Method', data=df_temp, ax=axarray2[plt_idx])\n", (8720, 8832), False, 'import seaborn\n'), ((23104, 23119), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (23117, 23119), False, 'import pdb\n'), ((23328, 23359), 'numpy.array', 'np.array', (['prec_curv[t][rid][:K]'], {}), '(prec_curv[t][rid][:K])\n', (23336, 23359), True, 'import numpy as np\n'), ((7066, 7191), 'seaborn.tsplot', 'seaborn.tsplot', ([], {'time': 'hyp_key', 'value': 'expMap[exp]', 'unit': '"""Round Id"""', 'condition': '"""Method"""', 'data': 'df_temp', 'ax': 'axarray1[plt_idx]'}), "(time=hyp_key, value=expMap[exp], unit='Round Id', condition=\n 'Method', data=df_temp, ax=axarray1[plt_idx])\n", (7080, 7191), False, 'import seaborn\n'), ((23412, 23427), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (23425, 23427), False, 'import pdb\n'), ((7406, 7421), 'pdb.set_trace', 
'pdb.set_trace', ([], {}), '()\n', (7419, 7421), False, 'import pdb\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Export a trained StackedHourglassNet checkpoint to a serialized model file;
the checkpoint path, output name and format come from the command line.
"""
import os
import numpy as np
from mindspore import Tensor, context, export, load_checkpoint, load_param_into_net
from src.config import parse_args
from src.models.StackedHourglassNet import StackedHourglassNet
args = parse_args()
if __name__ == "__main__":
if not os.path.exists(args.ckpt_file):
print("ckpt file not valid")
exit()
# Set context mode
if args.context_mode == "GRAPH":
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, save_graphs=False)
else:
context.set_context(mode=context.PYNATIVE_MODE, device_target=args.device_target)
# Import net
net = StackedHourglassNet(args.nstack, args.inp_dim, args.oup_dim)
param_dict = load_checkpoint(args.ckpt_file)
load_param_into_net(net, param_dict)
input_arr = Tensor(np.zeros([args.batch_size, args.input_res, args.input_res, 3], np.float32))
export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
| [
"os.path.exists",
"mindspore.export",
"src.config.parse_args",
"mindspore.context.set_context",
"numpy.zeros",
"mindspore.load_checkpoint",
"mindspore.load_param_into_net",
"src.models.StackedHourglassNet.StackedHourglassNet"
] | [((908, 920), 'src.config.parse_args', 'parse_args', ([], {}), '()\n', (918, 920), False, 'from src.config import parse_args\n'), ((1339, 1399), 'src.models.StackedHourglassNet.StackedHourglassNet', 'StackedHourglassNet', (['args.nstack', 'args.inp_dim', 'args.oup_dim'], {}), '(args.nstack, args.inp_dim, args.oup_dim)\n', (1358, 1399), False, 'from src.models.StackedHourglassNet import StackedHourglassNet\n'), ((1417, 1448), 'mindspore.load_checkpoint', 'load_checkpoint', (['args.ckpt_file'], {}), '(args.ckpt_file)\n', (1432, 1448), False, 'from mindspore import Tensor, context, export, load_checkpoint, load_param_into_net\n'), ((1453, 1489), 'mindspore.load_param_into_net', 'load_param_into_net', (['net', 'param_dict'], {}), '(net, param_dict)\n', (1472, 1489), False, 'from mindspore import Tensor, context, export, load_checkpoint, load_param_into_net\n'), ((1594, 1672), 'mindspore.export', 'export', (['net', 'input_arr'], {'file_name': 'args.file_name', 'file_format': 'args.file_format'}), '(net, input_arr, file_name=args.file_name, file_format=args.file_format)\n', (1600, 1672), False, 'from mindspore import Tensor, context, export, load_checkpoint, load_param_into_net\n'), ((960, 990), 'os.path.exists', 'os.path.exists', (['args.ckpt_file'], {}), '(args.ckpt_file)\n', (974, 990), False, 'import os\n'), ((1113, 1215), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'args.device_target', 'save_graphs': '(False)'}), '(mode=context.GRAPH_MODE, device_target=args.\n device_target, save_graphs=False)\n', (1132, 1215), False, 'from mindspore import Tensor, context, export, load_checkpoint, load_param_into_net\n'), ((1229, 1315), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.PYNATIVE_MODE', 'device_target': 'args.device_target'}), '(mode=context.PYNATIVE_MODE, device_target=args.\n device_target)\n', (1248, 1315), False, 'from mindspore import Tensor, context, export, 
load_checkpoint, load_param_into_net\n'), ((1514, 1588), 'numpy.zeros', 'np.zeros', (['[args.batch_size, args.input_res, args.input_res, 3]', 'np.float32'], {}), '([args.batch_size, args.input_res, args.input_res, 3], np.float32)\n', (1522, 1588), True, 'import numpy as np\n')] |
from ..theming.Theme import Theme
from ..theming.SimpleColorScale import SimpleColorScale
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import numpy as np
def figure(values, theme: Theme, color_scale: SimpleColorScale):
""" Show a map of convolutional filters """
if values.shape[2] < 12:
num_maps = values.shape[2]
title = 'Filters'
values_to_show = values
titles = [str(i) for i in range(num_maps)]
else:
num_maps = 12
title = 'Filters (top %d out of %d)' % (num_maps, values.shape[2])
values_var = np.var(values.reshape(-1, values.shape[2]), axis=0)
topn_idx = np.argpartition(values_var, -num_maps, axis=0)[-1:-(num_maps+1):-1]
values_to_show = values[:, :, topn_idx]
titles = [str(i) for i in topn_idx]
# Number of columns on map depending on the kernel size
if values_to_show.shape[1] < 4:
num_cols = 3
elif values_to_show.shape[1] < 6:
num_cols = 2
else:
num_cols = 1
num_rows = max(num_cols, int(np.ceil(num_maps / num_cols)))
fig = make_subplots(rows=num_rows, cols=num_cols, subplot_titles=titles,
shared_xaxes=True, shared_yaxes=True,
horizontal_spacing=0.02, vertical_spacing=0.06)
# Draw filters as subplots
for i in range(num_maps):
fig.add_trace(go.Heatmap(z=values_to_show[:, :, i], coloraxis="coloraxis"),
row=(i // num_cols) + 1, col=(i % num_cols) + 1)
fig.update_layout(margin=theme.bottom_figure_margins,
title=dict(text=title, font=dict(size=14)),
coloraxis=color_scale.as_dict(),
template=theme.plotly,
font=dict(size=12))
return fig
| [
"numpy.ceil",
"plotly.graph_objects.Heatmap",
"plotly.subplots.make_subplots",
"numpy.argpartition"
] | [((1119, 1279), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': 'num_rows', 'cols': 'num_cols', 'subplot_titles': 'titles', 'shared_xaxes': '(True)', 'shared_yaxes': '(True)', 'horizontal_spacing': '(0.02)', 'vertical_spacing': '(0.06)'}), '(rows=num_rows, cols=num_cols, subplot_titles=titles,\n shared_xaxes=True, shared_yaxes=True, horizontal_spacing=0.02,\n vertical_spacing=0.06)\n', (1132, 1279), False, 'from plotly.subplots import make_subplots\n'), ((674, 720), 'numpy.argpartition', 'np.argpartition', (['values_var', '(-num_maps)'], {'axis': '(0)'}), '(values_var, -num_maps, axis=0)\n', (689, 720), True, 'import numpy as np\n'), ((1075, 1103), 'numpy.ceil', 'np.ceil', (['(num_maps / num_cols)'], {}), '(num_maps / num_cols)\n', (1082, 1103), True, 'import numpy as np\n'), ((1404, 1464), 'plotly.graph_objects.Heatmap', 'go.Heatmap', ([], {'z': 'values_to_show[:, :, i]', 'coloraxis': '"""coloraxis"""'}), "(z=values_to_show[:, :, i], coloraxis='coloraxis')\n", (1414, 1464), True, 'import plotly.graph_objects as go\n')] |
from __future__ import print_function
import keras.backend as K
import keras.losses as losses
import keras.optimizers as optimizers
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, RepeatVector, Reshape
from keras.layers.embeddings import Embedding
from keras.layers.merge import Concatenate, Multiply
from keras.losses import binary_crossentropy
from keras.models import Model, Sequential
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import Adam
from matplotlib import pyplot as plt
from .callbacks import *
from .pretrain_image_gan import *
from .planner import *
class ConditionalImageGan(PretrainImageGan):
'''
Version of the sampler that only produces results conditioned on a
particular action; this version does not bother trying to learn a separate
distribution for each possible state.
This one generates:
- image
- arm command
- gripper command
'''
def __init__(self, *args, **kwargs):
'''
As in the other models, we call super() to parse arguments from the
command line and set things like our optimizer and learning rate.
Parameters:
-----------
taskdef: definition of the problem used to create a task model
'''
super(ConditionalImageGan, self).__init__(*args, **kwargs)
self.PredictorCb = ImageWithFirstCb
self.rep_size = 256
self.num_transforms = 3
self.do_all = True
self.save_encoder_decoder = self.retrain
self.noise_iters = 2
def _makePredictor(self, features):
# =====================================================================
# Create many different image decoders
(images, arm, gripper) = features
img_shape, image_size, arm_size, gripper_size = self._sizes(
images,
arm,
gripper)
# =====================================================================
# Load the image decoders
img_in = Input(img_shape,name="predictor_img_in")
img0_in = Input(img_shape,name="predictor_img0_in")
arm_in = Input((arm_size,))
gripper_in = Input((gripper_size,))
arm_gripper = Concatenate()([arm_in, gripper_in])
label_in = Input((1,))
next_option_in = Input((1,), name="next_option_in")
next_option2_in = Input((1,), name="next_option2_in")
ins = [img0_in, img_in, next_option_in, next_option2_in]
encoder = self._makeImageEncoder(img_shape, perm_drop=True)
decoder = self._makeImageDecoder(self.hidden_shape, perm_drop=True)
LoadEncoderWeights(self, encoder, decoder, gan=True)
# create input for controlling noise output if that's what we decide
# that we want to do
if self.use_noise:
z1 = Input((self.noise_dim,), name="z1_in")
z2 = Input((self.noise_dim,), name="z2_in")
ins += [z1, z2]
h = encoder([img0_in, img_in])
# =====================================================================
# Actually get the right outputs
y = Flatten()(OneHot(self.num_options)(next_option_in))
y2 = Flatten()(OneHot(self.num_options)(next_option2_in))
x = h
tform = self._makeTransform(perm_drop=True)
l = [h, y, z1] if self.use_noise else [h, y]
x = tform(l)
l = [x, y2, z2] if self.use_noise else [x, y2]
x2 = tform(l)
image_out, image_out2 = decoder([x]), decoder([x2])
# =====================================================================
# Save
self.transform_model = tform
# =====================================================================
# Make the discriminator
image_discriminator = self._makeImageDiscriminator(img_shape)
self.discriminator = image_discriminator
image_discriminator.trainable = False
is_fake = image_discriminator([
img0_in, img_in,
next_option_in, next_option2_in,
image_out, image_out2])
# =====================================================================
# Create generator model to train
lfn = self.loss
predictor = Model(ins, [image_out, image_out2])
predictor.compile(
loss=[lfn, lfn], # ignored since we don't train G
optimizer=self.getOptimizer())
self.generator = predictor
# =====================================================================
# And adversarial model
loss = wasserstein_loss if self.use_wasserstein else "binary_crossentropy"
weights = [0.1, 0.1, 1.] if self.use_wasserstein else [100., 100., 1.]
model = Model(ins, [image_out, image_out2, is_fake])
model.compile(
loss=['mae', 'mae', loss],
loss_weights=weights,
optimizer=self.getOptimizer())
self.model = model
self.discriminator.summary()
self.model.summary()
return predictor, model, model, ins, h
def _getData(self, *args, **kwargs):
features, targets = GetAllMultiData(self.num_options, *args, **kwargs)
[I, q, g, oin, label, q_target, g_target,] = features
tt, o1, v, qa, ga, I_target = targets
# Create the next image including input image
I0 = I[0,:,:,:]
length = I.shape[0]
I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1])
# Extract the next goal
I_target2, o2 = GetNextGoal(I_target, o1)
if not self.validate:
return [I0, I, o1, o2], [ I_target, I_target2 ]
else:
features = [I0, I, o1, o2, oin]
o1_1h = ToOneHot(o1, self.num_options)
o2_1h = ToOneHot(o2, self.num_options)
return (features,
[I_target, I_target2, o1_1h, v, qa, ga, o2_1h])
def _makeImageDiscriminator(self, img_shape):
'''
create image-only encoder to extract keypoints from the scene.
Params:
-------
img_shape: shape of the image to encode
'''
img0 = Input(img_shape,name="img0_encoder_in")
img = Input(img_shape,name="img_encoder_in")
img_goal = Input(img_shape,name="goal_encoder_in")
img_goal2 = Input(img_shape,name="goal2_encoder_in")
option = Input((1,),name="disc_options")
option2 = Input((1,),name="disc2_options")
ins = [img0, img, option, option2, img_goal, img_goal2]
dr = self.dropout_rate
# common arguments
kwargs = { "dropout_rate" : dr,
"padding" : "same",
"lrelu" : True,
"bn" : False,
"perm_drop" : True,
}
x0 = AddConv2D(img0, 64, [4,4], 1, **kwargs)
xobs = AddConv2D(img, 64, [4,4], 1, **kwargs)
xg1 = AddConv2D(img_goal, 64, [4,4], 1, **kwargs)
xg2 = AddConv2D(img_goal2, 64, [4,4], 1, **kwargs)
#x1 = Add()([x0, xobs, xg1])
#x2 = Add()([x0, xg1, xg2])
x1 = Add()([xobs, xg1])
x2 = Add()([xg1, xg2])
# -------------------------------------------------------------
y = OneHot(self.num_options)(option)
y = AddDense(y, 64, "lrelu", dr, perm_drop=True)
x1 = TileOnto(x1, y, 64, (64,64), add=True)
x1 = AddConv2D(x1, 64, [4,4], 2, **kwargs)
# -------------------------------------------------------------
y = OneHot(self.num_options)(option2)
y = AddDense(y, 64, "lrelu", dr, perm_drop=True)
x2 = TileOnto(x2, y, 64, (64,64), add=True)
x2 = AddConv2D(x2, 64, [4,4], 2, **kwargs)
#x = Concatenate()([x1, x2])
x = x2
x = AddConv2D(x, 128, [4,4], 2, **kwargs)
x = AddConv2D(x, 256, [4,4], 2, **kwargs)
if self.use_wasserstein:
x = Flatten()(x)
x = AddDense(x, 1, "linear", 0., output=True, bn=False)
else:
x = AddConv2D(x, 1, [1,1], 1, 0., "same", activation="sigmoid",
bn=False)
x = GlobalAveragePooling2D()(x)
#x = Flatten()(x)
#x = AddDense(x, 1, "sigmoid", 0., output=True, bn=False, perm_drop=True)
discrim = Model(ins, x, name="image_discriminator")
self.lr *= 2.
loss = wasserstein_loss if self.use_wasserstein else "binary_crossentropy"
discrim.compile(loss=loss, optimizer=self.getOptimizer())
self.lr *= 0.5
self.image_discriminator = discrim
return discrim
| [
"keras.layers.merge.Concatenate",
"keras.layers.Input",
"keras.models.Model",
"numpy.expand_dims",
"keras.layers.pooling.GlobalAveragePooling2D"
] | [((2126, 2167), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""predictor_img_in"""'}), "(img_shape, name='predictor_img_in')\n", (2131, 2167), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2185, 2227), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""predictor_img0_in"""'}), "(img_shape, name='predictor_img0_in')\n", (2190, 2227), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2244, 2262), 'keras.layers.Input', 'Input', (['(arm_size,)'], {}), '((arm_size,))\n', (2249, 2262), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2284, 2306), 'keras.layers.Input', 'Input', (['(gripper_size,)'], {}), '((gripper_size,))\n', (2289, 2306), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2384, 2395), 'keras.layers.Input', 'Input', (['(1,)'], {}), '((1,))\n', (2389, 2395), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2421, 2455), 'keras.layers.Input', 'Input', (['(1,)'], {'name': '"""next_option_in"""'}), "((1,), name='next_option_in')\n", (2426, 2455), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2482, 2517), 'keras.layers.Input', 'Input', (['(1,)'], {'name': '"""next_option2_in"""'}), "((1,), name='next_option2_in')\n", (2487, 2517), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((4363, 4398), 'keras.models.Model', 'Model', (['ins', '[image_out, image_out2]'], {}), '(ins, [image_out, image_out2])\n', (4368, 4398), False, 'from keras.models import Model, Sequential\n'), ((4866, 4910), 'keras.models.Model', 'Model', (['ins', '[image_out, image_out2, is_fake]'], {}), '(ins, [image_out, image_out2, is_fake])\n', (4871, 4910), False, 'from keras.models import Model, Sequential\n'), ((6277, 6317), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""img0_encoder_in"""'}), "(img_shape, name='img0_encoder_in')\n", (6282, 6317), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), 
((6331, 6370), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""img_encoder_in"""'}), "(img_shape, name='img_encoder_in')\n", (6336, 6370), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((6389, 6429), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""goal_encoder_in"""'}), "(img_shape, name='goal_encoder_in')\n", (6394, 6429), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((6449, 6490), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""goal2_encoder_in"""'}), "(img_shape, name='goal2_encoder_in')\n", (6454, 6490), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((6507, 6539), 'keras.layers.Input', 'Input', (['(1,)'], {'name': '"""disc_options"""'}), "((1,), name='disc_options')\n", (6512, 6539), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((6557, 6590), 'keras.layers.Input', 'Input', (['(1,)'], {'name': '"""disc2_options"""'}), "((1,), name='disc2_options')\n", (6562, 6590), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((8432, 8473), 'keras.models.Model', 'Model', (['ins', 'x'], {'name': '"""image_discriminator"""'}), "(ins, x, name='image_discriminator')\n", (8437, 8473), False, 'from keras.models import Model, Sequential\n'), ((2329, 2342), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {}), '()\n', (2340, 2342), False, 'from keras.layers.merge import Concatenate, Multiply\n'), ((2941, 2979), 'keras.layers.Input', 'Input', (['(self.noise_dim,)'], {'name': '"""z1_in"""'}), "((self.noise_dim,), name='z1_in')\n", (2946, 2979), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2997, 3035), 'keras.layers.Input', 'Input', (['(self.noise_dim,)'], {'name': '"""z2_in"""'}), "((self.noise_dim,), name='z2_in')\n", (3002, 3035), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((5561, 5587), 'numpy.expand_dims', 'np.expand_dims', (['I0'], {'axis': '(0)'}), '(I0, axis=0)\n', (5575, 5587), True, 
'import numpy as np\n'), ((8269, 8293), 'keras.layers.pooling.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (8291, 8293), False, 'from keras.layers.pooling import GlobalAveragePooling2D\n')] |
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Tests for nested_ops."""
from typing import Any, Dict
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from rlds import rlds_types
from rlds.transformations import flexible_batch
from rlds.transformations import nested_ops
from rlds.transformations import transformations_testlib
import tensorflow as tf
class NestedOpsTest(transformations_testlib.TransformationsTest):
def setUp(self):
super().setUp()
steps1 = {
rlds_types.OBSERVATION: {
'field0': [[1., 0.], [0., 1.], [0., 2.]],
'field1': [[1., 0.], [1., 1.], [1., 2.]]
},
rlds_types.ACTION: ([0, 10, 20], [10, 11, 21], [20, 21, 22]),
rlds_types.REWARD: [0.0, 1.0, 2.0],
rlds_types.IS_TERMINAL: [False, False, True],
rlds_types.IS_FIRST: [True, False, False],
}
steps2 = {
rlds_types.OBSERVATION: {
'field0': [[3., 0.], [1., 1.], [1., 2.]],
'field1': [[1., 3.], [1., 4.], [1., 2.]]
},
rlds_types.ACTION: ([0, 10, 20], [10, 11, 21], [20, 21, 22]),
rlds_types.REWARD: [0.0, 1.0, 2.0],
rlds_types.IS_TERMINAL: [False, False, True],
rlds_types.IS_FIRST: [True, False, False],
}
self.steps1_dataset = tf.data.Dataset.from_tensor_slices(steps1)
self.steps2_dataset = tf.data.Dataset.from_tensor_slices(steps2)
self.episodes_dataset = tf.data.Dataset.from_tensor_slices({
rlds_types.STEPS: [self.steps1_dataset, self.steps2_dataset],
})
self.obs_mean = {
'field0': tf.constant([1., 1.], dtype=tf.float64),
'field1': tf.constant([1., 2.], dtype=tf.float64),
}
self.obs_std = {
'field0': tf.constant([1., np.sqrt(np.float64(4) / np.float64(6))]),
'field1': tf.constant([0., np.sqrt(np.float64(10) / np.float64(6))]),
}
def check_nested_equality(self, result, expected):
if isinstance(expected, dict):
self.assertLen(list(result.keys()), len(list(expected.keys())))
for k in expected:
self.check_nested_equality(result[k], expected[k])
else:
self.assertLen(result, len(expected))
for k, _ in enumerate(expected):
self.assertEqual(result[k], expected[k])
def test_map_episode(self):
def add_one_to_reward(step: Dict[str, Any]) -> Dict[str, Any]:
step[rlds_types.REWARD] += 1
return step
episode1 = {rlds_types.STEPS: self.steps1_dataset,
'sample_metadata': 'metadata_value'}
episode2_in_place = nested_ops._map_episode(
episode1, add_one_to_reward, in_place=True)
episode2 = nested_ops._map_episode(
episode1, add_one_to_reward, in_place=False)
self.assertIn('sample_metadata', episode2)
for step1, step2_in_place, step2 in tf.data.Dataset.zip(
(episode1[rlds_types.STEPS],
episode2_in_place[rlds_types.STEPS],
episode2[rlds_types.STEPS])):
self.assertTrue(
tf.reduce_all(tf.equal(step1[rlds_types.REWARD],
step2_in_place[rlds_types.REWARD])))
self.assertTrue(
tf.reduce_all(tf.equal(step1[rlds_types.REWARD] + 1,
step2[rlds_types.REWARD])))
@parameterized.parameters((1,), (2,), (flexible_batch.BATCH_AUTO_TUNE,))
def test_nested_map(self, batch_size):
shift = tf.nest.map_structure(lambda x: -tf.cast(x, tf.float32),
self.obs_mean)
scale = tf.nest.map_structure(
lambda x: tf.cast(1.0 / np.maximum(x, 1e-3), tf.float32), self.obs_std)
def normalize_step(step: Dict[str, Any]) -> Dict[str, Any]:
step[rlds_types.OBSERVATION] = tf.nest.map_structure(
lambda x, x_offset, x_scale: (x + x_offset) * x_scale,
step[rlds_types.OBSERVATION], shift, scale)
return step
normalized_ds = nested_ops.map_nested_steps(
self.episodes_dataset,
normalize_step,
optimization_batch_size=batch_size)
two_ds = tf.data.Dataset.zip(
(self.episodes_dataset.flat_map(lambda x: x[rlds_types.STEPS]),
normalized_ds.flat_map(lambda x: x[rlds_types.STEPS])))
for (sample, normalized) in two_ds:
normalized_obs = normalized[rlds_types.OBSERVATION]
expected_obs = tf.nest.map_structure(
lambda obs, shift, scale: (obs + shift) * scale,
sample[rlds_types.OBSERVATION], shift, scale)
for k in expected_obs:
self.assertEqual(expected_obs[k].shape, normalized_obs[k].shape)
self.assertTrue(
tf.reduce_all(tf.equal(expected_obs[k], normalized_obs[k])))
def test_nested_apply(self):
def truncate_episode(steps):
return steps.take(2)
dataset = nested_ops.apply_nested_steps(self.episodes_dataset,
truncate_episode)
for episode in dataset:
steps = episode[rlds_types.STEPS]
episode_length = steps.reduce(0, lambda x, step: x + 1)
self.assertEqual(episode_length, 2)
@parameterized.parameters((1,), (2,), (flexible_batch.BATCH_AUTO_TUNE,))
def test_total_sum_tfdata(self, batch_size):
expected_sum = {
rlds_types.OBSERVATION: {
'field0': [6., 6.],
'field1': [6., 12.],
},
rlds_types.ACTION: (60, 84, 126),
}
def data_to_sum(step):
return {
rlds_types.OBSERVATION: step[rlds_types.OBSERVATION],
rlds_types.ACTION: step[rlds_types.ACTION]
}
total_sum = nested_ops.sum_nested_steps(
self.episodes_dataset, data_to_sum, optimization_batch_size=batch_size)
self.expect_nested_dict_equality(total_sum, expected_sum)
@parameterized.parameters((1,), (2,), (flexible_batch.BATCH_AUTO_TUNE,))
def test_sum_episode_tfdata(self, batch_size):
expected_sum = {
rlds_types.OBSERVATION: {
'field0': [1., 3.],
'field1': [3., 3.],
},
rlds_types.ACTION: (30, 42, 63),
}
def data_to_sum(step):
return {
rlds_types.OBSERVATION: step[rlds_types.OBSERVATION],
rlds_types.ACTION: step[rlds_types.ACTION]
}
obs_action = self.steps1_dataset.map(
lambda step:
{k: step[k] for k in [rlds_types.OBSERVATION, rlds_types.ACTION]})
total_sum = nested_ops.sum_dataset(
obs_action, data_to_sum,
optimization_batch_size=batch_size)
self.expect_nested_dict_equality(total_sum, expected_sum)
def test_final_step_tfdata(self):
step = nested_ops.final_step(self.steps1_dataset)
self.assertTrue(
tf.reduce_all(tf.equal(step[rlds_types.OBSERVATION]['field0'],
[0., 2.])))
def test_episode_length_tfdata(self):
result = nested_ops.episode_length(
self.steps1_dataset, optimization_batch_size=0)
self.assertTrue(result, 3)
def test_episode_length_batched(self):
result = nested_ops.episode_length(self.steps1_dataset)
self.assertTrue(result, 3)
if __name__ == '__main__':
absltest.main()
| [
"tensorflow.data.Dataset.zip",
"tensorflow.equal",
"tensorflow.data.Dataset.from_tensor_slices",
"rlds.transformations.nested_ops.episode_length",
"rlds.transformations.nested_ops.sum_dataset",
"numpy.float64",
"absl.testing.parameterized.parameters",
"absl.testing.absltest.main",
"rlds.transformati... | [((3816, 3887), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(1,)', '(2,)', '(flexible_batch.BATCH_AUTO_TUNE,)'], {}), '((1,), (2,), (flexible_batch.BATCH_AUTO_TUNE,))\n', (3840, 3887), False, 'from absl.testing import parameterized\n'), ((5600, 5671), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(1,)', '(2,)', '(flexible_batch.BATCH_AUTO_TUNE,)'], {}), '((1,), (2,), (flexible_batch.BATCH_AUTO_TUNE,))\n', (5624, 5671), False, 'from absl.testing import parameterized\n'), ((6258, 6329), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(1,)', '(2,)', '(flexible_batch.BATCH_AUTO_TUNE,)'], {}), '((1,), (2,), (flexible_batch.BATCH_AUTO_TUNE,))\n', (6282, 6329), False, 'from absl.testing import parameterized\n'), ((7602, 7617), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (7615, 7617), False, 'from absl.testing import absltest\n'), ((1862, 1904), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['steps1'], {}), '(steps1)\n', (1896, 1904), True, 'import tensorflow as tf\n'), ((1931, 1973), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['steps2'], {}), '(steps2)\n', (1965, 1973), True, 'import tensorflow as tf\n'), ((2002, 2104), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['{rlds_types.STEPS: [self.steps1_dataset, self.steps2_dataset]}'], {}), '({rlds_types.STEPS: [self.steps1_dataset,\n self.steps2_dataset]})\n', (2036, 2104), True, 'import tensorflow as tf\n'), ((3113, 3180), 'rlds.transformations.nested_ops._map_episode', 'nested_ops._map_episode', (['episode1', 'add_one_to_reward'], {'in_place': '(True)'}), '(episode1, add_one_to_reward, in_place=True)\n', (3136, 3180), False, 'from rlds.transformations import nested_ops\n'), ((3205, 3273), 'rlds.transformations.nested_ops._map_episode', 'nested_ops._map_episode', (['episode1', 
'add_one_to_reward'], {'in_place': '(False)'}), '(episode1, add_one_to_reward, in_place=False)\n', (3228, 3273), False, 'from rlds.transformations import nested_ops\n'), ((3370, 3489), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(episode1[rlds_types.STEPS], episode2_in_place[rlds_types.STEPS], episode2[\n rlds_types.STEPS])'], {}), '((episode1[rlds_types.STEPS], episode2_in_place[\n rlds_types.STEPS], episode2[rlds_types.STEPS]))\n', (3389, 3489), True, 'import tensorflow as tf\n'), ((4445, 4551), 'rlds.transformations.nested_ops.map_nested_steps', 'nested_ops.map_nested_steps', (['self.episodes_dataset', 'normalize_step'], {'optimization_batch_size': 'batch_size'}), '(self.episodes_dataset, normalize_step,\n optimization_batch_size=batch_size)\n', (4472, 4551), False, 'from rlds.transformations import nested_ops\n'), ((5309, 5379), 'rlds.transformations.nested_ops.apply_nested_steps', 'nested_ops.apply_nested_steps', (['self.episodes_dataset', 'truncate_episode'], {}), '(self.episodes_dataset, truncate_episode)\n', (5338, 5379), False, 'from rlds.transformations import nested_ops\n'), ((6082, 6185), 'rlds.transformations.nested_ops.sum_nested_steps', 'nested_ops.sum_nested_steps', (['self.episodes_dataset', 'data_to_sum'], {'optimization_batch_size': 'batch_size'}), '(self.episodes_dataset, data_to_sum,\n optimization_batch_size=batch_size)\n', (6109, 6185), False, 'from rlds.transformations import nested_ops\n'), ((6880, 6968), 'rlds.transformations.nested_ops.sum_dataset', 'nested_ops.sum_dataset', (['obs_action', 'data_to_sum'], {'optimization_batch_size': 'batch_size'}), '(obs_action, data_to_sum, optimization_batch_size=\n batch_size)\n', (6902, 6968), False, 'from rlds.transformations import nested_ops\n'), ((7092, 7134), 'rlds.transformations.nested_ops.final_step', 'nested_ops.final_step', (['self.steps1_dataset'], {}), '(self.steps1_dataset)\n', (7113, 7134), False, 'from rlds.transformations import nested_ops\n'), ((7324, 7397), 
'rlds.transformations.nested_ops.episode_length', 'nested_ops.episode_length', (['self.steps1_dataset'], {'optimization_batch_size': '(0)'}), '(self.steps1_dataset, optimization_batch_size=0)\n', (7349, 7397), False, 'from rlds.transformations import nested_ops\n'), ((7493, 7539), 'rlds.transformations.nested_ops.episode_length', 'nested_ops.episode_length', (['self.steps1_dataset'], {}), '(self.steps1_dataset)\n', (7518, 7539), False, 'from rlds.transformations import nested_ops\n'), ((2156, 2197), 'tensorflow.constant', 'tf.constant', (['[1.0, 1.0]'], {'dtype': 'tf.float64'}), '([1.0, 1.0], dtype=tf.float64)\n', (2167, 2197), True, 'import tensorflow as tf\n'), ((2215, 2256), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0]'], {'dtype': 'tf.float64'}), '([1.0, 2.0], dtype=tf.float64)\n', (2226, 2256), True, 'import tensorflow as tf\n'), ((4264, 4388), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda x, x_offset, x_scale: (x + x_offset) * x_scale)', 'step[rlds_types.OBSERVATION]', 'shift', 'scale'], {}), '(lambda x, x_offset, x_scale: (x + x_offset) * x_scale,\n step[rlds_types.OBSERVATION], shift, scale)\n', (4285, 4388), True, 'import tensorflow as tf\n'), ((4864, 4984), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda obs, shift, scale: (obs + shift) * scale)', 'sample[rlds_types.OBSERVATION]', 'shift', 'scale'], {}), '(lambda obs, shift, scale: (obs + shift) * scale,\n sample[rlds_types.OBSERVATION], shift, scale)\n', (4885, 4984), True, 'import tensorflow as tf\n'), ((7178, 7238), 'tensorflow.equal', 'tf.equal', (["step[rlds_types.OBSERVATION]['field0']", '[0.0, 2.0]'], {}), "(step[rlds_types.OBSERVATION]['field0'], [0.0, 2.0])\n", (7186, 7238), True, 'import tensorflow as tf\n'), ((3560, 3629), 'tensorflow.equal', 'tf.equal', (['step1[rlds_types.REWARD]', 'step2_in_place[rlds_types.REWARD]'], {}), '(step1[rlds_types.REWARD], step2_in_place[rlds_types.REWARD])\n', (3568, 3629), True, 'import tensorflow as tf\n'), 
((3712, 3776), 'tensorflow.equal', 'tf.equal', (['(step1[rlds_types.REWARD] + 1)', 'step2[rlds_types.REWARD]'], {}), '(step1[rlds_types.REWARD] + 1, step2[rlds_types.REWARD])\n', (3720, 3776), True, 'import tensorflow as tf\n'), ((3974, 3996), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (3981, 3996), True, 'import tensorflow as tf\n'), ((4114, 4134), 'numpy.maximum', 'np.maximum', (['x', '(0.001)'], {}), '(x, 0.001)\n', (4124, 4134), True, 'import numpy as np\n'), ((5155, 5199), 'tensorflow.equal', 'tf.equal', (['expected_obs[k]', 'normalized_obs[k]'], {}), '(expected_obs[k], normalized_obs[k])\n', (5163, 5199), True, 'import tensorflow as tf\n'), ((2326, 2339), 'numpy.float64', 'np.float64', (['(4)'], {}), '(4)\n', (2336, 2339), True, 'import numpy as np\n'), ((2342, 2355), 'numpy.float64', 'np.float64', (['(6)'], {}), '(6)\n', (2352, 2355), True, 'import numpy as np\n'), ((2403, 2417), 'numpy.float64', 'np.float64', (['(10)'], {}), '(10)\n', (2413, 2417), True, 'import numpy as np\n'), ((2420, 2433), 'numpy.float64', 'np.float64', (['(6)'], {}), '(6)\n', (2430, 2433), True, 'import numpy as np\n')] |
import numpy as np
# import logging
def compare_headers(self, hdr, newhdr, uid=True):
compare_template_headers(self, hdr, newhdr, uid)
compare_geometry_headers(self, hdr, newhdr)
def compare_template_headers(self, hdr, newhdr, uid=True):
    """Assert that the descriptive (non-geometry) metadata of two headers match.

    :param self: a unittest.TestCase (provides assertEqual).
    :param uid: when True, also compare the UID-related optional fields.
    """
    # logging.debug('compare_headers: name {} {}'.format(hdr.name, newhdr.name))
    self.assertEqual(hdr.name, newhdr.name)
    # logging.debug('compare_headers: description {} {}'.format(hdr.description, newhdr.description))
    self.assertEqual(hdr.description, newhdr.description)
    self.assertEqual(hdr.authors, newhdr.authors)
    self.assertEqual(hdr.version, newhdr.version)
    self.assertEqual(hdr.url, newhdr.url)
    self.assertEqual(hdr.input_order, newhdr.input_order)
    # self.assertEqual(hdr.sort_on, newhdr.sort_on)
    # DicomHeaderDict[slice].tuple(tagvalue, filename, dicomheader)
    try:
        # Only the key sets are compared; per-slice tuple comparison is
        # intentionally disabled (see commented lines below).
        self.assertEqual(hdr.DicomHeaderDict.keys(), newhdr.DicomHeaderDict.keys())
        # for k in hdr.DicomHeaderDict.keys():
        #     self.assertEqual(hdr.DicomHeaderDict[k], newhdr.DicomHeaderDict[k])
    except ValueError:
        # NOTE(review): a ValueError raised while accessing DicomHeaderDict is
        # silently ignored — presumably headers without DICOM data; confirm.
        pass
    self.assertEqual(hdr.tags.keys(), newhdr.tags.keys())
    for k in hdr.tags.keys():
        np.testing.assert_array_equal(hdr.tags[k], newhdr.tags[k])
    if uid:
        compare_optional(self, hdr, newhdr, 'studyInstanceUID')
        # compare_optional(self, hdr, newhdr, 'seriesInstanceUID')
        compare_optional(self, hdr, newhdr, 'frameOfReferenceUID')
    compare_optional(self, hdr, newhdr, 'seriesNumber')
    compare_optional(self, hdr, newhdr, 'seriesDescription')
    compare_optional(self, hdr, newhdr, 'imageType')
    self.assertEqual(hdr.color, newhdr.color)
    self.assertEqual(hdr.photometricInterpretation,
                     newhdr.photometricInterpretation)
def compare_optional(self, a, b, attr):
    """Assert that optional attribute `attr` matches on `a` and `b`.

    An attribute that is missing, or whose access raises ValueError,
    is treated as None on either side.
    """
    def _read(obj):
        # Accessing a lazy header attribute may raise ValueError; treat
        # that exactly like a missing attribute.
        try:
            return getattr(obj, attr, None)
        except ValueError:
            return None
    self.assertEqual(_read(a), _read(b))
def compare_geometry_headers(self, hdr, newhdr):
    """Assert that the geometric metadata of two headers match: slice
    locations, spacing, orientation, per-slice image positions and the
    transformation matrix (compared with limited decimal precision)."""
    try:
        np.testing.assert_array_equal(hdr.sliceLocations, newhdr.sliceLocations)
    except ValueError:
        # NOTE(review): ValueError (e.g. from accessing sliceLocations) is
        # deliberately ignored; the remaining comparisons are strict.
        pass
    np.testing.assert_array_almost_equal(hdr.spacing, newhdr.spacing, decimal=4)
    np.testing.assert_array_almost_equal(hdr.orientation, newhdr.orientation,
                                         decimal=4)
    self.assertEqual(hdr.imagePositions.keys(), newhdr.imagePositions.keys())
    for k in hdr.imagePositions.keys():
        # logging.debug('compare_headers: hdr.imagePositions[{}]={}'.format(k,hdr.imagePositions[k]))
        # logging.debug('compare_headers: newhdr.imagePositions[{}]={}'.format(k,newhdr.imagePositions[k]))
        np.testing.assert_array_almost_equal(
            hdr.imagePositions[k],
            newhdr.imagePositions[k],
            decimal=4)
    # Transformation matrix gets one fewer decimal of tolerance.
    np.testing.assert_array_almost_equal(hdr.transformationMatrix, newhdr.transformationMatrix, decimal=3)
| [
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_array_equal"
] | [((2250, 2326), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['hdr.spacing', 'newhdr.spacing'], {'decimal': '(4)'}), '(hdr.spacing, newhdr.spacing, decimal=4)\n', (2286, 2326), True, 'import numpy as np\n'), ((2331, 2419), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['hdr.orientation', 'newhdr.orientation'], {'decimal': '(4)'}), '(hdr.orientation, newhdr.orientation,\n decimal=4)\n', (2367, 2419), True, 'import numpy as np\n'), ((2934, 3041), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['hdr.transformationMatrix', 'newhdr.transformationMatrix'], {'decimal': '(3)'}), '(hdr.transformationMatrix, newhdr.\n transformationMatrix, decimal=3)\n', (2970, 3041), True, 'import numpy as np\n'), ((1210, 1268), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hdr.tags[k]', 'newhdr.tags[k]'], {}), '(hdr.tags[k], newhdr.tags[k])\n', (1239, 1268), True, 'import numpy as np\n'), ((2137, 2209), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hdr.sliceLocations', 'newhdr.sliceLocations'], {}), '(hdr.sliceLocations, newhdr.sliceLocations)\n', (2166, 2209), True, 'import numpy as np\n'), ((2796, 2897), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['hdr.imagePositions[k]', 'newhdr.imagePositions[k]'], {'decimal': '(4)'}), '(hdr.imagePositions[k], newhdr.\n imagePositions[k], decimal=4)\n', (2832, 2897), True, 'import numpy as np\n')] |
"""
[References]
- https://stackoverflow.com/questions/8505651/non-repetitive-random-number-in-numpy
- https://algorithmist.com/wiki/Modular_inverse
- https://stackoverflow.com/questions/16044553/solving-a-modular-equation-python
"""
import random
import numpy as np
def get_bitwidth(n):
    """Calculate the bit width (size) for a given number of data elements.

    The result is the smallest power of two that is at least
    log2(2 * n) bits wide.
    """
    exponent = np.ceil(np.log2(np.log2(2 * n)))
    return int(2 ** exponent)
def get_bytewidth(n):
    """Calculate the byte width (size) for a given number of data elements.

    Simply the bit width truncated down to whole bytes.
    """
    bits = get_bitwidth(n)
    return int(bits // 8)
def get_rand_indices(n_total, n_sample):
    """Draw `n_sample` distinct indices uniformly from [0, n_total)."""
    population = range(n_total)
    return random.sample(population, n_sample)
def rand_degree_power(n_nodes, max_deg, alpha, dtype=np.uint32):
    """Sample `n_nodes` node degrees from a power-law-shaped distribution.

    Each degree is ceil(max_deg * (1 - p)) with p ~ np.random.power(alpha);
    the result is returned sorted in descending order.
    """
    samples = np.random.power(alpha, size=n_nodes)
    degrees = np.ceil(max_deg * (1 - samples)).astype(dtype)
    # Sorting the reversed view ascending leaves the array itself descending.
    degrees[::-1].sort()
    return degrees
def iterative_egcd(a, b):
    """Iterative extended Euclidean algorithm.

    Returns (g, x, y) with g = gcd(a, b) and x*a + y*b == g for the
    original inputs.
    """
    x, y = 0, 1
    u, v = 1, 0
    while a != 0:
        quotient, remainder = divmod(b, a)
        next_u, next_v = x - u * quotient, y - v * quotient
        b, a = a, remainder
        x, y = u, v
        u, v = next_u, next_v
    return b, x, y
def modinv(a, m):
    """Return the modular inverse of `a` modulo `m`, or None when it does
    not exist (i.e. gcd(a, m) != 1)."""
    g, x, _ = iterative_egcd(a, m)
    if g == 1:
        return x % m
    return None
| [
"numpy.random.power",
"numpy.ceil",
"numpy.log2"
] | [((753, 789), 'numpy.random.power', 'np.random.power', (['alpha'], {'size': 'n_nodes'}), '(alpha, size=n_nodes)\n', (768, 789), True, 'import numpy as np\n'), ((801, 818), 'numpy.ceil', 'np.ceil', (['rescaled'], {}), '(rescaled)\n', (808, 818), True, 'import numpy as np\n'), ((407, 421), 'numpy.log2', 'np.log2', (['(2 * n)'], {}), '(2 * n)\n', (414, 421), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import norm
from ase import Atoms
from ase.data import covalent_radii
from ase.neighborlist import NeighborList
import ase.neighborlist
import scipy.stats
from scipy.constants import physical_constants
import itertools
from IPython.display import display, clear_output, HTML
import nglview
import ipywidgets as ipw
from collections import Counter
from scipy.signal import find_peaks
from scipy.spatial import ConvexHull
def gaussian(x, sig):
    """Normalized Gaussian PDF with zero mean and standard deviation `sig`.

    Works elementwise on scalars or numpy arrays.
    """
    norm_factor = 1.0 / (sig * np.sqrt(2.0 * np.pi))
    exponent = -np.power(x, 2.) / (2 * np.power(sig, 2.))
    return norm_factor * np.exp(exponent)
def boxfilter(x, thr):
    """Binary threshold: 1 for every element below `thr`, 0 otherwise."""
    flags = [1 if value < thr else 0 for value in x]
    return np.asarray(flags)
def get_types(frame,thr): ## <NAME>
    """Classify every atom of an ASE frame by its structural role.

    Returned atype codes:
      0 = molecule atom
      1 = slab atom
      2 = adatom
      3 = hydrogen on the bottom surface
      5 = unknown
      6 = metalating atom

    :param frame: ASE Atoms object.
    :param thr: threshold in the histogram for being considered a surface
        layer (NOTE(review): currently unused — sigma is hard-coded below).
    :return: (atype, layersg) where layersg holds the z positions of the
        detected slab layers.
    """
    nat=frame.get_number_of_atoms()
    # Everything starts as 'unknown' (code 5).
    atype=np.zeros(nat,dtype=np.int16)+5
    # Cell cross-section, used to estimate layer coverage below.
    area=(frame.cell[0][0]*frame.cell[1][1])
    minz=np.min(frame.positions[:,2])
    maxz=np.max(frame.positions[:,2])
    # Guarantee at least ~1 A of z extent so the histogram below is sane.
    if maxz - minz < 1.0:
        maxz += (1.0 - (maxz - minz))/2
        minz -= (1.0 - (maxz - minz))/2
    ##WHICH VALUES SHOULD WE USE BELOW??????
    sigma = 0.2 #thr
    peak_rel_height = 0.5
    layer_tol=1.0*sigma
    # quick estimate of the number of atoms in a layer:
    nbins=int(np.ceil((maxz-minz)/0.15))
    hist, bin_edges = np.histogram(frame.positions[:,2], density=False,bins=nbins)
    max_atoms_in_a_layer=max(hist)
    lbls=frame.get_chemical_symbols()
    # Gaussian-smoothed atomic density along z; its peaks mark the layers.
    n_intervals=int(np.ceil((maxz-minz+3*sigma)/(0.1*sigma)))
    z_values = np.linspace(minz-3*sigma, maxz+3*sigma, n_intervals) #1000
    atoms_z_pos = frame.positions[:,2]
    # OPTION 1: generate 2d array to apply the gaussian on
    z_v_exp, at_z_exp = np.meshgrid(z_values, atoms_z_pos)
    arr_2d = z_v_exp - at_z_exp
    atomic_density = np.sum(gaussian(arr_2d, sigma), axis=0)
    # OPTION 2: loop through atoms
    # atomic_density = np.zeros(z_values.shape)
    #for ia in range(len(atoms)):
    #    atomic_density += gaussian(z_values - atoms.positions[ia,2], sigma)
    peaks=find_peaks(atomic_density, height=None,threshold=None,distance=None,
                     prominence=None,width=None,wlen=None,rel_height=peak_rel_height)
    layersg=z_values[peaks[0].tolist()]
    n_tot_layers=len(layersg)
    last_layer=layersg[-1]
    ## Check top and bottom layers: a candidate layer only counts as a surface
    ## if its atoms cover a sufficient fraction (>30%) of the cell cross-section.
    found_top_surf = False
    while not found_top_surf:
        iz = layersg[-1]
        twoD_atoms = [frame.positions[i,0:2] for i in range(nat) if np.abs(frame.positions[i,2]-iz) <layer_tol ]
        coverage=0
        if len(twoD_atoms) > max_atoms_in_a_layer/4:
            hull = ConvexHull(twoD_atoms) ##
            coverage = hull.volume/area
        if coverage > 0.3:
            found_top_surf=True
        else:
            layersg=layersg[0:-1]
    found_bottom_surf = False
    while not found_bottom_surf:
        iz = layersg[0]
        twoD_atoms = [frame.positions[i,0:2] for i in range(nat) if np.abs(frame.positions[i,2]-iz) <layer_tol]
        coverage=0
        if len(twoD_atoms) > max_atoms_in_a_layer/4:
            hull = ConvexHull(twoD_atoms) ##
            coverage = hull.volume/area
        if coverage > 0.3 and len(twoD_atoms) > max_atoms_in_a_layer/4 :
            found_bottom_surf=True
        else:
            layersg=layersg[1:]
    bottom_z = layersg[0]
    top_z = layersg[-1]
    #check if there is a bottom layer of H
    found_layer_of_H=True
    for i in range(nat):
        iz = frame.positions[i,2]
        if iz > bottom_z - layer_tol and iz < bottom_z + layer_tol:
            if lbls[i]=='H':
                atype[i]=3
            else:
                found_layer_of_H=False
                break
    if found_layer_of_H:
        # The bottom layer is pure hydrogen: drop it from the slab layers.
        layersg=layersg[1:]
        #bottom_z=layersg[0]
    # Inter-layer spacings, used below to decide what counts as an adatom.
    layers_dist = []
    iprev = layersg[0]
    for inext in layersg[1:]:
        layers_dist.append(abs(iprev - inext))
        iprev = inext
    for i in range(nat):
        iz = frame.positions[i,2]
        if iz > bottom_z - layer_tol and iz < top_z + layer_tol:
            # Inside the slab region -> slab atom (unless already bottom H).
            if not (atype[i]==3 and found_layer_of_H):
                atype[i]=1
        else:
            # Within one inter-layer spacing of a surface -> adatom.
            if np.min([np.abs(iz- top_z),np.abs(iz- bottom_z)]) < np.max(layers_dist):
                if not (atype[i]==3 and found_layer_of_H):
                    atype[i]=2
    # assign the other types
    metalatingtypes=('Au','Ag','Cu','Ni','Co','Zn','Mg')
    moltypes=('H','N','B','O','C','F','S','Br','I','Cl')
    possible_mol_atoms=[i for i in range(nat) if atype[i]==2 and lbls[i] in moltypes]
    possible_mol_atoms+=[i for i in range(nat) if atype[i]==5]
    # Adatoms/unknowns that are covalently bonded to another candidate belong
    # to a molecule (or, for metal species, are metalating atoms).
    if len(possible_mol_atoms) > 0:
        cov_radii = [covalent_radii[a.number] for a in frame[possible_mol_atoms]] #adatoms that have a neigh adatom are in a mol
        nl = NeighborList(cov_radii, bothways = True, self_interaction = False)
        nl.update(frame[possible_mol_atoms])
        for ia in range(len(possible_mol_atoms)):
            indices, offsets = nl.get_neighbors(ia)
            if len(indices) > 0:
                if lbls[possible_mol_atoms[ia]] in metalatingtypes:
                    atype[possible_mol_atoms[ia]]=6
                else:
                    atype[possible_mol_atoms[ia]]=0
    return atype,layersg
def all_connected_to(id_atom,atoms,exclude):
    """Breadth-first search over covalent bonds: return the indices of all
    atoms connected to `id_atom`, skipping elements listed in `exclude`.

    Periodic boundary conditions are switched off while the neighbor list
    is built — so bonds across the cell boundary are not followed — and
    restored afterwards.
    """
    cov_radii = [covalent_radii[a.number] for a in atoms]
    atoms.set_pbc([False, False, False])
    nl_no_pbc = NeighborList(cov_radii, bothways = True, self_interaction = False)
    nl_no_pbc.update(atoms)
    atoms.set_pbc([True,True,True])
    tofollow=[]
    followed=[]
    isconnected=[]
    tofollow.append(id_atom)
    isconnected.append(id_atom)
    while len(tofollow) > 0:
        indices, offsets = nl_no_pbc.get_neighbors(tofollow[0])
        indices=list(indices)
        followed.append(tofollow[0])
        for i in indices:
            if (i not in isconnected) and (atoms[i].symbol not in exclude):
                tofollow.append(i)
                isconnected.append(i)
        # Remove already-visited atoms from the frontier.
        for i in followed:
            if i in tofollow: ### do not remove this check
                tofollow.remove(i)
        #try:
        #    tofollow.remove(i)
        #except:
        #    pass
        #
    return isconnected
def molecules(ismol,atoms):
    """Group the atom indices in `ismol` into connected molecules.

    :param ismol: indices (into `atoms`) of all molecule-candidate atoms.
    :param atoms: full ASE Atoms object.
    :return: list of molecules, each a list of indices into `atoms`.
    """
    all_molecules=[]
    # These indices refer to positions within the sub-structure atoms[ismol].
    to_be_checked=[i for i in range(len(ismol))]
    all_found=[]
    exclude=['None']
    while len(to_be_checked) >0:
        one_mol=all_connected_to(to_be_checked[0],atoms[ismol],exclude)
        is_new_molecule = True
        for ia in one_mol:
            if ia in all_found:
                is_new_molecule=False
                break
        if is_new_molecule:
            # Map sub-structure indices back to indices into `atoms`.
            all_molecules.append([ismol[ia] for ia in one_mol])
            for ia in one_mol:
                all_found.append(ia)
                to_be_checked.remove(ia)
    return all_molecules
def to_ranges(iterable):
    """Yield (start, end) pairs for every run of consecutive integers in the
    sorted, de-duplicated input, e.g. [1, 2, 3, 5] -> (1, 3), (5, 5)."""
    values = sorted(set(iterable))
    start = prev = None
    for value in values:
        if start is None:
            start = prev = value
        elif value == prev + 1:
            prev = value
        else:
            yield start, prev
            start = prev = value
    if start is not None:
        yield start, prev
def mol_ids_range(ismol):
    """Format 0-based atom indices as a 1-based range string.

    Example: [0, 1, 2, 4] -> '1..3 5 ' (note the trailing space).
    """
    one_based = [idx + 1 for idx in ismol]
    pieces = []
    for start, end in to_ranges(one_based):
        if end > start:
            pieces.append(str(start) + '..' + str(end) + ' ')
        else:
            pieces.append(str(start) + ' ')
    return ''.join(pieces)
def string_range_to_list(a):
    """Parse a 1-based range string (e.g. '1..3 5') back into a sorted list
    of 0-based indices — the inverse of mol_ids_range."""
    indices = [int(tok) - 1 for tok in a.split() if tok.isdigit()]
    for token in a.split():
        if '..' in token:
            parts = token.split('..')
            # '2..4' expands to the 0-based indices 1, 2, 3.
            indices.extend(range(int(parts[0]) - 1, int(parts[1])))
    return sorted(indices)
def analyze(atoms):
    """Classify an ASE Atoms object and decompose it into structural parts.

    The system is labelled 'Bulk', 'Molecule', 'Wire' or 'Slab<plane>'
    depending on which cell directions contain more than 4 A of vacuum.
    Slabs are further split into layers, bottom hydrogens, adatoms,
    metalating atoms and molecules.

    :param atoms: ASE Atoms object (a bounding-box cell is created when the
        cell is essentially undefined).
    :return: dict with the classification, per-category index lists and a
        human-readable summary string.
    """
    no_cell=atoms.cell[0][0] <0.1 or atoms.cell[1][1] <0.1 or atoms.cell[2][2] <0.1
    if no_cell:
        # set bounding box as cell
        cx =(np.amax(atoms.positions[:,0]) - np.amin(atoms.positions[:,0])) + 10
        cy =(np.amax(atoms.positions[:,1]) - np.amin(atoms.positions[:,1])) + 10
        cz =(np.amax(atoms.positions[:,2]) - np.amin(atoms.positions[:,2])) + 10
        atoms.cell = (cx, cy, cz)
    atoms.set_pbc([True,True,True])
    total_charge=np.sum(atoms.get_atomic_numbers())
    bottom_H=[]
    adatoms=[]
    metalatings=[]
    unclassified=[]
    slabatoms=[]
    slab_layers=[]
    # BUGFIX: initialize here; previously this name was only assigned inside
    # the slab branch, so the return statement raised a NameError for
    # Bulk/Molecule/Wire systems.
    slab_elements=set([])
    all_molecules=None
    is_a_bulk=False
    is_a_molecule=False
    is_a_wire=False
    # Spin labels built from symbol+tag, e.g. 'Fe1' (tag 1) / 'Fe2' (tag 2).
    spins_up = set(str(the_a.symbol)+str(the_a.tag) for the_a in atoms if the_a.tag == 1)
    spins_down = set(str(the_a.symbol)+str(the_a.tag) for the_a in atoms if the_a.tag == 2)
    #### check if there is vacuum otherwise classify as bulk and skip
    vacuum_x=np.max(atoms.positions[:,0]) - np.min(atoms.positions[:,0]) +4 < atoms.cell[0][0]
    vacuum_y=np.max(atoms.positions[:,1]) - np.min(atoms.positions[:,1]) +4 < atoms.cell[1][1]
    vacuum_z=np.max(atoms.positions[:,2]) - np.min(atoms.positions[:,2]) +4 < atoms.cell[2][2]
    all_elements= atoms.get_chemical_symbols() # list(set(atoms.get_chemical_symbols()))
    cov_radii = [covalent_radii[a.number] for a in atoms]
    nl = NeighborList(cov_radii, bothways = True, self_interaction = False)
    nl.update(atoms)
    #metalating_atoms=['Ag','Au','Cu','Co','Ni','Fe']
    summary=''
    if (not vacuum_z) and (not vacuum_x) and (not vacuum_y):
        is_a_bulk=True
        sys_type='Bulk'
        summary='Bulk contains: \n'
        slabatoms=[ia for ia in range(len(atoms))]
    if vacuum_x and vacuum_y and vacuum_z:
        is_a_molecule=True
        sys_type='Molecule'
        summary='Molecule: \n'
        all_molecules=molecules([i for i in range(len(atoms))],atoms)
        com=np.average(atoms.positions,axis=0)
        summary+='COM: '+str(com)+', min z: '+str(np.min(atoms.positions[:,2]))
    if vacuum_x and vacuum_y and (not vacuum_z):
        is_a_wire=True
        sys_type='Wire'
        summary='Wire along z contains: \n'
        slabatoms=[ia for ia in range(len(atoms))]
    if vacuum_y and vacuum_z and (not vacuum_x):
        is_a_wire=True
        sys_type='Wire'
        summary='Wire along x contains: \n'
        slabatoms=[ia for ia in range(len(atoms))]
    if vacuum_x and vacuum_z and (not vacuum_y):
        is_a_wire=True
        sys_type='Wire'
        summary='Wire along y contains: \n'
        slabatoms=[ia for ia in range(len(atoms))]
    ####END check
    if not (is_a_bulk or is_a_molecule or is_a_wire):
        # Exactly one vacuum direction -> slab; classify every atom.
        tipii,layersg=get_types(atoms,0.1)
        if vacuum_x:
            slabtype='YZ'
        elif vacuum_y:
            slabtype='XZ'
        else:
            slabtype='XY'
        sys_type='Slab' + slabtype
        mol_atoms=np.where(tipii==0)[0].tolist()
        #mol_atoms=extract_mol_indexes_from_slab(atoms)
        metalatings=np.where(tipii==6)[0].tolist()
        mol_atoms+=metalatings
        #identify separate molecules
        all_molecules=molecules(mol_atoms,atoms)
        ## bottom_H
        bottom_H=np.where(tipii==3)[0].tolist()
        ## unclassified
        unclassified=np.where(tipii==5)[0].tolist()
        slabatoms=np.where(tipii==1)[0].tolist()
        adatoms=np.where(tipii==2)[0].tolist()
        ## slab layers: assign each slab atom to its closest detected layer.
        slab_layers=[[]for i in range(len(layersg))]
        for ia in slabatoms:
            idx = (np.abs(layersg - atoms.positions[ia,2])).argmin()
            slab_layers[idx].append(ia)
        ##end slab layers
        summary='Slab '+slabtype+' contains: \n'
        if len(slabatoms) == 0:
            slab_elements = set([])
        else:
            slab_elements=set(atoms[slabatoms].get_chemical_symbols())
    if len(bottom_H) >0:
        summary+='bottom H: ' + mol_ids_range(bottom_H) + '\n'
    if len(slabatoms) > 0:
        summary+='slab atoms: ' + mol_ids_range(slabatoms) + '\n'
    for nlayer in range(len(slab_layers)):
        summary+='slab layer '+str(nlayer+1)+': '+mol_ids_range(slab_layers[nlayer])+'\n'
    if len(adatoms)>0:
        summary+='adatoms: ' + mol_ids_range(adatoms) + '\n'
    if all_molecules:
        summary+='#'+str(len(all_molecules)) + ' molecules: '
        for nmols in range(len(all_molecules)):
            summary+=str(nmols)+') '+mol_ids_range(all_molecules[nmols])
        summary+=' \n'
    if len(mol_ids_range(metalatings))>0:
        summary+='metal atoms inside molecules (already counted): '+ mol_ids_range(metalatings) + '\n'
    if len(mol_ids_range(unclassified))>0:
        summary+='unclassified: ' + mol_ids_range(unclassified)
    return {'total_charge' : total_charge,
            'system_type' : sys_type,
            'cell' : " ".join([str(i) for i in itertools.chain(*atoms.cell.tolist())]),
            'slab_layers' : slab_layers,
            'bottom_H' : sorted(bottom_H),
            'slabatoms' : sorted(slabatoms),
            'adatoms' : sorted(adatoms),
            'all_molecules' : all_molecules,
            'metalatings' : sorted(metalatings),
            'unclassified' : sorted(unclassified),
            'numatoms' : len(atoms),
            'all_elements' : all_elements,
            'slab_elements' : slab_elements,
            'spins_up' : spins_up,
            'spins_down' : spins_down,
            'summary':summary
           }
"numpy.abs",
"numpy.histogram",
"numpy.ceil",
"numpy.sqrt",
"numpy.amin",
"numpy.average",
"scipy.signal.find_peaks",
"numpy.power",
"numpy.where",
"numpy.asarray",
"numpy.max",
"scipy.spatial.ConvexHull",
"numpy.linspace",
"numpy.zeros",
"numpy.min",
"ase.neighborlist.NeighborList",
... | [((609, 655), 'numpy.asarray', 'np.asarray', (['[(1 if i < thr else 0) for i in x]'], {}), '([(1 if i < thr else 0) for i in x])\n', (619, 655), True, 'import numpy as np\n'), ((1066, 1095), 'numpy.min', 'np.min', (['frame.positions[:, 2]'], {}), '(frame.positions[:, 2])\n', (1072, 1095), True, 'import numpy as np\n'), ((1104, 1133), 'numpy.max', 'np.max', (['frame.positions[:, 2]'], {}), '(frame.positions[:, 2])\n', (1110, 1133), True, 'import numpy as np\n'), ((1475, 1537), 'numpy.histogram', 'np.histogram', (['frame.positions[:, 2]'], {'density': '(False)', 'bins': 'nbins'}), '(frame.positions[:, 2], density=False, bins=nbins)\n', (1487, 1537), True, 'import numpy as np\n'), ((1691, 1751), 'numpy.linspace', 'np.linspace', (['(minz - 3 * sigma)', '(maxz + 3 * sigma)', 'n_intervals'], {}), '(minz - 3 * sigma, maxz + 3 * sigma, n_intervals)\n', (1702, 1751), True, 'import numpy as np\n'), ((1877, 1911), 'numpy.meshgrid', 'np.meshgrid', (['z_values', 'atoms_z_pos'], {}), '(z_values, atoms_z_pos)\n', (1888, 1911), True, 'import numpy as np\n'), ((2216, 2358), 'scipy.signal.find_peaks', 'find_peaks', (['atomic_density'], {'height': 'None', 'threshold': 'None', 'distance': 'None', 'prominence': 'None', 'width': 'None', 'wlen': 'None', 'rel_height': 'peak_rel_height'}), '(atomic_density, height=None, threshold=None, distance=None,\n prominence=None, width=None, wlen=None, rel_height=peak_rel_height)\n', (2226, 2358), False, 'from scipy.signal import find_peaks\n'), ((5658, 5720), 'ase.neighborlist.NeighborList', 'NeighborList', (['cov_radii'], {'bothways': '(True)', 'self_interaction': '(False)'}), '(cov_radii, bothways=True, self_interaction=False)\n', (5670, 5720), False, 'from ase.neighborlist import NeighborList\n'), ((9538, 9600), 'ase.neighborlist.NeighborList', 'NeighborList', (['cov_radii'], {'bothways': '(True)', 'self_interaction': '(False)'}), '(cov_radii, bothways=True, self_interaction=False)\n', (9550, 9600), False, 'from ase.neighborlist import 
NeighborList\n'), ((981, 1010), 'numpy.zeros', 'np.zeros', (['nat'], {'dtype': 'np.int16'}), '(nat, dtype=np.int16)\n', (989, 1010), True, 'import numpy as np\n'), ((1426, 1455), 'numpy.ceil', 'np.ceil', (['((maxz - minz) / 0.15)'], {}), '((maxz - minz) / 0.15)\n', (1433, 1455), True, 'import numpy as np\n'), ((1634, 1684), 'numpy.ceil', 'np.ceil', (['((maxz - minz + 3 * sigma) / (0.1 * sigma))'], {}), '((maxz - minz + 3 * sigma) / (0.1 * sigma))\n', (1641, 1684), True, 'import numpy as np\n'), ((5025, 5087), 'ase.neighborlist.NeighborList', 'NeighborList', (['cov_radii'], {'bothways': '(True)', 'self_interaction': '(False)'}), '(cov_radii, bothways=True, self_interaction=False)\n', (5037, 5087), False, 'from ase.neighborlist import NeighborList\n'), ((10120, 10155), 'numpy.average', 'np.average', (['atoms.positions'], {'axis': '(0)'}), '(atoms.positions, axis=0)\n', (10130, 10155), True, 'import numpy as np\n'), ((2794, 2816), 'scipy.spatial.ConvexHull', 'ConvexHull', (['twoD_atoms'], {}), '(twoD_atoms)\n', (2804, 2816), False, 'from scipy.spatial import ConvexHull\n'), ((3288, 3310), 'scipy.spatial.ConvexHull', 'ConvexHull', (['twoD_atoms'], {}), '(twoD_atoms)\n', (3298, 3310), False, 'from scipy.spatial import ConvexHull\n'), ((504, 524), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (511, 524), True, 'import numpy as np\n'), ((4429, 4448), 'numpy.max', 'np.max', (['layers_dist'], {}), '(layers_dist)\n', (4435, 4448), True, 'import numpy as np\n'), ((8260, 8290), 'numpy.amax', 'np.amax', (['atoms.positions[:, 0]'], {}), '(atoms.positions[:, 0])\n', (8267, 8290), True, 'import numpy as np\n'), ((8292, 8322), 'numpy.amin', 'np.amin', (['atoms.positions[:, 0]'], {}), '(atoms.positions[:, 0])\n', (8299, 8322), True, 'import numpy as np\n'), ((8341, 8371), 'numpy.amax', 'np.amax', (['atoms.positions[:, 1]'], {}), '(atoms.positions[:, 1])\n', (8348, 8371), True, 'import numpy as np\n'), ((8373, 8403), 'numpy.amin', 'np.amin', 
(['atoms.positions[:, 1]'], {}), '(atoms.positions[:, 1])\n', (8380, 8403), True, 'import numpy as np\n'), ((8422, 8452), 'numpy.amax', 'np.amax', (['atoms.positions[:, 2]'], {}), '(atoms.positions[:, 2])\n', (8429, 8452), True, 'import numpy as np\n'), ((8454, 8484), 'numpy.amin', 'np.amin', (['atoms.positions[:, 2]'], {}), '(atoms.positions[:, 2])\n', (8461, 8484), True, 'import numpy as np\n'), ((9105, 9134), 'numpy.max', 'np.max', (['atoms.positions[:, 0]'], {}), '(atoms.positions[:, 0])\n', (9111, 9134), True, 'import numpy as np\n'), ((9136, 9165), 'numpy.min', 'np.min', (['atoms.positions[:, 0]'], {}), '(atoms.positions[:, 0])\n', (9142, 9165), True, 'import numpy as np\n'), ((9200, 9229), 'numpy.max', 'np.max', (['atoms.positions[:, 1]'], {}), '(atoms.positions[:, 1])\n', (9206, 9229), True, 'import numpy as np\n'), ((9231, 9260), 'numpy.min', 'np.min', (['atoms.positions[:, 1]'], {}), '(atoms.positions[:, 1])\n', (9237, 9260), True, 'import numpy as np\n'), ((9295, 9324), 'numpy.max', 'np.max', (['atoms.positions[:, 2]'], {}), '(atoms.positions[:, 2])\n', (9301, 9324), True, 'import numpy as np\n'), ((9326, 9355), 'numpy.min', 'np.min', (['atoms.positions[:, 2]'], {}), '(atoms.positions[:, 2])\n', (9332, 9355), True, 'import numpy as np\n'), ((10205, 10234), 'numpy.min', 'np.min', (['atoms.positions[:, 2]'], {}), '(atoms.positions[:, 2])\n', (10211, 10234), True, 'import numpy as np\n'), ((532, 548), 'numpy.power', 'np.power', (['x', '(2.0)'], {}), '(x, 2.0)\n', (540, 548), True, 'import numpy as np\n'), ((555, 573), 'numpy.power', 'np.power', (['sig', '(2.0)'], {}), '(sig, 2.0)\n', (563, 573), True, 'import numpy as np\n'), ((2658, 2692), 'numpy.abs', 'np.abs', (['(frame.positions[i, 2] - iz)'], {}), '(frame.positions[i, 2] - iz)\n', (2664, 2692), True, 'import numpy as np\n'), ((3145, 3179), 'numpy.abs', 'np.abs', (['(frame.positions[i, 2] - iz)'], {}), '(frame.positions[i, 2] - iz)\n', (3151, 3179), True, 'import numpy as np\n'), ((11124, 11144), 
'numpy.where', 'np.where', (['(tipii == 0)'], {}), '(tipii == 0)\n', (11132, 11144), True, 'import numpy as np\n'), ((11231, 11251), 'numpy.where', 'np.where', (['(tipii == 6)'], {}), '(tipii == 6)\n', (11239, 11251), True, 'import numpy as np\n'), ((11429, 11449), 'numpy.where', 'np.where', (['(tipii == 3)'], {}), '(tipii == 3)\n', (11437, 11449), True, 'import numpy as np\n'), ((11516, 11536), 'numpy.where', 'np.where', (['(tipii == 5)'], {}), '(tipii == 5)\n', (11524, 11536), True, 'import numpy as np\n'), ((11576, 11596), 'numpy.where', 'np.where', (['(tipii == 1)'], {}), '(tipii == 1)\n', (11584, 11596), True, 'import numpy as np\n'), ((11623, 11643), 'numpy.where', 'np.where', (['(tipii == 2)'], {}), '(tipii == 2)\n', (11631, 11643), True, 'import numpy as np\n'), ((11786, 11826), 'numpy.abs', 'np.abs', (['(layersg - atoms.positions[ia, 2])'], {}), '(layersg - atoms.positions[ia, 2])\n', (11792, 11826), True, 'import numpy as np\n'), ((4386, 4404), 'numpy.abs', 'np.abs', (['(iz - top_z)'], {}), '(iz - top_z)\n', (4392, 4404), True, 'import numpy as np\n'), ((4404, 4425), 'numpy.abs', 'np.abs', (['(iz - bottom_z)'], {}), '(iz - bottom_z)\n', (4410, 4425), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Created on Mon Feb 11 21:25:53 2019
@author: satya
"""
#The extraction process uses the marker dots injected from the inject code
#and the image orientation is obtained from the line joining the location of the marker dots
#The location of the marker dots in the corners are identified using template matching along the corners
#Using the alignment, the image is rotated to its correct orientation.
#Then a window array for the barcode is selected using th dots as relative postions
#The subarray flattened to mean across columns and a threshold is applied to identify black and white pixels of the barcode
# The series of black pixels are counted and divided by the fixed width of a number used in inject.py
#to obtain the unique code that was injected.
#The obtained unique code is then used to look up the answer key in the json file created while injecting
#Then the answers are saved into an output file as specified.
#Instructions to run the code
#Make sure that the key_dic.json generated while inject.py file is in the same directory as the extract.py
#Then the answers are saved into an output file as specified.
#Importing required libraries
import sys
import os
import numpy as np
from PIL import Image
import json
from scipy.ndimage import rotate
import math
#Check if an earlier key file exists in the location.
#key_dic.json is produced by inject.py and maps the numeric barcode key to
#the answer list written out at the end of this script.
try:
    with open('key_dic.json', 'r') as fp:
        key_dict = json.load(fp)
except:
    # NOTE(review): bare except also swallows JSON decode errors and
    # KeyboardInterrupt, not just a missing file — consider narrowing.
    sys.exit("Ouch! key dict json file not found! Check if the extract.py and key_dic.json are in the same directory")
#Generate the ring-shaped marker-dot template used for matching.
def gen_dot(x, y, r):
    """Build an (x, y) template image of the marker dot.

    Pixels strictly inside the radius-r ring around the centre are 1,
    the background is 255 and the exact centre pixel is 0.
    """
    row_idx, col_idx = np.mgrid[:x, :y]
    dist_sq = (row_idx - x // 2) ** 2 + (col_idx - y // 2) ** 2
    dot = np.logical_and(dist_sq < r ** 2, dist_sq > 0).astype(int)
    dot[dot > 1] = 0
    dot[dot < 1] = 255
    dot[x // 2, y // 2] = 0
    return dot
#Locate the marker dot near the bottom-left corner of the image.
def dot_match_l(image, template):
    """Brute-force template match over a region inset from the bottom-left
    corner; returns the (row, col) centre with the smallest pixel-sum
    difference from the template."""
    a, b = image.shape
    x, y = template.shape
    best_score = np.inf
    best_pos = (0, 0)
    for i in range(100 - x):
        row = a - i - x // 2 - 10
        for j in range(100 - y):
            col = j + y // 2 + 10
            window = image[row - 10:row + 10, col - 10:col + 10]
            score = np.sum(window - template)
            if score < best_score:
                best_score = score
                best_pos = (row, col)
    return best_pos
#Locate the marker dot near the bottom-right corner of the image.
def dot_match_r(image, template):
    """Brute-force template match over a region inset from the bottom-right
    corner; returns the (row, col) centre with the smallest pixel-sum
    difference from the template."""
    a, b = image.shape
    x, y = template.shape
    half_x, half_y = x // 2, y // 2
    candidates = []
    for i in range(100 - x):
        for j in range(100 - y):
            candidates.append((a - i - half_x - 10, b - j - half_y - 10))
    if not candidates:
        return 0, 0
    def _score(pos):
        r, c = pos
        return np.sum(image[r - 10:r + 10, c - 10:c + 10] - template)
    # min() keeps the first minimal candidate, matching the scan order.
    return min(candidates, key=_score)
#Function to give the subarray from the rotated image which contains the barcode.
#Rotation is identified from the line joining the two marker dots.
def gen_line_index(x1,y1,x2,y2,image,dot):
    """Return a thin horizontal strip of the (de-rotated) image containing
    the barcode, given the marker-dot positions (x1, y1) and (x2, y2)."""
    x,y = image.shape
    # NOTE(review): xx/yy are unused in the active code path (left over from
    # a removed line-mask approach).
    xx, yy = np.mgrid[:x, :y]
    # Slope of the line joining the two dots (row over column).
    m = (x2-x1)/(y1-y2)
    if m ==0:
        # Already horizontal: no rotation required.
        avg_x = x1
        l_y = min(y1,y2)
        r_y = max(y1,y2)
        image_rot=image
    else:
        # De-rotate the image so the dot-to-dot line becomes horizontal,
        # then re-locate both dots in the rotated image.
        angle = -1*math.degrees(math.atan(m))
        image_rot = rotate(image,angle,mode='constant',cval=255)
        l_x,l_y = dot_match_l(image_rot,dot)
        r_x,r_y = dot_match_r(image_rot,dot)
        avg_x = (l_x +r_x)//2
    # 40-pixel tall band centred on the dots' row, spanning between the dots.
    return image_rot[avg_x-20:avg_x+20,l_y:r_y]
#im,x1,y1,x2,y2,m,angle=gen_line_index(l_x,l_y,r_x,r_y,image,dot)
#
#Image.fromarray(image).show()
#Image.fromarray(im).show()
#Decode the barcode: threshold the column-mean profile, count runs of black
#pixels and divide each run length by the fixed bar width (15 px) to recover
#the digits of the injected key.
def scanner(subarray):
    """Decode the injected numeric key from a barcode strip."""
    rows, cols = subarray.shape
    # Trim the margins on both sides of the strip.
    strip = subarray[:, 200:cols - 200]
    # Drop rows without any black pixel: all(axis=1) is True only when every
    # pixel in the row is non-zero.
    blank_rows = strip.all(axis=1)
    strip = strip[~blank_rows, :]
    # Collapse to a 1-D intensity profile and binarize it.
    profile = strip.mean(axis=0)
    profile[profile >= 100] = 255
    profile[profile < 100] = 0
    digits = []
    run_length = 0
    for value in profile:
        if value == 0:
            run_length += 1
        elif value > 0:
            if run_length > 0:
                # Each digit was injected as a bar of 15-pixel units.
                digits.append(int(np.ceil(run_length / 15)))
            run_length = 0
    return int(''.join(map(str, digits)))
#Taking the inputs from the user: injected image path and output text path.
injected,output = sys.argv[1],sys.argv[2]
#Convert the image to grayscale ('L') and then to a numpy array.
image = np.asarray(Image.open(injected).convert('L'))
#Generate the dot template for template matching.
dot = gen_dot(20,20,8)
#Find the coordinates of the left marker dot.
l_x,l_y = dot_match_l(image,dot)
#Find the coordinates of the right marker dot.
r_x,r_y = dot_match_r(image,dot)
#Extract the subarray containing the barcode (de-rotated if necessary).
subarray = gen_line_index(l_x,l_y,r_x,r_y,image,dot)
#Decode the unique numeric key from the barcode.
key = scanner(subarray)
#print(key)
#Write the answers looked up with the extracted key to the output text file.
with open(output, 'w') as f:
    f.write("\n".join(key_dict[str(key)]))
| [
"numpy.ceil",
"PIL.Image.open",
"numpy.logical_and",
"numpy.sum",
"sys.exit",
"json.load",
"scipy.ndimage.rotate",
"math.atan"
] | [((1430, 1443), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1439, 1443), False, 'import json\n'), ((1456, 1580), 'sys.exit', 'sys.exit', (['"""Ouch! key dict json file not found! Check if the extract.py and key_dic.json are in the same directory"""'], {}), "(\n 'Ouch! key dict json file not found! Check if the extract.py and key_dic.json are in the same directory'\n )\n", (1464, 1580), False, 'import sys\n'), ((3536, 3583), 'scipy.ndimage.rotate', 'rotate', (['image', 'angle'], {'mode': '"""constant"""', 'cval': '(255)'}), "(image, angle, mode='constant', cval=255)\n", (3542, 3583), False, 'from scipy.ndimage import rotate\n'), ((1729, 1772), 'numpy.logical_and', 'np.logical_and', (['(circle < r ** 2)', '(circle > 0)'], {}), '(circle < r ** 2, circle > 0)\n', (1743, 1772), True, 'import numpy as np\n'), ((2173, 2235), 'numpy.sum', 'np.sum', (['(image[x_c - 10:x_c + 10, y_c - 10:y_c + 10] - template)'], {}), '(image[x_c - 10:x_c + 10, y_c - 10:y_c + 10] - template)\n', (2179, 2235), True, 'import numpy as np\n'), ((2724, 2786), 'numpy.sum', 'np.sum', (['(image[x_c - 10:x_c + 10, y_c - 10:y_c + 10] - template)'], {}), '(image[x_c - 10:x_c + 10, y_c - 10:y_c + 10] - template)\n', (2730, 2786), True, 'import numpy as np\n'), ((5813, 5833), 'PIL.Image.open', 'Image.open', (['injected'], {}), '(injected)\n', (5823, 5833), False, 'from PIL import Image\n'), ((3502, 3514), 'math.atan', 'math.atan', (['m'], {}), '(m)\n', (3511, 3514), False, 'import math\n'), ((5579, 5594), 'numpy.ceil', 'np.ceil', (['(j / 15)'], {}), '(j / 15)\n', (5586, 5594), True, 'import numpy as np\n')] |
from sklearn import linear_model
import numpy as np
# First line: two integers. NOTE(review): the SECOND value (F) is used as the
# number of training rows below — confirm the expected input order.
N, F = list(map(int,input().split()))
observed = []   # training feature rows (all columns but the last)
output = []     # training target values (last column of each row)
for i in range(F):
    d = list(map(float,input().split()))
    output.append(d[-1])
    observed.append(d[:len(d)-1].copy())
# Number of query rows to predict, followed by the rows themselves.
T = int(input())
house = []
for i in range(T):
    d = list(map(float,input().split()))
    house.append(d.copy())
np.set_printoptions(precision=2)
# Ordinary least-squares fit; predict and print one value per line.
regr = linear_model.LinearRegression()
regr.fit(observed, output)
data = regr.predict(house)
for i in data:
    print(i)
"sklearn.linear_model.LinearRegression",
"numpy.set_printoptions"
] | [((358, 390), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (377, 390), True, 'import numpy as np\n'), ((398, 429), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (427, 429), False, 'from sklearn import linear_model\n')] |
import csv
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional, TextIO, Type

import numpy as np
@dataclass
class DataRecord:
    """Base class for experiment data records written by a DataContainer."""


@dataclass
class FederatorRecord(DataRecord):
    """Per-round metrics logged by the federator."""
    num_selected_clients: int
    round_id: int
    round_duration: int
    test_loss: float
    test_accuracy: float
    # Accuracy per class?
    # default_factory so each record gets its own creation time; a plain
    # `time.time()` default is evaluated once at class definition, giving
    # every record the same module-import timestamp.
    timestamp: float = field(default_factory=time.time)
    node_name: str = ''
    confusion_matrix: Optional[np.ndarray] = None


@dataclass
class ClientRecord(DataRecord):
    """Per-round metrics logged by a client."""
    round_id: int
    train_duration: float
    test_duration: float
    round_duration: float
    num_epochs: int
    trained_items: int
    accuracy: float
    train_loss: float
    test_loss: float
    # Accuracy per class?
    # See FederatorRecord.timestamp: factory gives per-instance times.
    timestamp: float = field(default_factory=time.time)
    node_name: str = ''
    confusion_matrix: Optional[np.ndarray] = None
class DataContainer:
    """
    Datacontainer class for collecting experiment data. By default, an 'Excel' compatible format is used by numpy and
    the csv library. As such, it is advised to use a library such as `pandas` to load data for analysis purposes.
    """
    # In-memory record buffer; only consumed by save() (non-append mode).
    records: List[DataRecord]
    file_name: str
    file_handle: TextIO
    file_path: Path
    append_mode: bool
    record_type: Type[DataRecord]
    delimiter = ','
    name: str
    def __init__(self, name: str, output_location: Path, record_type: Type[DataRecord], append_mode: bool = False):
        """Open the CSV output file `<output_location>/<name>.csv`.

        In append mode the file is truncated, the header is written
        immediately, and each append() writes straight to disk; otherwise
        records are buffered in memory until save() is called.
        """
        # print(f'Creating new Data container for client {name}')
        self.records = []
        self.file_name = f'{name}.csv'
        self.name = name
        output_location = Path(output_location)
        output_location.mkdir(parents=True, exist_ok=True)
        self.file_path = output_location / self.file_name
        self.append_mode = append_mode
        # 'a' keeps the persistent handle positioned at end-of-file.
        file_flag = 'a' if append_mode else 'w'
        self.file_handle = open(self.file_path, file_flag)
        print(f'[<=========>] Creating data container at {self.file_path}')
        self.record_type = record_type
        if self.append_mode:
            # Truncate any previous contents via a second, short-lived
            # handle, then write the CSV header once up front.
            open(self.file_path, 'w').close()
            dw = csv.DictWriter(self.file_handle, self.record_type.__annotations__)
            dw.writeheader()
            self.file_handle.flush()
    def append(self, record: DataRecord):
        # Tag the record with this container's node name, then buffer it.
        # In append mode the row is also written and flushed immediately.
        record.node_name = self.name
        self.records.append(record)
        if self.append_mode:
            dw = csv.DictWriter(self.file_handle, self.record_type.__annotations__)
            dw.writerow(record.__dict__)
            self.file_handle.flush()
    def save(self):
        """
        Function to save the encapsulated data to the experiment file. The format is 'excel' compatible,
        resulting in the capability of loading complex objects such as ndarrays as a field.
        @return: None
        @rtype: None
        """
        # Append mode already wrote every record on the fly; nothing to do.
        if self.append_mode:
            return
        import numpy as np
        # Avoid numpy inserting line breaks inside serialized ndarray fields.
        np.set_printoptions(linewidth=10**6)
        dw = csv.DictWriter(self.file_handle, self.record_type.__annotations__)
        dw.writeheader()
        # print(f'Saving {len(self.records)} for node {self.name}')
        for record in self.records:
            record.node_name = self.name
            dw.writerow(record.__dict__)
        self.file_handle.flush()
| [
"csv.DictWriter",
"numpy.set_printoptions",
"time.time",
"pathlib.Path"
] | [((395, 406), 'time.time', 'time.time', ([], {}), '()\n', (404, 406), False, 'import time\n'), ((763, 774), 'time.time', 'time.time', ([], {}), '()\n', (772, 774), False, 'import time\n'), ((1590, 1611), 'pathlib.Path', 'Path', (['output_location'], {}), '(output_location)\n', (1594, 1611), False, 'from pathlib import Path\n'), ((2890, 2928), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(10 ** 6)'}), '(linewidth=10 ** 6)\n', (2909, 2928), True, 'import numpy as np\n'), ((2940, 3006), 'csv.DictWriter', 'csv.DictWriter', (['self.file_handle', 'self.record_type.__annotations__'], {}), '(self.file_handle, self.record_type.__annotations__)\n', (2954, 3006), False, 'import csv\n'), ((2082, 2148), 'csv.DictWriter', 'csv.DictWriter', (['self.file_handle', 'self.record_type.__annotations__'], {}), '(self.file_handle, self.record_type.__annotations__)\n', (2096, 2148), False, 'import csv\n'), ((2377, 2443), 'csv.DictWriter', 'csv.DictWriter', (['self.file_handle', 'self.record_type.__annotations__'], {}), '(self.file_handle, self.record_type.__annotations__)\n', (2391, 2443), False, 'import csv\n')] |
"""
Example code for reproducing some of the results
from the accompanying paper.
For a particular RGG ensemble we pick a set of parameters.
We then draw some samples from the algebraic connectivity
distriution and:
1) display the mean and Coefficient of Variation.
2) Plot a histogram (see figure 4).
"""
#Standard python modules:
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import math
#Modules from RGG ensemble analysis:
import rgg_ensemble_analysis
import rgg_ensemble_analysis.Analytic_Functions as Analytic
import rgg_ensemble_analysis.RGG as RGG
import rgg_ensemble_analysis.Statistics_Computation as StatComp
import rgg_ensemble_analysis.get_connection_radii_estimate as connection
#Choose parameters for the ensemble:
Samples = 50
N = 10000
d = 5
boundary = 'P'
#Se the expected mean degree parameter:
C = 1.5
Kappa = C*math.log(N)
#Create a dictionary for the input.
Input_Param_Dict = {}
Input_Param_Dict['N'] = N
Input_Param_Dict['d'] = d
Input_Param_Dict['boundary'] = boundary
#Should in fact derive radius from the scaling param:
Input_Param_Dict['degree_scaling_parameter'] = Kappa
#Number of samples for estimating the connection radius:
radii_samps = 10
#Perform several radius estimates and take the mean:
radii_estimates = [ connection.Get_Required_Kappa(Kappa , N, 1000 , d , Boundaries = boundary , positions = None , pattern="uniform") for k in range(radii_samps) ]
r = np.mean(radii_estimates)
Input_Param_Dict['r'] = r
database_name = "example_database.db"
graph_data_table_name = "graph_properties"
#Choose the properties to sample:
pattern = "uniform"
methods_to_call = [ "Algebraic_Connectivity" ,'LCC_Min_Degree' , 'Mean_Degree' ]
### Sample RGGs ###
RGG.Sample_RGGs(N, r, d, boundary,Kappa,Samples, methods_to_call, database_name,graph_data_table_name,pattern,Save_Positions=False,connect_to_server=False)
### DATA READING IN ###
ensemble = RGG.Graph_Ensemble(database_name,graph_data_table_name,Input_Param_Dict)
mu2_array = ensemble.get_prop_array('algebraic_connectivity')
kmin_array = ensemble.get_prop_array('LCC_Min_Degree')
Mean_Mu2 = np.mean(mu2_array)
CV_Mu2 = StatComp.CV(mu2_array)
CV_ER_Mu2 = StatComp.CV_Error(mu2_array)
#Compute the coefficient of variation and its error:
print("Number of samples = {}".format(len(mu2_array)) )
print("Mean Mu2 = {}".format(Mean_Mu2) )
if boundary =='P' :
print("Theoretical Mean Mu2 (periodic) = {}".format( Analytic.Theory_Algebraic(N,Kappa,d) ) )
print("CV = {} +/- {}".format(CV_Mu2 ,CV_ER_Mu2 ) )
#Compute the correlation between algebraic connectivity and min degree:
correlation_mu2_kmin, pval = stats.spearmanr(mu2_array, kmin_array)
print("correlation = {} (P val = {})\n\n".format(correlation_mu2_kmin,pval) )
###Plot the histogram ####
file_path = "hist_example"
d = ensemble.d
#Code below bins mu2 values according to the corresponding
#values of the minimim degree (kmin).
#Zip mu2 and kmin together:
combined_array = zip(mu2_array,kmin_array)
#get the set of different kmin values:
distinct_kmin_vals = set([ int(k) for k in set(kmin_array) ] )
#Make a dictionary where keys are the values:
kmin_mu2_dict = { }
for k in distinct_kmin_vals :
kmin_mu2_dict[k] = [ ]
for i in range(len(mu2_array)) :
kmin_mu2_dict[kmin_array[i]].append(mu2_array[i])
#Make histogram bins:
num_of_bins = 20
BIN_POINTS = np.linspace(0.9*min(mu2_array), 1.1*max(mu2_array), num = num_of_bins)
plt.clf()
plt.figure(1)
ax = plt.gca()
colors = [ 'k' , 'b' , 'r' , 'g' , 'm']
line_styles = [ '-', '--', '-.', ':']
col_index = 0
all_counts = [ ]
for k in kmin_mu2_dict.keys() :
counts , bins = np.histogram(kmin_mu2_dict[k] , bins= BIN_POINTS )
#bins is len(counts)+1 so need to take the mid points:
mid_points = [ (bins[i+1]+bins[i])/2.0 for i in range(len(bins)-1) ]
#normalize by dividing by the total # of counts:
counts = [ i/float(len(mu2_array)) for i in counts ]
#Make the plot:
ax.plot(mid_points, counts, c = colors[col_index] , linestyle= line_styles[col_index] , linewidth=2,label = "$\kappa_{min}$ = " + str(k))
col_index +=1
all_counts = np.concatenate((all_counts,counts))
#Set axis labels etc.. and save plot.
plt.title("$E(\mu_2)$ = {} , $CV(\mu_2)$ = {} +\- {}".format( round(Mean_Mu2,3), round(CV_Mu2,3), round(CV_ER_Mu2,3) ) )
plt.legend(loc=2)
plt.xlabel("$\mu_2$", fontsize=20)
plt.ylabel("$P(\mu_2)$", fontsize=20)
plt.ylim(0.0, 1.1*max(all_counts))
plt.savefig(file_path + ".png" , bbox_inches="tight",format="png")
| [
"scipy.stats.spearmanr",
"numpy.mean",
"numpy.histogram",
"rgg_ensemble_analysis.Statistics_Computation.CV_Error",
"matplotlib.pyplot.savefig",
"rgg_ensemble_analysis.Analytic_Functions.Theory_Algebraic",
"rgg_ensemble_analysis.RGG.Sample_RGGs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
... | [((1442, 1466), 'numpy.mean', 'np.mean', (['radii_estimates'], {}), '(radii_estimates)\n', (1449, 1466), True, 'import numpy as np\n'), ((1733, 1902), 'rgg_ensemble_analysis.RGG.Sample_RGGs', 'RGG.Sample_RGGs', (['N', 'r', 'd', 'boundary', 'Kappa', 'Samples', 'methods_to_call', 'database_name', 'graph_data_table_name', 'pattern'], {'Save_Positions': '(False)', 'connect_to_server': '(False)'}), '(N, r, d, boundary, Kappa, Samples, methods_to_call,\n database_name, graph_data_table_name, pattern, Save_Positions=False,\n connect_to_server=False)\n', (1748, 1902), True, 'import rgg_ensemble_analysis.RGG as RGG\n'), ((1925, 1999), 'rgg_ensemble_analysis.RGG.Graph_Ensemble', 'RGG.Graph_Ensemble', (['database_name', 'graph_data_table_name', 'Input_Param_Dict'], {}), '(database_name, graph_data_table_name, Input_Param_Dict)\n', (1943, 1999), True, 'import rgg_ensemble_analysis.RGG as RGG\n'), ((2129, 2147), 'numpy.mean', 'np.mean', (['mu2_array'], {}), '(mu2_array)\n', (2136, 2147), True, 'import numpy as np\n'), ((2157, 2179), 'rgg_ensemble_analysis.Statistics_Computation.CV', 'StatComp.CV', (['mu2_array'], {}), '(mu2_array)\n', (2168, 2179), True, 'import rgg_ensemble_analysis.Statistics_Computation as StatComp\n'), ((2192, 2220), 'rgg_ensemble_analysis.Statistics_Computation.CV_Error', 'StatComp.CV_Error', (['mu2_array'], {}), '(mu2_array)\n', (2209, 2220), True, 'import rgg_ensemble_analysis.Statistics_Computation as StatComp\n'), ((2651, 2689), 'scipy.stats.spearmanr', 'stats.spearmanr', (['mu2_array', 'kmin_array'], {}), '(mu2_array, kmin_array)\n', (2666, 2689), False, 'from scipy import stats\n'), ((3446, 3455), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3453, 3455), True, 'import matplotlib.pyplot as plt\n'), ((3456, 3469), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3466, 3469), True, 'import matplotlib.pyplot as plt\n'), ((3475, 3484), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3482, 3484), True, 'import 
matplotlib.pyplot as plt\n'), ((4314, 4331), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (4324, 4331), True, 'import matplotlib.pyplot as plt\n'), ((4332, 4367), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu_2$"""'], {'fontsize': '(20)'}), "('$\\\\mu_2$', fontsize=20)\n", (4342, 4367), True, 'import matplotlib.pyplot as plt\n'), ((4367, 4405), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P(\\\\mu_2)$"""'], {'fontsize': '(20)'}), "('$P(\\\\mu_2)$', fontsize=20)\n", (4377, 4405), True, 'import matplotlib.pyplot as plt\n'), ((4440, 4506), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file_path + '.png')"], {'bbox_inches': '"""tight"""', 'format': '"""png"""'}), "(file_path + '.png', bbox_inches='tight', format='png')\n", (4451, 4506), True, 'import matplotlib.pyplot as plt\n'), ((873, 884), 'math.log', 'math.log', (['N'], {}), '(N)\n', (881, 884), False, 'import math\n'), ((1294, 1402), 'rgg_ensemble_analysis.get_connection_radii_estimate.Get_Required_Kappa', 'connection.Get_Required_Kappa', (['Kappa', 'N', '(1000)', 'd'], {'Boundaries': 'boundary', 'positions': 'None', 'pattern': '"""uniform"""'}), "(Kappa, N, 1000, d, Boundaries=boundary,\n positions=None, pattern='uniform')\n", (1323, 1402), True, 'import rgg_ensemble_analysis.get_connection_radii_estimate as connection\n'), ((3645, 3692), 'numpy.histogram', 'np.histogram', (['kmin_mu2_dict[k]'], {'bins': 'BIN_POINTS'}), '(kmin_mu2_dict[k], bins=BIN_POINTS)\n', (3657, 3692), True, 'import numpy as np\n'), ((4115, 4151), 'numpy.concatenate', 'np.concatenate', (['(all_counts, counts)'], {}), '((all_counts, counts))\n', (4129, 4151), True, 'import numpy as np\n'), ((2451, 2489), 'rgg_ensemble_analysis.Analytic_Functions.Theory_Algebraic', 'Analytic.Theory_Algebraic', (['N', 'Kappa', 'd'], {}), '(N, Kappa, d)\n', (2476, 2489), True, 'import rgg_ensemble_analysis.Analytic_Functions as Analytic\n')] |
"""
@author: eagle705
https://github.com/eagle705/pytorch-bert-crf-ner/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import torch
from pathlib import Path
from tensorflow import keras
import numpy as np
from konlpy.tag import Twitter
from collections import Counter
from threading import Thread
import six
from torch import nn
class Config:
    """Attribute-style wrapper around a JSON config file.

    Keys of the JSON object become attributes of the instance.
    """

    def __init__(self, json_path):
        """Load all key/value pairs from *json_path* onto the instance."""
        with open(json_path, mode='r') as io:
            params = json.load(io)  # idiomatic: parse directly from the handle
        self.__dict__.update(params)

    def save(self, json_path):
        """Write the current parameters to *json_path* as indented JSON."""
        with open(json_path, mode='w') as io:
            json.dump(self.__dict__, io, indent=4)

    def update(self, json_path):
        """Merge key/value pairs from *json_path* into the existing config."""
        with open(json_path, mode='r') as io:
            params = json.load(io)
        self.__dict__.update(params)

    @property
    def dict(self):
        """Plain-dict view of the configuration."""
        return self.__dict__
class CheckpointManager:
    """Persist and restore checkpoint state under a fixed directory."""

    def __init__(self, model_dir):
        # Accept either a str or a Path and normalise to Path.
        self._model_dir = model_dir if isinstance(model_dir, Path) else Path(model_dir)

    def save_checkpoint(self, state, filename):
        """Serialise *state* to ``<model_dir>/<filename>``."""
        target = self._model_dir / filename
        torch.save(state, target)

    def load_checkpoint(self, filename):
        """Load ``<model_dir>/<filename>`` onto the CPU and return it."""
        source = self._model_dir / filename
        return torch.load(source, map_location=torch.device('cpu'))
class SummaryManager:
    """Accumulate metric dicts in memory and persist them as JSON."""

    def __init__(self, model_dir):
        self._model_dir = model_dir if isinstance(model_dir, Path) else Path(model_dir)
        self._summary = {}

    def save(self, filename):
        """Dump the current summary to ``<model_dir>/<filename>``."""
        with open(self._model_dir / filename, mode='w') as fp:
            json.dump(self._summary, fp, indent=4)

    def load(self, filename):
        """Read ``<model_dir>/<filename>`` and merge it into the summary."""
        with open(self._model_dir / filename, mode='r') as fp:
            metric = json.load(fp)
        self.update(metric)

    def update(self, summary):
        """Merge *summary* (a dict) into the stored summary."""
        self._summary.update(summary)

    def reset(self):
        """Discard everything accumulated so far."""
        self._summary = {}

    @property
    def summary(self):
        return self._summary
class Vocabulary(object):
    """Token <-> index vocabulary.

    A fixed set of special tokens ([PAD], <S>, <T>, [UNK], [CLS], [MASK],
    [SEP], [SEG_A], [SEG_B], <num>) is always registered first, so [PAD]
    receives index 0 unless an explicit ``token_to_idx`` mapping overrides it.
    """

    def __init__(self, token_to_idx=None):
        """Create a vocabulary, optionally seeded from an existing mapping.

        Args:
            token_to_idx: optional dict mapping token strings to integer
                ids; when given, it replaces the freshly initialised vocab.
        """
        self.token_to_idx = {}
        self.idx_to_token = {}
        self.idx = 0
        self.PAD = self.padding_token = "[PAD]"
        self.START_TOKEN = "<S>"
        self.END_TOKEN = "<T>"
        self.UNK = "[UNK]"
        self.CLS = "[CLS]"
        self.MASK = "[MASK]"
        self.SEP = "[SEP]"
        self.SEG_A = "[SEG_A]"
        self.SEG_B = "[SEG_B]"
        self.NUM = "<num>"
        self.cls_token = self.CLS
        self.sep_token = self.SEP
        self.special_tokens = [self.PAD,
                               self.START_TOKEN,
                               self.END_TOKEN,
                               self.UNK,
                               self.CLS,
                               self.MASK,
                               self.SEP,
                               self.SEG_A,
                               self.SEG_B,
                               self.NUM]
        self.init_vocab()
        if token_to_idx is not None:
            self.token_to_idx = token_to_idx
            self.idx_to_token = {v: k for k, v in token_to_idx.items()}
            # NOTE(review): `len - 1` means the next add_token() reuses the
            # last existing index -- preserved as-is; confirm intent upstream.
            self.idx = len(token_to_idx) - 1
            # if pad token in token_to_idx dict, get pad_id
            if self.PAD in self.token_to_idx:
                self.PAD_ID = self.transform_token2idx(self.PAD)
            else:
                self.PAD_ID = 0

    def init_vocab(self):
        """Register the special tokens and cache the PAD id."""
        for special_token in self.special_tokens:
            self.add_token(special_token)
        self.PAD_ID = self.transform_token2idx(self.PAD)

    def __len__(self):
        return len(self.token_to_idx)

    def to_indices(self, tokens):
        """Convert a list of tokens to indices (OOV maps to the [UNK] id)."""
        return [self.transform_token2idx(X_token) for X_token in tokens]

    def add_token(self, token):
        """Register *token* with the next free index (no-op if present)."""
        if token not in self.token_to_idx:
            self.token_to_idx[token] = self.idx
            self.idx_to_token[self.idx] = token
            self.idx += 1

    def transform_token2idx(self, token, show_oov=False):
        """Return the index of *token*, falling back to the [UNK] index."""
        try:
            return self.token_to_idx[token]
        except KeyError:  # only a missing key means OOV; don't hide other errors
            if show_oov is True:
                print("key error: " + str(token))
            token = self.UNK
            return self.token_to_idx[token]

    def transform_idx2token(self, idx):
        """Return the token at *idx*, falling back to [UNK] for unknown ids."""
        try:
            return self.idx_to_token[idx]
        except KeyError:
            print("key error: " + str(idx))
            idx = self.token_to_idx[self.UNK]
            return self.idx_to_token[idx]

    def build_vocab(self, list_of_str, threshold=1, vocab_save_path="./data_in/token_vocab.json",
                    split_fn=None):
        """Build a token vocab from a corpus of raw strings.

        Args:
            list_of_str: corpus as a list of strings.
            threshold: minimum token frequency for inclusion.
            vocab_save_path: where the token->index JSON is written.
            split_fn: tokenizer callable; defaults to ``Twitter().morphs``.
                The default is resolved lazily so that merely importing this
                module does not start the konlpy tagger (the old eager
                default argument did exactly that at class-definition time).
        """
        if split_fn is None:
            split_fn = Twitter().morphs

        def do_concurrent_tagging(start, end, text_list, counter):
            # Tokenise text_list[start:end] and fold the counts into `counter`.
            for i, text in enumerate(text_list[start:end]):
                text = text.strip()
                text = text.lower()
                try:
                    tokens_ko = split_fn(text)
                    counter.update(tokens_ko)
                    if i % 1000 == 0:
                        print("[%d/%d (total: %d)] Tokenized input text." % (
                            start + i, start + len(text_list[start:end]), len(text_list)))
                except Exception as e:  # OOM, Parsing Error
                    print(e)
                    continue

        counter = Counter()
        num_thread = 4
        thread_list = []
        num_list_of_str = len(list_of_str)
        # Split the corpus into num_thread contiguous shards, one per thread.
        for i in range(num_thread):
            thread_list.append(Thread(target=do_concurrent_tagging, args=(
                int(i * num_list_of_str / num_thread), int((i + 1) * num_list_of_str / num_thread), list_of_str,
                counter)))
        for thread in thread_list:
            thread.start()
        for thread in thread_list:
            thread.join()
        # vocab_report
        print(counter.most_common(10))  # print most common tokens
        tokens = [token for token, cnt in counter.items() if cnt >= threshold]
        for i, token in enumerate(tokens):
            self.add_token(str(token))
        print("len(self.token_to_idx): ", len(self.token_to_idx))
        with open(vocab_save_path, 'w', encoding='utf-8') as f:
            json.dump(self.token_to_idx, f, ensure_ascii=False, indent=4)
        return self.token_to_idx
def keras_pad_fn(token_ids_batch, maxlen, pad_id=0, padding='post', truncating='post'):
    """Pad/truncate a batch of token-id sequences to a fixed length.

    Thin wrapper over :func:`pad_sequences` with post-padding defaults.
    """
    return pad_sequences(
        token_ids_batch,
        value=pad_id,  # vocab.transform_token2idx(PAD)
        padding=padding,
        truncating=truncating,
        maxlen=maxlen,
    )
def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
    """Pads sequences to the same length.
    This function transforms a list of
    `num_samples` sequences (lists of integers)
    into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
    `num_timesteps` is either the `maxlen` argument if provided,
    or the length of the longest sequence otherwise.
    Sequences that are shorter than `num_timesteps`
    are padded with `value` at the end.
    Sequences longer than `num_timesteps` are truncated
    so that they fit the desired length.
    The position where padding or truncation happens is determined by
    the arguments `padding` and `truncating`, respectively.
    Pre-padding is the default.
    # Arguments
        sequences: List of lists, where each element is a sequence.
        maxlen: Int, maximum length of all sequences.
        dtype: Type of the output sequences.
        To pad sequences with variable length strings, you can use `object`.
        padding: String, 'pre' or 'post':
            pad either before or after each sequence.
        truncating: String, 'pre' or 'post':
            remove values from sequences larger than
            `maxlen`, either at the beginning or at the end of the sequences.
        value: Float or String, padding value.
    # Returns
        x: Numpy array with shape `(len(sequences), maxlen)`
    # Raises
        ValueError: In case of invalid values for `truncating` or `padding`,
            or in case of invalid shape for a `sequences` entry.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    num_samples = len(sequences)
    lengths = []
    for x in sequences:
        try:
            lengths.append(len(x))
        except TypeError:
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(x))
    if maxlen is None:
        maxlen = np.max(lengths)
    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break
    # np.str_ covers all Python 3 strings; the former extra check against
    # np.unicode_ raises AttributeError on NumPy >= 2.0 (alias removed).
    is_dtype_str = np.issubdtype(dtype, np.str_)
    # `six.string_types` is just `(str,)` on Python 3.
    if isinstance(value, str) and dtype != object and not is_dtype_str:
        raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
                         "You should set `dtype=object` for variable length strings."
                         .format(dtype, type(value)))
    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" '
                             'not understood' % truncating)
        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s '
                             'is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))
        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x
class Tokenizer:
    """Tokenizer: splits raw strings, maps tokens to ids, pads batches.

    Collaborators are injected: a Vocabulary-like object (`vocab`), a split
    function (string -> token list, `split_fn`) and an optional padding
    function (`pad_fn`; when falsy, no padding is applied).
    """

    def __init__(self, vocab, split_fn, pad_fn, maxlen):
        self._vocab = vocab
        self._split = split_fn
        self._pad = pad_fn
        self._maxlen = maxlen

    def split(self, string):
        """Split a raw string into a list of tokens."""
        tokens = self._split(string)
        return tokens

    def transform(self, tokens):
        """Map tokens to indices, padding when a pad function is configured."""
        indices = self._vocab.to_indices(tokens)
        pad_indices = self._pad(indices, pad_id=0, maxlen=self._maxlen) if self._pad else indices
        return pad_indices

    def split_and_transform(self, string):
        """Convenience: ``transform(split(string))``."""
        return self.transform(self.split(string))

    @property
    def vocab(self):
        return self._vocab

    def list_of_string_to_list_of_tokens(self, X_str_batch):
        """Tokenise a batch of raw strings."""
        X_token_batch = [self._split(X_str) for X_str in X_str_batch]
        return X_token_batch

    # NOTE: this method used to be defined twice with identical bodies;
    # the redundant duplicate definition has been removed.
    def list_of_tokens_to_list_of_token_ids(self, X_token_batch):
        """Map a batch of token lists to id lists (no padding)."""
        X_ids_batch = []
        for X_tokens in X_token_batch:
            X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])
        return X_ids_batch

    def list_of_string_to_list_token_ids(self, X_str_batch):
        """Raw strings -> token id lists (no padding)."""
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        X_ids_batch = self.list_of_tokens_to_list_of_token_ids(X_token_batch)
        return X_ids_batch

    def list_of_string_to_arr_of_pad_token_ids(self, X_str_batch, add_start_end_token=False):
        """Raw strings -> padded id array; optionally add <S>/<T> markers."""
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        if add_start_end_token is True:
            return self.add_start_end_token_with_pad(X_token_batch)
        else:
            X_ids_batch = self.list_of_tokens_to_list_of_token_ids(X_token_batch)
            pad_X_ids_batch = self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
            return pad_X_ids_batch

    def list_of_tokens_to_list_of_cls_sep_token_ids(self, X_token_batch):
        """Token lists -> id lists wrapped with [CLS] ... [SEP]."""
        X_ids_batch = []
        for X_tokens in X_token_batch:
            X_tokens = [self._vocab.cls_token] + X_tokens + [self._vocab.sep_token]
            X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])
        return X_ids_batch

    def list_of_string_to_arr_of_cls_sep_pad_token_ids(self, X_str_batch):
        """Raw strings -> padded [CLS]...[SEP] id array."""
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        X_ids_batch = self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)
        pad_X_ids_batch = self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
        return pad_X_ids_batch

    def list_of_string_to_list_of_cls_sep_token_ids(self, X_str_batch):
        """Raw strings -> [CLS]...[SEP] id lists (no padding)."""
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        X_ids_batch = self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)
        return X_ids_batch

    def add_start_end_token_with_pad(self, X_token_batch):
        """Build decoder input (<S> + tokens) and output (tokens + <T>), both padded."""
        dec_input_token_batch = [[self._vocab.START_TOKEN] + X_token for X_token in X_token_batch]
        dec_output_token_batch = [X_token + [self._vocab.END_TOKEN] for X_token in X_token_batch]
        dec_input_token_batch = self.list_of_tokens_to_list_of_token_ids(dec_input_token_batch)
        pad_dec_input_ids_batch = self._pad(dec_input_token_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
        dec_output_ids_batch = self.list_of_tokens_to_list_of_token_ids(dec_output_token_batch)
        pad_dec_output_ids_batch = self._pad(dec_output_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
        return pad_dec_input_ids_batch, pad_dec_output_ids_batch

    def decode_token_ids(self, token_ids_batch):
        """Map batches of id lists back to token lists."""
        list_of_token_batch = []
        for token_ids in token_ids_batch:
            token_token = [self._vocab.transform_idx2token(token_id) for token_id in token_ids]
            list_of_token_batch.append(token_token)
        return list_of_token_batch
class BERTClassifier(nn.Module):
    """Sentence classifier: BERT pooled output -> optional dropout -> linear head."""

    def __init__(self,
                 bert,
                 hidden_size = 768,
                 num_classes = 7,
                 dr_rate = None,
                 params = None):
        super().__init__()
        self.bert = bert
        self.dr_rate = dr_rate
        self.classifier = nn.Linear(hidden_size, num_classes)
        # Dropout layer only exists when a rate was requested.
        if dr_rate:
            self.dropout = nn.Dropout(p=dr_rate)

    def gen_attention_mask(self, token_ids, valid_length):
        """Build a 0/1 float mask marking the first valid_length[i] positions of row i."""
        mask = torch.zeros_like(token_ids)
        for row, length in enumerate(valid_length):
            mask[row][:length] = 1
        return mask.float()

    def forward(self, token_ids, valid_length, segment_ids):
        attention_mask = self.gen_attention_mask(token_ids, valid_length)
        _, pooler = self.bert(
            input_ids=token_ids,
            token_type_ids=segment_ids.long(),
            attention_mask=attention_mask.float().to(token_ids.device),
        )
        out = self.dropout(pooler) if self.dr_rate else pooler
        return self.classifier(out)
"torch.nn.Dropout",
"pathlib.Path",
"numpy.asarray",
"numpy.max",
"collections.Counter",
"numpy.issubdtype",
"konlpy.tag.Twitter",
"torch.save",
"torch.nn.Linear",
"numpy.full",
"torch.zeros_like",
"json.dump",
"torch.device"
] | [((9663, 9728), 'numpy.full', 'np.full', (['((num_samples, maxlen) + sample_shape)', 'value'], {'dtype': 'dtype'}), '((num_samples, maxlen) + sample_shape, value, dtype=dtype)\n', (9670, 9728), True, 'import numpy as np\n'), ((1150, 1195), 'torch.save', 'torch.save', (['state', '(self._model_dir / filename)'], {}), '(state, self._model_dir / filename)\n', (1160, 1195), False, 'import torch\n'), ((5495, 5504), 'collections.Counter', 'Counter', ([], {}), '()\n', (5502, 5504), False, 'from collections import Counter\n'), ((8983, 8998), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (8989, 8998), True, 'import numpy as np\n'), ((9280, 9309), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.str_'], {}), '(dtype, np.str_)\n', (9293, 9309), True, 'import numpy as np\n'), ((9313, 9346), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.unicode_'], {}), '(dtype, np.unicode_)\n', (9326, 9346), True, 'import numpy as np\n'), ((10160, 10190), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (10170, 10190), True, 'import numpy as np\n'), ((15479, 15514), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (15488, 15514), False, 'from torch import nn\n'), ((15669, 15696), 'torch.zeros_like', 'torch.zeros_like', (['token_ids'], {}), '(token_ids)\n', (15685, 15696), False, 'import torch\n'), ((649, 687), 'json.dump', 'json.dump', (['self.__dict__', 'io'], {'indent': '(4)'}), '(self.__dict__, io, indent=4)\n', (658, 687), False, 'import json\n'), ((1041, 1056), 'pathlib.Path', 'Path', (['model_dir'], {}), '(model_dir)\n', (1045, 1056), False, 'from pathlib import Path\n'), ((1474, 1489), 'pathlib.Path', 'Path', (['model_dir'], {}), '(model_dir)\n', (1478, 1489), False, 'from pathlib import Path\n'), ((1659, 1697), 'json.dump', 'json.dump', (['self._summary', 'io'], {'indent': '(4)'}), '(self._summary, io, indent=4)\n', (1668, 1697), False, 'import json\n'), ((4691, 4700), 
'konlpy.tag.Twitter', 'Twitter', ([], {}), '()\n', (4698, 4700), False, 'from konlpy.tag import Twitter\n'), ((6390, 6451), 'json.dump', 'json.dump', (['self.token_to_idx', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(self.token_to_idx, f, ensure_ascii=False, indent=4)\n', (6399, 6451), False, 'import json\n'), ((15562, 15583), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dr_rate'}), '(p=dr_rate)\n', (15572, 15583), False, 'from torch import nn\n'), ((1306, 1325), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1318, 1325), False, 'import torch\n'), ((9218, 9231), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (9228, 9231), True, 'import numpy as np\n')] |
"""
Calculation Parser | Cannlytics
Author: <NAME>
Contact: <<EMAIL>>
Created: 6/8/2021
Updated: 6/8/2021
License: MIT License <https://opensource.org/licenses/MIT>
"""
import pandas as pd
import numpy as np
import re
import random
# Prepare a testing dataset: 200 rows of random tag names ('tag0'..'tag9')
# paired with random integer values in [0, 20).
_tag_ids = np.random.randint(10, size=200)
tags = np.array([f'tag{i}' for i in _tag_ids])
vals = np.random.randint(20, size=200)
raw_df = pd.DataFrame({'tag': tags, 'value': vals})
# Functions
def parentheses_enclosed(s):
    """Return True iff the parentheses in *s* are balanced and no
    outermost group closes before the final parenthesis character
    (i.e. the whole parenthesised part forms one enclosing group)."""
    parens = re.findall(r'[\(\)]', s)
    if parens.count('(') != parens.count(')'):
        return False
    depth = 0
    levels = []
    for ch in parens:
        depth += 1 if ch == '(' else -1
        levels.append(depth)
    # Depth returning to 0 anywhere but the very end means the
    # expression is a sequence of groups, not a single enclosure.
    return 0 not in levels[:-1]
def remove_matched_parentheses(s):
    """Repeatedly delete the innermost '(...)' group -- brackets AND the
    text between them -- until no ')' remains; returns what is left
    outside all parentheses."""
    while ')' in s:
        # Position of the first closing parenthesis...
        close = s.find(')')
        # ...and of the last opening parenthesis before it.
        open_ = max(i for i, ch in enumerate(s[:close]) if ch == '(')
        s = s[:open_] + s[close + 1:]
    return s
def interpret(f, df):
    """Recursively evaluate formula string `f` against DataFrame `df`.

    Leaves are 'tagN' (all recorded values for that tag) or numeric
    literals; sum/avg/min/max aggregate a sub-expression; +, -, *, /
    combine sub-expressions, honouring parentheses.
    """
    if re.match(r'\Atag[\d]+\Z', f): # e.g. 'tag1'
        # Leaf: all values recorded for this tag.
        return df[df.tag == f]['value'].values
    elif parentheses_enclosed(f) and \
        re.match(r'\Asum\(.+[\+\-].+\)\Z|\Aavg\(.+[\+\-].+\)\Z|\Amin\(.+[\+\-].+\)\Z|\Amax\(.+[\+\-].+\)\Z', f):
        # Aggregation over a +/- expression, e.g. 'sum(tag1+tag2)':
        # rewrite it as 'sum(tag1)+sum(tag2)' and recurse.
        f_name = f[:3] # get agg func name
        f_stripped = f[4:-1] # strip outer func
        while re.match(r'\A\(.+\)\Z', f_stripped) and parentheses_enclosed(f_stripped):
            f_stripped = f_stripped[1:-1]
        comps = re.compile(r'[\+\-]').split(f_stripped) # split by + or -
        operators = re.findall(r'[\+\-]', f_stripped)
        comps_final = []
        temp_str = ''
        # Re-join split pieces until each accumulated piece is a complete
        # term (a plain tag or a fully parenthesised sub-expression).
        for c in comps:
            temp_str += c
            if re.match(r'\Atag[\d]+\Z', temp_str) or parentheses_enclosed(temp_str):
                comps_final.append(f'{f_name}({temp_str})')
                if len(operators) > 0:
                    comps_final.append(operators.pop(0))
                temp_str = ''
            else:
                # Split happened inside parentheses: restore the consumed
                # operator and keep accumulating.
                temp_str += operators.pop(0)
        return interpret(''.join(comps_final), df)
    elif re.match(r'\Asum\([^\(\)]+\)\Z', f): # e.g. 'sum(tag1)'
        return np.sum(interpret(f[4:-1], df))
    elif re.match(r'\Aavg\([^\(\)]+\)\Z', f): # e.g. 'avg(tag1)'
        return np.average(interpret(f[4:-1], df))
    elif re.match(r'\Amin\([^\(\)]+\)\Z', f): # e.g. 'min(tag1)'
        return np.min(interpret(f[4:-1], df))
    elif re.match(r'\Amax\([^\(\)]+\)\Z', f): # e.g. 'max(tag1)'
        return np.max(interpret(f[4:-1], df))
    elif re.match(r'\A\(.+\)\Z', f) and parentheses_enclosed(f): # e.g. '(tag1-tag2)'
        # Redundant outer parentheses: strip and recurse.
        return interpret(f[1:-1], df)
    elif f.replace('.', '', 1).isdigit():
        # Numeric literal.
        return float(f)
    else:
        # Binary expression: pick the top-level operator to split on.
        # With parenthesised groups removed, any remaining +/- must be
        # top level (lowest precedence); otherwise split on * or /.
        rest_f = remove_matched_parentheses(f)
        if '+' in rest_f or '-' in rest_f:
            comps = re.compile(r'[\+\-]').split(f)
        else:
            comps = re.compile(r'[\*\/]').split(f)
        if comps[0].count('(') != comps[0].count(')'):
            # The naive split cut inside parentheses: walk forward until
            # the nesting balances to locate the true top-level operator.
            nested_level = comps[0].count('(') - comps[0].count(')')
            pos = len(comps[0])
            for comp in comps[1:]:
                if '(' in comp:
                    nested_level += comp.count('(')
                if ')' in comp:
                    nested_level -= comp.count(')')
                pos += len(comp) + 1 # +1 because of the operator inside parenthesis
                if nested_level == 0:
                    break
        else:
            pos = len(comps[0])
        left = f[:pos] # left component
        right = f[pos+1:] # right component
        operator = f[pos] # the operator
        if operator == '+':
            return interpret(left, df) + interpret(right, df)
        elif operator == '-':
            return interpret(left, df) - interpret(right, df)
        elif operator == '*':
            return interpret(left, df) * interpret(right, df)
        elif operator == '/':
            denominator = interpret(right, df)
            # NOTE(review): `denominator is np.nan` only catches the exact
            # np.nan singleton, and `== 0` would be ambiguous for an array
            # denominator -- presumably denominators are scalars; verify.
            if denominator == 0 or denominator is np.nan:
                return np.nan
            else:
                return interpret(left, df) / interpret(right, df)
    return np.nan
# Verification
def _agg(func, tag):
    # Apply numpy aggregation *func* to the raw values recorded for *tag*.
    return func(raw_df[raw_df.tag == tag]['value'].values)

assert _agg(np.sum, 'tag1') == interpret('sum(tag1)', raw_df), "Wrong!"
assert _agg(np.average, 'tag1') == interpret('avg(tag1)', raw_df), "Wrong!"
assert _agg(np.min, 'tag1') == interpret('min(tag1)', raw_df), "Wrong!"
assert _agg(np.max, 'tag1') == interpret('max(tag1)', raw_df), "Wrong!"
assert _agg(np.sum, 'tag1') + _agg(np.sum, 'tag2') == \
       interpret('sum(tag1)+sum(tag2)', raw_df), "Wrong!"
assert _agg(np.sum, 'tag1') + _agg(np.sum, 'tag2') + _agg(np.average, 'tag3') == \
       interpret('sum(tag1)+sum(tag2)+avg(tag3)', raw_df), "Wrong!"
assert _agg(np.sum, 'tag1') + _agg(np.sum, 'tag2') * _agg(np.average, 'tag3') == \
       interpret('sum(tag1)+sum(tag2)*avg(tag3)', raw_df), "Wrong!"
assert (
    _agg(np.sum, 'tag1') + (_agg(np.sum, 'tag2') - _agg(np.sum, 'tag3')) + 10
) * (
    _agg(np.max, 'tag4') + _agg(np.average, 'tag5')
) * 0.2 == interpret('(sum(tag1+(tag2-tag3))+10)*(max(tag4)+avg(tag5))*0.2', raw_df), "Wrong!"
print('All pass!')
"numpy.average",
"re.compile",
"re.match",
"numpy.max",
"numpy.sum",
"numpy.random.randint",
"numpy.min",
"pandas.DataFrame",
"re.findall"
] | [((371, 402), 'numpy.random.randint', 'np.random.randint', (['(20)'], {'size': '(200)'}), '(20, size=200)\n', (388, 402), True, 'import numpy as np\n'), ((451, 493), 'pandas.DataFrame', 'pd.DataFrame', (["{'tag': tags, 'value': vals}"], {}), "({'tag': tags, 'value': vals})\n", (463, 493), True, 'import pandas as pd\n'), ((564, 589), 're.findall', 're.findall', (['"""[\\\\(\\\\)]"""', 's'], {}), "('[\\\\(\\\\)]', s)\n", (574, 589), False, 'import re\n'), ((1343, 1373), 're.match', 're.match', (['"""\\\\Atag[\\\\d]+\\\\Z"""', 'f'], {}), "('\\\\Atag[\\\\d]+\\\\Z', f)\n", (1351, 1373), False, 'import re\n'), ((4641, 4693), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (4647, 4693), True, 'import numpy as np\n'), ((4745, 4801), 'numpy.average', 'np.average', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (4755, 4801), True, 'import numpy as np\n'), ((4853, 4905), 'numpy.min', 'np.min', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (4859, 4905), True, 'import numpy as np\n'), ((4957, 5009), 'numpy.max', 'np.max', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (4963, 5009), True, 'import numpy as np\n'), ((5062, 5114), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (5068, 5114), True, 'import numpy as np\n'), ((5123, 5175), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag2']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag2']['value'].values)\n", (5129, 5175), True, 'import numpy as np\n'), ((5366, 5422), 'numpy.average', 'np.average', (["raw_df[raw_df.tag == 'tag3']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag3']['value'].values)\n", (5376, 5422), True, 'import numpy as np\n'), ((5501, 5553), 'numpy.sum', 
'np.sum', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (5507, 5553), True, 'import numpy as np\n'), ((300, 331), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(200)'}), '(10, size=200)\n', (317, 331), True, 'import numpy as np\n'), ((1487, 1623), 're.match', 're.match', (['"""\\\\Asum\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z|\\\\Aavg\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z|\\\\Amin\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z|\\\\Amax\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z"""', 'f'], {}), "(\n '\\\\Asum\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z|\\\\Aavg\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z|\\\\Amin\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z|\\\\Amax\\\\(.+[\\\\+\\\\-].+\\\\)\\\\Z'\n , f)\n", (1495, 1623), False, 'import re\n'), ((1950, 1984), 're.findall', 're.findall', (['"""[\\\\+\\\\-]"""', 'f_stripped'], {}), "('[\\\\+\\\\-]', f_stripped)\n", (1960, 1984), False, 'import re\n'), ((2481, 2521), 're.match', 're.match', (['"""\\\\Asum\\\\([^\\\\(\\\\)]+\\\\)\\\\Z"""', 'f'], {}), "('\\\\Asum\\\\([^\\\\(\\\\)]+\\\\)\\\\Z', f)\n", (2489, 2521), False, 'import re\n'), ((5244, 5296), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (5250, 5296), True, 'import numpy as np\n'), ((5305, 5357), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag2']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag2']['value'].values)\n", (5311, 5357), True, 'import numpy as np\n'), ((5562, 5614), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag2']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag2']['value'].values)\n", (5568, 5614), True, 'import numpy as np\n'), ((5623, 5679), 'numpy.average', 'np.average', (["raw_df[raw_df.tag == 'tag3']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag3']['value'].values)\n", (5633, 5679), True, 'import numpy as np\n'), ((1717, 1755), 're.match', 're.match', (['"""\\\\A\\\\(.+\\\\)\\\\Z"""', 'f_stripped'], {}), "('\\\\A\\\\(.+\\\\)\\\\Z', 
f_stripped)\n", (1725, 1755), False, 'import re\n'), ((2598, 2638), 're.match', 're.match', (['"""\\\\Aavg\\\\([^\\\\(\\\\)]+\\\\)\\\\Z"""', 'f'], {}), "('\\\\Aavg\\\\([^\\\\(\\\\)]+\\\\)\\\\Z', f)\n", (2606, 2638), False, 'import re\n'), ((5956, 6008), 'numpy.max', 'np.max', (["raw_df[raw_df.tag == 'tag4']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag4']['value'].values)\n", (5962, 6008), True, 'import numpy as np\n'), ((6017, 6073), 'numpy.average', 'np.average', (["raw_df[raw_df.tag == 'tag5']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag5']['value'].values)\n", (6027, 6073), True, 'import numpy as np\n'), ((1862, 1884), 're.compile', 're.compile', (['"""[\\\\+\\\\-]"""'], {}), "('[\\\\+\\\\-]')\n", (1872, 1884), False, 'import re\n'), ((2096, 2133), 're.match', 're.match', (['"""\\\\Atag[\\\\d]+\\\\Z"""', 'temp_str'], {}), "('\\\\Atag[\\\\d]+\\\\Z', temp_str)\n", (2104, 2133), False, 'import re\n'), ((2719, 2759), 're.match', 're.match', (['"""\\\\Amin\\\\([^\\\\(\\\\)]+\\\\)\\\\Z"""', 'f'], {}), "('\\\\Amin\\\\([^\\\\(\\\\)]+\\\\)\\\\Z', f)\n", (2727, 2759), False, 'import re\n'), ((5764, 5816), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag1']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag1']['value'].values)\n", (5770, 5816), True, 'import numpy as np\n'), ((2836, 2876), 're.match', 're.match', (['"""\\\\Amax\\\\([^\\\\(\\\\)]+\\\\)\\\\Z"""', 'f'], {}), "('\\\\Amax\\\\([^\\\\(\\\\)]+\\\\)\\\\Z', f)\n", (2844, 2876), False, 'import re\n'), ((5826, 5878), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag2']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag2']['value'].values)\n", (5832, 5878), True, 'import numpy as np\n'), ((5887, 5939), 'numpy.sum', 'np.sum', (["raw_df[raw_df.tag == 'tag3']['value'].values"], {}), "(raw_df[raw_df.tag == 'tag3']['value'].values)\n", (5893, 5939), True, 'import numpy as np\n'), ((2953, 2982), 're.match', 're.match', (['"""\\\\A\\\\(.+\\\\)\\\\Z"""', 'f'], {}), "('\\\\A\\\\(.+\\\\)\\\\Z', f)\n", (2961, 
2982), False, 'import re\n'), ((3265, 3287), 're.compile', 're.compile', (['"""[\\\\+\\\\-]"""'], {}), "('[\\\\+\\\\-]')\n", (3275, 3287), False, 'import re\n'), ((3330, 3352), 're.compile', 're.compile', (['"""[\\\\*\\\\/]"""'], {}), "('[\\\\*\\\\/]')\n", (3340, 3352), False, 'import re\n')] |
import sys

import numpy as np
import pandas as pd
import sklearn
import torch
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset
from transformers import DistilBertModel, DistilBertConfig
from transformers import Trainer
from transformers import TrainingArguments
# Load the preprocessed Hilbert-transformed EEG archive.
npz = np.load('hilbert.npz')
np_eeg = npz['EEG']
np_labels = npz['labels']
# Keep the first field of every EEG record and the second field of every
# label record.
np_eeg_processed = [record[0] for record in np_eeg]
np_labels_processed = [record[1] for record in np_labels]
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    np_eeg_processed, np_labels_processed, test_size=0.2, random_state=28
)
class MyDataset(Dataset):
    """Torch Dataset reading features and labels from a CSV file.

    Columns 0-7 of the CSV are treated as the feature vector ``x`` and
    column 8 as the target ``y``; both are loaded eagerly as float32
    tensors.  (Fix: the ``Dataset`` base class was never imported, which
    raised NameError at class-definition time; the import is added at the
    top of the file.)
    """

    def __init__(self, file_name):
        """Read the CSV at *file_name* and cache x/y tensors in memory."""
        frame = pd.read_csv(file_name)  # was misleadingly named price_df
        features = frame.iloc[:, 0:8].values
        targets = frame.iloc[:, 8].values
        self.x_train = torch.tensor(features, dtype=torch.float32)
        self.y_train = torch.tensor(targets, dtype=torch.float32)

    def __len__(self):
        """Number of samples (rows) in the dataset."""
        return len(self.y_train)

    def __getitem__(self, idx):
        """Return the ``(features, target)`` pair at position *idx*."""
        return self.x_train[idx], self.y_train[idx]
print("ok")
def trainingArgs(
    epochs: int,
    trainDir: str,
    batchSizeTrain=16,
    batchSizeEval=32,
    training_set_len=None,
):
    """Return a TrainingArguments instance to be passed to Trainer class.

    Parameters
    ----------
    epochs : int
        Number of passes over the training set.
    trainDir : str
        Directory under which ``results`` and ``logs`` folders are created.
    batchSizeTrain, batchSizeEval : int
        Per-device batch sizes for training and evaluation.
    training_set_len : int, optional
        Size of the training set.  Defaults to ``len(train_dataset)``
        resolved lazily at call time.  (Fix: the original default was
        evaluated at function-definition time, raising NameError before
        ``train_dataset`` existed.)
    """
    if training_set_len is None:
        training_set_len = len(train_dataset)
    totalSteps = int((training_set_len / batchSizeTrain) * epochs)
    # Warm the learning rate up over the first 5% of optimisation steps.
    warmup = int(totalSteps * 0.05)
    return TrainingArguments(
        output_dir=f"./{trainDir}/results",
        logging_dir=f"./{trainDir}/logs",
        overwrite_output_dir=True,
        # trains faster without evaluation
        #evaluate_during_training=False,
        per_device_train_batch_size=batchSizeTrain,
        per_device_eval_batch_size=batchSizeEval,
        num_train_epochs=epochs,
        warmup_steps=warmup,
        # Logging/checkpointing are effectively disabled (training is quick).
        logging_steps=9999,
        save_steps=9999,
        save_total_limit=1,
        # standard arguments
        learning_rate=5e-5,
        weight_decay=1e-2,
    )
# training arguments
trainDir = "training"
saveModelDir = "tuned-bert"
epochsList = [2, 3, 4]
embArgs= trainingArgs(2, trainDir)
# NOTE(review): train_dataset / test_dataset are never defined in this
# module -- the next two lines raise NameError as written; confirm where
# they were meant to come from.
trainDs = train_dataset
testDs = test_dataset
# training arguments
# NOTE(review): the following block duplicates the assignments above
# verbatim; presumably a copy-paste left-over.
trainDir = "training"
saveModelDir = "tuned-bert"
epochsList = [2, 3, 4]
embArgs= trainingArgs(2, trainDir)
trainDs = train_dataset
testDs = test_dataset
#Trainer(model="distilbert-base-uncased",train_dataset=X_train,eval_dataset=X_test)
| [
"sklearn.model_selection.train_test_split",
"torch.tensor",
"numpy.load",
"pandas.read_csv"
] | [((250, 272), 'numpy.load', 'np.load', (['"""hilbert.npz"""'], {}), "('hilbert.npz')\n", (257, 272), True, 'import numpy as np\n'), ((607, 698), 'sklearn.model_selection.train_test_split', 'train_test_split', (['np_eeg_processed', 'np_labels_processed'], {'test_size': '(0.2)', 'random_state': '(28)'}), '(np_eeg_processed, np_labels_processed, test_size=0.2,\n random_state=28)\n', (623, 698), False, 'from sklearn.model_selection import train_test_split\n'), ((770, 792), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {}), '(file_name)\n', (781, 792), True, 'import pandas as pd\n'), ((890, 926), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float32'}), '(x, dtype=torch.float32)\n', (902, 926), False, 'import torch\n'), ((947, 983), 'torch.tensor', 'torch.tensor', (['y'], {'dtype': 'torch.float32'}), '(y, dtype=torch.float32)\n', (959, 983), False, 'import torch\n')] |
from sklearn import tree, svm
from sklearn.model_selection import cross_val_score
import numpy as np
class Classifier:
    """Thin wrapper around a scikit-learn decision tree.

    ``train`` fits and stores a tree, ``validate`` cross-validates a fresh
    depth-limited tree, ``test`` predicts with the stored model, and
    ``plot`` renders the fitted tree.
    """

    def __init__(self):
        # Fitted estimator; populated by train().
        self.model = None

    def train(self, x, y):
        """Fit a decision tree on (x, y) and keep it as the current model."""
        clf = tree.DecisionTreeClassifier()
        clf = clf.fit(x, y)
        self.model = clf

    def validate(self, x, y):
        """5-fold cross-validate a depth-limited tree; return the fold scores."""
        # clf = svm.SVC(kernel='linear', C=1)
        clf = tree.DecisionTreeClassifier(max_depth=15)
        scores = cross_val_score(clf, x, y, cv=5)
        print(scores, np.mean(scores))
        return scores

    def test(self, x, y):
        """Predict labels for x with the trained model and return them.

        Fix: the original computed the predictions and discarded them,
        making the method a no-op; returning them lets callers compare
        against y.  Requires a prior train().
        """
        assert self.model is not None
        predict_y = self.model.predict(x)
        return predict_y

    def plot(self):
        """Render the fitted decision tree (requires a prior train())."""
        assert self.model is not None
        tree.plot_tree(self.model)
if __name__ == '__main__':
    # Ad-hoc driver: cross-validates a decision tree on one mined project.
    from django.settings import DATA_ROOT_DIRECTORY
    from software_mining.preprocess import PreProcessor
    # Alternative corpora kept for quick switching between experiments:
    # project_name = 'abarisain_dmix'
    # project_name = 'rspec_rspec-core'
    # project_name = 'codeforamerica_adopt-a-hydrant'
    project_name = 'apache_storm'
    # project_name = 'justinfrench_formtastic'
    # project_name = 'activescaffold_active_scaffold'
    # project_name = 'eirslett_frontend-maven-plugin'
    print("Project name: ", project_name)
    processor = PreProcessor(data_root_directory=DATA_ROOT_DIRECTORY)
    train_x, train_y = processor.get_x_and_y(project_name, 'train_set.txt')
    print(*processor.file_name_postfix_set)
    classifier = Classifier()
    # Held-out evaluation path is disabled; only 5-fold CV on the training
    # split is performed below.
    # classifier.train(train_x, train_y)
    # test_x, test_y = processor.get_x_and_y(project_name, 'test_set.txt', lambda build: True)
    # classifier.test(test_x, test_y)
    classifier.validate(train_x, train_y)
    print('Done')
| [
"numpy.mean",
"software_mining.preprocess.PreProcessor",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.tree.plot_tree",
"sklearn.model_selection.cross_val_score"
] | [((1287, 1340), 'software_mining.preprocess.PreProcessor', 'PreProcessor', ([], {'data_root_directory': 'DATA_ROOT_DIRECTORY'}), '(data_root_directory=DATA_ROOT_DIRECTORY)\n', (1299, 1340), False, 'from software_mining.preprocess import PreProcessor\n'), ((213, 242), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (240, 242), False, 'from sklearn import tree, svm\n'), ((387, 428), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': '(15)'}), '(max_depth=15)\n', (414, 428), False, 'from sklearn import tree, svm\n'), ((446, 478), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['clf', 'x', 'y'], {'cv': '(5)'}), '(clf, x, y, cv=5)\n', (461, 478), False, 'from sklearn.model_selection import cross_val_score\n'), ((744, 770), 'sklearn.tree.plot_tree', 'tree.plot_tree', (['self.model'], {}), '(self.model)\n', (758, 770), False, 'from sklearn import tree, svm\n'), ((501, 516), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (508, 516), True, 'import numpy as np\n')] |
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
import sys
import io
import zipfile
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import argparse
import torchvision
from PIL import Image
import numpy as np
from pytorch_i3d import InceptionI3d
from torchvision import datasets
#from charades_dataset_full import Thumos as Dataset
#import pdb
def load_frame(frame_file, resize=False):
    """Load one 320x240 frame image and scale pixel values into [-1, 1].

    Parameters
    ----------
    frame_file : str
        Path to the frame image.
    resize : bool
        If True, resample the frame to 224x224 before normalising.

    Returns
    -------
    numpy.ndarray of float with values in [-1, 1].
    """
    data = Image.open(frame_file)
    assert data.size[1] == 240
    assert data.size[0] == 320
    if resize:
        # Fix: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
        # the long-standing equivalent high-quality filter.
        data = data.resize((224, 224), Image.LANCZOS)
    data = np.array(data)
    data = data.astype(float)
    # Map uint8 [0, 255] into [-1, 1] as expected by I3D.
    data = (data * 2 / 255) - 1
    assert data.max() <= 1.0
    assert data.min() >= -1.0
    return data
def load_zipframe(zipdata, name, resize=False):
    """Load one 320x240 frame from an open zip archive, scaled into [-1, 1].

    Parameters
    ----------
    zipdata : zipfile.ZipFile
        Open archive to read from.
    name : str
        Archive member name of the frame image.
    resize : bool
        If True, resample the frame to 224x224 before normalising.
    """
    stream = zipdata.read(name)
    data = Image.open(io.BytesIO(stream))
    assert data.size[1] == 240
    assert data.size[0] == 320
    if resize:
        # Fix: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
        # the long-standing equivalent high-quality filter.
        data = data.resize((224, 224), Image.LANCZOS)
    data = np.array(data)
    data = data.astype(float)
    # Map uint8 [0, 255] into [-1, 1] as expected by I3D.
    data = (data * 2 / 255) - 1
    assert data.max() <= 1.0
    assert data.min() >= -1.0
    return data
def oversample_data(data):  # (?, 16, 224, 224, 2) # Check twice
    """Ten-crop augmentation.

    Returns five 224x224 crops (top-left, top-right, centre, bottom-left,
    bottom-right) of ``data`` followed by the same five crops of its
    horizontally flipped copy.  ``data`` is (batch, time, H, W, C) with
    H >= 240 and W >= 282.
    """
    flipped = np.array(data[:, :, :, ::-1, :])

    def five_crops(src):
        # Crop order: top-left, top-right, centre, bottom-left, bottom-right.
        return [
            np.array(src[:, :, :224, :224, :]),
            np.array(src[:, :, :224, -224:, :]),
            np.array(src[:, :, 16:240, 58:282, :]),
            np.array(src[:, :, -224:, :224, :]),
            np.array(src[:, :, -224:, -224:, :]),
        ]

    return five_crops(data) + five_crops(flipped)
def load_rgb_batch(frames_dir, rgb_files,
        frame_indices, resize=False):
    """Assemble a batch of RGB frames selected by ``frame_indices``.

    Output shape is ``frame_indices.shape + (224, 224, 3)`` when ``resize``
    is set, otherwise ``frame_indices.shape + (240, 320, 3)``.
    """
    tail = (224, 224, 3) if resize else (240, 320, 3)
    batch = np.zeros(frame_indices.shape + tail)
    n_chunks, chunk_len = frame_indices.shape
    for ci in range(n_chunks):
        for fi in range(chunk_len):
            frame_path = os.path.join(frames_dir, rgb_files[frame_indices[ci][fi]])
            batch[ci, fi, :, :, :] = load_frame(frame_path, resize)
    return batch
def load_ziprgb_batch(rgb_zipdata, rgb_files,
        frame_indices, resize=False):
    """Assemble a batch of RGB frames read from an open zip archive.

    Output shape is ``frame_indices.shape + (224, 224, 3)`` when ``resize``
    is set, otherwise ``frame_indices.shape + (240, 320, 3)``.
    """
    tail = (224, 224, 3) if resize else (240, 320, 3)
    batch = np.zeros(frame_indices.shape + tail)
    n_chunks, chunk_len = frame_indices.shape
    for ci in range(n_chunks):
        for fi in range(chunk_len):
            member = rgb_files[frame_indices[ci][fi]]
            batch[ci, fi, :, :, :] = load_zipframe(rgb_zipdata, member, resize)
    return batch
def load_flow_batch(frames_dir, flow_x_files, flow_y_files,
        frame_indices, resize=False):
    """Assemble a batch of optical-flow frames.

    Channel 0 holds x-flow and channel 1 holds y-flow.  Output shape is
    ``frame_indices.shape + (224, 224, 2)`` when ``resize`` is set,
    otherwise ``frame_indices.shape + (240, 320, 2)``.

    Fix: the ``flow_x``/``flow_y`` directory paths are loop invariants;
    the original recomputed both joins for every single frame.
    """
    if resize:
        batch_data = np.zeros(frame_indices.shape + (224, 224, 2))
    else:
        batch_data = np.zeros(frame_indices.shape + (240, 320, 2))
    flowx_dir = os.path.join(frames_dir, 'flow_x')
    flowy_dir = os.path.join(frames_dir, 'flow_y')
    for i in range(frame_indices.shape[0]):
        for j in range(frame_indices.shape[1]):
            idx = frame_indices[i][j]
            batch_data[i, j, :, :, 0] = load_frame(
                os.path.join(flowx_dir, flow_x_files[idx]), resize)
            batch_data[i, j, :, :, 1] = load_frame(
                os.path.join(flowy_dir, flow_y_files[idx]), resize)
    return batch_data
def load_zipflow_batch(flow_x_zipdata, flow_y_zipdata,
        flow_x_files, flow_y_files,
        frame_indices, resize=False):
    """Assemble a batch of optical-flow frames from two open zip archives.

    Channel 0 holds x-flow and channel 1 holds y-flow.  Output shape is
    ``frame_indices.shape + (224, 224, 2)`` when ``resize`` is set,
    otherwise ``frame_indices.shape + (240, 320, 2)``.
    """
    tail = (224, 224, 2) if resize else (240, 320, 2)
    batch = np.zeros(frame_indices.shape + tail)
    n_chunks, chunk_len = frame_indices.shape
    for ci in range(n_chunks):
        for fi in range(chunk_len):
            k = frame_indices[ci][fi]
            batch[ci, fi, :, :, 0] = load_zipframe(flow_x_zipdata, flow_x_files[k], resize)
            batch[ci, fi, :, :, 1] = load_zipframe(flow_y_zipdata, flow_y_files[k], resize)
    return batch
def run(mode='rgb', load_model='', sample_mode='oversample', frequency=16,
        input_dir='', output_dir='', batch_size=4, usezip=False):
    """Extract I3D features for every video under ``input_dir``.

    Assumes an ``input_dir/<class_name>/<video_name>`` layout holding
    pre-extracted frame images (or zip archives of frames when ``usezip``
    is set).  For each video, pooled features are written to
    ``output_dir/<class>/<video>/<video>-<mode>.npz``; videos whose output
    file already exists are skipped, so a run is resumable.

    :param mode: 'rgb' or 'flow' -- selects the I3D stream and checkpoint.
    :param load_model: directory holding 'rgb_imagenet.pt'/'flow_imagenet.pt'.
    :param sample_mode: 'oversample' (ten-crop), 'center_crop' or 'resize'.
    :param frequency: frame stride between consecutive 16-frame chunks.
    :param batch_size: number of chunks pushed through the network at once.
    :param usezip: read frames out of zip archives instead of directories.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    chunk_size = 16
    assert(mode in ['rgb', 'flow'])
    assert(sample_mode in ['oversample', 'center_crop', 'resize'])
    # setup the model
    if mode == 'flow':
        load_model = os.path.join(load_model, 'flow_imagenet.pt')
        i3d = InceptionI3d(400, in_channels=2) #400 classes representing Kinetics dataset
    else:
        load_model = os.path.join(load_model, 'rgb_imagenet.pt')
        i3d = InceptionI3d(400, in_channels=3)
    #i3d.replace_logits(157)
    i3d.load_state_dict(torch.load(load_model))
    i3d.cuda()
    i3d.train(False) # Set model to evaluate mode
    def forward_batch(b_data):
        # Push one batch of chunks through the network and return the pooled
        # per-chunk feature vectors as a (batch, feature_dim) numpy array.
        with torch.no_grad():
            b_data = b_data.transpose([0, 4, 1, 2, 3])
            b_data = torch.from_numpy(b_data) # b,c,t,h,w # ?x3x16x224x224 (for RGB)
            b_data = Variable(b_data.cuda()).float()
            b_features = i3d.extract_features(b_data)
            b_features = b_features.data.cpu().numpy()[:,:,0,0,0]
        return b_features
    video_names_list = []
    # Walk input_dir to collect 'class/video' relative paths, mirroring the
    # class folders under output_dir as we go.
    for class_name in os.listdir(input_dir):
        if os.path.exists(os.path.join(output_dir, class_name).replace('\\', '/')):
            pass
        else:
            os.makedirs(os.path.join(output_dir, class_name).replace('\\', '/'))
        for vid_name in os.listdir(os.path.join(input_dir, class_name).replace('\\', '/')):
            video_names_list.append(os.path.join(class_name, vid_name).replace('\\', '/'))
    for idx, video_name in enumerate(video_names_list):
        v_name = video_name.split('/')[1] # Only retrieve name of every .mp4 video
        save_file = '{}-{}.npz'.format(v_name, mode)
        # Ensure the per-video output directory exists and skip videos that
        # were already processed in a previous run.
        if os.path.exists(os.path.join(output_dir, video_names_list[idx]).replace('\\', '/')):
            pass
        else:
            os.makedirs(os.path.join(output_dir, video_names_list[idx]).replace('\\', '/'))
        if save_file in os.listdir(os.path.join(output_dir, video_names_list[idx])):
            continue
        frames_dir = os.path.join(input_dir, video_name)
        # Collect the sorted frame file lists for the requested modality,
        # either from zip archives or plain directories.
        if mode == 'rgb':
            if usezip:
                rgb_zipdata = zipfile.ZipFile(os.path.join(frames_dir, 'rgb.zip'), 'r')
                rgb_files = [i for i in rgb_zipdata.namelist() if i.startswith('rgb')]
            else:
                frames_dir = os.path.join(frames_dir, mode)
                rgb_files = [i for i in os.listdir(frames_dir) if i.startswith('rgb')]
            rgb_files.sort()
            frame_cnt = len(rgb_files)
        else:
            if usezip:
                flow_x_zipdata = zipfile.ZipFile(os.path.join(frames_dir, 'flow_x.zip'), 'r')
                flow_x_files = [i for i in flow_x_zipdata.namelist() if i.startswith('flow_x')]
                flow_y_zipdata = zipfile.ZipFile(os.path.join(frames_dir, 'flow_y.zip'), 'r')
                flow_y_files = [i for i in flow_y_zipdata.namelist() if i.startswith('flow_y')]
            else:
                flowx_dir = os.path.join(frames_dir, 'flow_x')
                flow_x_files = [i for i in os.listdir(flowx_dir) if i.startswith('flow_x')]
                flowy_dir = os.path.join(frames_dir, 'flow_y')
                flow_y_files = [i for i in os.listdir(flowy_dir) if i.startswith('flow_y')]
            flow_x_files.sort()
            flow_y_files.sort()
            assert(len(flow_y_files) == len(flow_x_files))
            frame_cnt = len(flow_y_files)
        # clipped_length = (frame_cnt // chunk_size) * chunk_size # Cut frames
        # Cut frames
        assert(frame_cnt > chunk_size)
        clipped_length = frame_cnt - chunk_size
        clipped_length = (clipped_length // frequency) * frequency # The start of last chunk
        frame_indices = [] # Frames to chunks
        # Build overlapping chunks of chunk_size frames, advancing by
        # `frequency` frames each time.
        for i in range(clipped_length // frequency + 1):
            frame_indices.append(
                [j for j in range(i * frequency, i * frequency + chunk_size)])
        frame_indices = np.array(frame_indices)
        #frame_indices = np.reshape(frame_indices, (-1, 16)) # Frames to chunks
        chunk_num = frame_indices.shape[0]
        batch_num = int(np.ceil(chunk_num / batch_size)) # Chunks to batches
        frame_indices = np.array_split(frame_indices, batch_num, axis=0)
        # Ten feature streams for ten-crop oversampling, otherwise a single one.
        if sample_mode == 'oversample':
            full_features = [[] for i in range(10)]
        else:
            full_features = [[]]
        for batch_id in range(batch_num):
            require_resize = sample_mode == 'resize'
            # Load the pixel data for this batch of chunks.
            if mode == 'rgb':
                if usezip:
                    batch_data = load_ziprgb_batch(rgb_zipdata, rgb_files,
                        frame_indices[batch_id], require_resize)
                else:
                    batch_data = load_rgb_batch(frames_dir, rgb_files,
                        frame_indices[batch_id], require_resize)
            else:
                if usezip:
                    batch_data = load_zipflow_batch(
                        flow_x_zipdata, flow_y_zipdata,
                        flow_x_files, flow_y_files,
                        frame_indices[batch_id], require_resize)
                else:
                    batch_data = load_flow_batch(frames_dir,
                        flow_x_files, flow_y_files,
                        frame_indices[batch_id], require_resize)
            if sample_mode == 'oversample':
                batch_data_ten_crop = oversample_data(batch_data)
                for i in range(10):
                    #pdb.set_trace()
                    assert(batch_data_ten_crop[i].shape[-2]==224)
                    assert(batch_data_ten_crop[i].shape[-3]==224)
                    full_features[i].append(forward_batch(batch_data_ten_crop[i]))
            else:
                if sample_mode == 'center_crop':
                    batch_data = batch_data[:,:,16:240,58:282,:] # Center Crop (4, 16, 224, 224, 2)
                assert(batch_data.shape[-2]==224)
                assert(batch_data.shape[-3]==224)
                full_features[0].append(forward_batch(batch_data))
        # Concatenate per-batch features and stack the crop streams into one
        # (num_crops, num_chunks, feature_dim) array.
        full_features = [np.concatenate(i, axis=0) for i in full_features]
        full_features = [np.expand_dims(i, axis=0) for i in full_features]
        full_features = np.concatenate(full_features, axis=0)
        np.savez(os.path.join(os.path.join(output_dir, video_names_list[idx]), save_file),
            feature=full_features,
            frame_cnt=frame_cnt,
            video_name=v_name)
        print('{} Extracted features {}: {} / {}, {}'.format(
            v_name, mode, frame_cnt, clipped_length, full_features.shape))
if __name__ == '__main__':
    # Command-line front-end for the I3D feature extractor.
    cli = argparse.ArgumentParser()
    cli.add_argument('--mode', type=str)
    cli.add_argument('--load_model', type=str)
    cli.add_argument('--input_dir', type=str)
    cli.add_argument('--output_dir', type=str)
    cli.add_argument('--batch_size', type=int, default=4)
    cli.add_argument('--sample_mode', type=str)
    cli.add_argument('--frequency', type=int, default=16)
    # Paired on/off flags sharing one destination; default is off.
    cli.add_argument('--usezip', dest='usezip', action='store_true')
    cli.add_argument('--no-usezip', dest='usezip', action='store_false')
    cli.set_defaults(usezip=False)
    opts = cli.parse_args()
    run(mode=opts.mode,
        load_model=opts.load_model,
        sample_mode=opts.sample_mode,
        input_dir=opts.input_dir,
        output_dir=opts.output_dir,
        batch_size=opts.batch_size,
        frequency=opts.frequency,
        usezip=opts.usezip)
| [
"os.path.exists",
"numpy.ceil",
"PIL.Image.open",
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"pytorch_i3d.InceptionI3d",
"torch.load",
"io.BytesIO",
"os.path.join",
"torch.from_numpy",
"numpy.array_split",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.expand_di... | [((531, 553), 'PIL.Image.open', 'Image.open', (['frame_file'], {}), '(frame_file)\n', (541, 553), False, 'from PIL import Image\n'), ((703, 717), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (711, 717), True, 'import numpy as np\n'), ((1129, 1143), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1137, 1143), True, 'import numpy as np\n'), ((1367, 1399), 'numpy.array', 'np.array', (['data[:, :, :, ::-1, :]'], {}), '(data[:, :, :, ::-1, :])\n', (1375, 1399), True, 'import numpy as np\n'), ((1410, 1445), 'numpy.array', 'np.array', (['data[:, :, :224, :224, :]'], {}), '(data[:, :, :224, :224, :])\n', (1418, 1445), True, 'import numpy as np\n'), ((1459, 1495), 'numpy.array', 'np.array', (['data[:, :, :224, -224:, :]'], {}), '(data[:, :, :224, -224:, :])\n', (1467, 1495), True, 'import numpy as np\n'), ((1509, 1548), 'numpy.array', 'np.array', (['data[:, :, 16:240, 58:282, :]'], {}), '(data[:, :, 16:240, 58:282, :])\n', (1517, 1548), True, 'import numpy as np\n'), ((1585, 1621), 'numpy.array', 'np.array', (['data[:, :, -224:, :224, :]'], {}), '(data[:, :, -224:, :224, :])\n', (1593, 1621), True, 'import numpy as np\n'), ((1635, 1672), 'numpy.array', 'np.array', (['data[:, :, -224:, -224:, :]'], {}), '(data[:, :, -224:, -224:, :])\n', (1643, 1672), True, 'import numpy as np\n'), ((1689, 1729), 'numpy.array', 'np.array', (['data_flip[:, :, :224, :224, :]'], {}), '(data_flip[:, :, :224, :224, :])\n', (1697, 1729), True, 'import numpy as np\n'), ((1745, 1786), 'numpy.array', 'np.array', (['data_flip[:, :, :224, -224:, :]'], {}), '(data_flip[:, :, :224, -224:, :])\n', (1753, 1786), True, 'import numpy as np\n'), ((1802, 1846), 'numpy.array', 'np.array', (['data_flip[:, :, 16:240, 58:282, :]'], {}), '(data_flip[:, :, 16:240, 58:282, :])\n', (1810, 1846), True, 'import numpy as np\n'), ((1862, 1903), 'numpy.array', 'np.array', (['data_flip[:, :, -224:, :224, :]'], {}), '(data_flip[:, :, -224:, :224, :])\n', (1870, 1903), True, 'import 
numpy as np\n'), ((1919, 1961), 'numpy.array', 'np.array', (['data_flip[:, :, -224:, -224:, :]'], {}), '(data_flip[:, :, -224:, -224:, :])\n', (1927, 1961), True, 'import numpy as np\n'), ((5836, 5857), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (5846, 5857), False, 'import os\n'), ((11466, 11491), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11489, 11491), False, 'import argparse\n'), ((960, 978), 'io.BytesIO', 'io.BytesIO', (['stream'], {}), '(stream)\n', (970, 978), False, 'import io\n'), ((2206, 2251), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (224, 224, 3))'], {}), '(frame_indices.shape + (224, 224, 3))\n', (2214, 2251), True, 'import numpy as np\n'), ((2281, 2326), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (240, 320, 3))'], {}), '(frame_indices.shape + (240, 320, 3))\n', (2289, 2326), True, 'import numpy as np\n'), ((2707, 2752), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (224, 224, 3))'], {}), '(frame_indices.shape + (224, 224, 3))\n', (2715, 2752), True, 'import numpy as np\n'), ((2782, 2827), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (240, 320, 3))'], {}), '(frame_indices.shape + (240, 320, 3))\n', (2790, 2827), True, 'import numpy as np\n'), ((3213, 3258), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (224, 224, 2))'], {}), '(frame_indices.shape + (224, 224, 2))\n', (3221, 3258), True, 'import numpy as np\n'), ((3288, 3333), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (240, 320, 2))'], {}), '(frame_indices.shape + (240, 320, 2))\n', (3296, 3333), True, 'import numpy as np\n'), ((4037, 4082), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (224, 224, 2))'], {}), '(frame_indices.shape + (224, 224, 2))\n', (4045, 4082), True, 'import numpy as np\n'), ((4112, 4157), 'numpy.zeros', 'np.zeros', (['(frame_indices.shape + (240, 320, 2))'], {}), '(frame_indices.shape + (240, 320, 2))\n', (4120, 4157), True, 'import numpy as np\n'), ((4678, 4704), 
'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (4692, 4704), False, 'import os\n'), ((4714, 4737), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4725, 4737), False, 'import os\n'), ((4934, 4978), 'os.path.join', 'os.path.join', (['load_model', '"""flow_imagenet.pt"""'], {}), "(load_model, 'flow_imagenet.pt')\n", (4946, 4978), False, 'import os\n'), ((4993, 5025), 'pytorch_i3d.InceptionI3d', 'InceptionI3d', (['(400)'], {'in_channels': '(2)'}), '(400, in_channels=2)\n', (5005, 5025), False, 'from pytorch_i3d import InceptionI3d\n'), ((5100, 5143), 'os.path.join', 'os.path.join', (['load_model', '"""rgb_imagenet.pt"""'], {}), "(load_model, 'rgb_imagenet.pt')\n", (5112, 5143), False, 'import os\n'), ((5158, 5190), 'pytorch_i3d.InceptionI3d', 'InceptionI3d', (['(400)'], {'in_channels': '(3)'}), '(400, in_channels=3)\n', (5170, 5190), False, 'from pytorch_i3d import InceptionI3d\n'), ((5249, 5271), 'torch.load', 'torch.load', (['load_model'], {}), '(load_model)\n', (5259, 5271), False, 'import torch\n'), ((6787, 6822), 'os.path.join', 'os.path.join', (['input_dir', 'video_name'], {}), '(input_dir, video_name)\n', (6799, 6822), False, 'import os\n'), ((8725, 8748), 'numpy.array', 'np.array', (['frame_indices'], {}), '(frame_indices)\n', (8733, 8748), True, 'import numpy as np\n'), ((8978, 9026), 'numpy.array_split', 'np.array_split', (['frame_indices', 'batch_num'], {'axis': '(0)'}), '(frame_indices, batch_num, axis=0)\n', (8992, 9026), True, 'import numpy as np\n'), ((11054, 11091), 'numpy.concatenate', 'np.concatenate', (['full_features'], {'axis': '(0)'}), '(full_features, axis=0)\n', (11068, 11091), True, 'import numpy as np\n'), ((3449, 3483), 'os.path.join', 'os.path.join', (['frames_dir', '"""flow_x"""'], {}), "(frames_dir, 'flow_x')\n", (3461, 3483), False, 'import os\n'), ((3653, 3687), 'os.path.join', 'os.path.join', (['frames_dir', '"""flow_y"""'], {}), "(frames_dir, 'flow_y')\n", (3665, 3687), False, 
'import os\n'), ((5385, 5400), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5398, 5400), False, 'import torch\n'), ((5491, 5515), 'torch.from_numpy', 'torch.from_numpy', (['b_data'], {}), '(b_data)\n', (5507, 5515), False, 'import torch\n'), ((8898, 8929), 'numpy.ceil', 'np.ceil', (['(chunk_num / batch_size)'], {}), '(chunk_num / batch_size)\n', (8905, 8929), True, 'import numpy as np\n'), ((10905, 10930), 'numpy.concatenate', 'np.concatenate', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (10919, 10930), True, 'import numpy as np\n'), ((10980, 11005), 'numpy.expand_dims', 'np.expand_dims', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (10994, 11005), True, 'import numpy as np\n'), ((2466, 2522), 'os.path.join', 'os.path.join', (['frames_dir', 'rgb_files[frame_indices[i][j]]'], {}), '(frames_dir, rgb_files[frame_indices[i][j]])\n', (2478, 2522), False, 'import os\n'), ((3531, 3589), 'os.path.join', 'os.path.join', (['flowx_dir', 'flow_x_files[frame_indices[i][j]]'], {}), '(flowx_dir, flow_x_files[frame_indices[i][j]])\n', (3543, 3589), False, 'import os\n'), ((3735, 3793), 'os.path.join', 'os.path.join', (['flowy_dir', 'flow_y_files[frame_indices[i][j]]'], {}), '(flowy_dir, flow_y_files[frame_indices[i][j]])\n', (3747, 3793), False, 'import os\n'), ((6686, 6733), 'os.path.join', 'os.path.join', (['output_dir', 'video_names_list[idx]'], {}), '(output_dir, video_names_list[idx])\n', (6698, 6733), False, 'import os\n'), ((7096, 7126), 'os.path.join', 'os.path.join', (['frames_dir', 'mode'], {}), '(frames_dir, mode)\n', (7108, 7126), False, 'import os\n'), ((7748, 7782), 'os.path.join', 'os.path.join', (['frames_dir', '"""flow_x"""'], {}), "(frames_dir, 'flow_x')\n", (7760, 7782), False, 'import os\n'), ((7903, 7937), 'os.path.join', 'os.path.join', (['frames_dir', '"""flow_y"""'], {}), "(frames_dir, 'flow_y')\n", (7915, 7937), False, 'import os\n'), ((11123, 11170), 'os.path.join', 'os.path.join', (['output_dir', 'video_names_list[idx]'], {}), '(output_dir, 
video_names_list[idx])\n', (11135, 11170), False, 'import os\n'), ((5885, 5921), 'os.path.join', 'os.path.join', (['output_dir', 'class_name'], {}), '(output_dir, class_name)\n', (5897, 5921), False, 'import os\n'), ((6091, 6126), 'os.path.join', 'os.path.join', (['input_dir', 'class_name'], {}), '(input_dir, class_name)\n', (6103, 6126), False, 'import os\n'), ((6459, 6506), 'os.path.join', 'os.path.join', (['output_dir', 'video_names_list[idx]'], {}), '(output_dir, video_names_list[idx])\n', (6471, 6506), False, 'import os\n'), ((6920, 6955), 'os.path.join', 'os.path.join', (['frames_dir', '"""rgb.zip"""'], {}), "(frames_dir, 'rgb.zip')\n", (6932, 6955), False, 'import os\n'), ((7370, 7408), 'os.path.join', 'os.path.join', (['frames_dir', '"""flow_x.zip"""'], {}), "(frames_dir, 'flow_x.zip')\n", (7382, 7408), False, 'import os\n'), ((7561, 7599), 'os.path.join', 'os.path.join', (['frames_dir', '"""flow_y.zip"""'], {}), "(frames_dir, 'flow_y.zip')\n", (7573, 7599), False, 'import os\n'), ((5999, 6035), 'os.path.join', 'os.path.join', (['output_dir', 'class_name'], {}), '(output_dir, class_name)\n', (6011, 6035), False, 'import os\n'), ((6184, 6218), 'os.path.join', 'os.path.join', (['class_name', 'vid_name'], {}), '(class_name, vid_name)\n', (6196, 6218), False, 'import os\n'), ((6583, 6630), 'os.path.join', 'os.path.join', (['output_dir', 'video_names_list[idx]'], {}), '(output_dir, video_names_list[idx])\n', (6595, 6630), False, 'import os\n'), ((7167, 7189), 'os.listdir', 'os.listdir', (['frames_dir'], {}), '(frames_dir)\n', (7177, 7189), False, 'import os\n'), ((7826, 7847), 'os.listdir', 'os.listdir', (['flowx_dir'], {}), '(flowx_dir)\n', (7836, 7847), False, 'import os\n'), ((7981, 8002), 'os.listdir', 'os.listdir', (['flowy_dir'], {}), '(flowy_dir)\n', (7991, 8002), False, 'import os\n')] |
import argparse
import copy
import numpy as np
import re
import os
import sys
from sklearn.feature_extraction.text import CountVectorizer
def get_dual_stopwords(add_stopwords):
    """Combine the caller-supplied stopwords with the default stopword set."""
    default_stopwords = getStopwordsSet()
    return add_stopwords | default_stopwords
def bifurcate_stopwords(add_stopwords):
    """Split stopwords into single-word ('unigrams') and multi-word ('multigrams') sets."""
    split = {'unigrams': set(), 'multigrams': set()}
    for term in add_stopwords:
        bucket = 'unigrams' if len(term.split(' ')) == 1 else 'multigrams'
        split[bucket].add(term)
    return split
def combine_ngrams(ngram_lst):
    """Convert each ngram record into the set of its lowercase space-separated tokens."""
    return [set(entry['ngram'].lower().split(' ')) for entry in ngram_lst]
def get_ngram_dct(ngram, tf, postings, extra_fields=None):
    """
    Build the canonical ngram record: {'ngram': ..., 'term_freq': ...},
    plus 'postings' when supplied and any extra key/value pairs from
    extra_fields (ignored unless it is a dict).
    """
    if( extra_fields is None or isinstance(extra_fields, dict) == False ):
        extra_fields = {}

    payload = {'ngram': ngram, 'term_freq': tf}
    if( postings is not None ):
        payload['postings'] = postings

    # bug fix: iterating the dict directly yields only keys, so the
    # key/val unpacking raised ValueError for any non-empty extra_fields
    for key, val in extra_fields.items():
        payload[key] = val

    return payload
def fmt_posting(doc_dets):
    """Deep-copy a document record, stripping the bulky 'text' and redundant 'doc_id' fields."""
    posting = copy.deepcopy(doc_dets)
    posting.pop('text', None)
    posting.pop('doc_id', None)
    return posting
def fmt_report(ngram_lst, params):
    """Make params['add_stopwords'] JSON-serializable and drop postings unless requested."""
    params['add_stopwords'] = list( params['add_stopwords'] )
    if not params['include_postings']:
        for entry in ngram_lst:
            del entry['postings']
def calc_avg_sentence_overlap(ngram_sentences, doc_sentence):
    """Average the set-overlap score between each ngram token set and the sentence token set."""
    total = len(ngram_sentences)
    if total == 0 or len(doc_sentence) == 0:
        return 0
    combined = sum(overlapFor2Sets(ngram_sent, doc_sentence) for ngram_sent in ngram_sentences)
    return combined / total
def get_docs_sentence_score(ngram_sentences, sentences, doc_indx, doc_id, params):
    """Score every non-empty sentence of a document by its average overlap with the ngram token sets."""
    scored = []
    for sent_indx, sent_dct in enumerate(sentences):
        sentence = sent_dct['sentence'].strip()
        if sentence == '':
            continue

        # tokenize with the same pattern used when the ngrams were extracted
        sent_set = set( re.findall(params['token_pattern'], sentence.lower()) )
        scored.append({
            'avg_overlap': calc_avg_sentence_overlap(ngram_sentences, sent_set),
            'sentence': sentence,
            'doc_indx': doc_indx,
            'doc_id': doc_id,
            'sent_indx': sent_indx
        })
    return scored
def extract_doc_sentences(text, sentence_tokenizer, dedup_set, multi_word_proper_nouns, params=None):
    '''
    Segment text into deduplicated sentences, preferring Stanford CoreNLP's
    sentence annotator and falling back to regex segmentation.

    text: raw document text
    sentence_tokenizer: regex pattern used by the regex fallback segmenter
    dedup_set: (mutated) set of lowercase sentences already seen across documents
    multi_word_proper_nouns: (mutated) accumulator filled by extract_proper_nouns()
    params: optional settings (CoreNLP host/port, max words per sentence, ...)

    Returns a list of {'sentence': ...} dicts; CoreNLP-segmented entries also
    carry 'tok_len'.
    '''
    if( text == '' ):
        return []

    if( params is None ):
        params = {}
    params.setdefault('corenlp_host', 'localhost')
    params.setdefault('corenlp_port', '9000')
    # robustness fix: these keys were previously read below without defaults,
    # raising KeyError whenever a partial params dict (or None) was supplied
    params.setdefault('stanford_corenlp_server', False)
    params.setdefault('corenlp_max_sentence_words', 100)

    filtered_sentences = []
    if( params['stanford_corenlp_server'] ):
        doc = nlpSentenceAnnotate(text.replace('\n', ' '), host=params['corenlp_host'], port=params['corenlp_port'])
    else:
        doc = {}

    if( 'sentences' in doc ):
        if( len(doc['sentences']) != 0 ):
            for sent in doc['sentences']:
                sentence = sent['sentence'].replace('\n', ' ').strip()
                lowercase_sent = sentence.lower()
                if( sentence == '' ):
                    continue
                if( lowercase_sent in dedup_set ):
                    continue

                tok_len = len(sent['tokens'])
                dedup_set.add(lowercase_sent)
                extract_proper_nouns( sent['tokens'], multi_word_proper_nouns )

                if( tok_len > params['corenlp_max_sentence_words'] ):
                    #this sentence is too long so force split
                    filtered_sentences += regex_get_sentences(sent['sentence'], sentence_tokenizer, dedup_set)
                else:
                    filtered_sentences.append({ 'sentence': sentence, 'tok_len': tok_len })

    #if corenlp sentence segmentation is not used, use regex sentence segmentation
    if( len(filtered_sentences) == 0 ):
        filtered_sentences = regex_get_sentences(text, sentence_tokenizer, dedup_set)

    return filtered_sentences
def regex_get_sentences(text, sentence_tokenizer, dedup_set):
    """Split text into sentences with a regex pattern, dropping blanks and case-insensitive duplicates (tracked via dedup_set, which is mutated)."""
    if text == '':
        return []

    kept = []
    for raw in re.split(sentence_tokenizer, text):
        sentence = raw.replace('\n', ' ').strip()
        if sentence == '':
            continue

        key = sentence.lower()
        if key in dedup_set:
            continue

        dedup_set.add(key)
        kept.append({ 'sentence': sentence })
    return kept
def get_ranked_docs(ngram_lst, doc_dct_lst):
    """
    Rank documents by accumulating position-weighted credit: a term at rank i
    in ngram_lst awards (N - i) points to every document it appears in, so
    documents holding highly ranked terms score highest. Documents with no
    terms in ngram_lst receive no entry.
    """
    ranked_docs = {}
    N = len(ngram_lst)

    for rank, entry in enumerate(ngram_lst):
        for posting in entry['postings']:
            doc_indx = posting['doc_indx']
            if doc_indx not in ranked_docs:
                ranked_docs[doc_indx] = {
                    'score': 0,
                    'doc_id': doc_dct_lst[doc_indx]['doc_id'],
                    'doc_details': fmt_posting( doc_dct_lst[doc_indx] )
                }
            ranked_docs[doc_indx]['score'] += (N - rank)

    return sortDctByKey(ranked_docs, 'score')
def rank_sents_frm_top_ranked_docs(ngram_sentences, ranked_docs, all_doc_sentences, extra_params=None):
    """Score sentences from the ranked documents and return the top ones by average ngram overlap."""
    if extra_params is None:
        extra_params = {}
    extra_params.setdefault('sentences_rank_count', 20)
    print('\nrank_sents_frm_top_ranked_docs():')

    candidates = []
    for doc_indx, doc_dct in ranked_docs:
        candidates += get_docs_sentence_score(ngram_sentences, all_doc_sentences[doc_indx], doc_indx, doc_dct['doc_id'], extra_params)

    candidates.sort(key=lambda s: s['avg_overlap'], reverse=True)
    return candidates[:extra_params['sentences_rank_count']]
def extract_proper_nouns(sent_toks, container):
    '''
    NNP: Proper Noun Singular
    NNPS: Proper Noun plural
    CC: Coordinating conjunction
    IN: Preposition or subordinating conjunction

    The goal here is to extract multi-word proper nouns. E.g.,
        "<NAME>" (NNP NNP)
        "Centers for Disease Control" (NNP IN NNP NNP)
        "Federal Emergency Management Agency" (NNP NNP NNP NNP)
    This is achieved extracting a contiguous list of NNP or a contiguous list of NNP interleaved with CC or IN

    sent_toks: token dicts with at least 'pos' and 'tok' keys.
    container: (mutated) dict keyed by lowercase proper noun; each value holds
        'freq' (occurrence count), 'raw' (original casing) and 'nnp_rate'
        (fraction of NNP-prefixed tags in the phrase).
    Returns the internal {'toks': ..., 'pos': ...} accumulator, or [] when no
    multi-word proper noun was found.
    '''
    if( len(sent_toks) == 0 ):
        return []

    last_pos = ''
    # parallel lists of groups: each inner list accumulates one candidate
    # multi-word proper noun (tokens and their POS tags)
    proper_nouns = {'toks': [[]], 'pos': [[]]}
    for i in range( len(sent_toks) ):
        pos = sent_toks[i]['pos']
        tok = sent_toks[i]['tok']

        if( pos.startswith('NNP') ):
            #label 0
            #match e.g., NNP
            proper_nouns['toks'][-1].append(tok)
            proper_nouns['pos'][-1].append(pos)
        elif( len(proper_nouns['toks'][-1]) != 0 and pos in ['IN', 'CC'] ):
            #label 1
            #match e.g., NNP IN or NNP CC
            proper_nouns['toks'][-1].append(tok)
            proper_nouns['pos'][-1].append(pos)
        elif( len(proper_nouns['toks'][-1]) != 0 ):
            #label 2: current group ended on a non-NNP/IN/CC tag - decide whether to keep it
            if( len(proper_nouns['toks'][-1]) == 1 ):
                #label 2.0
                #match NNP (single NNP, violates multi-word condition)
                proper_nouns['toks'][-1] = []
                proper_nouns['pos'][-1] = []
            elif( last_pos not in ['NNP', 'NNPS'] ):
                #label 2.1
                #match e.g., NNP IN or NNP CC or NNP NNP IN or NNP IN NNP CC
                # group ends with a dangling IN/CC - trim it off
                proper_nouns['toks'][-1] = proper_nouns['toks'][-1][:-1]
                proper_nouns['pos'][-1] = proper_nouns['pos'][-1][:-1]
                if( len(proper_nouns['toks'][-1]) == 1 ):
                    #label 2.2
                    #match e.g, originally NNP CC, here means CC removed leaving just single NNP - violates multi-word condition
                    proper_nouns['toks'][-1] = []
                    proper_nouns['pos'][-1] = []
                else:
                    #label 2.3
                    #match e.g, originally NNP NNP IN, here means IN was remove leaving NNP NNP
                    proper_nouns['toks'].append( [] )
                    proper_nouns['pos'].append( [] )
            else:
                #label 3
                #match e.g, NNP NNP IN NNP, here means multi-word proper noun group has ended so create empty slots for next proper noun group
                proper_nouns['toks'].append( [] )
                proper_nouns['pos'].append( [] )

        last_pos = pos

    if( len(proper_nouns['toks'][0]) == 0 ):
        #here means no proper noun was found
        return []
    else:
        if( len(proper_nouns['toks'][-1]) == 0 ):
            #here means at least one multi-word proper noun was found, so remove last empty slot
            proper_nouns['toks'] = proper_nouns['toks'][:-1]
            proper_nouns['pos'] = proper_nouns['pos'][:-1]

    # record each multi-word proper noun in container, counting repeats
    for i in range( len(proper_nouns['toks']) ):
        proper_noun = proper_nouns['toks'][i]
        proper_noun = ' '.join(proper_noun)
        proper_noun_lower = proper_noun.lower()
        proper_noun_rate = round(proper_nouns['pos'][i].count('NNP')/len(proper_nouns['pos'][i]), 4)

        if( proper_noun_lower in container ):
            container[proper_noun_lower]['freq'] += 1
        else:
            container[proper_noun_lower] = {'freq': 1, 'raw': proper_noun, 'nnp_rate': proper_noun_rate }

    return proper_nouns
def rank_proper_nouns(multi_word_proper_nouns):
    """Sort proper nouns by freq * nnp_rate (descending) and record each entry's rank index."""
    ranked = sorted(
        multi_word_proper_nouns.items(),
        key=lambda item: item[1]['freq'] * item[1]['nnp_rate'],
        reverse=True
    )
    for rank, (_, dets) in enumerate(ranked):
        dets['rank'] = rank
    return ranked
def get_ngram_pos(toks, key_indx):
    '''
    Map a character offset within the space-joined token string back to a
    token index.

    Given ngram: 'convention center' found at character index 13 of the
    sentence 'at the brown convention center' with
    toks = ['at', 'the', 'brown', 'convention', 'center'], return 3.
    Returns -1 when key_indx does not land on a token boundary.
    '''
    char_offset = 0
    for tok_indx, tok in enumerate(toks):
        if char_offset == key_indx:
            return tok_indx
        char_offset += len(tok) + 1  # +1 for the separating space
    return -1
def indx_where_ngram_ends(st_indx, ngram_toks, sent_toks):
    '''
    Locate where ngram_toks occur (in order, possibly interleaved with other
    tokens such as stopwords) within sent_toks, starting the scan at st_indx.

    Case 1 (stopwords in sentence but stripped from ngram):
        st_indx = 2, ngram_toks = ['orange', 'new', 'black'],
        sent_toks = ['best', 'is', 'orange', 'is', 'the', 'new', 'black']
        -> (2, 5): the 5-token span ['orange', 'is', 'the', 'new', 'black']
    Case 2 (contiguous match):
        st_indx = 5, ngram_toks = ['convention', 'center'],
        sent_toks = ['it', 'is', 'the', 'brown', 'r.', 'convention', 'center']
        -> (5, 2)

    Returns (start, length); length is -1 when not all ngram tokens matched.
    '''
    cursor = st_indx
    span = 0
    first_hit = -1
    matched = 0
    sent_len = len(sent_toks)

    for needle in ngram_toks:
        needle = needle.strip().lower()
        while cursor < sent_len:
            span += 1
            haystack_tok = sent_toks[cursor].strip().lower()
            cursor += 1
            # substring match tolerates formatting differences between ngram
            # and sentence, e.g. ngram "aransas" vs sentence token "aransas'"
            if haystack_tok.find(needle) != -1:
                if first_hit == -1:
                    first_hit = cursor - 1
                matched += 1
                break

    if matched == len(ngram_toks):
        #all ngram_toks were found
        return first_hit, span
    return first_hit, -1
def is_ngram_subset(parent, child, stopwords):
    """Return True when child is contained in parent: directly, after stopword removal, or as an in-order token subsequence with full token overlap."""
    parent = parent.strip().lower()
    child = child.strip().lower()

    # direct substring containment
    if child in parent:
        return True

    parent = rmStopwords(parent, stopwords)
    child = rmStopwords(child, stopwords)
    # containment once stopwords are stripped
    if ' '.join(child) in ' '.join(parent):
        return True

    # require full token overlap before checking ordered containment
    if overlapFor2Sets( set(parent), set(child) ) != 1:
        return False
    return isMatchInOrder(child, parent)
def get_sentence_match_ngram(ngram, ngram_toks, sentences, doc_indx):
    """Collect, for each sentence containing ngram, the token position and span where it occurs."""
    debug_verbose = False
    matches = []
    for sent_indx, sent_dct in enumerate(sentences):
        ori_sent = sent_dct['sentence'].replace('\n', ' ')
        lowered = ori_sent.lower()
        char_indx = lowered.find(ngram)
        if char_indx == -1:
            continue

        lowered = lowered.strip()
        sent_toks = phraseTokenizer(lowered)
        ngram_start_indx = get_ngram_pos(sent_toks, char_indx)
        ngram_start, ngram_length = indx_where_ngram_ends(ngram_start_indx, ngram_toks, sent_toks)

        if ngram_start_indx == -1:
            if debug_verbose:
                print('\nDID NOT FIND NGRAM POST SPLITTING' * 10)
                print('\tngram:', ngram)
                print('\tsentence:', lowered)
                print('\tsent_tok not printed')
                print()
            continue

        matches.append({
            'ori_sent': ori_sent,
            'sent_indx': sent_indx,
            'doc_indx': doc_indx,
            'toks': sent_toks,
            'ngram_start_indx': ngram_start_indx,
            'ngram_length': ngram_length
        })
    return matches
def rank_mltwd_proper_nouns(ngram, ngram_toks, sentences, params=None):
    '''
    Attempt to expand ngram into a longer multi-word proper noun by growing a
    moving window over the sentences that contain it. At each window size the
    ngram is extended with tokens to its left, right, or both sides; the
    extension occurring at the highest rate across sentences wins. The window
    keeps growing until the best rate drops below
    params['mvg_window_min_proper_noun_rate'].

    sentences: dicts from get_sentence_match_ngram() ('ngram_start_indx',
        'ngram_length', 'toks', ...).
    Returns {'proper_noun': ..., 'rate': ...} for the longest expansion that
    met the threshold, '' for trivial input (single sentence or blank ngram),
    or {} when no expansion qualified.
    '''
    if( params is None ):
        params = {}
    params.setdefault('mvg_window_min_proper_noun_rate', 0.5)
    # robustness fix: 'debug_verbose' is read throughout but previously had no
    # default, so a bare/partial params dict raised KeyError
    params.setdefault('debug_verbose', False)

    sent_count = len(sentences)
    ngram = ngram.strip()
    if( sent_count == 1 or ngram == '' ):
        #it's possible for ngram = '', search for 'ngram_history'
        return ''

    window_size = 0
    max_sent_toks = 0
    final_multi_word_proper_noun = {}
    max_multiprpnoun_lrb = {}
    while True:
        window_size += 1
        max_multiprpnoun_lrb[window_size] = {'lrb': '', 'ngram': '', 'rate': 0}
        phrase_counts = { 'left': {}, 'right': {}, 'both': {} }
        proper_noun_phrases = {'left': '', 'right': '', 'both': ''}

        for i in range(sent_count):
            sent = sentences[i]
            ngram_start = sent['ngram_start_indx']
            ngram_length = sent['ngram_length']
            if( ngram_start == -1 or ngram_length == -1 ):
                continue

            sent_toks_count = len(sent['toks'])
            if( params['debug_verbose'] ):
                print( '\n\twindow_size:', window_size )
                print( '\tngram:', ngram_toks )
                print( '\tngram in sent (start/length):', ngram_start, ngram_length )
                print( '\tsent keys:', sent.keys() )
                print( '\tori:', sent['ori_sent'] )
                print( '\tsent:', i, 'of', sent_count, ':', sent['toks'] )
                print( '\tsent_len:', sent_toks_count)

            #longest sentence bounds how far the window can ever grow
            if( window_size == 1 and sent_toks_count > max_sent_toks ):
                max_sent_toks = sent_toks_count

            ngram_prefix = sent['toks'][ngram_start - window_size:ngram_start]
            ngram_suffix = sent['toks'][ngram_start + ngram_length:ngram_start + ngram_length + window_size]
            ngram_prefix = ' '.join(filter(None, ngram_prefix)).strip()
            ngram_suffix = ' '.join(filter(None, ngram_suffix)).strip()

            proper_noun_phrases['left'] = ngram_prefix + ' ' + ngram
            proper_noun_phrases['right'] = ngram + ' ' + ngram_suffix
            proper_noun_phrases['both'] = ngram_prefix + ' ' + ngram + ' ' + ngram_suffix

            for lrb in ['left', 'right', 'both']:
                '''
                #does not account for ties consider: if( max_multiprpnoun_lrb[window_size-1]['lrb'] != lrb and max_multiprpnoun_lrb[window_size-1]['tie'] == False ):
                if( window_size > 1 ):
                    if( max_multiprpnoun_lrb[window_size-1]['lrb'] != lrb ):
                        if( params['debug_verbose'] ):
                            print('\t\tskipping:', lrb, 'since previous lrb was below threshold')
                        continue
                '''
                multi_word_proper_noun_lrb = proper_noun_phrases[lrb].strip()
                if( multi_word_proper_noun_lrb != ngram ):
                    phrase_counts[ lrb ].setdefault( multi_word_proper_noun_lrb, {'freq': 0, 'rate': 0} )
                    phrase_counts[ lrb ][multi_word_proper_noun_lrb]['freq'] += 1
                    phrase_counts[ lrb ][multi_word_proper_noun_lrb]['rate'] = round( phrase_counts[lrb][multi_word_proper_noun_lrb]['freq']/sent_count, 4 )
                    if( params['debug_verbose'] ):
                        print( '\t\t' + lrb + ':', multi_word_proper_noun_lrb )
                if( params['debug_verbose'] ):
                    print( '\t\tsent_count:', sent_count )

        if( params['debug_verbose'] ):
            print('\n\twindow_size:', window_size, 'results:')

        #find multi-word proper noun with the highest frequency for left, right, and both sentence building policies
        for lrb, multiprpnoun_lrb in phrase_counts.items():
            multiprpnoun_lrb = sorted(multiprpnoun_lrb.items(), key=lambda x: x[1]['rate'], reverse=True)
            if( len(multiprpnoun_lrb) != 0 ):
                if( params['debug_verbose'] ):
                    print('\t\tmax', lrb + ':', multiprpnoun_lrb[0])
                rate = multiprpnoun_lrb[0][1]['rate']
                if( rate > max_multiprpnoun_lrb[window_size]['rate'] ):
                    max_multiprpnoun_lrb[window_size]['ngram'] = multiprpnoun_lrb[0][0]
                    max_multiprpnoun_lrb[window_size]['rate'] = rate
                    max_multiprpnoun_lrb[window_size]['lrb'] = lrb

        if( params['debug_verbose'] ):
            print('\tlast max for this window_size:', max_multiprpnoun_lrb[window_size])
            print('\tmax_sent_toks:', max_sent_toks)
            print()

        if( params['mvg_window_min_proper_noun_rate'] > max_multiprpnoun_lrb[window_size]['rate'] or window_size == max_sent_toks ):
            if( params['debug_verbose'] ):
                print("\tbreaking criteria reached: mvg_window_min_proper_noun_rate > max_multiprpnoun_lrb[window_size]['rate'] OR window_size (" + str(window_size) + ") == max_sent_toks (" + str(max_sent_toks) + ")")
                print('\tmvg_window_min_proper_noun_rate:', params['mvg_window_min_proper_noun_rate'])
                print("\tmax_multiprpnoun_lrb[window_size]['rate']:", max_multiprpnoun_lrb[window_size]['rate'])
            break

    while( window_size != 0 ):
        #get best match longest multi-word ngram
        if( max_multiprpnoun_lrb[window_size]['rate'] >= params['mvg_window_min_proper_noun_rate'] ):
            final_multi_word_proper_noun['proper_noun'] = max_multiprpnoun_lrb[window_size]['ngram']
            final_multi_word_proper_noun['rate'] = max_multiprpnoun_lrb[window_size]['rate']
            if( params['debug_verbose'] ):
                print('\tfinal winning max:', max_multiprpnoun_lrb[window_size])
                print('\twindow_size:', window_size)
            break
        window_size -= 1

    return final_multi_word_proper_noun
def pos_glue_split_ngrams(top_ngrams, k, pos_glue_split_ngrams_coeff, ranked_multi_word_proper_nouns, params):
    '''
    In-place annotator: replace fragment ngrams in top_ngrams (first k-1
    entries) with the POS-derived multi-word proper nouns that contain them,
    e.g. "national hurricane" -> "National Hurricane Center".

    A replacement only happens when the proper noun's frequency is at least
    term_freq / pos_glue_split_ngrams_coeff, to avoid swapping a high-quality
    ngram for a rare expansion. When two ngrams resolve to the same proper
    noun, only the higher-ranked ngram keeps it; the other is blanked ('').
    Each replacement is recorded in the entry's 'ngram_history'.
    '''
    if( pos_glue_split_ngrams_coeff == 0 ):
        pos_glue_split_ngrams_coeff = 1

    stopwords = get_dual_stopwords( params['add_stopwords'] )
    multi_word_proper_noun_dedup_set = set()#it's possible for different ngrams to resolve to the same multi-word proper noun so deduplicate favoring higher ranked top_ngrams
    for i in range( len(top_ngrams) ):
        if( i == k - 1 ):
            break

        ngram = top_ngrams[i]['ngram']
        for mult_wd_prpnoun in ranked_multi_word_proper_nouns:
            multi_word_proper_noun = mult_wd_prpnoun[0]
            match_flag = is_ngram_subset(multi_word_proper_noun, ngram, stopwords)

            if( match_flag ):
                if( ngram == multi_word_proper_noun or mult_wd_prpnoun[1]['freq'] < top_ngrams[i]['term_freq']/pos_glue_split_ngrams_coeff ):
                    #this ngram exactly matched a multi_word_proper_noun, and thus very unlikely to be a fragment ngram to be replaced
                    #to avoid replacing high-quality ngram with poor-quality ngram - start
                    '''
                        rationale for: mult_wd_prpnoun[1]['freq'] < top_ngrams[i]['term_freq']/2
                        bad replacement:
                            ngram: tropical storm (freq: 121)
                            replaced with multi_word_proper_noun: ddhhmm tropical storm harvey discussion number
                            rank: {'freq': 5, 'raw': 'DDHHMM TROPICAL STORM HARVEY DISCUSSION NUMBER', 'nnp_rate': 1.0, 'rank': 358}

                            ngram: tropical cyclone (43)
                            replace with multi_word_proper_noun: wikipedia tropical cyclone
                            rank: {'freq': 1, 'raw': 'WIKIPEDIA TROPICAL CYCLONE', 'nnp_rate': 1.0, 'rank': 3340}

                        good replacement:
                            ngram: national hurricane (67)
                            match: national hurricane center
                            rank: {'freq': 68, 'raw': 'National Hurricane Center', 'nnp_rate': 1.0, 'rank': 11}

                            ngram: gulf mexico (56)
                            match: gulf of mexico
                            rank: {'freq': 46, 'raw': 'Gulf of Mexico', 'nnp_rate': 0.6667, 'rank': 41}
                    '''
                    #to avoid replacing high-quality ngram with poor-quality ngram - end
                    pass
                else:
                    new_ngram_dct = {
                        'prev_ngram': top_ngrams[i]['ngram'],
                        'annotator': 'pos',
                        'cur_freq': mult_wd_prpnoun[1]['freq']
                    }

                    if( multi_word_proper_noun in multi_word_proper_noun_dedup_set ):
                        # a higher-ranked ngram already claimed this proper noun
                        top_ngrams[i]['ngram'] = ''
                        new_ngram_dct['cur_ngram'] = ''
                    else:
                        top_ngrams[i]['ngram'] = multi_word_proper_noun
                        new_ngram_dct['cur_ngram'] = multi_word_proper_noun
                        multi_word_proper_noun_dedup_set.add(multi_word_proper_noun)

                    top_ngrams[i].setdefault('ngram_history', [])
                    top_ngrams[i]['ngram_history'].append(new_ngram_dct)
                # only the first (highest-ranked) matching proper noun is considered
                break
def mvg_window_glue_split_ngrams(top_ngrams, k, all_doc_sentences, params=None):
    '''
    In-place annotator: for the first k-1 entries of top_ngrams, find the
    sentences each ngram occurs in and run the moving-window expansion
    (rank_mltwd_proper_nouns) to grow fragment ngrams into multi-word proper
    nouns. Replacements are recorded in 'ngram_history'; the matched
    sentences are attached as 'parent_sentences'. Duplicated expansions are
    blanked ('') in favor of the higher-ranked ngram.
    '''
    print('\nmvg_window_glue_split_ngrams():')
    if( params is None ):
        params = {}
    #params['debug_verbose'] = True

    multi_word_proper_noun_dedup_set = set()#it's possible for different ngrams to resolve to the same multi-word proper noun so deduplicate favoring higher ranked top_ngrams
    for i in range( len(top_ngrams) ):
        if( i == k - 1 ):
            break

        ngram = top_ngrams[i]['ngram']
        if( ngram == '' ):
            #ngram could be blank due to rm by pos_glue_split_ngrams
            continue

        ngram_toks = phraseTokenizer(ngram)#processed in a similar method as the sentences in get_sentence_match_ngram
        phrase_cands = []
        phrase_cands_minus_toks = []
        if( params['debug_verbose'] ):
            print('\t', i, 'ngram:', ngram)

        # gather every sentence (across all posting documents) containing the ngram
        for doc_dct in top_ngrams[i]['postings']:
            doc_indx = doc_dct['doc_indx']
            phrase_cands += get_sentence_match_ngram( ngram, ngram_toks, all_doc_sentences[doc_indx], doc_indx )

        for phrase in phrase_cands:
            phrase_cands_minus_toks.append({
                'sentence': phrase['ori_sent'],
                'sent_indx': phrase['sent_indx'],
                'doc_indx': phrase['doc_indx']
            })

        multi_word_proper_noun = rank_mltwd_proper_nouns( ngram, ngram_toks, phrase_cands, params=params )
        if( len(multi_word_proper_noun) != 0 ):
            new_ngram_dct = {
                'prev_ngram': top_ngrams[i]['ngram'],
                'annotator': 'mvg_window'
            }

            if( multi_word_proper_noun['proper_noun'] in multi_word_proper_noun_dedup_set ):
                # a higher-ranked ngram already claimed this expansion
                top_ngrams[i]['ngram'] = ''
                new_ngram_dct['cur_ngram'] = ''
            else:
                top_ngrams[i]['ngram'] = multi_word_proper_noun['proper_noun']
                new_ngram_dct['cur_ngram'] = multi_word_proper_noun['proper_noun']
                new_ngram_dct['proper_noun_rate'] = multi_word_proper_noun['rate']
                multi_word_proper_noun_dedup_set.add( multi_word_proper_noun['proper_noun'] )

            top_ngrams[i].setdefault('ngram_history', [])
            top_ngrams[i]['ngram_history'].append(new_ngram_dct)

        top_ngrams[i]['parent_sentences'] = phrase_cands_minus_toks
        if( params['debug_verbose'] ):
            print('*' * 200)
            print('*' * 200)
            print()
def rm_empty_ngrams(top_ngrams, k):
    """Drop entries whose ngram was blanked out by earlier annotators, considering at most the first k-1 entries."""
    kept = []
    for indx, entry in enumerate(top_ngrams):
        if indx == k - 1:
            break
        if entry['ngram'] != '':
            kept.append(entry)
    return kept
def rm_subset_top_ngrams(top_ngrams, k, rm_subset_top_ngrams_coeff, params):
    '''
    Deduplicate the first k-1 top_ngrams in place by removing ngrams that are
    subsets of other (longer) ngrams.

    Longer ngrams are processed first. When a parent ngram contains a child
    ngram: if the parent ranks higher, the child is blanked (''); if the
    parent ranks lower, the parent is blanked and - provided its term_freq is
    at least the child's term_freq / rm_subset_top_ngrams_coeff - it "adopts"
    the first such child by replacing the child's text (recorded in
    'ngram_history'); further qualifying children are blanked. Returns the
    mutated top_ngrams list.
    '''
    if( rm_subset_top_ngrams_coeff == 0 ):
        rm_subset_top_ngrams_coeff = 1

    debug_verbose = False
    ngram_tok_sizes = {}
    stopwords = get_dual_stopwords( params['add_stopwords'] )
    for i in range( len(top_ngrams) ):
        if( i == k - 1 ):
            break
        ngram = top_ngrams[i]['ngram']
        top_ngrams[i]['adopted_child'] = False
        ngram_tok_sizes[i] = len( phraseTokenizer(ngram) )

    #prioritize longer top_ngrams
    ngram_tok_sizes = sorted(ngram_tok_sizes.items(), key=lambda x: x[1], reverse=True)
    for ngram_indx in ngram_tok_sizes:
        parent_indx = ngram_indx[0]
        parent_ngram_cand = top_ngrams[parent_indx]['ngram']
        if( parent_ngram_cand == '' ):
            continue

        for child_indx in range( len(ngram_tok_sizes) ):
            if( parent_indx == child_indx ):
                continue

            child_ngram_cand = top_ngrams[child_indx]['ngram']
            if( child_ngram_cand == '' ):
                continue

            if( is_ngram_subset(parent_ngram_cand, child_ngram_cand, stopwords) ):
                if( parent_indx < child_indx ):
                    #if parent is at a higher rank (lower index) than child, so delete child, parent remains unmodified (top_ngrams[parent_indx]['ngram'] already has parent_ngram_cand)
                    top_ngrams[child_indx]['ngram'] = ''
                else:
                    #parent (longer) is at a lower rank (higher index) than child,
                    #INSTEAD OF: delete parent to give preference shorter highly ranked child, child remains unmodified
                    #replace child (higher rank, shorter ngram) with parent (lower rank, longer ngram) if parent's TF >= child's TF * 1/k,
                    #multiple children may fulfil this criteria, so parent should adopt (replace) the first child and remove subsequent children that fulfill this criteria
                    top_ngrams[parent_indx]['ngram'] = ''
                    if( top_ngrams[parent_indx]['term_freq'] >= top_ngrams[child_indx]['term_freq']/rm_subset_top_ngrams_coeff ):
                        if( top_ngrams[parent_indx]['adopted_child'] == False ):
                            # first qualifying child: adopt it (replace its text with the parent's)
                            top_ngrams[parent_indx]['adopted_child'] = True
                            top_ngrams[child_indx]['ngram'] = parent_ngram_cand

                            new_ngram_dct = {
                                'prev_ngram': child_ngram_cand,
                                'cur_ngram': parent_ngram_cand,
                                'annotator': 'subset'
                            }
                            top_ngrams[child_indx].setdefault('ngram_history', [])
                            top_ngrams[child_indx]['ngram_history'].append(new_ngram_dct)

                            if( debug_verbose ):
                                print('\teven though parent (' + str(parent_indx) + ') is in lower index than child:', child_indx)
                                print('\treplacing child_ngram_cand:', '"' + child_ngram_cand + '" with parent_ngram_cand: "' + parent_ngram_cand + '"')
                                print('\treplacing parent/child tf:', top_ngrams[parent_indx]['term_freq'], top_ngrams[child_indx]['term_freq'])
                                print()
                        else:
                            # parent already adopted a child: blank this duplicate child
                            top_ngrams[child_indx]['ngram'] = ''
                            if( debug_verbose ):
                                print('\teven though parent (' + str(parent_indx) + ') is in lower index than child:', child_indx)
                                print('\twould have replaced child_ngram_cand:', '"' + child_ngram_cand + '" with parent_ngram_cand: "' + parent_ngram_cand + '"')
                                print('\twould have replaced parent/child tf:', top_ngrams[parent_indx]['term_freq'], top_ngrams[child_indx]['term_freq'])
                                print('\tbut this parent has already adopted a child so delete this child.')
                                print()
    return top_ngrams
def print_top_ngrams(n, top_ngrams, top_ngram_count, params=None):
    """Pretty-print a table of up to top_ngram_count ngrams with their term frequencies and rates."""
    if params is None:
        params = {}
    params.setdefault('ngram_printing_mw', 40)
    params.setdefault('title', '')

    mw = params['ngram_printing_mw']
    ngram_count = len(top_ngrams)
    print('Summary for', ngram_count, 'top n-grams (base n: ' + str(n) + '):')
    print()
    if params['title'] != '':
        print( params['title'] )

    print( '{:^6} {:<{mw}} {:^6} {:<6}'.format('rank', 'ngram', 'TF', 'TF-Rate', mw=mw) )
    for row in range(top_ngram_count):
        if row == ngram_count:
            break

        entry = top_ngrams[row]
        ngram_txt = entry['ngram']
        if len(ngram_txt) > mw:
            # truncate long ngrams so the table columns stay aligned
            ngram_txt = ngram_txt[:mw-3] + '...'
        print( "{:^6} {:<{mw}} {:^6} {:^6}".format(row+1, ngram_txt, entry['term_freq'], "{:.2f}".format(entry['term_rate']), mw=mw) )
    print()
def extract_top_ngrams(doc_lst, doc_dct_lst, n, params):
    '''
    Compute the term-frequency ranking of size-n ngrams over doc_lst,
    excluding stopwords. TF is binary per document (collection TF) when the
    collection has more than one document, raw counts otherwise.

    Returns a list of ngram dicts (see get_ngram_dct), each with postings for
    the documents it appears in and a 'term_rate' normalized by collection
    size (or total frequency for a single document), sorted by descending
    term_freq. Returns [] when vectorization fails.
    '''
    print('\nextract_top_ngrams(): token_pattern:', params['token_pattern'])
    '''
        Note on unified all_doc_sentences and top_ngrams text processing:
        By default, all_doc_sentences are generated from stanford corenlp sentence annotator. Processing the text used to
        generate top_ngrams the same way (corenlp's context-sensitive tokenizer) would require feeding CountVectorizer the
        vocabulary (ngrams), which is hard when unigrams are to be generated, so top_ngrams are NOT processed like
        all_doc_sentences. The consequence is that some ngrams from top_ngrams may not match those extracted from all_sentences.
    '''
    doc_count = len(doc_dct_lst)
    if( doc_count == 1 ):
        binary_tf_flag = False
    else:
        binary_tf_flag = True

    bif_stopwords = bifurcate_stopwords( params['add_stopwords'] )
    # unigram stopwords go to CountVectorizer; multigram stopwords are filtered below
    stopwords = getStopwordsSet() | bif_stopwords['unigrams']
    count_vectorizer = CountVectorizer(stop_words=stopwords, token_pattern=params['token_pattern'], ngram_range=(n, n), binary=binary_tf_flag)
    try:
        #tf_matrix is a binary TF matrix if doc_lst.len > 1, non-binary otherwise
        tf_matrix = count_vectorizer.fit_transform(doc_lst).toarray()
    except Exception:
        # bug fix: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; still best-effort - log and return empty
        genericErrorInfo()
        return []

    #every entry in list top_ngrams is of type: (a, b), a: term, b: term position in TF matrix
    #NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2 in
    #favor of get_feature_names_out(); left unchanged for compatibility with
    #the sklearn version this project runs - confirm before upgrading.
    top_ngrams = count_vectorizer.get_feature_names()

    filtered_top_ngrams = []
    total_freq = 0
    for i in range(tf_matrix.shape[1]):
        if( top_ngrams[i] in bif_stopwords['multigrams'] ):
            continue

        matrix_row = tf_matrix[:,i]
        if( binary_tf_flag ):
            row_sum_tf = np.count_nonzero(matrix_row)#row_sum_tf count (TF) of documents with non-zero entries
        else:
            row_sum_tf = int(matrix_row[0])

        #select documents with non-zero entries for term
        non_zero_docs = np.flatnonzero(matrix_row)#non_zero_docs: list of index positions of documents with nonzero entry for vocabulary at i

        postings = []
        for doc_indx in non_zero_docs:
            doc_indx = int(doc_indx)#Int64 to native int
            postings.append({
                'doc_indx': doc_indx,
                'doc_id': doc_dct_lst[doc_indx]['doc_id'],
                'doc_details': fmt_posting( doc_dct_lst[doc_indx] )
            })

        filtered_top_ngrams.append( get_ngram_dct(top_ngrams[i], row_sum_tf, postings) )
        total_freq += filtered_top_ngrams[-1]['term_freq']

    # normalize rates: by total frequency for one document, by doc count otherwise
    if( doc_count == 1 ):
        N = total_freq
    else:
        N = doc_count

    for i in range(len(filtered_top_ngrams)):
        filtered_top_ngrams[i]['term_rate'] = filtered_top_ngrams[i]['term_freq']/N

    return sorted(filtered_top_ngrams, key=lambda ngramEntry: ngramEntry['term_freq'], reverse=True)
def get_user_stopwords(comma_sep_stopwords):
    """Parse a comma-separated stopword string into a set of lowercase, trimmed terms."""
    comma_sep_stopwords = comma_sep_stopwords.strip()
    if not comma_sep_stopwords:
        return set()
    return { term.strip().lower() for term in comma_sep_stopwords.split(',') }
def get_top_ngrams(n, doc_dct_lst, params=None):
    '''
    End-to-end pipeline: extract top size-n ngrams from a document collection,
    glue split ngrams back into multi-word proper nouns (POS and moving-window
    annotators), remove subset/empty ngrams, then optionally rank documents
    and sentences.

    doc_dct_lst entries: {'doc_id': <optional>, 'text': <document text>} -
    'text' is deleted in place after sentence extraction.
    Returns the report dict ({} on empty input or failed extraction).
    '''
    print('\nget_top_ngram():')
    # bug fix: threshold=np.nan raises on NumPy >= 1.22 ("threshold must be
    # non-NAN"); sys.maxsize is the documented way to request full printing
    np.set_printoptions(threshold=sys.maxsize, linewidth=120)

    if( params is None or isinstance(params, dict) == False ):
        params = {}

    report = {}
    if( len(doc_dct_lst) == 0 ):
        return report

    if( n < 1 ):
        n = 1

    params = get_default_args(params)
    params['add_stopwords'] = get_user_stopwords( params['add_stopwords'] )
    params.setdefault('binary_tf_flag', True)#Multiple occurrence of term T in a document counts as 1, TF = total number of times term appears in collection
    params['stanford_corenlp_server'] = nlpIsServerOn()

    if( params['stanford_corenlp_server'] == False ):
        print('\n\tAttempting to start Stanford CoreNLP Server (we need it to segment sentences)\n')
        nlpServerStartStop('start')
        params['stanford_corenlp_server'] = nlpIsServerOn()

    doc_lst = []
    #doc_dct_lst: {doc_id: , text: }
    all_doc_sentences = {}
    multi_word_proper_nouns = {}
    dedup_set = set()
    for i in range(len(doc_dct_lst)):
        doc_dct_lst[i].setdefault('doc_id', i)
        doc_lst.append( doc_dct_lst[i]['text'] )

        #placing sentences inside doc_dct_lst[i] accounted for more runtime overhead
        all_doc_sentences[i] = extract_doc_sentences(
            doc_dct_lst[i]['text'],
            params['sentence_tokenizer'],
            dedup_set,
            multi_word_proper_nouns,
            params=params
        )
        del doc_dct_lst[i]['text']

    multi_word_proper_nouns = rank_proper_nouns(multi_word_proper_nouns)
    print('\tdone adding sentences')
    print('\tshift:', params['shift'])

    top_ngrams = extract_top_ngrams(doc_lst, doc_dct_lst, n, params)
    if( len(top_ngrams) == 0 ):
        return report

    if( params['top_ngram_count'] < 1 or params['top_ngram_count'] > len(top_ngrams) ):
        params['top_ngram_count'] = len(top_ngrams)

    '''
        shifting is off by default
        shifting is done in an attempt to perform ngram summary on non-top terms
        this may be required for comparing two different collections that have similar top terms
        so shifting is an attempt to perform process non-top terms in order to find distinguishing ngrams below the top ngrams
    '''
    shift_factor = params['shift'] * params['top_ngram_count']
    if( shift_factor >= len(top_ngrams) ):
        shift_factor = 0

    print('\ttop_ngrams.len:', len(top_ngrams))
    if( shift_factor > 0 ):
        params['top_ngram_shift_factor'] = shift_factor
        top_ngrams = top_ngrams[shift_factor:]
        print('\ttop_ngrams.post shift len:', len(top_ngrams))

    doc_count = len(doc_dct_lst)
    if( doc_count == 1 ):
        N = len(top_ngrams)
        params['tf_label'] = 'Single Document Term Frequency'
        params['binary_tf_flag'] = False
    else:
        N = doc_count
        params['tf_label'] = 'Collection Term Frequency (1 term count per document)'

    params['tf_normalizing_divisor'] = N
    report = { 'n': n, 'top_ngram_count': params['top_ngram_count']}

    print('\tdoc_lst.len:', doc_count)
    print('\ntop ngrams before finding multi-word proper nouns:')
    print_top_ngrams( n, top_ngrams, params['top_ngram_count'], params=params )

    if( params['no_pos_glue_split_ngrams'] == False ):
        pos_glue_split_ngrams( top_ngrams, params['top_ngram_count'] * 2, params['pos_glue_split_ngrams_coeff'], multi_word_proper_nouns, params )

    #subset top_ngrams will be replace with their supersets, thus shrinking top_ngram_count counts after this operation, so maximize the chances of reporting user-supplied c, begin by processing: top_ngram_count * 2
    if( params['no_mvg_window_glue_split_ngrams'] == False ):
        mvg_window_glue_split_ngrams( top_ngrams, params['top_ngram_count'] * 2, all_doc_sentences, params=params )

    print('\ntop ngrams after finding multi-word proper nouns:')
    print_top_ngrams( n, top_ngrams, params['top_ngram_count'], params=params )

    top_ngrams = rm_subset_top_ngrams( top_ngrams, params['top_ngram_count'] * 2, params['rm_subset_top_ngrams_coeff'], params )
    print('\ntop ngrams after removing subset phrases:')
    print_top_ngrams( n, top_ngrams, params['top_ngram_count'], params=params )

    top_ngrams = rm_empty_ngrams( top_ngrams, params['top_ngram_count'] * 2 )
    if( params['no_rank_docs'] == False ):
        report['ranked_docs'] = get_ranked_docs( top_ngrams, doc_dct_lst )

    if( params['sentences_rank_count'] > 0 and params['no_rank_sentences'] == False ):
        ngram_sentences = combine_ngrams( top_ngrams[:params['top_ngram_count']] )
        report['ranked_sentences'] = rank_sents_frm_top_ranked_docs( ngram_sentences, report['ranked_docs'], all_doc_sentences, params )

    report['top_ngrams'] = top_ngrams[:params['top_ngram_count']]
    print('\ntop ngrams after shifting empty slots:')
    print_top_ngrams( n, top_ngrams, params['top_ngram_count'], params=params )

    #fmt_report() need to be called last since it potentially could modify merged_ngrams
    fmt_report( report['top_ngrams'], params )
    report['params'] = params
    report['params']['collection_doc_count'] = doc_count

    if( params['stanford_corenlp_server'] == False ):
        print('\n\tStanford CoreNLP Server was OFF after an attempt to start it, so regex_get_sentences() was used to segment sentences.\n\tWe highly recommend you install and run it \n\t(see: https://ws-dl.blogspot.com/2018/03/2018-03-04-installing-stanford-corenlp.html)\n\tbecause Stanford CoreNLP does a better job segmenting sentences than regex.\n')

    return report
def get_args():
    """Build the command-line argument parser for sumgram.

    Returns:
        argparse.ArgumentParser: parser configured with every sumgram option.
    """
    # (flags, keyword arguments) pairs, registered in the original order so
    # the generated --help output is unchanged.
    arg_specs = [
        (('path',), dict(help='Folder path containing input documents or path to single file')),
        (('-n',), dict(help='The base n (integer) for generating top ngrams, if n = 2, bigrams would be the base ngram', type=int, default=2)),
        (('-o', '--output'), dict(help='Output file')),
        (('-s', '--sentences-rank-count'), dict(help='The count of top ranked sentences to generate', type=int, default=10)),
        (('-t', '--top-ngram-count'), dict(help='The count of top ngrams to generate', type=int, default=10)),
        (('--add-stopwords',), dict(help='Comma-separated list of additional stopwords', default='')),
        (('--corenlp-host',), dict(help='Stanford CoreNLP Server host (needed for decent sentence tokenizer)', default='localhost')),
        (('--corenlp-port',), dict(help='Stanford CoreNLP Server port (needed for decent sentence tokenizer)', default='9000')),
        (('--corenlp-max-sentence-words',), dict(help='Stanford CoreNLP maximum words per sentence', default=100)),
        (('--debug-verbose',), dict(help='Print statements needed for debugging purpose', action='store_true')),
        (('--include-postings',), dict(help='Include inverted index of term document mappings', action='store_true')),
        (('--mvg-window-min-proper-noun-rate',), dict(help='Mininum rate threshold (larger, stricter) to consider a multi-word proper noun a candidate to replace an ngram', type=float, default=0.5)),
        (('--ngram-printing-mw',), dict(help='Mininum width for printing ngrams', type=int, default=50)),
        (('--no-rank-docs',), dict(help='Do not rank documents flag (default is True)', action='store_true')),
        (('--no-rank-sentences',), dict(help='Do not rank sentences flag (default is True)', action='store_true')),
        (('--no-pos-glue-split-ngrams',), dict(help='Do not glue split top ngrams with POS method (default is True)', action='store_true')),
        (('--no-mvg-window-glue-split-ngrams',), dict(help='Do not glue split top ngrams with MOVING WINDOW method (default is True)', action='store_true')),
        (('--pos-glue-split-ngrams-coeff',), dict(help='Coeff for permitting matched ngram replacement. Interpreted as 1/coeff', type=int, default=2)),
        (('--pretty-print',), dict(help='Pretty print JSON output', action='store_true')),
        (('--rm-subset-top-ngrams-coeff',), dict(help='Coeff. for permitting matched ngram replacement. Interpreted as 1/coeff', type=int, default=2)),
        (('--sentence-tokenizer',), dict(help='For sentence ranking: Regex string that specifies tokens for sentence tokenization', default='[.?!][ \n]|\n+')),
        (('--shift',), dict(help='Factor to shift top ngram calculation', type=int, default=0)),
        (('--token-pattern',), dict(help='Regex string that specifies tokens for document tokenization', default=r'(?u)\b[a-zA-Z\'\’-]+[a-zA-Z]+\b|\d+[.,]?\d*')),
        (('--title',), dict(help='Text label to be used as a heading when printing top ngrams', default='')),
    ]
    parser = argparse.ArgumentParser()
    for flags, kwargs in arg_specs:
        parser.add_argument(*flags, **kwargs)
    return parser
def get_default_args(user_params):
    """Fill ``user_params`` with the parser's default values.

    Intended for callers that invoke get_top_ngrams() directly instead of
    going through main(). Keys the caller already supplied are left alone;
    the synthetic 'help' entry is stripped. Mutates and returns user_params.
    """
    for flag, action in get_args()._option_string_actions.items():
        # Options without a concrete default (e.g. -o/--output) are skipped.
        if action.default is None:
            continue
        # Never clobber a value the caller provided explicitly.
        user_params.setdefault(action.dest, action.default)
    del user_params['help']
    return user_params
def proc_req(doc_lst, params):
    """Generate the top-ngram report for ``doc_lst`` and optionally write it.

    The report is dumped as JSON to params['output'] when that path is set;
    otherwise it is only computed.
    """
    report = get_top_ngrams(params['n'], doc_lst, params)
    out_file = params['output']
    if out_file is not None:
        dumpJsonToFile(out_file, report, indentFlag=params['pretty_print'])
def main():
    """CLI entry point: parse arguments, read the input text, run sumgram."""
    args = get_args().parse_args()
    params = vars(args)
    proc_req(getText(args.path), params)
if __name__ == 'sumgram.sumgram':
from sumgram.util import dumpJsonToFile
from sumgram.util import getStopwordsSet
from sumgram.util import genericErrorInfo
from sumgram.util import getText
from sumgram.util import isMatchInOrder
from sumgram.util import nlpIsServerOn
from sumgram.util import nlpSentenceAnnotate
from sumgram.util import nlpServerStartStop
from sumgram.util import overlapFor2Sets
from sumgram.util import parallelTask
from sumgram.util import phraseTokenizer
from sumgram.util import readTextFromFile
from sumgram.util import rmStopwords
from sumgram.util import sortDctByKey
else:
from util import dumpJsonToFile
from util import getStopwordsSet
from util import genericErrorInfo
from util import getText
from util import isMatchInOrder
from util import nlpIsServerOn
from util import nlpSentenceAnnotate
from util import nlpServerStartStop
from util import overlapFor2Sets
from util import parallelTask
from util import phraseTokenizer
from util import readTextFromFile
from util import rmStopwords
from util import sortDctByKey
if __name__ == '__main__':
main()
| [
"re.split",
"util.nlpServerStartStop",
"argparse.ArgumentParser",
"util.dumpJsonToFile",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.flatnonzero",
"util.isMatchInOrder",
"util.nlpIsServerOn",
"util.sortDctByKey",
"util.overlapFor2Sets",
"util.phraseTokenizer",
"util.genericErrorI... | [((1042, 1065), 'copy.deepcopy', 'copy.deepcopy', (['doc_dets'], {}), '(doc_dets)\n', (1055, 1065), False, 'import copy\n'), ((3850, 3884), 're.split', 're.split', (['sentence_tokenizer', 'text'], {}), '(sentence_tokenizer, text)\n', (3858, 3884), False, 'import re\n'), ((5033, 5067), 'util.sortDctByKey', 'sortDctByKey', (['ranked_docs', '"""score"""'], {}), "(ranked_docs, 'score')\n", (5045, 5067), False, 'from util import sortDctByKey\n'), ((11218, 11248), 'util.rmStopwords', 'rmStopwords', (['parent', 'stopwords'], {}), '(parent, stopwords)\n', (11229, 11248), False, 'from util import rmStopwords\n'), ((11258, 11287), 'util.rmStopwords', 'rmStopwords', (['child', 'stopwords'], {}), '(child, stopwords)\n', (11269, 11287), False, 'from util import rmStopwords\n'), ((11469, 11498), 'util.isMatchInOrder', 'isMatchInOrder', (['child', 'parent'], {}), '(child, parent)\n', (11483, 11498), False, 'from util import isMatchInOrder\n'), ((28408, 28531), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': 'stopwords', 'token_pattern': "params['token_pattern']", 'ngram_range': '(n, n)', 'binary': 'binary_tf_flag'}), "(stop_words=stopwords, token_pattern=params['token_pattern'],\n ngram_range=(n, n), binary=binary_tf_flag)\n", (28423, 28531), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((30502, 30554), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan', 'linewidth': '(120)'}), '(threshold=np.nan, linewidth=120)\n', (30521, 30554), True, 'import numpy as np\n'), ((31017, 31032), 'util.nlpIsServerOn', 'nlpIsServerOn', ([], {}), '()\n', (31030, 31032), False, 'from util import nlpIsServerOn\n'), ((35721, 35746), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (35744, 35746), False, 'import argparse\n'), ((39568, 39586), 'util.getText', 'getText', (['args.path'], {}), '(args.path)\n', (39575, 39586), False, 'from util import getText\n'), 
((203, 220), 'util.getStopwordsSet', 'getStopwordsSet', ([], {}), '()\n', (218, 220), False, 'from util import getStopwordsSet\n'), ((1616, 1657), 'util.overlapFor2Sets', 'overlapFor2Sets', (['ngram_sent', 'doc_sentence'], {}), '(ngram_sent, doc_sentence)\n', (1631, 1657), False, 'from util import overlapFor2Sets\n'), ((20804, 20826), 'util.phraseTokenizer', 'phraseTokenizer', (['ngram'], {}), '(ngram)\n', (20819, 20826), False, 'from util import phraseTokenizer\n'), ((28341, 28358), 'util.getStopwordsSet', 'getStopwordsSet', ([], {}), '()\n', (28356, 28358), False, 'from util import getStopwordsSet\n'), ((29307, 29333), 'numpy.flatnonzero', 'np.flatnonzero', (['matrix_row'], {}), '(matrix_row)\n', (29321, 29333), True, 'import numpy as np\n'), ((31182, 31209), 'util.nlpServerStartStop', 'nlpServerStartStop', (['"""start"""'], {}), "('start')\n", (31200, 31209), False, 'from util import nlpServerStartStop\n'), ((31248, 31263), 'util.nlpIsServerOn', 'nlpIsServerOn', ([], {}), '()\n', (31261, 31263), False, 'from util import nlpIsServerOn\n'), ((39392, 39467), 'util.dumpJsonToFile', 'dumpJsonToFile', (["params['output']", 'report'], {'indentFlag': "params['pretty_print']"}), "(params['output'], report, indentFlag=params['pretty_print'])\n", (39406, 39467), False, 'from util import dumpJsonToFile\n'), ((11835, 11860), 'util.phraseTokenizer', 'phraseTokenizer', (['sentence'], {}), '(sentence)\n', (11850, 11860), False, 'from util import phraseTokenizer\n'), ((23035, 23057), 'util.phraseTokenizer', 'phraseTokenizer', (['ngram'], {}), '(ngram)\n', (23050, 23057), False, 'from util import phraseTokenizer\n'), ((28685, 28703), 'util.genericErrorInfo', 'genericErrorInfo', ([], {}), '()\n', (28701, 28703), False, 'from util import genericErrorInfo\n'), ((29083, 29111), 'numpy.count_nonzero', 'np.count_nonzero', (['matrix_row'], {}), '(matrix_row)\n', (29099, 29111), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Given a trace file, gives the self-time of userspace slices broken
down by process, thread and thread state.
"""
import argparse
import cmd
import logging
import numpy as np
import pandas as pd
import plotille
from perfetto.batch_trace_processor.api import BatchTraceProcessor, BatchTraceProcessorConfig
from perfetto.trace_processor import TraceProcessorException, TraceProcessorConfig
from typing import List
class TpBatchShell(cmd.Cmd):
  """Interactive shell over a batch of loaded traces.

  Each command runs SQL against every trace through BatchTraceProcessor and
  renders the combined result as a table, histogram or per-value count.
  """

  def __init__(self, files: List[str], batch_tp: BatchTraceProcessor):
    super().__init__()
    self.files = files
    self.batch_tp = batch_tp

  def do_table(self, arg: str):
    """Run a query and print the flattened result table."""
    try:
      print(self.batch_tp.query_and_flatten(arg))
    except TraceProcessorException as ex:
      logging.error("Query failed: {}".format(ex))

  def do_histogram(self, arg: str):
    """Run a single-column query and print a horizontal histogram."""
    try:
      values = self.batch_tp.query_single_result(arg)
      print(plotille.histogram(values))
      self.print_percentiles(values)
    except TraceProcessorException as ex:
      logging.error("Query failed: {}".format(ex))

  def do_vhistogram(self, arg: str):
    """Run a single-column query and print a vertical histogram."""
    try:
      values = self.batch_tp.query_single_result(arg)
      print(plotille.hist(values))
      self.print_percentiles(values)
    except TraceProcessorException as ex:
      logging.error("Query failed: {}".format(ex))

  def do_count(self, arg: str):
    """Run a single-column query and print occurrence counts per value."""
    try:
      values = self.batch_tp.query_single_result(arg)
      counts = {}
      for value in values:
        counts[value] = counts.get(value, 0) + 1
      print(counts)
    except TraceProcessorException as ex:
      logging.error("Query failed: {}".format(ex))

  def do_close(self, _):
    """Exit the shell."""
    return True

  def do_quit(self, _):
    """Exit the shell."""
    return True

  def do_EOF(self, _):
    """Exit the shell on Ctrl-D, printing a trailing newline first."""
    print("")
    return True

  def print_percentiles(self, data):
    """Print which trace file sits nearest each representative percentile."""
    percentiles = [25, 50, 75, 95, 99, 99.9]
    nearest = np.percentile(data, percentiles, interpolation='nearest')
    logging.info("Representative traces for percentiles")
    for pct, near in zip(percentiles, nearest):
      print("{}%: {}".format(pct, self.files[data.index(near)]))
def main():
  """Load traces, run a query file and/or drop into an interactive shell.

  Trace files may be given on the command line, via --file-list, or both.
  When --query-file is set, all but the last ';'-separated query are executed
  for side effects and the last one is printed as CSV.

  Returns:
    1 when no input files were provided; otherwise None (success).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--shell-path', default=None)
  parser.add_argument('--verbose', action='store_true', default=False)
  parser.add_argument('--file-list', default=None)
  parser.add_argument('--query-file', default=None)
  parser.add_argument('--interactive', default=None)
  parser.add_argument('files', nargs='*')
  args = parser.parse_args()

  logging.basicConfig(level=logging.DEBUG)

  files = args.files
  if args.file_list:
    with open(args.file_list, 'r') as f:
      files += f.read().splitlines()

  if not files:
    logging.info("At least one file must be specified in files or file list")
    # Bug fix: previously execution fell through and tried to load an empty
    # trace set; bail out instead. main()'s result is passed to exit(), so a
    # non-zero value reports the failure to the caller.
    return 1

  logging.info('Loading traces...')
  config = BatchTraceProcessorConfig(
      tp_config=TraceProcessorConfig(
          bin_path=args.shell_path,
          verbose=args.verbose,
      ))
  with BatchTraceProcessor(files, config) as batch_tp:
    if args.query_file:
      logging.info('Running query file...')
      with open(args.query_file, 'r') as f:
        queries_str = f.read()
      # Split on ';\n' boundaries; only the final query's flattened result
      # is printed, the preceding ones run for their side effects.
      queries = [q.strip() for q in queries_str.split(";\n")]
      for q in queries[:-1]:
        batch_tp.query(q)
      res = batch_tp.query_and_flatten(queries[-1])
      print(res.to_csv(index=False))
    if args.interactive or not args.query_file:
      try:
        TpBatchShell(files, batch_tp).cmdloop()
      except KeyboardInterrupt:
        pass
    logging.info("Closing; please wait...")
if __name__ == '__main__':
exit(main())
| [
"logging.basicConfig",
"perfetto.batch_trace_processor.api.BatchTraceProcessor",
"argparse.ArgumentParser",
"perfetto.trace_processor.TraceProcessorConfig",
"plotille.histogram",
"numpy.percentile",
"logging.info",
"plotille.hist"
] | [((2726, 2751), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2749, 2751), False, 'import argparse\n'), ((3105, 3145), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (3124, 3145), False, 'import logging\n'), ((3365, 3398), 'logging.info', 'logging.info', (['"""Loading traces..."""'], {}), "('Loading traces...')\n", (3377, 3398), False, 'import logging\n'), ((2470, 2527), 'numpy.percentile', 'np.percentile', (['data', 'percentiles'], {'interpolation': '"""nearest"""'}), "(data, percentiles, interpolation='nearest')\n", (2483, 2527), True, 'import numpy as np\n'), ((2532, 2585), 'logging.info', 'logging.info', (['"""Representative traces for percentiles"""'], {}), "('Representative traces for percentiles')\n", (2544, 2585), False, 'import logging\n'), ((3288, 3361), 'logging.info', 'logging.info', (['"""At least one file must be specified in files or file list"""'], {}), "('At least one file must be specified in files or file list')\n", (3300, 3361), False, 'import logging\n'), ((3560, 3594), 'perfetto.batch_trace_processor.api.BatchTraceProcessor', 'BatchTraceProcessor', (['files', 'config'], {}), '(files, config)\n', (3579, 3594), False, 'from perfetto.batch_trace_processor.api import BatchTraceProcessor, BatchTraceProcessorConfig\n'), ((4118, 4157), 'logging.info', 'logging.info', (['"""Closing; please wait..."""'], {}), "('Closing; please wait...')\n", (4130, 4157), False, 'import logging\n'), ((3453, 3521), 'perfetto.trace_processor.TraceProcessorConfig', 'TraceProcessorConfig', ([], {'bin_path': 'args.shell_path', 'verbose': 'args.verbose'}), '(bin_path=args.shell_path, verbose=args.verbose)\n', (3473, 3521), False, 'from perfetto.trace_processor import TraceProcessorException, TraceProcessorConfig\n'), ((3638, 3675), 'logging.info', 'logging.info', (['"""Running query file..."""'], {}), "('Running query file...')\n", (3650, 3675), False, 'import logging\n'), ((1531, 
1555), 'plotille.histogram', 'plotille.histogram', (['data'], {}), '(data)\n', (1549, 1555), False, 'import plotille\n'), ((1796, 1815), 'plotille.hist', 'plotille.hist', (['data'], {}), '(data)\n', (1809, 1815), False, 'import plotille\n')] |
import numpy as np
from Get_global_value import num_q
from Get_global_value import J_type
from Get_global_value import Ez
from Get_global_value import BB
from Get_global_value import m0
from Get_global_value import m
from Get_global_value import mass
from Get_global_value import inertia0
from Get_global_value import inertia
from Get_global_value import cc
from Get_global_value import c0
from calc_jr import calc_jr
from calc_jt import calc_jt
from skew_sym import skew_sym
from calc_vel import calc_vel
from calc_acc import calc_acc
from Get_global_value import Gravity
from Get_global_value import SS
from Get_global_value import SE
from Get_global_value import ce
from Get_global_value import S0
from cross import cross
def r_ne(R0, RR, A0, AA, v0, w0, vd0, wd0, q, qd, qdd, Fe, Te):
    """Recursive Newton-Euler inverse dynamics for a free-floating multibody system.

    Computes the generalized forces needed to realize the given motion: the
    base force and torque plus one effort per joint (a torque for rotational
    'R' joints, a force for prismatic 'P' joints). Link connectivity (SS, S0,
    SE), masses, inertias and geometry come from the Get_global_value module.

    Args:
        R0, RR: base / link positions (not used directly here; kept for
            interface compatibility with callers).
        A0: 3x3 rotation matrix of the base w.r.t. the inertial frame.
        AA: (num_q, 3, 3) rotation matrices of each link w.r.t. inertial frame.
        v0, w0: base linear / angular velocity.
        vd0, wd0: base linear / angular acceleration.
        q, qd, qdd: joint positions, velocities and accelerations.
        Fe, Te: external forces / torques applied at link end points.

    Returns:
        numpy.ndarray of length num_q + 6: [F0 (3), T0 (3), tau (num_q)].
    """
    A_I_0 = A0
    # Forward pass: propagate velocities and accelerations out to every link.
    vv, ww = calc_vel(A0, AA, v0, w0, q, qd)
    vd, wd = calc_acc(A0, AA, w0, ww, vd0, wd0, q, qd, qdd)
    # Base inertial force/torque (Newton and Euler equations, inertial frame).
    FF0 = m0 * (vd0 - Gravity)
    inertia_I_0 = np.dot(np.dot(A_I_0, inertia0), A_I_0.T)
    TT0 = np.dot(inertia_I_0, wd0[0:3]) + cross(w0[0:3], np.dot(inertia_I_0, w0[0:3]))
    # Inertial force/torque acting on each link.
    FF = np.zeros((num_q, 3))
    TT = np.zeros((num_q, 3))
    if num_q == 0:
        print('Single body, there is no link')
    else:
        for i in range(num_q):
            A_I_i = AA[i, :, :]
            in_i = inertia[i, :, :]
            FF[i, :] = m[i] * (vd[i, :] - Gravity)
            inertia_I_i = np.dot(np.dot(A_I_i, in_i), A_I_i.T)
            TT[i, :] = np.dot(inertia_I_i, wd[i, :]) + cross(ww[i, :], np.dot(inertia_I_i, ww[i, :]))
    # Backward pass: accumulate joint forces/torques from the tip to the base.
    Fj = np.zeros((num_q, 3))
    Tj = np.zeros((num_q, 3))
    if num_q != 0:
        for i in range(num_q - 1, -1, -1):
            F = np.zeros(3)
            T = np.zeros(3)
            for j in range(i + 1, num_q, 1):
                F = F + SS[i, j] * Fj[j, :]
            Fj[i, :] = FF[i, :] + F - SE[i] * Fe[i, :]
            for j in range(i + 1, num_q, 1):
                A_I_i = AA[i, :, :]
                # (J_type[i] == 'P') acts as 0/1: prismatic joints shift the
                # moment arm along Ez by the joint displacement q[i].
                T = T + SS[i, j] * (cross(np.dot(A_I_i, (cc[i, j, :] - cc[i, i, :] + np.dot(np.dot((J_type[i] == 'P'),
                                                                                               Ez), q[i]))),
                                          Fj[j, :]) + Tj[j, :])
            if J_type[i] == 'R':
                A_I_i = AA[i, :, :]
                Tj[i, :] = TT[i, :] + T - cross(np.dot(A_I_i, cc[i, i, :]), FF[i, :])
            else:
                A_I_i = AA[i, :, :]
                Tj[i, :] = TT[i, :] + T + cross(np.dot(A_I_i, np.dot(Ez, q[i])) - np.dot(A_I_i, cc[i, i, :]), FF[i, :])
            Tj[i, :] = Tj[i, :] - SE[i] * (cross(np.dot(A_I_i, (ce[i, :] - cc[i, i, :]
                                                            + np.dot(np.dot((J_type[i] == 'P'), Ez), q[i]))),
                                                 Fe[i, :]) + Te[i, :])
    # Fold the joint loads of links attached to the base back onto the base.
    F = np.zeros(3)
    T = np.zeros(3)
    for i in range(num_q):
        if S0[i] != 0:
            F = F + S0[i] * Fj[i, :]
    FF0 = FF0 + F
    for i in range(num_q):
        if S0[i] != 0:
            T = T + S0[i] * (cross(np.dot(A_I_0, c0[i, :]), Fj[i, :]) + Tj[i, :])
    TT0 = TT0 + T
    # Project each joint load onto its joint axis (Ez in the link frame).
    # Loop body never runs when num_q == 0, so no special case is needed.
    tau = np.zeros(num_q)
    for i in range(num_q):
        A_I_i = AA[i, :, :]
        # BUG FIX: compare this link's joint type, not the whole J_type
        # sequence. The original `if J_type == 'R':` was always False, so
        # rotational joints wrongly projected the joint FORCE instead of the
        # joint TORQUE (matches the J_type[i] usage elsewhere in this file).
        if J_type[i] == 'R':
            tau[i] = np.dot(Tj[i, :].T, np.dot(A_I_i, Ez))
        else:
            tau[i] = np.dot(Fj[i, :].T, np.dot(A_I_i, Ez))
    # Assemble [base force (3), base torque (3), joint efforts (num_q)].
    # The num_q == 0 case needs no branch: Force[6:6] = tau is a no-op.
    Force = np.zeros(num_q + 6)
    Force[0:3] = FF0
    Force[3:6] = TT0
    Force[6:num_q + 6] = tau
    return Force
| [
"numpy.dot",
"numpy.zeros",
"calc_acc.calc_acc",
"calc_vel.calc_vel"
] | [((819, 850), 'calc_vel.calc_vel', 'calc_vel', (['A0', 'AA', 'v0', 'w0', 'q', 'qd'], {}), '(A0, AA, v0, w0, q, qd)\n', (827, 850), False, 'from calc_vel import calc_vel\n'), ((864, 910), 'calc_acc.calc_acc', 'calc_acc', (['A0', 'AA', 'w0', 'ww', 'vd0', 'wd0', 'q', 'qd', 'qdd'], {}), '(A0, AA, w0, ww, vd0, wd0, q, qd, qdd)\n', (872, 910), False, 'from calc_acc import calc_acc\n'), ((1098, 1118), 'numpy.zeros', 'np.zeros', (['(num_q, 3)'], {}), '((num_q, 3))\n', (1106, 1118), True, 'import numpy as np\n'), ((1128, 1148), 'numpy.zeros', 'np.zeros', (['(num_q, 3)'], {}), '((num_q, 3))\n', (1136, 1148), True, 'import numpy as np\n'), ((1551, 1571), 'numpy.zeros', 'np.zeros', (['(num_q, 3)'], {}), '((num_q, 3))\n', (1559, 1571), True, 'import numpy as np\n'), ((1581, 1601), 'numpy.zeros', 'np.zeros', (['(num_q, 3)'], {}), '((num_q, 3))\n', (1589, 1601), True, 'import numpy as np\n'), ((3190, 3205), 'numpy.zeros', 'np.zeros', (['num_q'], {}), '(num_q)\n', (3198, 3205), True, 'import numpy as np\n'), ((3515, 3534), 'numpy.zeros', 'np.zeros', (['(num_q + 6)'], {}), '(num_q + 6)\n', (3523, 3534), True, 'import numpy as np\n'), ((967, 990), 'numpy.dot', 'np.dot', (['A_I_0', 'inertia0'], {}), '(A_I_0, inertia0)\n', (973, 990), True, 'import numpy as np\n'), ((1011, 1040), 'numpy.dot', 'np.dot', (['inertia_I_0', 'wd0[0:3]'], {}), '(inertia_I_0, wd0[0:3])\n', (1017, 1040), True, 'import numpy as np\n'), ((2851, 2862), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2859, 2862), True, 'import numpy as np\n'), ((2875, 2886), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2883, 2886), True, 'import numpy as np\n'), ((3239, 3254), 'numpy.zeros', 'np.zeros', (['num_q'], {}), '(num_q)\n', (3247, 3254), True, 'import numpy as np\n'), ((1058, 1086), 'numpy.dot', 'np.dot', (['inertia_I_0', 'w0[0:3]'], {}), '(inertia_I_0, w0[0:3])\n', (1064, 1086), True, 'import numpy as np\n'), ((1679, 1690), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1687, 1690), True, 'import 
numpy as np\n'), ((1707, 1718), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1715, 1718), True, 'import numpy as np\n'), ((1409, 1428), 'numpy.dot', 'np.dot', (['A_I_i', 'in_i'], {}), '(A_I_i, in_i)\n', (1415, 1428), True, 'import numpy as np\n'), ((1462, 1491), 'numpy.dot', 'np.dot', (['inertia_I_i', 'wd[i, :]'], {}), '(inertia_I_i, wd[i, :])\n', (1468, 1491), True, 'import numpy as np\n'), ((1510, 1539), 'numpy.dot', 'np.dot', (['inertia_I_i', 'ww[i, :]'], {}), '(inertia_I_i, ww[i, :])\n', (1516, 1539), True, 'import numpy as np\n'), ((3402, 3419), 'numpy.dot', 'np.dot', (['A_I_i', 'Ez'], {}), '(A_I_i, Ez)\n', (3408, 3419), True, 'import numpy as np\n'), ((3483, 3500), 'numpy.dot', 'np.dot', (['A_I_i', 'Ez'], {}), '(A_I_i, Ez)\n', (3489, 3500), True, 'import numpy as np\n'), ((2354, 2380), 'numpy.dot', 'np.dot', (['A_I_i', 'cc[i, i, :]'], {}), '(A_I_i, cc[i, i, :])\n', (2360, 2380), True, 'import numpy as np\n'), ((2528, 2554), 'numpy.dot', 'np.dot', (['A_I_i', 'cc[i, i, :]'], {}), '(A_I_i, cc[i, i, :])\n', (2534, 2554), True, 'import numpy as np\n'), ((2508, 2524), 'numpy.dot', 'np.dot', (['Ez', 'q[i]'], {}), '(Ez, q[i])\n', (2514, 2524), True, 'import numpy as np\n'), ((3106, 3129), 'numpy.dot', 'np.dot', (['A_I_0', 'c0[i, :]'], {}), '(A_I_0, c0[i, :])\n', (3112, 3129), True, 'import numpy as np\n'), ((2726, 2754), 'numpy.dot', 'np.dot', (["(J_type[i] == 'P')", 'Ez'], {}), "(J_type[i] == 'P', Ez)\n", (2732, 2754), True, 'import numpy as np\n'), ((2033, 2061), 'numpy.dot', 'np.dot', (["(J_type[i] == 'P')", 'Ez'], {}), "(J_type[i] == 'P', Ez)\n", (2039, 2061), True, 'import numpy as np\n')] |
import copy
from datetime import datetime
import dask.array as da
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from pathlib import Path
from act.io.armfiles import read_netcdf
from act.qc.arm import add_dqr_to_qc
from act.qc.qcfilter import parse_bit, set_bit, unset_bit
from act.qc.radiometer_tests import fft_shading_test
from act.qc.sp2 import SP2ParticleCriteria, PYSP2_AVAILABLE
from act.tests import (
EXAMPLE_CEIL1,
EXAMPLE_CO2FLX4M,
EXAMPLE_MET1,
EXAMPLE_METE40,
EXAMPLE_MFRSR,
EXAMPLE_IRT25m20s,
EXAMPLE_BRS,
EXAMPLE_MET_YAML
)
from act.qc.bsrn_tests import _calculate_solar_parameters
from act.qc.add_supplemental_qc import read_yaml_supplemental_qc, apply_supplemental_qc
def test_fft_shading_test():
    """FFT shading test flags the known number of samples in the MFRSR file."""
    dataset = read_netcdf(EXAMPLE_MFRSR)
    dataset.clean.cleanup()
    dataset = fft_shading_test(dataset)
    qc_values = dataset['qc_diffuse_hemisp_narrowband_filter4'].values
    assert np.nansum(qc_values) == 7164
def test_global_qc_cleanup():
    """clean.cleanup() converts ARM global QC into CF-style flag attributes."""
    ds_object = read_netcdf(EXAMPLE_MET1)
    ds_object.load()
    ds_object.clean.cleanup()
    # Expected CF attributes per QC variable after cleanup.
    expected = {
        'qc_wdir_vec_mean': {
            'flag_meanings': [
                'Value is equal to missing_value.',
                'Value is less than the fail_min.',
                'Value is greater than the fail_max.',
            ],
            'flag_masks': [1, 2, 4],
            'flag_assessments': ['Bad', 'Bad', 'Bad'],
        },
        'qc_temp_mean': {
            'flag_meanings': [
                'Value is equal to missing_value.',
                'Value is less than the fail_min.',
                'Value is greater than the fail_max.',
                'Difference between current and previous values exceeds fail_delta.',
            ],
            'flag_masks': [1, 2, 4, 8],
            'flag_assessments': ['Bad', 'Bad', 'Bad', 'Indeterminate'],
        },
    }
    for qc_var, attrs in expected.items():
        for attr_name, attr_value in attrs.items():
            assert ds_object[qc_var].attrs[attr_name] == attr_value
    ds_object.close()
    del ds_object
def test_qc_test_errors():
    """Every limit-test helper returns None when given a None limit value."""
    ds_object = read_netcdf(EXAMPLE_MET1)
    var_name = 'temp_mean'
    limit_tests = (
        ds_object.qcfilter.add_less_test,
        ds_object.qcfilter.add_greater_test,
        ds_object.qcfilter.add_less_equal_test,
        ds_object.qcfilter.add_equal_to_test,
        ds_object.qcfilter.add_not_equal_to_test,
    )
    for add_limit_test in limit_tests:
        assert add_limit_test(var_name, None) is None
def test_arm_qc():
    """Exercise add_dqr_to_qc() against the live ARM DQR web service.

    Uses a dataset with a known DQR (D190529.4) and checks that the DQR is
    folded into the ancillary QC variable under several calling conventions.
    The whole body is skipped gracefully if the web service is unreachable.
    """
    # Test DQR Webservice using known DQR
    variable = 'wspd_vec_mean'
    qc_variable = 'qc_' + variable
    obj = read_netcdf(EXAMPLE_METE40)
    # DQR webservice does go down, so ensure it
    # properly runs first before testing
    try:
        obj = add_dqr_to_qc(obj, variable=variable)
        ran = True
        # Exercise the fallback lookup of the datastream name via the
        # '_datastream' attribute instead of 'datastream'.
        obj.attrs['_datastream'] = obj.attrs['datastream']
        del obj.attrs['datastream']
        obj2 = add_dqr_to_qc(obj, variable=variable)
        obj3 = add_dqr_to_qc(obj)
        # exclude/include filters should be accepted without error.
        add_dqr_to_qc(obj, variable=variable, exclude=['D190529.4'])
        add_dqr_to_qc(obj, variable=variable, include=['D400101.1'])
        # With neither datastream attribute present a ValueError is required.
        with np.testing.assert_raises(ValueError):
            del obj.attrs['_datastream']
            add_dqr_to_qc(obj, variable=variable)
    except ValueError:
        ran = False
    if ran:
        assert qc_variable in obj
        dqr = [True for d in obj[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
        assert dqr[0] is True
        assert 'Suspect' not in obj[qc_variable].attrs['flag_assessments']
        assert 'Incorrect' not in obj[qc_variable].attrs['flag_assessments']
        assert qc_variable in obj2
        dqr = [True for d in obj2[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
        assert dqr[0] is True
        assert 'Suspect' not in obj2[qc_variable].attrs['flag_assessments']
        assert 'Incorrect' not in obj2[qc_variable].attrs['flag_assessments']
        assert qc_variable in obj3
        dqr = [True for d in obj3[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
        assert dqr[0] is True
        assert 'Suspect' not in obj3[qc_variable].attrs['flag_assessments']
        assert 'Incorrect' not in obj3[qc_variable].attrs['flag_assessments']
def test_qcfilter():
    """End-to-end exercise of the qcfilter accessor on a bitmask QC variable.

    Covers creation of ancillary QC variables, adding/setting/unsetting/
    removing tests, masked-data retrieval by assessment, bit helpers
    (parse_bit/set_bit/unset_bit), and flag-value (integer flag) QC variables.
    The assertions are order-dependent: each section mutates ds_object state
    that later sections rely on.
    """
    ds_object = read_netcdf(EXAMPLE_IRT25m20s)
    var_name = 'inst_up_long_dome_resist'
    expected_qc_var_name = 'qc_' + var_name
    # check_for_ancillary_qc() should create the missing QC variable.
    ds_object.qcfilter.check_for_ancillary_qc(
        var_name, add_if_missing=True, cleanup=False, flag_type=False
    )
    assert expected_qc_var_name in list(ds_object.keys())
    del ds_object[expected_qc_var_name]
    # Perform adding of quality control variables to object
    result = ds_object.qcfilter.add_test(var_name, test_meaning='Birds!')
    assert isinstance(result, dict)
    qc_var_name = result['qc_variable_name']
    assert qc_var_name == expected_qc_var_name
    # Check that new linking and describing attributes are set
    assert ds_object[qc_var_name].attrs['standard_name'] == 'quality_flag'
    assert ds_object[var_name].attrs['ancillary_variables'] == qc_var_name
    # Check that CF attributes are set including new flag_assessments
    assert 'flag_masks' in ds_object[qc_var_name].attrs.keys()
    assert 'flag_meanings' in ds_object[qc_var_name].attrs.keys()
    assert 'flag_assessments' in ds_object[qc_var_name].attrs.keys()
    # Check that the values of the attributes are set correctly
    assert ds_object[qc_var_name].attrs['flag_assessments'][0] == 'Bad'
    assert ds_object[qc_var_name].attrs['flag_meanings'][0] == 'Birds!'
    assert ds_object[qc_var_name].attrs['flag_masks'][0] == 1
    # Set some test values
    index = [0, 1, 2, 30]
    ds_object.qcfilter.set_test(var_name, index=index, test_number=result['test_number'])
    # Add a new test and set values
    index2 = [6, 7, 8, 50]
    ds_object.qcfilter.add_test(
        var_name,
        index=index2,
        test_number=9,
        test_meaning='testing high number',
        test_assessment='Suspect',
    )
    # Retrieve data from object as numpy masked array. Count number of masked
    # elements and ensure equal to size of index array.
    data = ds_object.qcfilter.get_masked_data(var_name, rm_assessments='Bad')
    assert np.ma.count_masked(data) == len(index)
    data = ds_object.qcfilter.get_masked_data(
        var_name, rm_assessments='Suspect', return_nan_array=True
    )
    assert np.sum(np.isnan(data)) == len(index2)
    data = ds_object.qcfilter.get_masked_data(
        var_name, rm_assessments=['Bad', 'Suspect'], ma_fill_value=np.nan
    )
    assert np.ma.count_masked(data) == len(index + index2)
    # Test internal function for returning the index array of where the
    # tests are set.
    assert (
        np.sum(
            ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
            - np.array(index, dtype=int)
        )
        == 0
    )
    # Unset a test
    ds_object.qcfilter.unset_test(var_name, index=0, test_number=result['test_number'])
    # Remove the test
    ds_object.qcfilter.remove_test(var_name, test_number=33)
    # Ensure removal works when flag_masks is a numpy array
    ds_object['qc_' + var_name].attrs['flag_masks'] = np.array(ds_object['qc_' + var_name].attrs['flag_masks'])
    ds_object.qcfilter.remove_test(var_name, test_number=result['test_number'])
    # add_test()/remove_test() without a test description/number must raise.
    pytest.raises(ValueError, ds_object.qcfilter.add_test, var_name)
    pytest.raises(ValueError, ds_object.qcfilter.remove_test, var_name)
    ds_object.close()
    # Bit helpers: 257 = bits 1 and 9; set_bit(0, 16) = 2**15.
    assert np.all(parse_bit([257]) == np.array([1, 9], dtype=np.int32))
    pytest.raises(ValueError, parse_bit, [1, 2])
    pytest.raises(ValueError, parse_bit, -1)
    assert set_bit(0, 16) == 32768
    # set_bit/unset_bit should preserve the container type they were given.
    data = range(0, 4)
    assert isinstance(set_bit(list(data), 2), list)
    assert isinstance(set_bit(tuple(data), 2), tuple)
    assert isinstance(unset_bit(list(data), 2), list)
    assert isinstance(unset_bit(tuple(data), 2), tuple)
    # Fill in missing tests
    ds_object = read_netcdf(EXAMPLE_IRT25m20s)
    del ds_object[var_name].attrs['long_name']
    # Test creating a qc variable
    ds_object.qcfilter.create_qc_variable(var_name)
    # Test creating a second qc variable and of flag type
    ds_object.qcfilter.create_qc_variable(var_name, flag_type=True)
    result = ds_object.qcfilter.add_test(
        var_name,
        index=[1, 2, 3],
        test_number=9,
        test_meaning='testing high number',
        flag_value=True,
    )
    ds_object.qcfilter.set_test(var_name, index=5, test_number=9, flag_value=True)
    data = ds_object.qcfilter.get_masked_data(var_name)
    assert np.isclose(np.sum(data), 42674.766, 0.01)
    data = ds_object.qcfilter.get_masked_data(var_name, rm_assessments='Bad')
    assert np.isclose(np.sum(data), 42643.195, 0.01)
    ds_object.qcfilter.unset_test(var_name, test_number=9, flag_value=True)
    ds_object.qcfilter.unset_test(var_name, index=1, test_number=9, flag_value=True)
    assert ds_object.qcfilter.available_bit(result['qc_variable_name']) == 10
    assert ds_object.qcfilter.available_bit(result['qc_variable_name'], recycle=True) == 1
    ds_object.qcfilter.remove_test(var_name, test_number=9, flag_value=True)
    ds_object.qcfilter.update_ancillary_variable(var_name)
    # Test updating ancillary variable if does not exist
    ds_object.qcfilter.update_ancillary_variable('not_a_variable_name')
    # Change ancillary_variables attribute to test if add correct qc variable correctly
    ds_object[var_name].attrs['ancillary_variables'] = 'a_different_name'
    ds_object.qcfilter.update_ancillary_variable(var_name, qc_var_name=expected_qc_var_name)
    assert expected_qc_var_name in ds_object[var_name].attrs['ancillary_variables']
    # Test flag QC
    var_name = 'inst_sfc_ir_temp'
    qc_var_name = 'qc_' + var_name
    # Flag-type QC variables use flag_values (integer states), never flag_masks.
    ds_object.qcfilter.create_qc_variable(var_name, flag_type=True)
    assert qc_var_name in list(ds_object.data_vars)
    assert 'flag_values' in ds_object[qc_var_name].attrs.keys()
    assert 'flag_masks' not in ds_object[qc_var_name].attrs.keys()
    del ds_object[qc_var_name]
    qc_var_name = ds_object.qcfilter.check_for_ancillary_qc(
        var_name, add_if_missing=True, cleanup=False, flag_type=True
    )
    assert qc_var_name in list(ds_object.data_vars)
    assert 'flag_values' in ds_object[qc_var_name].attrs.keys()
    assert 'flag_masks' not in ds_object[qc_var_name].attrs.keys()
    del ds_object[qc_var_name]
    ds_object.qcfilter.add_missing_value_test(var_name, flag_value=True, prepend_text='arm')
    ds_object.qcfilter.add_test(
        var_name,
        index=list(range(0, 20)),
        test_number=2,
        test_meaning='Testing flag',
        flag_value=True,
        test_assessment='Suspect',
    )
    assert qc_var_name in list(ds_object.data_vars)
    assert 'flag_values' in ds_object[qc_var_name].attrs.keys()
    assert 'flag_masks' not in ds_object[qc_var_name].attrs.keys()
    assert 'standard_name' in ds_object[qc_var_name].attrs.keys()
    assert ds_object[qc_var_name].attrs['flag_values'] == [1, 2]
    assert ds_object[qc_var_name].attrs['flag_assessments'] == ['Bad', 'Suspect']
    ds_object.close()
def test_qcfilter2():
    """Test the IQR and GESD outlier tests added through the qcfilter accessor."""
    ds = read_netcdf(EXAMPLE_IRT25m20s)
    var_name = 'inst_up_long_dome_resist'
    qc_name = 'qc_' + var_name

    # Inject two artificial spikes so the outlier tests have something to flag.
    values = ds[var_name].values
    values[0:4] += 30.0
    values[1000:1024] += 30.0
    ds[var_name].values = values

    coef = 1.4
    ds.qcfilter.add_iqr_test(var_name, coef=1.4, test_assessment='Bad', prepend_text='arm')
    assert np.sum(ds[qc_name].values) == 28
    assert ds[qc_name].attrs['flag_masks'] == [1]
    assert ds[qc_name].attrs['flag_meanings'] == [
        f'arm: Value outside of interquartile range test range with a coefficient of {coef}'
    ]

    # Second IQR test with the default coefficient on an explicit test number.
    ds.qcfilter.add_iqr_test(var_name, test_number=3, prepend_text='ACT')
    assert np.sum(ds[qc_name].values) == 140
    assert ds[qc_name].attrs['flag_masks'] == [1, 4]
    assert ds[qc_name].attrs['flag_meanings'][-1] == (
        'ACT: Value outside of interquartile range test range with a coefficient of 1.5'
    )

    # GESD test with default alpha.
    ds.qcfilter.add_gesd_test(var_name, test_assessment='Bad')
    assert np.sum(ds[qc_name].values) == 204
    assert ds[qc_name].attrs['flag_masks'] == [1, 4, 8]
    assert ds[qc_name].attrs['flag_meanings'][-1] == (
        'Value failed generalized Extreme Studentized Deviate test with an alpha of 0.05'
    )

    # GESD test with a looser alpha flags more points.
    ds.qcfilter.add_gesd_test(var_name, alpha=0.1)
    assert np.sum(ds[qc_name].values) == 332
    assert ds[qc_name].attrs['flag_masks'] == [1, 4, 8, 16]
    assert ds[qc_name].attrs['flag_meanings'][-1] == (
        'Value failed generalized Extreme Studentized Deviate test with an alpha of 0.1'
    )
    assert ds[qc_name].attrs['flag_assessments'] == [
        'Bad',
        'Indeterminate',
        'Bad',
        'Indeterminate',
    ]
def test_qcfilter3():
    """Test QC mask retrieval when the QC variable is not an integer type."""
    ds = read_netcdf(EXAMPLE_IRT25m20s)
    var_name = 'inst_up_long_dome_resist'
    result = ds.qcfilter.add_test(var_name, index=range(0, 100), test_meaning='testing')
    qc_var_name = result['qc_variable_name']
    assert ds[qc_var_name].values.dtype.kind in np.typecodes['AllInteger']

    # Cast the QC data to float and verify the mask logic still works.
    ds[qc_var_name].values = ds[qc_var_name].values.astype(np.float32)
    assert ds[qc_var_name].values.dtype.kind not in np.typecodes['AllInteger']

    mask = ds.qcfilter.get_qc_test_mask(var_name=var_name, test_number=1, return_index=False)
    assert np.sum(mask) == 100
    index = ds.qcfilter.get_qc_test_mask(var_name=var_name, test_number=1, return_index=True)
    assert np.sum(index) == 4950

    # Resampling produces non-integer QC variables; make sure tests still apply.
    ds = ds.resample(time='5min').mean(keep_attrs=True)
    ds.qcfilter.add_test(
        var_name, index=range(0, ds.time.size), test_meaning='Testing float'
    )
    assert np.sum(ds[qc_var_name].values) == 582
    ds[qc_var_name].values = ds[qc_var_name].values.astype(np.float32)
    ds.qcfilter.remove_test(var_name, test_number=2)
    assert np.sum(ds[qc_var_name].values) == 6
def test_qctests():
    """Exercise the limit-based QC tests added through the qcfilter accessor
    (missing value, less/greater, less/greater-or-equal, equal/not-equal,
    outside/inside range, delta and difference), checking flagged counts,
    limit attribute names/dtypes and the use_dask code path for each test."""
    ds_object = read_netcdf(EXAMPLE_IRT25m20s)
    var_name = 'inst_up_long_dome_resist'
    # Add in one missing value and test for that missing value
    data = ds_object[var_name].values
    data[0] = np.nan
    ds_object[var_name].data = da.from_array(data)
    result = ds_object.qcfilter.add_missing_value_test(var_name)
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert data.mask[0]
    # Same missing-value test computed through the dask code path.
    result = ds_object.qcfilter.add_missing_value_test(var_name, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert data == np.array([0])
    ds_object.qcfilter.remove_test(var_name, test_number=result['test_number'])
    # less than min test
    limit_value = 6.8
    result = ds_object.qcfilter.add_less_test(
        var_name, limit_value, prepend_text='arm', limit_attr_name='fail_min'
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 54
    assert 'fail_min' in ds_object[result['qc_variable_name']].attrs.keys()
    # The stored limit attribute should match the data variable's dtype.
    assert (
        ds_object[result['qc_variable_name']].attrs['fail_min'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_min'], limit_value)
    result = ds_object.qcfilter.add_less_test(var_name, limit_value, test_assessment='Suspect')
    assert 'warn_min' in ds_object[result['qc_variable_name']].attrs.keys()
    limit_value = 8
    result = ds_object.qcfilter.add_less_test(var_name, limit_value)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 2911939
    result = ds_object.qcfilter.add_less_test(var_name, limit_value, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 2911939
    # greator than max test
    limit_value = 12.7
    result = ds_object.qcfilter.add_greater_test(
        var_name, limit_value, prepend_text='arm', limit_attr_name='fail_max'
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 61
    assert 'fail_max' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['fail_max'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_max'], limit_value)
    result = ds_object.qcfilter.add_greater_test(var_name, limit_value, test_assessment='Suspect')
    assert 'warn_max' in ds_object[result['qc_variable_name']].attrs.keys()
    result = ds_object.qcfilter.add_greater_test(var_name, limit_value, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 125458
    result = ds_object.qcfilter.add_greater_test(var_name, limit_value)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 125458
    # less than or equal test
    limit_value = 6.9
    result = ds_object.qcfilter.add_less_equal_test(
        var_name,
        limit_value,
        test_assessment='Suspect',
        prepend_text='arm',
        limit_attr_name='warn_min',
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 149
    assert 'warn_min' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['warn_min'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_min'], limit_value)
    result = ds_object.qcfilter.add_less_equal_test(var_name, limit_value)
    assert 'fail_min' in ds_object[result['qc_variable_name']].attrs.keys()
    result = ds_object.qcfilter.add_less_equal_test(var_name, limit_value, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 601581
    result = ds_object.qcfilter.add_less_equal_test(var_name, limit_value)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 601581
    # greater than or equal test
    # A None limit exercises the no-limit code path before the real test is set.
    result = ds_object.qcfilter.add_greater_equal_test(var_name, None)
    limit_value = 12
    result = ds_object.qcfilter.add_greater_equal_test(
        var_name,
        limit_value,
        test_assessment='Suspect',
        prepend_text='arm',
        limit_attr_name='warn_max',
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 606
    assert 'warn_max' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['warn_max'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_max'], limit_value)
    result = ds_object.qcfilter.add_greater_equal_test(var_name, limit_value)
    assert 'fail_max' in ds_object[result['qc_variable_name']].attrs.keys()
    result = ds_object.qcfilter.add_greater_equal_test(var_name, limit_value, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 1189873
    result = ds_object.qcfilter.add_greater_equal_test(var_name, limit_value)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 1189873
    # equal to test
    limit_value = 7.6705
    result = ds_object.qcfilter.add_equal_to_test(
        var_name, limit_value, prepend_text='arm', limit_attr_name='fail_equal_to'
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 2
    assert 'fail_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['fail_equal_to'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_equal_to'], limit_value)
    result = ds_object.qcfilter.add_equal_to_test(
        var_name, limit_value, test_assessment='Indeterminate'
    )
    assert 'warn_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()
    result = ds_object.qcfilter.add_equal_to_test(var_name, limit_value, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 8631
    result = ds_object.qcfilter.add_equal_to_test(var_name, limit_value)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 8631
    # not equal to test
    limit_value = 7.6705
    result = ds_object.qcfilter.add_not_equal_to_test(
        var_name,
        limit_value,
        test_assessment='Indeterminate',
        prepend_text='arm',
        limit_attr_name='warn_not_equal_to',
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 4318
    assert 'warn_not_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['warn_not_equal_to'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_not_equal_to'], limit_value)
    result = ds_object.qcfilter.add_not_equal_to_test(var_name, limit_value)
    assert 'fail_not_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()
    result = ds_object.qcfilter.add_not_equal_to_test(var_name, limit_value, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 9320409
    result = ds_object.qcfilter.add_not_equal_to_test(var_name, limit_value)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 9320409
    # outside range test
    limit_value1 = 6.8
    limit_value2 = 12.7
    result = ds_object.qcfilter.add_outside_test(
        var_name,
        limit_value1,
        limit_value2,
        prepend_text='arm',
        limit_attr_names=['fail_lower_range', 'fail_upper_range'],
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 115
    assert 'fail_lower_range' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['fail_lower_range'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_lower_range'], limit_value1)
    assert 'fail_upper_range' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['fail_upper_range'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_upper_range'], limit_value2)
    result = ds_object.qcfilter.add_outside_test(
        var_name, limit_value1, limit_value2, test_assessment='Indeterminate'
    )
    assert 'warn_lower_range' in ds_object[result['qc_variable_name']].attrs.keys()
    assert 'warn_upper_range' in ds_object[result['qc_variable_name']].attrs.keys()
    result = ds_object.qcfilter.add_outside_test(
        var_name, limit_value1, limit_value2, use_dask=True
    )
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 342254
    result = ds_object.qcfilter.add_outside_test(
        var_name,
        limit_value1,
        limit_value2,
    )
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 342254
    # Starting to run out of space for tests. Remove some tests.
    for ii in range(16, 30):
        ds_object.qcfilter.remove_test(var_name, test_number=ii)
    # inside range test
    limit_value1 = 7
    limit_value2 = 8
    result = ds_object.qcfilter.add_inside_test(
        var_name,
        limit_value1,
        limit_value2,
        prepend_text='arm',
        limit_attr_names=['fail_lower_range_inner', 'fail_upper_range_inner'],
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 479
    assert 'fail_lower_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['fail_lower_range_inner'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(
        ds_object[result['qc_variable_name']].attrs['fail_lower_range_inner'],
        limit_value1,
    )
    assert 'fail_upper_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['fail_upper_range_inner'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(
        ds_object[result['qc_variable_name']].attrs['fail_upper_range_inner'],
        limit_value2,
    )
    result = ds_object.qcfilter.add_inside_test(
        var_name, limit_value1, limit_value2, test_assessment='Indeterminate'
    )
    assert 'warn_lower_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()
    assert 'warn_upper_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()
    result = ds_object.qcfilter.add_inside_test(var_name, limit_value1, limit_value2, use_dask=True)
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 1820693
    result = ds_object.qcfilter.add_inside_test(
        var_name,
        limit_value1,
        limit_value2,
    )
    data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)
    assert np.sum(data) == 1820693
    # delta test
    test_limit = 0.05
    result = ds_object.qcfilter.add_delta_test(
        var_name, test_limit, prepend_text='arm', limit_attr_name='warn_delta'
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert np.ma.count_masked(data) == 175
    assert 'warn_delta' in ds_object[result['qc_variable_name']].attrs.keys()
    assert (
        ds_object[result['qc_variable_name']].attrs['warn_delta'].dtype
        == ds_object[result['variable_name']].values.dtype
    )
    assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_delta'], test_limit)
    data = ds_object.qcfilter.get_masked_data(var_name, rm_assessments=['Suspect', 'Bad'])
    assert np.ma.count_masked(data) == 1355
    result = ds_object.qcfilter.add_delta_test(var_name, test_limit, test_assessment='Bad')
    assert 'fail_delta' in ds_object[result['qc_variable_name']].attrs.keys()
    # Difference test requires a comparison dataset; invalid arguments should raise.
    comp_object = read_netcdf(EXAMPLE_IRT25m20s)
    with np.testing.assert_raises(ValueError):
        result = ds_object.qcfilter.add_difference_test(var_name, 'test')
    with np.testing.assert_raises(ValueError):
        result = ds_object.qcfilter.add_difference_test(
            var_name,
            {comp_object.attrs['datastream']: comp_object},
            var_name,
            diff_limit=None,
        )
    assert ds_object.qcfilter.add_difference_test(var_name, set_test_regardless=False) is None
    result = ds_object.qcfilter.add_difference_test(
        var_name,
        {comp_object.attrs['datastream']: comp_object},
        var_name,
        diff_limit=1,
        prepend_text='arm',
    )
    data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])
    assert 'arm' in result['test_meaning']
    assert not (data.mask).all()
    comp_object.close()
    ds_object.close()
def test_qctests_dos():
    """Test the persistence test with the default and a custom window size."""
    ds = read_netcdf(EXAMPLE_IRT25m20s)
    var_name = 'inst_up_long_dome_resist'

    # Force a flat stretch of data so the persistence test trips.
    values = ds[var_name].values
    values[1000:2500] = values[1000]
    ds[var_name].values = values

    result = ds.qcfilter.add_persistence_test(var_name)
    qc_var_name = result['qc_variable_name']
    expected_meaning = (
        'Data failing persistence test. Standard Deviation over a '
        'window of 10 values less than 0.0001.'
    )
    assert ds[qc_var_name].attrs['flag_meanings'][-1] == expected_meaning

    # There is a precision issue with GitHub testing that makes the number of
    # tests tripped off by 1. The isclose() tolerance accounts for that.
    assert np.isclose(np.sum(ds[qc_var_name].values), 1500, atol=1)

    ds.qcfilter.add_persistence_test(var_name, window=10000, prepend_text='DQO')
    expected_meaning = (
        'DQO: Data failing persistence test. Standard Deviation over a window of '
        '4320 values less than 0.0001.'
    )
    assert ds[qc_var_name].attrs['flag_meanings'][-1] == expected_meaning
def test_datafilter():
    """Test qcfilter.datafilter() with and without removal of QC variables."""
    ds = read_netcdf(EXAMPLE_MET1, drop_variables=['base_time', 'time_offset'])
    ds.clean.cleanup()

    qc_var_names = sorted(name for name in ds.data_vars if name.startswith('qc_'))
    data_var_names = sorted(set(ds.data_vars) - set(qc_var_names))

    var_name = 'atmos_pressure'
    mean_unfiltered = ds.mean()
    ds.qcfilter.add_less_test(var_name, 99, test_assessment='Bad')

    # Filter out Bad values but keep the QC variables around.
    filtered = copy.deepcopy(ds)
    filtered.qcfilter.datafilter(rm_assessments='Bad', del_qc_var=False)
    mean_filtered = filtered.mean()
    assert np.isclose(mean_unfiltered[var_name].values, 98.86, atol=0.01)
    assert np.isclose(mean_filtered[var_name].values, 99.15, atol=0.01)
    assert isinstance(mean_unfiltered[var_name].data, da.core.Array)

    # Filtering a single variable should drop only that variable's QC companion.
    filtered = copy.deepcopy(ds)
    filtered.qcfilter.datafilter(rm_assessments='Bad', variables=var_name)
    mean_filtered = filtered.mean()
    assert np.isclose(mean_filtered[var_name].values, 99.15, atol=0.01)
    remaining_names = sorted(set(data_var_names + qc_var_names) - {'qc_' + var_name})
    assert sorted(filtered.data_vars) == remaining_names

    # Deleting QC variables should leave only the data variables.
    filtered = copy.deepcopy(ds)
    filtered.qcfilter.datafilter(rm_assessments='Bad', del_qc_var=True)
    assert sorted(filtered.data_vars) == data_var_names

    ds.close()
    del ds
def test_qc_remainder():
    """Exercise remaining clean-module code paths: attribute info retrieval,
    missing-value handling, ARM state variables and variable linking."""
    ds = read_netcdf(EXAMPLE_MET1)
    assert ds.clean.get_attr_info(variable='bad_name') is None
    del ds.attrs['qc_bit_comment']
    assert isinstance(ds.clean.get_attr_info(), dict)
    ds.attrs['qc_flag_comment'] = 'testing'
    ds.close()
    # Re-read and test missing-value handling after removing QC attributes.
    ds = read_netcdf(EXAMPLE_MET1)
    ds.clean.cleanup(normalize_assessment=True)
    ds['qc_atmos_pressure'].attrs['units'] = 'testing'
    del ds['qc_temp_mean'].attrs['units']
    del ds['qc_temp_mean'].attrs['flag_masks']
    ds.clean.handle_missing_values()
    ds.close()
    # Re-read and force a QC value larger than 32 bits to exercise the
    # large-value code path in get_attr_info().
    ds = read_netcdf(EXAMPLE_MET1)
    ds.attrs['qc_bit_1_comment'] = 'tesing'
    data = ds['qc_atmos_pressure'].values.astype(np.int64)
    data[0] = 2**32
    ds['qc_atmos_pressure'].values = data
    ds.clean.get_attr_info(variable='qc_atmos_pressure')
    ds.clean.clean_arm_state_variables('testname')
    ds.clean.cleanup()
    # link_variables() should reset an incorrect standard_name to the CF value.
    ds['qc_atmos_pressure'].attrs['standard_name'] = 'wrong_name'
    ds.clean.link_variables()
    assert ds['qc_atmos_pressure'].attrs['standard_name'] == 'quality_flag'
    ds.close()
def test_qc_flag_description():
    """
    This will check if the cleanup() method will correctly convert
    flag_#_description to CF flag_masks and flag_meanings.
    """
    ds = read_netcdf(EXAMPLE_CO2FLX4M)
    ds.clean.cleanup()
    qc_var_name = ds.qcfilter.check_for_ancillary_qc(
        'momentum_flux', add_if_missing=False, cleanup=False
    )
    assert isinstance(ds[qc_var_name].attrs['flag_masks'], list)
    assert isinstance(ds[qc_var_name].attrs['flag_meanings'], list)
    assert isinstance(ds[qc_var_name].attrs['flag_assessments'], list)
    assert ds[qc_var_name].attrs['standard_name'] == 'quality_flag'
    assert len(ds[qc_var_name].attrs['flag_masks']) == 9
    # Compare sorted lists so the check does not depend on set iteration order,
    # which varies with string hash randomization between interpreter runs.
    unique_flag_assessments = sorted({'Acceptable', 'Indeterminate', 'Bad'})
    assert sorted(set(ds[qc_var_name].attrs['flag_assessments'])) == unique_flag_assessments
def test_clean():
    """Test clean.cleanup() conversion of ARM QC conventions to CF conventions
    on a ceilometer dataset, including ARM state variable handling."""
    # Read test data
    ceil_ds = read_netcdf([EXAMPLE_CEIL1])
    # Cleanup QC data
    ceil_ds.clean.cleanup(clean_arm_state_vars=['detection_status'])
    # Check that global attributes are removed.
    # BUG FIX: the original list was missing a comma, implicitly concatenating
    # 'qc_bit_2_assessment' and 'qc_bit_3_description' into one bogus name and
    # leaving those two attributes effectively unchecked.
    global_attributes = [
        'qc_bit_comment',
        'qc_bit_1_description',
        'qc_bit_1_assessment',
        'qc_bit_2_description',
        'qc_bit_2_assessment',
        'qc_bit_3_description',
        'qc_bit_3_assessment',
    ]
    for glb_att in global_attributes:
        assert glb_att not in ceil_ds.attrs.keys()
    # Check that CF attributes are set including new flag_assessments
    var_name = 'qc_first_cbh'
    for attr_name in ['flag_masks', 'flag_meanings', 'flag_assessments']:
        assert attr_name in ceil_ds[var_name].attrs.keys()
        assert isinstance(ceil_ds[var_name].attrs[attr_name], list)
    # Check that the flag_mask values are set correctly
    assert ceil_ds['qc_first_cbh'].attrs['flag_masks'] == [1, 2, 4]
    # Check that the flag_meanings values are set correctly
    assert ceil_ds['qc_first_cbh'].attrs['flag_meanings'] == [
        'Value is equal to missing_value.',
        'Value is less than the fail_min.',
        'Value is greater than the fail_max.',
    ]
    # Check the value of flag_assessments is as expected
    assert ceil_ds['qc_first_cbh'].attrs['flag_assessments'] == ['Bad', 'Bad', 'Bad']
    # Check that ancillary variables attribute is being updated
    assert 'qc_first_cbh' in ceil_ds['first_cbh'].attrs['ancillary_variables'].split()
    # Check that state field is updated to CF
    assert 'flag_values' in ceil_ds['detection_status'].attrs.keys()
    assert isinstance(ceil_ds['detection_status'].attrs['flag_values'], list)
    assert ceil_ds['detection_status'].attrs['flag_values'] == [0, 1, 2, 3, 4, 5]
    assert 'flag_meanings' in ceil_ds['detection_status'].attrs.keys()
    assert isinstance(ceil_ds['detection_status'].attrs['flag_meanings'], list)
    assert ceil_ds['detection_status'].attrs['flag_meanings'] == [
        'No significant backscatter',
        'One cloud base detected',
        'Two cloud bases detected',
        'Three cloud bases detected',
        'Full obscuration determined but no cloud base detected',
        'Some obscuration detected but determined to be transparent',
    ]
    assert 'flag_0_description' not in ceil_ds['detection_status'].attrs.keys()
    assert 'detection_status' in ceil_ds['first_cbh'].attrs['ancillary_variables'].split()
    ceil_ds.close()
def test_compare_time_series_trends():
    """Test qcfilter.compare_time_series_trends() detection of a time shift
    between a dataset and a (time-shifted) copy of itself."""
    # Variables not needed for the comparison are dropped to speed up reading.
    drop_vars = [
        'base_time',
        'time_offset',
        'atmos_pressure',
        'qc_atmos_pressure',
        'temp_std',
        'rh_mean',
        'qc_rh_mean',
        'rh_std',
        'vapor_pressure_mean',
        'qc_vapor_pressure_mean',
        'vapor_pressure_std',
        'wspd_arith_mean',
        'qc_wspd_arith_mean',
        'wspd_vec_mean',
        'qc_wspd_vec_mean',
        'wdir_vec_mean',
        'qc_wdir_vec_mean',
        'wdir_vec_std',
        'tbrg_precip_total',
        'qc_tbrg_precip_total',
        'tbrg_precip_total_corr',
        'qc_tbrg_precip_total_corr',
        'org_precip_rate_mean',
        'qc_org_precip_rate_mean',
        'pwd_err_code',
        'pwd_mean_vis_1min',
        'qc_pwd_mean_vis_1min',
        'pwd_mean_vis_10min',
        'qc_pwd_mean_vis_10min',
        'pwd_pw_code_inst',
        'qc_pwd_pw_code_inst',
        'pwd_pw_code_15min',
        'qc_pwd_pw_code_15min',
        'pwd_pw_code_1hr',
        'qc_pwd_pw_code_1hr',
        'pwd_precip_rate_mean_1min',
        'qc_pwd_precip_rate_mean_1min',
        'pwd_cumul_rain',
        'qc_pwd_cumul_rain',
        'pwd_cumul_snow',
        'qc_pwd_cumul_snow',
        'logger_volt',
        'qc_logger_volt',
        'logger_temp',
        'qc_logger_temp',
        'lat',
        'lon',
        'alt',
    ]
    ds = read_netcdf(EXAMPLE_MET1, drop_variables=drop_vars)
    ds.clean.cleanup()
    ds2 = copy.deepcopy(ds)
    var_name = 'temp_mean'
    qc_var_name = ds.qcfilter.check_for_ancillary_qc(
        var_name, add_if_missing=False, cleanup=False, flag_type=False
    )
    # With an unshifted copy the detected offset is 0 seconds, which still
    # exceeds nothing but triggers the test via the low time_qc_threshold.
    ds.qcfilter.compare_time_series_trends(
        var_name=var_name,
        time_shift=60,
        comp_var_name=var_name,
        comp_dataset=ds2,
        time_qc_threshold=60 * 10,
    )
    test_description = (
        'Time shift detected with Minimum Difference test. Comparison of '
        'temp_mean with temp_mean off by 0 seconds exceeding absolute '
        'threshold of 600 seconds.'
    )
    assert ds[qc_var_name].attrs['flag_meanings'][-1] == test_description
    # Shift the comparison dataset by one hour and verify the shift is detected.
    time = ds2['time'].values + np.timedelta64(1, 'h')
    time_attrs = ds2['time'].attrs
    ds2 = ds2.assign_coords({'time': time})
    ds2['time'].attrs = time_attrs
    ds.qcfilter.compare_time_series_trends(
        var_name=var_name, comp_dataset=ds2, time_step=60, time_match_threshhold=50
    )
    test_description = (
        'Time shift detected with Minimum Difference test. Comparison of '
        'temp_mean with temp_mean off by 3600 seconds exceeding absolute '
        'threshold of 900 seconds.'
    )
    assert ds[qc_var_name].attrs['flag_meanings'][-1] == test_description
def test_qc_data_type():
    """Test automatic upscaling of the QC variable integer type as tests with
    higher test numbers are added."""
    drop_vars = [
        'base_time',
        'time_offset',
        'inst_up_long_case_resist',
        'inst_up_long_hemisp_tp',
        'inst_up_short_hemisp_tp',
        'inst_sfc_ir_temp',
        'lat',
        'lon',
        'alt',
    ]
    ds = read_netcdf(EXAMPLE_IRT25m20s, drop_variables=drop_vars)
    var_name = 'inst_up_long_dome_resist'
    qc_name = 'qc_' + var_name
    ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=True)
    del ds[qc_name].attrs['flag_meanings']
    del ds[qc_name].attrs['flag_assessments']

    # Start from the smallest integer type; each higher test number should
    # force the QC variable to a wider type.
    ds[qc_name] = ds[qc_name].astype(np.int8)
    ds.qcfilter.add_test(var_name, index=[1], test_number=9, test_meaning='First test')
    assert ds[qc_name].attrs['flag_masks'][0].dtype == np.uint32
    assert ds[qc_name].dtype == np.int16

    ds.qcfilter.add_test(var_name, index=[1], test_number=17, test_meaning='Second test')
    assert ds[qc_name].dtype == np.int32

    ds.qcfilter.add_test(var_name, index=[1], test_number=33, test_meaning='Third test')
    assert ds[qc_name].dtype == np.int64
    assert ds[qc_name].attrs['flag_masks'][0].dtype == np.uint64

    # Recycling an unused test number should also work with the widened type.
    ds.qcfilter.add_test(var_name, index=[1], test_meaning='Fourth test', recycle=True)
def test_qc_speed():
    """
    This tests the speed of the QC module to ensure changes do not significantly
    slow down the module's processing.
    """
    # Imported locally because the local variable `time` below shadows the
    # name, and perf_counter() (a monotonic clock) is the correct tool for
    # elapsed-time measurement; datetime.utcnow() is deprecated and wall-clock
    # time can jump.
    from time import perf_counter

    n_variables = 100
    n_samples = 100
    time = pd.date_range(start='2022-02-17 00:00:00', end='2022-02-18 00:00:00', periods=n_samples)
    # Create data variables with random noise
    np.random.seed(42)
    noisy_data_mapping = {f'data_var_{i}': np.random.random(time.shape) for i in range(n_variables)}
    ds = xr.Dataset(
        data_vars={name: ('time', data) for name, data in noisy_data_mapping.items()},
        coords={'time': time},
    )
    start = perf_counter()
    for name, var in noisy_data_mapping.items():
        failed_qc = var > 0.75  # Consider data above 0.75 as bad. Negligible time here.
        ds.qcfilter.add_test(name, index=failed_qc, test_meaning='Value above threshold')
    elapsed_seconds = perf_counter() - start
    assert elapsed_seconds <= 3
@pytest.mark.skipif(not PYSP2_AVAILABLE, reason="PySP2 is not installed.")
def test_sp2_particle_config():
    """Verify the default SP2 particle criteria attribute values."""
    expected_values = {
        'ScatMaxPeakHt1': 60000,
        'ScatMinPeakHt1': 250,
        'ScatMaxPeakHt2': 60000,
        'ScatMinPeakHt2': 250,
        'ScatMinWidth': 10,
        'ScatMaxWidth': 90,
        'ScatMinPeakPos': 20,
        'ScatMaxPeakPos': 90,
        'IncanMinPeakHt1': 200,
        'IncanMinPeakHt2': 200,
        'IncanMaxPeakHt1': 60000,
        'IncanMaxPeakHt2': 60000,
        'IncanMinWidth': 5,
        'IncanMaxWidth': np.inf,
        'IncanMinPeakPos': 20,
        'IncanMaxPeakPos': 90,
        'IncanMinPeakRatio': 0.1,
        'IncanMaxPeakRatio': 25,
        'IncanMaxPeakOffset': 11,
        'c0Mass1': 0,
        'c1Mass1': 0.0001896,
        'c2Mass1': 0,
        'c3Mass1': 0,
        'c0Mass2': 0,
        'c1Mass2': 0.0016815,
        'c2Mass2': 0,
        'c3Mass2': 0,
        'c0Scat1': 0,
        'c1Scat1': 78.141,
        'c2Scat1': 0,
        'c0Scat2': 0,
        'c1Scat2': 752.53,
        'c2Scat2': 0,
        'densitySO4': 1.8,
        'densityBC': 1.8,
        'TempSTP': 273.15,
        'PressSTP': 1013.25,
    }
    particle_config = SP2ParticleCriteria()
    for attr_name, expected in expected_values.items():
        assert getattr(particle_config, attr_name) == expected
def test_bsrn_limits_test():
    """
    End-to-end check of ``qcfilter.bsrn_limits_test()`` and
    ``qcfilter.bsrn_comparison_tests()`` on BRS radiometer data, run once
    with numpy arrays and once with dask arrays.

    Synthetic excursions are injected into each radiation variable so both
    the "physically possible" and "extremely rare" limit tests trip, then
    the exact number of flagged samples per test is asserted.
    """
    for use_dask in [False, True]:
        ds_object = read_netcdf(EXAMPLE_BRS)
        var_names = list(ds_object.data_vars)
        # Remove QC variables to make testing easier
        for var_name in var_names:
            if var_name.startswith('qc_'):
                del ds_object[var_name]
        # Add atmospheric temperature fake data
        ds_object['temp_mean'] = xr.DataArray(
            data=np.full(ds_object.time.size, 13.5), dims=['time'],
            attrs={'long_name': 'Atmospheric air temperature', 'units': 'degC'})
        # Make a short direct variable since BRS does not have one
        ds_object['short_direct'] = copy.deepcopy(ds_object['short_direct_normal'])
        ds_object['short_direct'].attrs['ancillary_variables'] = 'qc_short_direct'
        ds_object['short_direct'].attrs['long_name'] = 'Shortwave direct irradiance, pyrheliometer'
        # NOTE(review): sza and Sa are not used below — confirm this call is
        # still needed or whether it is exercised only for coverage.
        sza, Sa = _calculate_solar_parameters(ds_object, 'lat', 'lon', 1360.8)
        ds_object['short_direct'].data = ds_object['short_direct'].data * .5
        # Make up long variable since BRS does not have values
        ds_object['up_long_hemisp'].data = copy.deepcopy(ds_object['down_long_hemisp_shaded'].data)
        data = copy.deepcopy(ds_object['down_short_hemisp'].data)
        ds_object['up_short_hemisp'].data = data
        # Test that nothing happens when no variable names are provided
        ds_object.qcfilter.bsrn_limits_test()
        # Mess with data to get tests to trip
        data = ds_object['down_short_hemisp'].data
        data[200:300] = data[200:300] - 10
        data[800:850] += 330
        data[1340:1380] += 600
        ds_object['down_short_hemisp'].data = data
        data = ds_object['down_short_diffuse_hemisp'].data
        data[200:250] = data[200:250] - 1.9
        data[250:300] = data[250:300] - 3.9
        data[800:850] += 330
        data[1340:1380] += 600
        ds_object['down_short_diffuse_hemisp'].data = data
        data = ds_object['short_direct_normal'].data
        data[200:250] = data[200:250] - 1.9
        data[250:300] = data[250:300] - 3.9
        data[800:850] += 600
        data[1340:1380] += 800
        ds_object['short_direct_normal'].data = data
        data = ds_object['short_direct'].data
        data[200:250] = data[200:250] - 1.9
        data[250:300] = data[250:300] - 3.9
        data[800:850] += 300
        data[1340:1380] += 800
        ds_object['short_direct'].data = data
        data = ds_object['down_long_hemisp_shaded'].data
        data[200:250] = data[200:250] - 355
        data[250:300] = data[250:300] - 400
        data[800:850] += 200
        data[1340:1380] += 400
        ds_object['down_long_hemisp_shaded'].data = data
        data = ds_object['up_long_hemisp'].data
        data[200:250] = data[200:250] - 355
        data[250:300] = data[250:300] - 400
        data[800:850] += 300
        data[1340:1380] += 500
        ds_object['up_long_hemisp'].data = data
        # Default test: BSRN "physically possible" limits (tests 1 and 2).
        ds_object.qcfilter.bsrn_limits_test(
            gbl_SW_dn_name='down_short_hemisp',
            glb_diffuse_SW_dn_name='down_short_diffuse_hemisp',
            direct_normal_SW_dn_name='short_direct_normal',
            glb_SW_up_name='up_short_hemisp',
            glb_LW_dn_name='down_long_hemisp_shaded',
            glb_LW_up_name='up_long_hemisp',
            direct_SW_dn_name='short_direct',
            use_dask=use_dask)
        assert ds_object['qc_down_short_hemisp'].attrs['flag_masks'] == [1, 2]
        assert ds_object['qc_down_short_hemisp'].attrs['flag_meanings'][-2] == \
            'Value less than BSRN physically possible limit of -4.0 W/m^2'
        assert ds_object['qc_down_short_hemisp'].attrs['flag_meanings'][-1] == \
            'Value greater than BSRN physically possible limit'
        assert ds_object['qc_down_short_diffuse_hemisp'].attrs['flag_masks'] == [1, 2]
        assert ds_object['qc_down_short_diffuse_hemisp'].attrs['flag_assessments'] == ['Bad', 'Bad']
        assert ds_object['qc_short_direct'].attrs['flag_masks'] == [1, 2]
        assert ds_object['qc_short_direct'].attrs['flag_assessments'] == ['Bad', 'Bad']
        assert ds_object['qc_short_direct'].attrs['flag_meanings'] == \
            ['Value less than BSRN physically possible limit of -4.0 W/m^2',
             'Value greater than BSRN physically possible limit']
        assert ds_object['qc_short_direct_normal'].attrs['flag_masks'] == [1, 2]
        assert ds_object['qc_short_direct_normal'].attrs['flag_meanings'][-1] == \
            'Value greater than BSRN physically possible limit'
        assert ds_object['qc_down_short_hemisp'].attrs['flag_masks'] == [1, 2]
        assert ds_object['qc_down_short_hemisp'].attrs['flag_meanings'][-1] == \
            'Value greater than BSRN physically possible limit'
        assert ds_object['qc_up_short_hemisp'].attrs['flag_masks'] == [1, 2]
        assert ds_object['qc_up_short_hemisp'].attrs['flag_meanings'][-1] == \
            'Value greater than BSRN physically possible limit'
        assert ds_object['qc_up_long_hemisp'].attrs['flag_masks'] == [1, 2]
        assert ds_object['qc_up_long_hemisp'].attrs['flag_meanings'][-1] == \
            'Value greater than BSRN physically possible limit of 900.0 W/m^2'
        # Second pass: "Extremely Rare" limits append tests 3 and 4 (masks 4, 8).
        ds_object.qcfilter.bsrn_limits_test(
            test="Extremely Rare",
            gbl_SW_dn_name='down_short_hemisp',
            glb_diffuse_SW_dn_name='down_short_diffuse_hemisp',
            direct_normal_SW_dn_name='short_direct_normal',
            glb_SW_up_name='up_short_hemisp',
            glb_LW_dn_name='down_long_hemisp_shaded',
            glb_LW_up_name='up_long_hemisp',
            direct_SW_dn_name='short_direct',
            use_dask=use_dask)
        assert ds_object['qc_down_short_hemisp'].attrs['flag_masks'] == [1, 2, 4, 8]
        assert ds_object['qc_down_short_diffuse_hemisp'].attrs['flag_masks'] == [1, 2, 4, 8]
        assert ds_object['qc_short_direct'].attrs['flag_masks'] == [1, 2, 4, 8]
        assert ds_object['qc_short_direct_normal'].attrs['flag_masks'] == [1, 2, 4, 8]
        assert ds_object['qc_up_short_hemisp'].attrs['flag_masks'] == [1, 2, 4, 8]
        assert ds_object['qc_up_long_hemisp'].attrs['flag_masks'] == [1, 2, 4, 8]
        assert ds_object['qc_up_long_hemisp'].attrs['flag_meanings'][-1] == \
            'Value greater than BSRN extremely rare limit of 700.0 W/m^2'
        assert ds_object['qc_down_long_hemisp_shaded'].attrs['flag_meanings'][-1] == \
            'Value greater than BSRN extremely rare limit of 500.0 W/m^2'
        # Flagged-sample counts per variable and test number follow directly
        # from the excursions injected above.
        # down_short_hemisp
        result = ds_object.qcfilter.get_qc_test_mask('down_short_hemisp', test_number=1)
        assert np.sum(result) == 100
        result = ds_object.qcfilter.get_qc_test_mask('down_short_hemisp', test_number=2)
        assert np.sum(result) == 26
        result = ds_object.qcfilter.get_qc_test_mask('down_short_hemisp', test_number=3)
        assert np.sum(result) == 337
        result = ds_object.qcfilter.get_qc_test_mask('down_short_hemisp', test_number=4)
        assert np.sum(result) == 66
        # down_short_diffuse_hemisp
        result = ds_object.qcfilter.get_qc_test_mask('down_short_diffuse_hemisp', test_number=1)
        assert np.sum(result) == 50
        result = ds_object.qcfilter.get_qc_test_mask('down_short_diffuse_hemisp', test_number=2)
        assert np.sum(result) == 56
        result = ds_object.qcfilter.get_qc_test_mask('down_short_diffuse_hemisp', test_number=3)
        assert np.sum(result) == 100
        result = ds_object.qcfilter.get_qc_test_mask('down_short_diffuse_hemisp', test_number=4)
        assert np.sum(result) == 90
        # short_direct_normal
        result = ds_object.qcfilter.get_qc_test_mask('short_direct_normal', test_number=1)
        assert np.sum(result) == 46
        result = ds_object.qcfilter.get_qc_test_mask('short_direct_normal', test_number=2)
        assert np.sum(result) == 26
        result = ds_object.qcfilter.get_qc_test_mask('short_direct_normal', test_number=3)
        assert np.sum(result) == 94
        result = ds_object.qcfilter.get_qc_test_mask('short_direct_normal', test_number=4)
        assert np.sum(result) == 38
        # short_direct_normal
        result = ds_object.qcfilter.get_qc_test_mask('short_direct', test_number=1)
        assert np.sum(result) == 41
        result = ds_object.qcfilter.get_qc_test_mask('short_direct', test_number=2)
        assert np.sum(result) == 607
        result = ds_object.qcfilter.get_qc_test_mask('short_direct', test_number=3)
        assert np.sum(result) == 89
        result = ds_object.qcfilter.get_qc_test_mask('short_direct', test_number=4)
        assert np.sum(result) == 79
        # down_long_hemisp_shaded
        result = ds_object.qcfilter.get_qc_test_mask('down_long_hemisp_shaded', test_number=1)
        assert np.sum(result) == 50
        result = ds_object.qcfilter.get_qc_test_mask('down_long_hemisp_shaded', test_number=2)
        assert np.sum(result) == 40
        result = ds_object.qcfilter.get_qc_test_mask('down_long_hemisp_shaded', test_number=3)
        assert np.sum(result) == 89
        result = ds_object.qcfilter.get_qc_test_mask('down_long_hemisp_shaded', test_number=4)
        assert np.sum(result) == 90
        # up_long_hemisp
        result = ds_object.qcfilter.get_qc_test_mask('up_long_hemisp', test_number=1)
        assert np.sum(result) == 50
        result = ds_object.qcfilter.get_qc_test_mask('up_long_hemisp', test_number=2)
        assert np.sum(result) == 40
        result = ds_object.qcfilter.get_qc_test_mask('up_long_hemisp', test_number=3)
        assert np.sum(result) == 89
        result = ds_object.qcfilter.get_qc_test_mask('up_long_hemisp', test_number=4)
        assert np.sum(result) == 90
        # Change data values to trip tests
        ds_object['down_short_diffuse_hemisp'].data[0:100] = \
            ds_object['down_short_diffuse_hemisp'].data[0:100] + 100
        ds_object['up_long_hemisp'].data[0:100] = \
            ds_object['up_long_hemisp'].data[0:100] - 200
        # Cross-variable comparison tests are added as Indeterminate (test 5/6).
        ds_object.qcfilter.bsrn_comparison_tests(
            ['Global over Sum SW Ratio', 'Diffuse Ratio', 'SW up', 'LW down to air temp',
             'LW up to air temp', 'LW down to LW up'],
            gbl_SW_dn_name='down_short_hemisp',
            glb_diffuse_SW_dn_name='down_short_diffuse_hemisp',
            direct_normal_SW_dn_name='short_direct_normal',
            glb_SW_up_name='up_short_hemisp',
            glb_LW_dn_name='down_long_hemisp_shaded',
            glb_LW_up_name='up_long_hemisp',
            air_temp_name='temp_mean',
            test_assessment='Indeterminate',
            lat_name='lat',
            lon_name='lon',
            use_dask=use_dask
        )
        # Ratio of Global over Sum SW
        result = ds_object.qcfilter.get_qc_test_mask('down_short_hemisp', test_number=5)
        assert np.sum(result) == 190
        # Diffuse Ratio
        result = ds_object.qcfilter.get_qc_test_mask('down_short_hemisp', test_number=6)
        assert np.sum(result) == 47
        # Shortwave up comparison
        result = ds_object.qcfilter.get_qc_test_mask('up_short_hemisp', test_number=5)
        assert np.sum(result) == 226
        # Longwave up to air temperature comparison
        result = ds_object.qcfilter.get_qc_test_mask('up_long_hemisp', test_number=5)
        assert np.sum(result) == 290
        # Longwave down to air temperature compaison
        result = ds_object.qcfilter.get_qc_test_mask('down_long_hemisp_shaded', test_number=5)
        assert np.sum(result) == 976
        # Lonwave down to longwave up comparison
        result = ds_object.qcfilter.get_qc_test_mask('down_long_hemisp_shaded', test_number=6)
        assert np.sum(result) == 100
def test_add_atmospheric_pressure_test():
    """
    Exercise ``qcfilter.add_atmospheric_pressure_test`` on MET data, with the
    numpy path first and the dask path second.

    Two segments of the pressure record are pushed outside the acceptable
    range so a known number of samples is flagged; the exact QC sums are
    asserted for both code paths.
    """
    ds_object = read_netcdf(EXAMPLE_MET1, cleanup_qc=True)
    ds_object.load()
    variable = 'atmos_pressure'
    qc_variable = 'qc_' + variable  # fixed typo: was 'qc_varialbe'
    data = ds_object[variable].values
    # Push one segment above and one below the pressure limits to trip the test.
    data[200:250] = data[200:250] + 5
    data[500:550] = data[500:550] - 4.6
    ds_object[variable].values = data
    result = ds_object.qcfilter.add_atmospheric_pressure_test(variable)
    assert isinstance(result, dict)
    assert np.sum(ds_object[qc_variable].values) == 1600
    # Re-run with dask after clearing the QC variable; only test 1 remains.
    del ds_object[qc_variable]
    ds_object.qcfilter.add_atmospheric_pressure_test(variable, use_dask=True)
    assert np.sum(ds_object[qc_variable].values) == 100
    # Bug fix: the original accessed the bound method (`ds_object.close`)
    # without calling it, so the dataset was never actually closed.
    ds_object.close()
    del ds_object
def test_read_yaml_supplemental_qc():
    """Round-trip the supplemental-QC YAML reader and applier on MET data."""
    # Read the YAML directly, then via its parent directory with filters.
    ds = read_netcdf(EXAMPLE_MET1, keep_variables=['temp_mean', 'qc_temp_mean'], cleanup_qc=True)
    qc_info = read_yaml_supplemental_qc(ds, EXAMPLE_MET_YAML)
    assert isinstance(qc_info, dict)
    assert len(qc_info) == 3
    qc_info = read_yaml_supplemental_qc(
        ds, Path(EXAMPLE_MET_YAML).parent, variables='temp_mean',
        assessments=['Bad', 'Incorrect', 'Suspect'])
    assert len(qc_info) == 2
    assert sorted(qc_info['temp_mean'].keys()) == ['Bad', 'Suspect']
    # A non-existent file with quiet=True returns None instead of raising.
    qc_info = read_yaml_supplemental_qc(ds, 'sgpmetE13.b1.yaml', quiet=True)
    assert qc_info is None
    # Apply everything, including the catch-all "all" flags.
    apply_supplemental_qc(ds, EXAMPLE_MET_YAML)
    attrs = ds['qc_temp_mean'].attrs
    assert attrs['flag_masks'] == [1, 2, 4, 8, 16, 32, 64, 128, 256]
    assert attrs['flag_assessments'] == [
        'Bad', 'Bad', 'Bad', 'Indeterminate', 'Bad', 'Bad', 'Suspect', 'Good', 'Bad']
    assert attrs['flag_meanings'][0] == 'Value is equal to missing_value.'
    assert attrs['flag_meanings'][-1] == 'Values are bad for all'
    assert attrs['flag_meanings'][-2] == 'Values are good'
    assert np.sum(ds['qc_temp_mean'].values) == 81344
    assert np.count_nonzero(ds['qc_temp_mean'].values) == 1423
    del ds
    # With apply_all=False the "all" entry must not be applied.
    ds = read_netcdf(EXAMPLE_MET1, keep_variables=['temp_mean', 'qc_temp_mean'], cleanup_qc=True)
    apply_supplemental_qc(ds, Path(EXAMPLE_MET_YAML).parent, apply_all=False)
    assert ds['qc_temp_mean'].attrs['flag_masks'] == [1, 2, 4, 8, 16, 32, 64, 128]
    # Excluding a variable keeps its "all" flags out while others get them.
    ds = read_netcdf(EXAMPLE_MET1, cleanup_qc=True)
    apply_supplemental_qc(ds, Path(EXAMPLE_MET_YAML).parent, exclude_all_variables='temp_mean')
    assert ds['qc_rh_mean'].attrs['flag_masks'] == [1, 2, 4, 8, 16, 32, 64, 128]
    assert 'Values are bad for all' in ds['qc_rh_mean'].attrs['flag_meanings']
    assert 'Values are bad for all' not in ds['qc_temp_mean'].attrs['flag_meanings']
    del ds
    # Assessment filtering limits which flags are applied per variable.
    ds = read_netcdf(EXAMPLE_MET1, keep_variables=['temp_mean', 'rh_mean'])
    apply_supplemental_qc(
        ds, Path(EXAMPLE_MET_YAML).parent, exclude_all_variables='temp_mean',
        assessments='Bad', quiet=True)
    assert ds['qc_rh_mean'].attrs['flag_assessments'] == ['Bad']
    assert ds['qc_temp_mean'].attrs['flag_assessments'] == ['Bad', 'Bad']
    assert np.sum(ds['qc_rh_mean'].values) == 124
    assert np.sum(ds['qc_temp_mean'].values) == 2840
    del ds
| [
"numpy.ma.count_masked",
"numpy.testing.assert_raises",
"numpy.count_nonzero",
"numpy.array",
"act.qc.arm.add_dqr_to_qc",
"copy.deepcopy",
"act.io.armfiles.read_netcdf",
"pandas.date_range",
"pathlib.Path",
"numpy.random.random",
"numpy.random.seed",
"pytest.mark.skipif",
"act.qc.radiometer_... | [((41515, 41588), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not PYSP2_AVAILABLE)'], {'reason': '"""PySP2 is not installed."""'}), "(not PYSP2_AVAILABLE, reason='PySP2 is not installed.')\n", (41533, 41588), False, 'import pytest\n'), ((786, 812), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MFRSR'], {}), '(EXAMPLE_MFRSR)\n', (797, 812), False, 'from act.io.armfiles import read_netcdf\n'), ((847, 868), 'act.qc.radiometer_tests.fft_shading_test', 'fft_shading_test', (['obj'], {}), '(obj)\n', (863, 868), False, 'from act.qc.radiometer_tests import fft_shading_test\n'), ((1020, 1045), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {}), '(EXAMPLE_MET1)\n', (1031, 1045), False, 'from act.io.armfiles import read_netcdf\n'), ((2093, 2118), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {}), '(EXAMPLE_MET1)\n', (2104, 2118), False, 'from act.io.armfiles import read_netcdf\n'), ((2647, 2674), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_METE40'], {}), '(EXAMPLE_METE40)\n', (2658, 2674), False, 'from act.io.armfiles import read_netcdf\n'), ((4331, 4361), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {}), '(EXAMPLE_IRT25m20s)\n', (4342, 4361), False, 'from act.io.armfiles import read_netcdf\n'), ((7294, 7351), 'numpy.array', 'np.array', (["ds_object['qc_' + var_name].attrs['flag_masks']"], {}), "(ds_object['qc_' + var_name].attrs['flag_masks'])\n", (7302, 7351), True, 'import numpy as np\n'), ((7436, 7500), 'pytest.raises', 'pytest.raises', (['ValueError', 'ds_object.qcfilter.add_test', 'var_name'], {}), '(ValueError, ds_object.qcfilter.add_test, var_name)\n', (7449, 7500), False, 'import pytest\n'), ((7505, 7572), 'pytest.raises', 'pytest.raises', (['ValueError', 'ds_object.qcfilter.remove_test', 'var_name'], {}), '(ValueError, ds_object.qcfilter.remove_test, var_name)\n', (7518, 7572), False, 'import pytest\n'), ((7673, 7717), 'pytest.raises', 'pytest.raises', 
(['ValueError', 'parse_bit', '[1, 2]'], {}), '(ValueError, parse_bit, [1, 2])\n', (7686, 7717), False, 'import pytest\n'), ((7722, 7762), 'pytest.raises', 'pytest.raises', (['ValueError', 'parse_bit', '(-1)'], {}), '(ValueError, parse_bit, -1)\n', (7735, 7762), False, 'import pytest\n'), ((8083, 8113), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {}), '(EXAMPLE_IRT25m20s)\n', (8094, 8113), False, 'from act.io.armfiles import read_netcdf\n'), ((11301, 11331), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {}), '(EXAMPLE_IRT25m20s)\n', (11312, 11331), False, 'from act.io.armfiles import read_netcdf\n'), ((13339, 13369), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {}), '(EXAMPLE_IRT25m20s)\n', (13350, 13369), False, 'from act.io.armfiles import read_netcdf\n'), ((14647, 14677), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {}), '(EXAMPLE_IRT25m20s)\n', (14658, 14677), False, 'from act.io.armfiles import read_netcdf\n'), ((14874, 14893), 'dask.array.from_array', 'da.from_array', (['data'], {}), '(data)\n', (14887, 14893), True, 'import dask.array as da\n'), ((15952, 16037), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['fail_min']", 'limit_value'], {}), "(ds_object[result['qc_variable_name']].attrs['fail_min'], limit_value\n )\n", (15962, 16037), True, 'import numpy as np\n'), ((17242, 17327), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['fail_max']", 'limit_value'], {}), "(ds_object[result['qc_variable_name']].attrs['fail_max'], limit_value\n )\n", (17252, 17327), True, 'import numpy as np\n'), ((18584, 18669), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['warn_min']", 'limit_value'], {}), "(ds_object[result['qc_variable_name']].attrs['warn_min'], limit_value\n )\n", (18594, 18669), True, 'import numpy as np\n'), ((19984, 20069), 'numpy.isclose', 'np.isclose', 
(["ds_object[result['qc_variable_name']].attrs['warn_max']", 'limit_value'], {}), "(ds_object[result['qc_variable_name']].attrs['warn_max'], limit_value\n )\n", (19994, 20069), True, 'import numpy as np\n'), ((21263, 21352), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['fail_equal_to']", 'limit_value'], {}), "(ds_object[result['qc_variable_name']].attrs['fail_equal_to'],\n limit_value)\n", (21273, 21352), True, 'import numpy as np\n'), ((22667, 22760), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['warn_not_equal_to']", 'limit_value'], {}), "(ds_object[result['qc_variable_name']].attrs['warn_not_equal_to'],\n limit_value)\n", (22677, 22760), True, 'import numpy as np\n'), ((24069, 24162), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['fail_lower_range']", 'limit_value1'], {}), "(ds_object[result['qc_variable_name']].attrs['fail_lower_range'],\n limit_value1)\n", (24079, 24162), True, 'import numpy as np\n'), ((24410, 24503), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['fail_upper_range']", 'limit_value2'], {}), "(ds_object[result['qc_variable_name']].attrs['fail_upper_range'],\n limit_value2)\n", (24420, 24503), True, 'import numpy as np\n'), ((26192, 26292), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['fail_lower_range_inner']", 'limit_value1'], {}), "(ds_object[result['qc_variable_name']].attrs[\n 'fail_lower_range_inner'], limit_value1)\n", (26202, 26292), True, 'import numpy as np\n'), ((26574, 26674), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['fail_upper_range_inner']", 'limit_value2'], {}), "(ds_object[result['qc_variable_name']].attrs[\n 'fail_upper_range_inner'], limit_value2)\n", (26584, 26674), True, 'import numpy as np\n'), ((28080, 28165), 'numpy.isclose', 'np.isclose', (["ds_object[result['qc_variable_name']].attrs['warn_delta']", 'test_limit'], {}), 
"(ds_object[result['qc_variable_name']].attrs['warn_delta'],\n test_limit)\n", (28090, 28165), True, 'import numpy as np\n'), ((28488, 28518), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {}), '(EXAMPLE_IRT25m20s)\n', (28499, 28518), False, 'from act.io.armfiles import read_netcdf\n'), ((29439, 29469), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {}), '(EXAMPLE_IRT25m20s)\n', (29450, 29469), False, 'from act.io.armfiles import read_netcdf\n'), ((30552, 30622), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {'drop_variables': "['base_time', 'time_offset']"}), "(EXAMPLE_MET1, drop_variables=['base_time', 'time_offset'])\n", (30563, 30622), False, 'from act.io.armfiles import read_netcdf\n'), ((31034, 31051), 'copy.deepcopy', 'copy.deepcopy', (['ds'], {}), '(ds)\n', (31047, 31051), False, 'import copy\n'), ((31169, 31220), 'numpy.isclose', 'np.isclose', (['ds_1[var_name].values', '(98.86)'], {'atol': '(0.01)'}), '(ds_1[var_name].values, 98.86, atol=0.01)\n', (31179, 31220), True, 'import numpy as np\n'), ((31232, 31283), 'numpy.isclose', 'np.isclose', (['ds_2[var_name].values', '(99.15)'], {'atol': '(0.01)'}), '(ds_2[var_name].values, 99.15, atol=0.01)\n', (31242, 31283), True, 'import numpy as np\n'), ((31361, 31378), 'copy.deepcopy', 'copy.deepcopy', (['ds'], {}), '(ds)\n', (31374, 31378), False, 'import copy\n'), ((31498, 31549), 'numpy.isclose', 'np.isclose', (['ds_2[var_name].values', '(99.15)'], {'atol': '(0.01)'}), '(ds_2[var_name].values, 99.15, atol=0.01)\n', (31508, 31549), True, 'import numpy as np\n'), ((31738, 31755), 'copy.deepcopy', 'copy.deepcopy', (['ds'], {}), '(ds)\n', (31751, 31755), False, 'import copy\n'), ((31959, 31984), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {}), '(EXAMPLE_MET1)\n', (31970, 31984), False, 'from act.io.armfiles import read_netcdf\n'), ((32206, 32231), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {}), 
'(EXAMPLE_MET1)\n', (32217, 32231), False, 'from act.io.armfiles import read_netcdf\n'), ((32486, 32511), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {}), '(EXAMPLE_MET1)\n', (32497, 32511), False, 'from act.io.armfiles import read_netcdf\n'), ((33190, 33219), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_CO2FLX4M'], {}), '(EXAMPLE_CO2FLX4M)\n', (33201, 33219), False, 'from act.io.armfiles import read_netcdf\n'), ((33916, 33944), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['[EXAMPLE_CEIL1]'], {}), '([EXAMPLE_CEIL1])\n', (33927, 33944), False, 'from act.io.armfiles import read_netcdf\n'), ((37753, 37804), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {'drop_variables': 'drop_vars'}), '(EXAMPLE_MET1, drop_variables=drop_vars)\n', (37764, 37804), False, 'from act.io.armfiles import read_netcdf\n'), ((37838, 37855), 'copy.deepcopy', 'copy.deepcopy', (['ds'], {}), '(ds)\n', (37851, 37855), False, 'import copy\n'), ((39383, 39439), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_IRT25m20s'], {'drop_variables': 'drop_vars'}), '(EXAMPLE_IRT25m20s, drop_variables=drop_vars)\n', (39394, 39439), False, 'from act.io.armfiles import read_netcdf\n'), ((40770, 40862), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2022-02-17 00:00:00"""', 'end': '"""2022-02-18 00:00:00"""', 'periods': 'n_samples'}), "(start='2022-02-17 00:00:00', end='2022-02-18 00:00:00',\n periods=n_samples)\n", (40783, 40862), True, 'import pandas as pd\n'), ((40910, 40928), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (40924, 40928), True, 'import numpy as np\n'), ((41189, 41206), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (41204, 41206), False, 'from datetime import datetime\n'), ((41650, 41671), 'act.qc.sp2.SP2ParticleCriteria', 'SP2ParticleCriteria', ([], {}), '()\n', (41669, 41671), False, 'from act.qc.sp2 import SP2ParticleCriteria, PYSP2_AVAILABLE\n'), ((55506, 55548), 
'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {'cleanup_qc': '(True)'}), '(EXAMPLE_MET1, cleanup_qc=True)\n', (55517, 55548), False, 'from act.io.armfiles import read_netcdf\n'), ((56219, 56311), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {'keep_variables': "['temp_mean', 'qc_temp_mean']", 'cleanup_qc': '(True)'}), "(EXAMPLE_MET1, keep_variables=['temp_mean', 'qc_temp_mean'],\n cleanup_qc=True)\n", (56230, 56311), False, 'from act.io.armfiles import read_netcdf\n'), ((56322, 56376), 'act.qc.add_supplemental_qc.read_yaml_supplemental_qc', 'read_yaml_supplemental_qc', (['ds_object', 'EXAMPLE_MET_YAML'], {}), '(ds_object, EXAMPLE_MET_YAML)\n', (56347, 56376), False, 'from act.qc.add_supplemental_qc import read_yaml_supplemental_qc, apply_supplemental_qc\n'), ((56754, 56823), 'act.qc.add_supplemental_qc.read_yaml_supplemental_qc', 'read_yaml_supplemental_qc', (['ds_object', '"""sgpmetE13.b1.yaml"""'], {'quiet': '(True)'}), "(ds_object, 'sgpmetE13.b1.yaml', quiet=True)\n", (56779, 56823), False, 'from act.qc.add_supplemental_qc import read_yaml_supplemental_qc, apply_supplemental_qc\n'), ((56855, 56905), 'act.qc.add_supplemental_qc.apply_supplemental_qc', 'apply_supplemental_qc', (['ds_object', 'EXAMPLE_MET_YAML'], {}), '(ds_object, EXAMPLE_MET_YAML)\n', (56876, 56905), False, 'from act.qc.add_supplemental_qc import read_yaml_supplemental_qc, apply_supplemental_qc\n'), ((57600, 57692), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {'keep_variables': "['temp_mean', 'qc_temp_mean']", 'cleanup_qc': '(True)'}), "(EXAMPLE_MET1, keep_variables=['temp_mean', 'qc_temp_mean'],\n cleanup_qc=True)\n", (57611, 57692), False, 'from act.io.armfiles import read_netcdf\n'), ((57881, 57923), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {'cleanup_qc': '(True)'}), '(EXAMPLE_MET1, cleanup_qc=True)\n', (57892, 57923), False, 'from act.io.armfiles import read_netcdf\n'), ((58329, 58395), 
'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_MET1'], {'keep_variables': "['temp_mean', 'rh_mean']"}), "(EXAMPLE_MET1, keep_variables=['temp_mean', 'rh_mean'])\n", (58340, 58395), False, 'from act.io.armfiles import read_netcdf\n'), ((938, 963), 'numpy.nansum', 'np.nansum', (['qc_data.values'], {}), '(qc_data.values)\n', (947, 963), True, 'import numpy as np\n'), ((2788, 2825), 'act.qc.arm.add_dqr_to_qc', 'add_dqr_to_qc', (['obj'], {'variable': 'variable'}), '(obj, variable=variable)\n', (2801, 2825), False, 'from act.qc.arm import add_dqr_to_qc\n'), ((2955, 2992), 'act.qc.arm.add_dqr_to_qc', 'add_dqr_to_qc', (['obj'], {'variable': 'variable'}), '(obj, variable=variable)\n', (2968, 2992), False, 'from act.qc.arm import add_dqr_to_qc\n'), ((3008, 3026), 'act.qc.arm.add_dqr_to_qc', 'add_dqr_to_qc', (['obj'], {}), '(obj)\n', (3021, 3026), False, 'from act.qc.arm import add_dqr_to_qc\n'), ((3035, 3095), 'act.qc.arm.add_dqr_to_qc', 'add_dqr_to_qc', (['obj'], {'variable': 'variable', 'exclude': "['D190529.4']"}), "(obj, variable=variable, exclude=['D190529.4'])\n", (3048, 3095), False, 'from act.qc.arm import add_dqr_to_qc\n'), ((3104, 3164), 'act.qc.arm.add_dqr_to_qc', 'add_dqr_to_qc', (['obj'], {'variable': 'variable', 'include': "['D400101.1']"}), "(obj, variable=variable, include=['D400101.1'])\n", (3117, 3164), False, 'from act.qc.arm import add_dqr_to_qc\n'), ((6300, 6324), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (6318, 6324), True, 'import numpy as np\n'), ((6647, 6671), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (6665, 6671), True, 'import numpy as np\n'), ((7775, 7789), 'act.qc.qcfilter.set_bit', 'set_bit', (['(0)', '(16)'], {}), '(0, 16)\n', (7782, 7789), False, 'from act.qc.qcfilter import parse_bit, set_bit, unset_bit\n'), ((8717, 8729), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (8723, 8729), True, 'import numpy as np\n'), ((8848, 8860), 'numpy.sum', 'np.sum', (['data'], 
{}), '(data)\n', (8854, 8860), True, 'import numpy as np\n'), ((11699, 11745), 'numpy.sum', 'np.sum', (['ds_object[expected_qc_var_name].values'], {}), '(ds_object[expected_qc_var_name].values)\n', (11705, 11745), True, 'import numpy as np\n'), ((12085, 12131), 'numpy.sum', 'np.sum', (['ds_object[expected_qc_var_name].values'], {}), '(ds_object[expected_qc_var_name].values)\n', (12091, 12131), True, 'import numpy as np\n'), ((12464, 12510), 'numpy.sum', 'np.sum', (['ds_object[expected_qc_var_name].values'], {}), '(ds_object[expected_qc_var_name].values)\n', (12470, 12510), True, 'import numpy as np\n'), ((12835, 12881), 'numpy.sum', 'np.sum', (['ds_object[expected_qc_var_name].values'], {}), '(ds_object[expected_qc_var_name].values)\n', (12841, 12881), True, 'import numpy as np\n'), ((13936, 13950), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (13942, 13950), True, 'import numpy as np\n'), ((14085, 14099), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (14091, 14099), True, 'import numpy as np\n'), ((14364, 14401), 'numpy.sum', 'np.sum', (['ds_object[qc_var_name].values'], {}), '(ds_object[qc_var_name].values)\n', (14370, 14401), True, 'import numpy as np\n'), ((14566, 14603), 'numpy.sum', 'np.sum', (['ds_object[qc_var_name].values'], {}), '(ds_object[qc_var_name].values)\n', (14572, 14603), True, 'import numpy as np\n'), ((15270, 15283), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (15278, 15283), True, 'import numpy as np\n'), ((15686, 15710), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (15704, 15710), True, 'import numpy as np\n'), ((16406, 16418), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (16412, 16418), True, 'import numpy as np\n'), ((16624, 16636), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (16630, 16636), True, 'import numpy as np\n'), ((16976, 17000), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (16994, 17000), True, 'import numpy as np\n'), ((17697, 
17709), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (17703, 17709), True, 'import numpy as np\n'), ((17902, 17914), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (17908, 17914), True, 'import numpy as np\n'), ((18317, 18341), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (18335, 18341), True, 'import numpy as np\n'), ((19018, 19030), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (19024, 19030), True, 'import numpy as np\n'), ((19226, 19238), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (19232, 19238), True, 'import numpy as np\n'), ((19717, 19741), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (19735, 19741), True, 'import numpy as np\n'), ((20424, 20436), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (20430, 20436), True, 'import numpy as np\n'), ((20636, 20648), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (20642, 20648), True, 'import numpy as np\n'), ((20988, 21012), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (21006, 21012), True, 'import numpy as np\n'), ((21750, 21762), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (21756, 21762), True, 'import numpy as np\n'), ((21954, 21966), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (21960, 21966), True, 'import numpy as np\n'), ((22381, 22405), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (22399, 22405), True, 'import numpy as np\n'), ((23123, 23135), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (23129, 23135), True, 'import numpy as np\n'), ((23334, 23346), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (23340, 23346), True, 'import numpy as np\n'), ((23786, 23810), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (23804, 23810), True, 'import numpy as np\n'), ((25030, 25042), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (25036, 25042), True, 'import numpy as np\n'), ((25281, 25293), 'numpy.sum', 'np.sum', 
(['data'], {}), '(data)\n', (25287, 25293), True, 'import numpy as np\n'), ((25897, 25921), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (25915, 25921), True, 'import numpy as np\n'), ((27219, 27231), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (27225, 27231), True, 'import numpy as np\n'), ((27470, 27482), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (27476, 27482), True, 'import numpy as np\n'), ((27809, 27833), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (27827, 27833), True, 'import numpy as np\n'), ((28265, 28289), 'numpy.ma.count_masked', 'np.ma.count_masked', (['data'], {}), '(data)\n', (28283, 28289), True, 'import numpy as np\n'), ((28528, 28564), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (28552, 28564), True, 'import numpy as np\n'), ((28650, 28686), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (28674, 28686), True, 'import numpy as np\n'), ((30149, 30186), 'numpy.sum', 'np.sum', (['ds_object[qc_var_name].values'], {}), '(ds_object[qc_var_name].values)\n', (30155, 30186), True, 'import numpy as np\n'), ((38530, 38552), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (38544, 38552), True, 'import numpy as np\n'), ((40972, 41000), 'numpy.random.random', 'np.random.random', (['time.shape'], {}), '(time.shape)\n', (40988, 41000), True, 'import numpy as np\n'), ((41452, 41469), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (41467, 41469), False, 'from datetime import datetime\n'), ((43727, 43751), 'act.io.armfiles.read_netcdf', 'read_netcdf', (['EXAMPLE_BRS'], {}), '(EXAMPLE_BRS)\n', (43738, 43751), False, 'from act.io.armfiles import read_netcdf\n'), ((44318, 44365), 'copy.deepcopy', 'copy.deepcopy', (["ds_object['short_direct_normal']"], {}), "(ds_object['short_direct_normal'])\n", (44331, 44365), False, 'import copy\n'), 
((44567, 44627), 'act.qc.bsrn_tests._calculate_solar_parameters', '_calculate_solar_parameters', (['ds_object', '"""lat"""', '"""lon"""', '(1360.8)'], {}), "(ds_object, 'lat', 'lon', 1360.8)\n", (44594, 44627), False, 'from act.qc.bsrn_tests import _calculate_solar_parameters\n'), ((44812, 44868), 'copy.deepcopy', 'copy.deepcopy', (["ds_object['down_long_hemisp_shaded'].data"], {}), "(ds_object['down_long_hemisp_shaded'].data)\n", (44825, 44868), False, 'import copy\n'), ((44884, 44934), 'copy.deepcopy', 'copy.deepcopy', (["ds_object['down_short_hemisp'].data"], {}), "(ds_object['down_short_hemisp'].data)\n", (44897, 44934), False, 'import copy\n'), ((55912, 55949), 'numpy.sum', 'np.sum', (['ds_object[qc_varialbe].values'], {}), '(ds_object[qc_varialbe].values)\n', (55918, 55949), True, 'import numpy as np\n'), ((56079, 56116), 'numpy.sum', 'np.sum', (['ds_object[qc_varialbe].values'], {}), '(ds_object[qc_varialbe].values)\n', (56085, 56116), True, 'import numpy as np\n'), ((57444, 57484), 'numpy.sum', 'np.sum', (["ds_object['qc_temp_mean'].values"], {}), "(ds_object['qc_temp_mean'].values)\n", (57450, 57484), True, 'import numpy as np\n'), ((57505, 57555), 'numpy.count_nonzero', 'np.count_nonzero', (["ds_object['qc_temp_mean'].values"], {}), "(ds_object['qc_temp_mean'].values)\n", (57521, 57555), True, 'import numpy as np\n'), ((58720, 58758), 'numpy.sum', 'np.sum', (["ds_object['qc_rh_mean'].values"], {}), "(ds_object['qc_rh_mean'].values)\n", (58726, 58758), True, 'import numpy as np\n'), ((58777, 58817), 'numpy.sum', 'np.sum', (["ds_object['qc_temp_mean'].values"], {}), "(ds_object['qc_temp_mean'].values)\n", (58783, 58817), True, 'import numpy as np\n'), ((3178, 3214), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (3202, 3214), True, 'import numpy as np\n'), ((3269, 3306), 'act.qc.arm.add_dqr_to_qc', 'add_dqr_to_qc', (['obj'], {'variable': 'variable'}), '(obj, variable=variable)\n', (3282, 3306), False, 
'from act.qc.arm import add_dqr_to_qc\n'), ((6477, 6491), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (6485, 6491), True, 'import numpy as np\n'), ((7615, 7631), 'act.qc.qcfilter.parse_bit', 'parse_bit', (['[257]'], {}), '([257])\n', (7624, 7631), False, 'from act.qc.qcfilter import parse_bit, set_bit, unset_bit\n'), ((7635, 7667), 'numpy.array', 'np.array', (['[1, 9]'], {'dtype': 'np.int32'}), '([1, 9], dtype=np.int32)\n', (7643, 7667), True, 'import numpy as np\n'), ((50357, 50371), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (50363, 50371), True, 'import numpy as np\n'), ((50483, 50497), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (50489, 50497), True, 'import numpy as np\n'), ((50608, 50622), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (50614, 50622), True, 'import numpy as np\n'), ((50734, 50748), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (50740, 50748), True, 'import numpy as np\n'), ((50904, 50918), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (50910, 50918), True, 'import numpy as np\n'), ((51037, 51051), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (51043, 51051), True, 'import numpy as np\n'), ((51170, 51184), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (51176, 51184), True, 'import numpy as np\n'), ((51304, 51318), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (51310, 51318), True, 'import numpy as np\n'), ((51462, 51476), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (51468, 51476), True, 'import numpy as np\n'), ((51589, 51603), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (51595, 51603), True, 'import numpy as np\n'), ((51716, 51730), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (51722, 51730), True, 'import numpy as np\n'), ((51843, 51857), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (51849, 51857), True, 'import numpy as np\n'), ((51994, 52008), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52000, 52008), 
True, 'import numpy as np\n'), ((52114, 52128), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52120, 52128), True, 'import numpy as np\n'), ((52235, 52249), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52241, 52249), True, 'import numpy as np\n'), ((52355, 52369), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52361, 52369), True, 'import numpy as np\n'), ((52521, 52535), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52527, 52535), True, 'import numpy as np\n'), ((52652, 52666), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52658, 52666), True, 'import numpy as np\n'), ((52783, 52797), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52789, 52797), True, 'import numpy as np\n'), ((52914, 52928), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (52920, 52928), True, 'import numpy as np\n'), ((53062, 53076), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (53068, 53076), True, 'import numpy as np\n'), ((53184, 53198), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (53190, 53198), True, 'import numpy as np\n'), ((53306, 53320), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (53312, 53320), True, 'import numpy as np\n'), ((53428, 53442), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (53434, 53442), True, 'import numpy as np\n'), ((54571, 54585), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (54577, 54585), True, 'import numpy as np\n'), ((54722, 54736), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (54728, 54736), True, 'import numpy as np\n'), ((54880, 54894), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (54886, 54894), True, 'import numpy as np\n'), ((55056, 55070), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (55062, 55070), True, 'import numpy as np\n'), ((55242, 55256), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (55248, 55256), True, 'import numpy as np\n'), ((55424, 55438), 'numpy.sum', 'np.sum', (['result'], {}), 
'(result)\n', (55430, 55438), True, 'import numpy as np\n'), ((56499, 56521), 'pathlib.Path', 'Path', (['EXAMPLE_MET_YAML'], {}), '(EXAMPLE_MET_YAML)\n', (56503, 56521), False, 'from pathlib import Path\n'), ((57726, 57748), 'pathlib.Path', 'Path', (['EXAMPLE_MET_YAML'], {}), '(EXAMPLE_MET_YAML)\n', (57730, 57748), False, 'from pathlib import Path\n'), ((57961, 57983), 'pathlib.Path', 'Path', (['EXAMPLE_MET_YAML'], {}), '(EXAMPLE_MET_YAML)\n', (57965, 57983), False, 'from pathlib import Path\n'), ((58433, 58455), 'pathlib.Path', 'Path', (['EXAMPLE_MET_YAML'], {}), '(EXAMPLE_MET_YAML)\n', (58437, 58455), False, 'from pathlib import Path\n'), ((6932, 6958), 'numpy.array', 'np.array', (['index'], {'dtype': 'int'}), '(index, dtype=int)\n', (6940, 6958), True, 'import numpy as np\n'), ((44082, 44116), 'numpy.full', 'np.full', (['ds_object.time.size', '(13.5)'], {}), '(ds_object.time.size, 13.5)\n', (44089, 44116), True, 'import numpy as np\n')] |
import random
import numpy as np
import torch
from IPython.display import display, HTML
from rdkit import rdBase
def set_seed(seed=42):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
def clear_torch(model=None):
if model:
del model
torch.cuda.empty_cache()
def disable_rdkit_log():
rdBase.DisableLog('rdApp.*')
def enable_rdkit_log():
rdBase.EnableLog('rdApp.*')
def header_str(a_str, n=80):
"""Returns a string formatted as a header."""
return '{{:=^{:d}}}'.format(n).format(' ' + a_str + ' ')
def header_html(a_str, level=1):
"""Returns a string formatted as a header."""
return display(HTML(f'<h{level}>{a_str}</h{level}>'))
def subset_list(alist, indices):
return [alist[index] for index in indices]
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"rdkit.rdBase.EnableLog",
"IPython.display.HTML",
"random.seed",
"numpy.random.seed",
"rdkit.rdBase.DisableLog",
"torch.cuda.empty_cache"
] | [((143, 166), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (160, 166), False, 'import torch\n'), ((171, 203), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (197, 203), False, 'import torch\n'), ((208, 225), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (219, 225), False, 'import random\n'), ((230, 250), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (244, 250), True, 'import numpy as np\n'), ((318, 342), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (340, 342), False, 'import torch\n'), ((374, 402), 'rdkit.rdBase.DisableLog', 'rdBase.DisableLog', (['"""rdApp.*"""'], {}), "('rdApp.*')\n", (391, 402), False, 'from rdkit import rdBase\n'), ((433, 460), 'rdkit.rdBase.EnableLog', 'rdBase.EnableLog', (['"""rdApp.*"""'], {}), "('rdApp.*')\n", (449, 460), False, 'from rdkit import rdBase\n'), ((707, 744), 'IPython.display.HTML', 'HTML', (['f"""<h{level}>{a_str}</h{level}>"""'], {}), "(f'<h{level}>{a_str}</h{level}>')\n", (711, 744), False, 'from IPython.display import display, HTML\n')] |
#deeperic
import os
import glob
import csv
import tensorflow as tf
import numpy as np
from PIL import Image
def _read_raw_images(path, is_directory=True):
"""Reads directory of images in tensorflow
Args:
path:
is_directory:
Returns:
"""
images = []
png_files = []
jpeg_files = []
reader = tf.WholeFileReader()
png_files_path = glob.glob(os.path.join(path, '*.[pP][nN][gG]'))
jpeg_files_path = glob.glob(os.path.join(path, '*.[jJ][pP][eE][gG]'))
jpg_files_path = glob.glob(os.path.join(path, '*.[jJ][pP][gG]'))
if is_directory:
for filename in png_files_path:
png_files.append(filename)
for filename in jpeg_files_path:
jpeg_files.append(filename)
for filename in jpg_files_path:
jpeg_files.append(filename)
else:
raise ValueError('Currently only batch read from directory supported')
# Decode if there is a PNG file:
if len(png_files) > 0:
png_file_queue = tf.train.string_input_producer(png_files)
pkey, pvalue = reader.read(png_file_queue)
p_img = tf.image.decode_png(pvalue)
if len(jpeg_files) > 0:
jpeg_file_queue = tf.train.string_input_producer(jpeg_files)
jkey, jvalue = reader.read(jpeg_file_queue)
j_img = tf.image.decode_jpeg(jvalue)
return # TODO: return normal thing
def read_and_decode(filename_queue, imshape, normalize=False, flatten=True):
"""Reads
Args:
filename_queue:
imshape:
normalize:
flatten:
Returns:
"""
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64)
})
# Convert from a scalar string tensor (whose single string has
# length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
# [mnist.IMAGE_PIXELS].
image = tf.decode_raw(features['image_raw'], tf.uint8)
if flatten:
num_elements = 1
for i in imshape: num_elements = num_elements * i
#print num_elements
image = tf.reshape(image, [num_elements])
image.set_shape(num_elements)
else:
image = tf.reshape(image, imshape)
image.set_shape(imshape)
if normalize:
# Convert from [0, 255] -> [-0.5, 0.5] floats.
image = tf.cast(image, tf.float32)
image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
return image, label
# Helper, Examples
def _read_labels_csv_from(path, num_classes, one_hot=False):
"""Reads
Args:
Returns:
"""
print('Reading labels')
with open(os.path.join(path), 'r') as dest_f:
data_iter = csv.reader(dest_f)
train_labels = [data for data in data_iter]
train_labels = np.array(train_labels, dtype=np.uint32)
if one_hot:
labels_one_hot = utils.dense_to_one_hot(train_labels, num_classes)
labels_one_hot = np.asarray(labels_one_hot)
return labels_one_hot
return train_labels
def _read_pngs_from(path):
"""Reads directory of images.
Args:
path: path to the directory
Returns:
A list of all images in the directory in the TF format (You need to call sess.run() or .eval() to get the value).
"""
images = []
png_files_path = glob.glob(os.path.join(path, '*.[pP][nN][gG]'))
for filename in png_files_path:
im = Image.open(filename)
im = np.asarray(im, np.uint8)
# get only images name, not path
image_name = filename.split('/')[-1].split('.')[0]
images.append([int(image_name), im])
images = sorted(images, key=lambda image: image[0])
images_only = [np.asarray(image[1], np.uint8) for image in images] # Use unint8 or you will be !!!
images_only = np.array(images_only)
#print(images_only.shape)
return images_only
def read_and_decode_wholefile(filename_queue, imshape, normalize=False, flatten=True):
"""Reads
Args:
filename_queue:
imshape:
normalize:
flatten:
Returns:
"""
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
image = tf.image.decode_png(value, channels=3)
if flatten:
num_elements = 1
for i in imshape: num_elements = num_elements * i
#print num_elements
image = tf.reshape(image, [num_elements])
image.set_shape(num_elements)
else:
image = tf.reshape(image, imshape)
image.set_shape(imshape)
if normalize:
# Convert from [0, 255] -> [-0.5, 0.5] floats.
image = tf.cast(image, tf.float32)
image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
# don't care
label = 1
return image, label
def dense_to_one_hot(labels_dense, num_classes):
"""
Convert class labels from scalars to one-hot vectors.
"""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
print(labels_one_hot[0])
return labels_one_hot | [
"tensorflow.image.decode_png",
"PIL.Image.open",
"tensorflow.reshape",
"tensorflow.FixedLenFeature",
"os.path.join",
"tensorflow.decode_raw",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"tensorflow.TFRecordReader",
"tensorflow.train.string_input_producer",
"tensorflow.WholeFileReader",
"t... | [((319, 339), 'tensorflow.WholeFileReader', 'tf.WholeFileReader', ([], {}), '()\n', (337, 339), True, 'import tensorflow as tf\n'), ((1480, 1499), 'tensorflow.TFRecordReader', 'tf.TFRecordReader', ([], {}), '()\n', (1497, 1499), True, 'import tensorflow as tf\n'), ((1902, 1948), 'tensorflow.decode_raw', 'tf.decode_raw', (["features['image_raw']", 'tf.uint8'], {}), "(features['image_raw'], tf.uint8)\n", (1915, 1948), True, 'import tensorflow as tf\n'), ((2460, 2496), 'tensorflow.cast', 'tf.cast', (["features['label']", 'tf.int32'], {}), "(features['label'], tf.int32)\n", (2467, 2496), True, 'import tensorflow as tf\n'), ((2815, 2854), 'numpy.array', 'np.array', (['train_labels'], {'dtype': 'np.uint32'}), '(train_labels, dtype=np.uint32)\n', (2823, 2854), True, 'import numpy as np\n'), ((3762, 3783), 'numpy.array', 'np.array', (['images_only'], {}), '(images_only)\n', (3770, 3783), True, 'import numpy as np\n'), ((4034, 4054), 'tensorflow.WholeFileReader', 'tf.WholeFileReader', ([], {}), '()\n', (4052, 4054), True, 'import tensorflow as tf\n'), ((4109, 4147), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['value'], {'channels': '(3)'}), '(value, channels=3)\n', (4128, 4147), True, 'import tensorflow as tf\n'), ((4867, 4902), 'numpy.zeros', 'np.zeros', (['(num_labels, num_classes)'], {}), '((num_labels, num_classes))\n', (4875, 4902), True, 'import numpy as np\n'), ((370, 406), 'os.path.join', 'os.path.join', (['path', '"""*.[pP][nN][gG]"""'], {}), "(path, '*.[pP][nN][gG]')\n", (382, 406), False, 'import os\n'), ((438, 478), 'os.path.join', 'os.path.join', (['path', '"""*.[jJ][pP][eE][gG]"""'], {}), "(path, '*.[jJ][pP][eE][gG]')\n", (450, 478), False, 'import os\n'), ((509, 545), 'os.path.join', 'os.path.join', (['path', '"""*.[jJ][pP][gG]"""'], {}), "(path, '*.[jJ][pP][gG]')\n", (521, 545), False, 'import os\n'), ((942, 983), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['png_files'], {}), '(png_files)\n', (972, 983), 
True, 'import tensorflow as tf\n'), ((1043, 1070), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['pvalue'], {}), '(pvalue)\n', (1062, 1070), True, 'import tensorflow as tf\n'), ((1120, 1162), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['jpeg_files'], {}), '(jpeg_files)\n', (1150, 1162), True, 'import tensorflow as tf\n'), ((1223, 1251), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['jvalue'], {}), '(jvalue)\n', (1243, 1251), True, 'import tensorflow as tf\n'), ((2075, 2108), 'tensorflow.reshape', 'tf.reshape', (['image', '[num_elements]'], {}), '(image, [num_elements])\n', (2085, 2108), True, 'import tensorflow as tf\n'), ((2163, 2189), 'tensorflow.reshape', 'tf.reshape', (['image', 'imshape'], {}), '(image, imshape)\n', (2173, 2189), True, 'import tensorflow as tf\n'), ((2299, 2325), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2306, 2325), True, 'import tensorflow as tf\n'), ((2730, 2748), 'csv.reader', 'csv.reader', (['dest_f'], {}), '(dest_f)\n', (2740, 2748), False, 'import csv\n'), ((2962, 2988), 'numpy.asarray', 'np.asarray', (['labels_one_hot'], {}), '(labels_one_hot)\n', (2972, 2988), True, 'import numpy as np\n'), ((3318, 3354), 'os.path.join', 'os.path.join', (['path', '"""*.[pP][nN][gG]"""'], {}), "(path, '*.[pP][nN][gG]')\n", (3330, 3354), False, 'import os\n'), ((3399, 3419), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (3409, 3419), False, 'from PIL import Image\n'), ((3429, 3453), 'numpy.asarray', 'np.asarray', (['im', 'np.uint8'], {}), '(im, np.uint8)\n', (3439, 3453), True, 'import numpy as np\n'), ((3661, 3691), 'numpy.asarray', 'np.asarray', (['image[1]', 'np.uint8'], {}), '(image[1], np.uint8)\n', (3671, 3691), True, 'import numpy as np\n'), ((4274, 4307), 'tensorflow.reshape', 'tf.reshape', (['image', '[num_elements]'], {}), '(image, [num_elements])\n', (4284, 4307), True, 'import tensorflow as tf\n'), ((4362, 4388), 
'tensorflow.reshape', 'tf.reshape', (['image', 'imshape'], {}), '(image, imshape)\n', (4372, 4388), True, 'import tensorflow as tf\n'), ((4498, 4524), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4505, 4524), True, 'import tensorflow as tf\n'), ((4812, 4833), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (4821, 4833), True, 'import numpy as np\n'), ((2678, 2696), 'os.path.join', 'os.path.join', (['path'], {}), '(path)\n', (2690, 2696), False, 'import os\n'), ((1650, 1683), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (1668, 1683), True, 'import tensorflow as tf\n'), ((1700, 1732), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (1718, 1732), True, 'import tensorflow as tf\n'), ((2338, 2364), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2345, 2364), True, 'import tensorflow as tf\n'), ((4537, 4563), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4544, 4563), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
//////////////////////////////////////////////////////////////////////////////////////////
// Original author: <NAME>
// Github: https://github.com/aritzLizoain
// My personal website: https://aritzlizoain.github.io/
// Description: CNN Image Segmentation
// Copyright 2020, <NAME>.
// License: MIT License
//////////////////////////////////////////////////////////////////////////////////////////
Working directory must be where all files are located
This code can be run to check both training and augmented labels (uncomment last section)
"""
import numpy as np
import imgaug.augmenters as iaa
import matplotlib.pyplot as plt
import random
import matplotlib.patches as mpatches
from mask import *
##############################################################################
def augmentation_sequence_Color(images, labels):
labels = labels.astype(np.uint8)
seq = iaa.Sequential([iaa.Dropout2d(p=0.70)]) #, iaa.Flipud(0.8),\
#iaa.OneOf([iaa.Rotate((270, 270))]) iaa.Fliplr(0.8),
#iaa.Rotate((90, 90)) only invert in order to avoid weight issues and masks
return seq(images=images, segmentation_maps=labels)
#----------------------------------------------------------------------------
def augmentation_Color(images, labels, TEST_PREDICTIONS_PATH = ''):
print("Applying data augmentation: dropout, rotation, flip.")
images_aug, labels_aug = augmentation_sequence_Color(images=images, labels=labels)
labels_aug = labels_aug.astype(np.float64)
# Perform a sanity check on a random AUGMENTED sample
# ix = random.randint(0, len(images_aug)-1)
red_patch = mpatches.Patch(color=[1, 0.2, 0.2], label='Cluster')
blue_patch = mpatches.Patch(color=[0,0.5,1.], label='Hot pixel')
green_patch = mpatches.Patch(color=[0.35,1.,0.25], label='Glowing')
black_patch = mpatches.Patch(color=[0./255, 0./255, 0./255], label='Background')
# for ix in range(0,len(labels)):
# fig, ax = plt.subplots(2, 2, figsize=(10, 10))
# ax[0,0].imshow(rgb2gray(images[ix]), cmap="gray") #rgb2gray & , cmap="gray" if grayscale
# ax[0,0].set_title('Training image: {0}'.format(ix+1), fontsize=18);
# ax[0,0].set_xlabel('pixels', fontsize=10)
# ax[0,0].set_ylabel('pixels', fontsize=10)
# ax[0,0].tick_params(axis='both', which='major', labelsize=10)
# ax[0,1].imshow(rgb2gray(images_aug[ix]), cmap="gray") #rgb2gray & , cmap="gray" if grayscale
# ax[0,1].set_title('Augmented image: {0}'.format(ix+1), fontsize=18);
# ax[0,1].set_xlabel('pixels', fontsize=10)
# ax[0,1].set_ylabel('pixels', fontsize=10)
# ax[0,1].tick_params(axis='both', which='major', labelsize=10)
# ax[1,0].imshow(images_aug[ix]) #rgb2gray & , cmap="gray" if grayscale
# ax[1,0].set_title('Augmented image: {0}'.format(ix+1), fontsize=18);
# ax[1,0].set_xlabel('pixels', fontsize=10)
# ax[1,0].set_ylabel('pixels', fontsize=10)
# ax[1,0].tick_params(axis='both', which='major', labelsize=10)
# # ax[1,0].imshow(labels[ix])
# # ax[1,0].set_title('Training label: {0}'.format(ix+1), fontsize=18);
# # ax[1,0].set_xlabel('pixels', fontsize=10)
# # ax[1,0].set_ylabel('pixels', fontsize=10)
# # ax[1,0].tick_params(axis='both', which='major', labelsize=10)
# ax[1,1].imshow(labels_aug[ix])
# ax[1,1].set_title('Augmented label: {0}'.format(ix+1), fontsize=18);
# ax[1,1].set_xlabel('pixels', fontsize=10)
# ax[1,1].set_ylabel('pixels', fontsize=10)
# ax[1,1].tick_params(axis='both', which='major', labelsize=10)
# plt.legend(loc='upper center', bbox_to_anchor=(-0.12, -0.15), fontsize=18,\
# handles=[red_patch, blue_patch, green_patch, black_patch], ncol=4)
# plt.savefig(TEST_PREDICTIONS_PATH+'Augmentation')
# plt.show()
all_images = np.append(images , images_aug, axis=0 )
all_labels= np.append(labels, labels_aug, axis=0)
return all_images, all_labels
#----------------------------------------------------------------------------
def augmentation_sequence_Invert(images, labels):
labels = labels.astype(np.uint8)
seq = iaa.Sequential([iaa.Invert(p=1, per_channel=0.6)]) #, iaa.Flipud(0.8),\
#iaa.OneOf([iaa.Rotate((270, 270))]) iaa.Fliplr(0.8),
#iaa.Rotate((90, 90)) only invert in order to avoid weight issues and masks
return seq(images=images, segmentation_maps=labels)
#----------------------------------------------------------------------------
def augmentation_Invert(images, labels, TEST_PREDICTIONS_PATH = ''):
print("Applying data augmentation: invert, dropout, logContrast, hue, gammaContrast.")
images_aug, labels_aug = augmentation_sequence_Invert(images=images, labels=labels)
labels_aug = labels_aug.astype(np.float64)
# Perform a sanity check on a random AUGMENTED sample
ixn = random.randint(1, len(images_aug)-1)
red_patch = mpatches.Patch(color=[1, 0.2, 0.2], label='Cluster')
blue_patch = mpatches.Patch(color=[0,0.5,1.], label='Hot pixel')
green_patch = mpatches.Patch(color=[0.35,1.,0.25], label='Glowing')
black_patch = mpatches.Patch(color=[0./255, 0./255, 0./255], label='Background')
for ix in range(ixn,ixn+1): #only one: ixn,ixn+1
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
ax[0,0].imshow(rgb2gray(images[ix]), cmap="gray") #rgb2gray & , cmap="gray" if grayscale
ax[0,0].set_title('Training image: {0}'.format(ix+1), fontsize=18);
ax[0,0].set_xlabel('pixels', fontsize=10)
ax[0,0].set_ylabel('pixels', fontsize=10)
ax[0,0].tick_params(axis='both', which='major', labelsize=10)
ax[0,1].imshow(rgb2gray(images_aug[ix]), cmap="gray") #rgb2gray & , cmap="gray" if grayscale
ax[0,1].set_title('Augmented image: {0}'.format(ix+1), fontsize=18);
ax[0,1].set_xlabel('pixels', fontsize=10)
ax[0,1].set_ylabel('pixels', fontsize=10)
ax[0,1].tick_params(axis='both', which='major', labelsize=10)
# ax[1,0].imshow(images_aug[ix]) #rgb2gray & , cmap="gray" if grayscale
# ax[1,0].set_title('Augmented image: {0}'.format(ix+1), fontsize=18);
# ax[1,0].set_xlabel('pixels', fontsize=10)
# ax[1,0].set_ylabel('pixels', fontsize=10)
# ax[1,0].tick_params(axis='both', which='major', labelsize=10)
ax[1,0].imshow(labels[ix])
ax[1,0].set_title('Training label: {0}'.format(ix+1), fontsize=18);
ax[1,0].set_xlabel('pixels', fontsize=10)
ax[1,0].set_ylabel('pixels', fontsize=10)
ax[1,0].tick_params(axis='both', which='major', labelsize=10)
ax[1,1].imshow(labels_aug[ix])
ax[1,1].set_title('Augmented label: {0}'.format(ix+1), fontsize=18);
ax[1,1].set_xlabel('pixels', fontsize=10)
ax[1,1].set_ylabel('pixels', fontsize=10)
ax[1,1].tick_params(axis='both', which='major', labelsize=10)
plt.legend(loc='upper center', bbox_to_anchor=(-0.12, -0.15), fontsize=18,\
handles=[red_patch, blue_patch, green_patch, black_patch], ncol=4)
plt.savefig(TEST_PREDICTIONS_PATH+'Augmentation')
plt.show()
all_images = np.append(images , images_aug, axis=0 )
all_labels= np.append(labels, labels_aug, axis=0)
return all_images, all_labels
#----------------------------------------------------------------------------
#Same augmentation but without displaying anything on screen
def augmentation_noPrint(images, labels, TEST_PREDICTIONS_PATH = ''):
images_aug, labels_aug = augmentation_sequence_Color(images=images, labels=labels)
labels_aug = labels_aug.astype(np.float64)
all_images = np.append(images , images_aug, axis=0 )
all_labels= np.append(labels, labels_aug, axis=0)
return all_images, all_labels
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
VISUALIZE AUGMENTATION
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# import os
# import sys
# import numpy as np
# import cv2
# from skimage.transform import resize
# from mask import *
#############################################################################
# TRAIN_PATH = '' #training images dataset path
# TEST_PATH = '' #testing images dataset path
# TEST_PREDICTIONS_PATH = '' #testing outputs path
# IMG_WIDTH = 256
# IMG_HEIGHT = 256
# images, test_images = load_images(TRAIN_PATH, TEST_PATH, TEST_PREDICTIONS_PATH, IMG_WIDTH, IMG_HEIGHT)
# labels = create_labels(images) #create_labels() from mask.py
# images_aug, labels_aug = augmentation(images=images, labels=labels)
| [
"matplotlib.pyplot.savefig",
"imgaug.augmenters.Invert",
"numpy.append",
"imgaug.augmenters.Dropout2d",
"matplotlib.patches.Patch",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1698, 1750), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[1, 0.2, 0.2]', 'label': '"""Cluster"""'}), "(color=[1, 0.2, 0.2], label='Cluster')\n", (1712, 1750), True, 'import matplotlib.patches as mpatches\n'), ((1768, 1822), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0, 0.5, 1.0]', 'label': '"""Hot pixel"""'}), "(color=[0, 0.5, 1.0], label='Hot pixel')\n", (1782, 1822), True, 'import matplotlib.patches as mpatches\n'), ((1838, 1894), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0.35, 1.0, 0.25]', 'label': '"""Glowing"""'}), "(color=[0.35, 1.0, 0.25], label='Glowing')\n", (1852, 1894), True, 'import matplotlib.patches as mpatches\n'), ((1910, 1985), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0.0 / 255, 0.0 / 255, 0.0 / 255]', 'label': '"""Background"""'}), "(color=[0.0 / 255, 0.0 / 255, 0.0 / 255], label='Background')\n", (1924, 1985), True, 'import matplotlib.patches as mpatches\n'), ((3990, 4027), 'numpy.append', 'np.append', (['images', 'images_aug'], {'axis': '(0)'}), '(images, images_aug, axis=0)\n', (3999, 4027), True, 'import numpy as np\n'), ((4046, 4083), 'numpy.append', 'np.append', (['labels', 'labels_aug'], {'axis': '(0)'}), '(labels, labels_aug, axis=0)\n', (4055, 4083), True, 'import numpy as np\n'), ((5107, 5159), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[1, 0.2, 0.2]', 'label': '"""Cluster"""'}), "(color=[1, 0.2, 0.2], label='Cluster')\n", (5121, 5159), True, 'import matplotlib.patches as mpatches\n'), ((5177, 5231), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0, 0.5, 1.0]', 'label': '"""Hot pixel"""'}), "(color=[0, 0.5, 1.0], label='Hot pixel')\n", (5191, 5231), True, 'import matplotlib.patches as mpatches\n'), ((5247, 5303), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0.35, 1.0, 0.25]', 'label': '"""Glowing"""'}), "(color=[0.35, 1.0, 0.25], label='Glowing')\n", (5261, 5303), True, 'import matplotlib.patches as 
mpatches\n'), ((5319, 5394), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0.0 / 255, 0.0 / 255, 0.0 / 255]', 'label': '"""Background"""'}), "(color=[0.0 / 255, 0.0 / 255, 0.0 / 255], label='Background')\n", (5333, 5394), True, 'import matplotlib.patches as mpatches\n'), ((7354, 7391), 'numpy.append', 'np.append', (['images', 'images_aug'], {'axis': '(0)'}), '(images, images_aug, axis=0)\n', (7363, 7391), True, 'import numpy as np\n'), ((7410, 7447), 'numpy.append', 'np.append', (['labels', 'labels_aug'], {'axis': '(0)'}), '(labels, labels_aug, axis=0)\n', (7419, 7447), True, 'import numpy as np\n'), ((7844, 7881), 'numpy.append', 'np.append', (['images', 'images_aug'], {'axis': '(0)'}), '(images, images_aug, axis=0)\n', (7853, 7881), True, 'import numpy as np\n'), ((7900, 7937), 'numpy.append', 'np.append', (['labels', 'labels_aug'], {'axis': '(0)'}), '(labels, labels_aug, axis=0)\n', (7909, 7937), True, 'import numpy as np\n'), ((5457, 5493), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 10)'}), '(2, 2, figsize=(10, 10))\n', (5469, 5493), True, 'import matplotlib.pyplot as plt\n'), ((7095, 7240), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(-0.12, -0.15)', 'fontsize': '(18)', 'handles': '[red_patch, blue_patch, green_patch, black_patch]', 'ncol': '(4)'}), "(loc='upper center', bbox_to_anchor=(-0.12, -0.15), fontsize=18,\n handles=[red_patch, blue_patch, green_patch, black_patch], ncol=4)\n", (7105, 7240), True, 'import matplotlib.pyplot as plt\n'), ((7266, 7317), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(TEST_PREDICTIONS_PATH + 'Augmentation')"], {}), "(TEST_PREDICTIONS_PATH + 'Augmentation')\n", (7277, 7317), True, 'import matplotlib.pyplot as plt\n'), ((7324, 7334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7332, 7334), True, 'import matplotlib.pyplot as plt\n'), ((943, 963), 'imgaug.augmenters.Dropout2d', 'iaa.Dropout2d', ([], {'p': 
'(0.7)'}), '(p=0.7)\n', (956, 963), True, 'import imgaug.augmenters as iaa\n'), ((4311, 4343), 'imgaug.augmenters.Invert', 'iaa.Invert', ([], {'p': '(1)', 'per_channel': '(0.6)'}), '(p=1, per_channel=0.6)\n', (4321, 4343), True, 'import imgaug.augmenters as iaa\n')] |
# Import SlackerGPU to set env variables, import tf (tf 1.4)
import data_aug_uw
from handcam.ltt.util import SlackerGPU
slackerGPU = SlackerGPU.SlackerGPU(
username="ltaverne", desired_server="ait-server-03", num_gpus=0
)
import tensorflow as tf
flags = tf.app.flags
# State your dataset directory
flags.DEFINE_string(
"dataset_dir",
"/local/home/luke/datasets/rgbd-dataset/",
"String: Your dataset directory",
)
# The number of images in the validation set. You would have to know the total number of examples in advance. This is essentially your evaluation dataset.
flags.DEFINE_float(
"validation_size",
0.1,
"Float: The proportion of examples in the dataset to be used for validation",
)
# The number of shards per dataset split.
flags.DEFINE_integer("num_records_per_shard", 100, "Int: Number of images per shard")
# Seed for repeatability.
flags.DEFINE_integer("random_seed", 0, "Int: Random seed to use for repeatability.")
# Output filename for the naming the TFRecord file
flags.DEFINE_string(
"tfrecord_filename",
"uw-rgbd",
"String: The output filename to name your TFRecord file",
)
FLAGS = flags.FLAGS
# other imports
import random
from random import shuffle
random.seed(FLAGS.random_seed)
import glob
import os
import math
import sys
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(FLAGS.random_seed)
from scipy.ndimage import rotate
from handcam.ltt.util.Utils import apply_alpha_matting_tf
from handcam.ltt.datasets.handcam import HandCamDataHandler
data_handler_handcam = HandCamDataHandler.Handler()
from handcam.ltt.datasets.handcam.HandCamDataHandler import HandGenerator
hand_generator = HandGenerator(f=data_handler_handcam.f, batch_size=1)
# Get an iterator for all of the images
shuffle_data = True
dataset_root = FLAGS.dataset_dir
depth_image_pattern = dataset_root + "*/*/*_depth.png"
hand_mask_pattern = "/local/home/luke/datasets/handcam/greenscreens/*-mask.png"
class_names = sorted([i for i in next(os.walk(dataset_root))[1]])
class_names_to_index = dict(zip(class_names, range(len(class_names))))
object_instances = glob.glob(os.path.join(dataset_root, "*/*/"))
#'/local/home/luke/datasets/rgbd-dataset/hand_towel/hand_towel_2/hand_towel_2_4_184_depth.png'
depth_filenames = glob.glob(depth_image_pattern)
hand_mask_filenames = glob.glob(hand_mask_pattern)
def get_hand_root_from_mask_path(hand_mask_path):
return hand_mask_path.split("-mask.png")[0]
def get_class_name_from_path(depth_im_path):
return depth_im_path.split(dataset_root)[1].split("/")[0]
def get_rgb_path(depth_im_path):
return depth_im_path.split("_depth.png")[0] + ".png"
def shuffle_dataset(rgb_filenames, depth_filenames, labels):
c = list(zip(rgb_filenames, depth_filenames, labels))
shuffle(c)
return zip(*c) # rgb_filenames, depth_filenames, labels
hand_root_filenames = [get_hand_root_from_mask_path(i) for i in hand_mask_filenames]
num_hands = len(hand_root_filenames)
NUM_CLASSES = 51
def int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
a TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
a TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def _parse_function(rgb_filenames, depth_filenames, ids, i):
depthFilename = depth_filenames[i]
rgbFilename = rgb_filenames[i]
hand_index = np.random.random_integers(0, num_hands - 1) # closed interval
handFilename = hand_root_filenames[hand_index]
rotAngle = (
-10.0 - 95.0
) * np.random.rand() + 95.0 # x shift # degrees, pos is CCW
h_shift_im = np.random.randint(-45, 45) # x shift
v_shift_im = np.random.randint(0, 90) # y shift
h_shift_hand = np.random.randint(0, 200) # x shift
v_shift_hand = np.random.randint(-100, 0) # x shift
flip_lr = round(np.random.rand())
aug_im = data_aug_uw.load_and_augment(
depthFilename,
rgbFilename,
handFilename,
rotAngle,
h_shift_im,
v_shift_im,
h_shift_hand,
v_shift_hand,
flip_lr,
)
# Have to scale, can't do it in opencv as CV_32F has a range of 0.0-1.0
# print(aug_im.dtype)
# print(np.max(aug_im))
aug_im[..., 0:3] = np.float32(np.uint8(255.0 * aug_im[..., 0:3]))
aug_im[..., 3] = np.float32(np.uint16(65535.0 * aug_im[..., 3]))
img_str = tf.compat.as_bytes(aug_im.tostring())
return img_str, ids[i]
# Borrowing work from <https://github.com/kwotsin/create_tfrecords/blob/python-3.0/dataset_utils.py>
def _get_dataset_filename(dataset_dir, shard_id, instance_path, _NUM_SHARDS):
instance_path = instance_path.split("/")[-2]
output_filename = "%s_%05d-of-%05d.tfrecord" % (
instance_path,
shard_id,
_NUM_SHARDS,
)
return os.path.join(
"/local/home/luke/datasets/rgbd-dataset-tfrecords", output_filename
)
def image_to_tfexample(rgb_filenames, depth_filenames, ids, i):
img, class_id = _parse_function(rgb_filenames, depth_filenames, ids, i)
return tf.train.Example(
features=tf.train.Features(
feature={
"image/img": bytes_feature(img),
"image/class/label": int64_feature(class_id),
}
)
)
def _convert_dataset(object_instances, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
# assert split_name in ['train', 'validation']
with tf.Graph().as_default():
with tf.Session("") as sess:
for instance in object_instances:
depth_filenames = glob.glob(os.path.join(instance, "*_depth.png"))
ids = [
class_names_to_index[get_class_name_from_path(i)]
for i in depth_filenames
]
rgb_filenames = [get_rgb_path(i) for i in depth_filenames]
rgb_filenames, depth_filenames, ids = shuffle_dataset(
rgb_filenames, depth_filenames, ids
)
total_shards = int(
np.ceil(len(depth_filenames) / float(FLAGS.num_records_per_shard))
)
for shard_id in range(total_shards):
output_filename = _get_dataset_filename(
dataset_dir,
shard_id,
instance_path=instance,
_NUM_SHARDS=total_shards,
)
with tf.python_io.TFRecordWriter(
output_filename
) as tfrecord_writer:
start_ndx = shard_id * FLAGS.num_records_per_shard
end_ndx = min(
(shard_id + 1) * FLAGS.num_records_per_shard,
len(rgb_filenames),
)
for i in range(start_ndx, end_ndx):
sys.stdout.write(
"\r>> Converting image %d/%d shard %d"
% (i + 1, len(rgb_filenames), shard_id)
)
sys.stdout.flush()
example = image_to_tfexample(
rgb_filenames, depth_filenames, ids, i
)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write("\n")
sys.stdout.flush()
_convert_dataset(object_instances, dataset_dir=FLAGS.dataset_dir)
| [
"numpy.uint8",
"numpy.random.rand",
"tensorflow.train.Int64List",
"os.walk",
"tensorflow.Graph",
"handcam.ltt.util.SlackerGPU.SlackerGPU",
"tensorflow.Session",
"numpy.random.seed",
"tensorflow.python_io.TFRecordWriter",
"sys.stdout.flush",
"handcam.ltt.datasets.handcam.HandCamDataHandler.Handle... | [((135, 225), 'handcam.ltt.util.SlackerGPU.SlackerGPU', 'SlackerGPU.SlackerGPU', ([], {'username': '"""ltaverne"""', 'desired_server': '"""ait-server-03"""', 'num_gpus': '(0)'}), "(username='ltaverne', desired_server='ait-server-03',\n num_gpus=0)\n", (156, 225), False, 'from handcam.ltt.util import SlackerGPU\n'), ((1223, 1253), 'random.seed', 'random.seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (1234, 1253), False, 'import random\n'), ((1351, 1384), 'numpy.random.seed', 'np.random.seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (1365, 1384), True, 'import numpy as np\n'), ((1561, 1589), 'handcam.ltt.datasets.handcam.HandCamDataHandler.Handler', 'HandCamDataHandler.Handler', ([], {}), '()\n', (1587, 1589), False, 'from handcam.ltt.datasets.handcam import HandCamDataHandler\n'), ((1682, 1735), 'handcam.ltt.datasets.handcam.HandCamDataHandler.HandGenerator', 'HandGenerator', ([], {'f': 'data_handler_handcam.f', 'batch_size': '(1)'}), '(f=data_handler_handcam.f, batch_size=1)\n', (1695, 1735), False, 'from handcam.ltt.datasets.handcam.HandCamDataHandler import HandGenerator\n'), ((2280, 2310), 'glob.glob', 'glob.glob', (['depth_image_pattern'], {}), '(depth_image_pattern)\n', (2289, 2310), False, 'import glob\n'), ((2333, 2361), 'glob.glob', 'glob.glob', (['hand_mask_pattern'], {}), '(hand_mask_pattern)\n', (2342, 2361), False, 'import glob\n'), ((2131, 2165), 'os.path.join', 'os.path.join', (['dataset_root', '"""*/*/"""'], {}), "(dataset_root, '*/*/')\n", (2143, 2165), False, 'import os\n'), ((2788, 2798), 'random.shuffle', 'shuffle', (['c'], {}), '(c)\n', (2795, 2798), False, 'from random import shuffle\n'), ((3672, 3715), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(num_hands - 1)'], {}), '(0, num_hands - 1)\n', (3697, 3715), True, 'import numpy as np\n'), ((3907, 3933), 'numpy.random.randint', 'np.random.randint', (['(-45)', '(45)'], {}), '(-45, 
45)\n', (3924, 3933), True, 'import numpy as np\n'), ((3962, 3986), 'numpy.random.randint', 'np.random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (3979, 3986), True, 'import numpy as np\n'), ((4017, 4042), 'numpy.random.randint', 'np.random.randint', (['(0)', '(200)'], {}), '(0, 200)\n', (4034, 4042), True, 'import numpy as np\n'), ((4073, 4099), 'numpy.random.randint', 'np.random.randint', (['(-100)', '(0)'], {}), '(-100, 0)\n', (4090, 4099), True, 'import numpy as np\n'), ((4163, 4308), 'data_aug_uw.load_and_augment', 'data_aug_uw.load_and_augment', (['depthFilename', 'rgbFilename', 'handFilename', 'rotAngle', 'h_shift_im', 'v_shift_im', 'h_shift_hand', 'v_shift_hand', 'flip_lr'], {}), '(depthFilename, rgbFilename, handFilename,\n rotAngle, h_shift_im, v_shift_im, h_shift_hand, v_shift_hand, flip_lr)\n', (4191, 4308), False, 'import data_aug_uw\n'), ((5099, 5184), 'os.path.join', 'os.path.join', (['"""/local/home/luke/datasets/rgbd-dataset-tfrecords"""', 'output_filename'], {}), "('/local/home/luke/datasets/rgbd-dataset-tfrecords',\n output_filename)\n", (5111, 5184), False, 'import os\n'), ((8041, 8063), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (8057, 8063), False, 'import sys\n'), ((8068, 8086), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8084, 8086), False, 'import sys\n'), ((4131, 4147), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4145, 4147), True, 'import numpy as np\n'), ((4549, 4583), 'numpy.uint8', 'np.uint8', (['(255.0 * aug_im[..., 0:3])'], {}), '(255.0 * aug_im[..., 0:3])\n', (4557, 4583), True, 'import numpy as np\n'), ((4617, 4652), 'numpy.uint16', 'np.uint16', (['(65535.0 * aug_im[..., 3])'], {}), '(65535.0 * aug_im[..., 3])\n', (4626, 4652), True, 'import numpy as np\n'), ((3271, 3303), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'values'}), '(value=values)\n', (3289, 3303), True, 'import tensorflow as tf\n'), ((3482, 3516), 'tensorflow.train.BytesList', 
'tf.train.BytesList', ([], {'value': '[values]'}), '(value=[values])\n', (3500, 3516), True, 'import tensorflow as tf\n'), ((3832, 3848), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3846, 3848), True, 'import numpy as np\n'), ((6093, 6107), 'tensorflow.Session', 'tf.Session', (['""""""'], {}), "('')\n", (6103, 6107), True, 'import tensorflow as tf\n'), ((6055, 6065), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6063, 6065), True, 'import tensorflow as tf\n'), ((2003, 2024), 'os.walk', 'os.walk', (['dataset_root'], {}), '(dataset_root)\n', (2010, 2024), False, 'import os\n'), ((6207, 6244), 'os.path.join', 'os.path.join', (['instance', '"""*_depth.png"""'], {}), "(instance, '*_depth.png')\n", (6219, 6244), False, 'import os\n'), ((7098, 7142), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_filename'], {}), '(output_filename)\n', (7125, 7142), True, 'import tensorflow as tf\n'), ((7778, 7796), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7794, 7796), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import correlate2d
import matplotlib.pyplot as plt
from matplotlib import colors
def imshow(image, position=None, title=None, norm=True,
axis='off', cmap='gray', interpolation=None):
"""Plot the image.
Parameters
----------
image : ndarray
The image to be plotted.
position : int or tuple of ints, optional
The position of the subplot. See `pyplot.subplot()` for more
details. If not None, create a subplot according to position and
plot on it. (default to None)
title : str, optional
The title of the plot. (default to None, no title)
norm : boolean, optional
Whether to rescale image data into range [0, 1] before mapping to
colors. (default to True)
# TODO: support more complex normalizer.
axis : str, optional
Axis options. See `pyplot.imshow()` function for more details.
cmap : str, optional
Color map options. (default to 'gray', grayscale colormap)
interpolation : str, optional
Interpolation options. (default to None, no interpolation is
applied)
"""
if position is not None:
if isinstance(position, tuple):
plt.subplot(*position)
elif isinstance(position, int):
plt.subplot(position)
else:
raise ValueError('position should be an int or tuple of ints')
if axis is not None:
plt.axis(axis)
if title is not None:
plt.title(title)
kwargs = {'cmap': cmap}
if interpolation is not None:
kwargs['interpolation'] = interpolation
if not norm:
kwargs['vmax'], kwargs['vmin'] = 255, 0
plt.imshow(image, **kwargs)
def bimshow(bimage, ticklabels='on', color='darkgray', figsize=None):
"""Plot the binary image on the grid.
Parameters
----------
bimage : np.ndarray
The binary image to be plotted.
ticklabels : {'on', 'off'}, optional
Whether to show the tick labels (default to 'on', which shows
tick labels on both axes).
color : str, optional
The color used to paint nonzero squares (default to 'darkgray').
figsize : (float, float), optional
Width, height in inches of the figure (default to None). For
more detail, see plt.figure().
"""
height, width = bimage.shape
# Customize the color map.
cmap = colors.ListedColormap(['white', color])
bounds = [0, 1, 255]
norm = colors.BoundaryNorm(bounds, cmap.N)
fig, ax = plt.subplots(figsize=figsize)
ax.imshow(bimage, cmap=cmap, norm=norm)
ax.set_xticks(np.arange(-0.5, width, 1)) # set the major ticks
ax.set_xticklabels('') # hide the major ticklabels
ax.set_yticks(np.arange(-0.5, height, 1))
ax.set_yticklabels('')
ax.grid(which='major', axis='both', linestyle='-', color='k', lw=0.8)
assert ticklabels in ('on', 'off')
if ticklabels == 'on':
ax.xaxis.tick_top()
xloc = np.arange(width)
ax.set_xticks(xloc, minor=True) # set the minor ticks
ax.set_xticklabels(xloc, minor=True) # set the minor ticklabels
yloc = np.arange(height)
ax.set_yticks(yloc, minor=True)
ax.set_yticklabels(yloc, minor=True)
def plot_mask(mask, title=None, fontsize=20):
"""Plot the mask.
Parameters
----------
mask : np.ndarray
The input mask as a two dimensional array.
title : str, optional
The title of the mask (default to None).
fontsize : int, optional
The fontsize of the number inside squares.
"""
isint = np.issubdtype(mask.dtype.type, np.integer)
h, w = mask.shape
if title is not None:
plt.title(title)
plt.axis('scaled')
plt.axis([0, 2*w, 0, 2*h])
xtks = np.arange(2, 2*w, 2)
ytks = np.arange(2, 2*h, 2)
plt.xticks(xtks, '')
plt.yticks(ytks, '')
plt.grid('on')
for i in range(h):
for j in range(w):
s = str(mask[i, j]) if isint else f'{mask[i, j]:.2f}'
plt.text(j*2 + 1, 2*h - i*2 - 1, s,
fontsize=fontsize,
horizontalalignment='center',
verticalalignment='center')
def conv2d(image, kernel, mode='same'):
"""2 dim correlation.
Parameters
----------
image : np.ndarray
The input image.
kernel : np.ndarray
Kernel.
mode : str, optional
Padding mode.
"""
return correlate2d(image, kernel, mode=mode)
def rescale(image):
"""Rescale the intensity levels of the grayscale image.
Currently, it only supports rescaling the intensities into the range
[0, 255]. # TODO: support more flexible output range.
This function is mostly used at the final stage of image processing
procedure as an alternative to `clip()`.
Parameters
----------
image : np.ndarray
The input grayscale image.
Returns
-------
np.ndarray
The rescaled image.
"""
amin, amax = np.min(image), np.max(image)
rescaled = (image - amin) / float(amax - amin)
rescaled = rescaled * 255
return np.rint(rescaled).astype(np.uint8)
if __name__ == '__main__':
image = np.zeros((20, 20), dtype=np.uint8)
for i in range(20):
for j in range(20):
if (i - 10)**2 + (j - 10)**2 <= 25:
image[i, j] = 1
bimshow(image, figsize=(8, 8))
plt.show()
| [
"matplotlib.pyplot.grid",
"numpy.arange",
"matplotlib.pyplot.imshow",
"matplotlib.colors.ListedColormap",
"numpy.max",
"numpy.issubdtype",
"matplotlib.pyplot.yticks",
"numpy.rint",
"numpy.min",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.title",
"matplotlib.pyplo... | [((1725, 1752), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image, **kwargs)\n', (1735, 1752), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2477), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['white', color]"], {}), "(['white', color])\n", (2459, 2477), False, 'from matplotlib import colors\n'), ((2514, 2549), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (2533, 2549), False, 'from matplotlib import colors\n'), ((2565, 2594), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2577, 2594), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3684), 'numpy.issubdtype', 'np.issubdtype', (['mask.dtype.type', 'np.integer'], {}), '(mask.dtype.type, np.integer)\n', (3655, 3684), True, 'import numpy as np\n'), ((3763, 3781), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (3771, 3781), True, 'import matplotlib.pyplot as plt\n'), ((3786, 3816), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 2 * w, 0, 2 * h]'], {}), '([0, 2 * w, 0, 2 * h])\n', (3794, 3816), True, 'import matplotlib.pyplot as plt\n'), ((3824, 3846), 'numpy.arange', 'np.arange', (['(2)', '(2 * w)', '(2)'], {}), '(2, 2 * w, 2)\n', (3833, 3846), True, 'import numpy as np\n'), ((3856, 3878), 'numpy.arange', 'np.arange', (['(2)', '(2 * h)', '(2)'], {}), '(2, 2 * h, 2)\n', (3865, 3878), True, 'import numpy as np\n'), ((3881, 3901), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtks', '""""""'], {}), "(xtks, '')\n", (3891, 3901), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3926), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ytks', '""""""'], {}), "(ytks, '')\n", (3916, 3926), True, 'import matplotlib.pyplot as plt\n'), ((3931, 3945), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (3939, 3945), True, 'import matplotlib.pyplot as plt\n'), ((4504, 4541), 'scipy.signal.correlate2d', 
'correlate2d', (['image', 'kernel'], {'mode': 'mode'}), '(image, kernel, mode=mode)\n', (4515, 4541), False, 'from scipy.signal import correlate2d\n'), ((5255, 5289), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {'dtype': 'np.uint8'}), '((20, 20), dtype=np.uint8)\n', (5263, 5289), True, 'import numpy as np\n'), ((5462, 5472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5470, 5472), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1493), 'matplotlib.pyplot.axis', 'plt.axis', (['axis'], {}), '(axis)\n', (1487, 1493), True, 'import matplotlib.pyplot as plt\n'), ((1528, 1544), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1537, 1544), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2683), 'numpy.arange', 'np.arange', (['(-0.5)', 'width', '(1)'], {}), '(-0.5, width, 1)\n', (2667, 2683), True, 'import numpy as np\n'), ((2782, 2808), 'numpy.arange', 'np.arange', (['(-0.5)', 'height', '(1)'], {}), '(-0.5, height, 1)\n', (2791, 2808), True, 'import numpy as np\n'), ((3021, 3037), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (3030, 3037), True, 'import numpy as np\n'), ((3190, 3207), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (3199, 3207), True, 'import numpy as np\n'), ((3742, 3758), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3751, 3758), True, 'import matplotlib.pyplot as plt\n'), ((5056, 5069), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (5062, 5069), True, 'import numpy as np\n'), ((5071, 5084), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (5077, 5084), True, 'import numpy as np\n'), ((1259, 1281), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*position'], {}), '(*position)\n', (1270, 1281), True, 'import matplotlib.pyplot as plt\n'), ((4075, 4197), 'matplotlib.pyplot.text', 'plt.text', (['(j * 2 + 1)', '(2 * h - i * 2 - 1)', 's'], {'fontsize': 'fontsize', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""'}), "(j * 2 + 1, 2 * h - 
i * 2 - 1, s, fontsize=fontsize,\n horizontalalignment='center', verticalalignment='center')\n", (4083, 4197), True, 'import matplotlib.pyplot as plt\n'), ((5179, 5196), 'numpy.rint', 'np.rint', (['rescaled'], {}), '(rescaled)\n', (5186, 5196), True, 'import numpy as np\n'), ((1334, 1355), 'matplotlib.pyplot.subplot', 'plt.subplot', (['position'], {}), '(position)\n', (1345, 1355), True, 'import matplotlib.pyplot as plt\n')] |
import tensorflow as tf
from WeightBias import DenseLayer
import numpy as np
import os
class Linearlizer:
def __init__(self,
input_size,
hidden_size,
output_size):
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.hidden_origin = DenseLayer('relu_origin',input_size,hidden_size)
self.hidden_cross = DenseLayer('relu_cross',input_size,hidden_size)
self.out_origin = DenseLayer('soft_origin',hidden_size,output_size)
self.out_cross = DenseLayer('soft_cross',hidden_size,output_size)
def loss(self, origin, cross, song_vectors, is_same):
return self.loss_vec_computed(self.word_vector(origin), self.compare_vector(cross), song_vectors, is_same)
def loss_vec_computed(self, word_vector, cross_vector, global_vector, is_same):
input_vec = word_vector + global_vector
output_vec = cross_vector
logit_assignment = tf.reduce_mean(input_vec * output_vec,axis=1)*0.1
cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit_assignment,labels=is_same)
return tf.reduce_mean(cost)
def word_vector(self, input):
origin_next = tf.nn.relu(self.hidden_origin.calc_output(input))
origin_vec = self.out_origin.calc_output(origin_next)
return origin_vec
def compare_vector(self, input_cmp):
cross_next = tf.nn.relu(self.hidden_cross.calc_output(input_cmp))
cross_vec = self.out_cross.calc_output(cross_next)
return cross_vec
def vars(self):
return (
self.hidden_origin.wb_list() +
self.hidden_cross.wb_list() +
self.out_origin.wb_list() +
self.out_cross.wb_list()
)
def load(self, sess, folder):
for var in self.vars():
save_var_name = var.name[:-2]
name = os.path.join(folder, save_var_name+".npy")
value = np.load(name)
var.load(value, sess)
def save(self, sess, folder):
for var in self.vars():
save_var_name = var.name[:-2]
value = sess.run(var)
name = os.path.join(folder, save_var_name+".npy")
np.save(name, value)
| [
"os.path.join",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"WeightBias.DenseLayer",
"numpy.load",
"numpy.save"
] | [((351, 401), 'WeightBias.DenseLayer', 'DenseLayer', (['"""relu_origin"""', 'input_size', 'hidden_size'], {}), "('relu_origin', input_size, hidden_size)\n", (361, 401), False, 'from WeightBias import DenseLayer\n'), ((428, 477), 'WeightBias.DenseLayer', 'DenseLayer', (['"""relu_cross"""', 'input_size', 'hidden_size'], {}), "('relu_cross', input_size, hidden_size)\n", (438, 477), False, 'from WeightBias import DenseLayer\n'), ((502, 553), 'WeightBias.DenseLayer', 'DenseLayer', (['"""soft_origin"""', 'hidden_size', 'output_size'], {}), "('soft_origin', hidden_size, output_size)\n", (512, 553), False, 'from WeightBias import DenseLayer\n'), ((577, 627), 'WeightBias.DenseLayer', 'DenseLayer', (['"""soft_cross"""', 'hidden_size', 'output_size'], {}), "('soft_cross', hidden_size, output_size)\n", (587, 627), False, 'from WeightBias import DenseLayer\n'), ((1060, 1145), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'logit_assignment', 'labels': 'is_same'}), '(logits=logit_assignment, labels=is_same\n )\n', (1099, 1145), True, 'import tensorflow as tf\n'), ((1155, 1175), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost'], {}), '(cost)\n', (1169, 1175), True, 'import tensorflow as tf\n'), ((995, 1041), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(input_vec * output_vec)'], {'axis': '(1)'}), '(input_vec * output_vec, axis=1)\n', (1009, 1041), True, 'import tensorflow as tf\n'), ((1909, 1953), 'os.path.join', 'os.path.join', (['folder', "(save_var_name + '.npy')"], {}), "(folder, save_var_name + '.npy')\n", (1921, 1953), False, 'import os\n'), ((1972, 1985), 'numpy.load', 'np.load', (['name'], {}), '(name)\n', (1979, 1985), True, 'import numpy as np\n'), ((2183, 2227), 'os.path.join', 'os.path.join', (['folder', "(save_var_name + '.npy')"], {}), "(folder, save_var_name + '.npy')\n", (2195, 2227), False, 'import os\n'), ((2238, 2258), 'numpy.save', 'np.save', (['name', 'value'], {}), '(name, value)\n', 
(2245, 2258), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def cumcnt(v):
cumsum = v.cumsum().fillna(method='pad')
reset = -cumsum[v.isnull()].diff().fillna(cumsum)
return v.where(v.notnull(), reset).cumsum().fillna(0.0)
def cumcnt_indices(v):
v[~v] = np.nan
r = cumcnt(v)
return r.astype(int)
def calc_rsi(ser, n):
diff = ser.diff().values
gains = diff
losses = -diff
with np.errstate(invalid='ignore'):
gains[gains<0] = 0.0
losses[losses<=0] = 1e-10 # we don't want divide by zero/NaN
m = n-1
ni = 1/n
g = gains[n] = np.nanmean(gains[:n])
l = losses[n] = np.nanmean(losses[:n])
gains[:n] = losses[:n] = np.nan
for i,v in enumerate(gains[n:],n):
g = gains[i] = (v+m*g)*ni
for i,v in enumerate(losses[n:],n):
l = losses[i] = (v+m*l)*ni
rs = gains / losses
rsi = pd.Series(100 - (100/(1+rs)))
return rsi
def calc_stoch(p, n, m):
l = p.rolling(n).min()
h = p.rolling(n).max()
k = (p-l) / (h-l)
d = 100 * k.rolling(m).mean()
return d
def calc_historical_volatility(ser, n=10):
ln = np.log(ser / ser.shift())
hv = ln.rolling(n).std(ddof=0)
f = np.sqrt(365) * 100
return hv * f
def calc_up_down_length(ser):
v = ser.diff()
x = cumcnt_indices(v>0)
y = cumcnt_indices(v<0)
x[y>0] = -y
return x
def calc_rate_of_change(ser, n):
change = ser / ser.shift()
return change.rolling(n).apply(lambda sub: (sub<sub[-1]).sum(), raw=True)
def calc_stoch_rsi(ser, n=14, m=3):
rsi = calc_rsi(ser, n)
s_rsi = calc_stoch(rsi, n, m)
return s_rsi
def calc_connors_rsi(ser, n=100, m=3, k=2):
rsi_price = calc_rsi(ser, m)
updown = calc_up_down_length(ser)
rsi_updown = calc_rsi(updown, k)
roc = calc_rate_of_change(ser, n)
return (rsi_price + rsi_updown + roc) / 3
| [
"pandas.Series",
"numpy.nanmean",
"numpy.sqrt",
"numpy.errstate"
] | [((573, 594), 'numpy.nanmean', 'np.nanmean', (['gains[:n]'], {}), '(gains[:n])\n', (583, 594), True, 'import numpy as np\n'), ((615, 637), 'numpy.nanmean', 'np.nanmean', (['losses[:n]'], {}), '(losses[:n])\n', (625, 637), True, 'import numpy as np\n'), ((856, 887), 'pandas.Series', 'pd.Series', (['(100 - 100 / (1 + rs))'], {}), '(100 - 100 / (1 + rs))\n', (865, 887), True, 'import pandas as pd\n'), ((400, 429), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (411, 429), True, 'import numpy as np\n'), ((1174, 1186), 'numpy.sqrt', 'np.sqrt', (['(365)'], {}), '(365)\n', (1181, 1186), True, 'import numpy as np\n')] |
import cv2 as cv
import argparse
import os
import numpy as np
def nearest_resize(pic, scale):
"""
nearest resize
:param pic: img
:param scale: scale >0
:return: resized img
"""
h,w,c = pic.shape # height,width,channel
th, tw = int(h * scale), int(w * scale)
# avoid the out of bounds from the original img
pic = np.pad(pic, ((0, 1), (0, 1), (0, 0)), 'reflect')
emptyImage = np.zeros((th, tw,c ), np.uint8)
h_scale = h/th
w_scale = w/tw
for i in range(th):
for j in range(tw):
# 首先找到在原图中对应的点的(X, Y)坐标
#first, find the location from the original img
corr_x = (i + 0.5)*h_scale - 0.5
corr_y = (j + 0.5)*w_scale - 0.5
emptyImage[i, j, :] = pic[int(corr_x), int(corr_y), :]
return emptyImage
def conv2D(img,kernel,padding,stride):
"""
convlution 2D
:param img: img with size [h,w,c]
:param kernel: with size [kh,kw,c] or [kh,kw]
:param padding: padding number
:param stride: stride
:return: filted img
"""
h,w,c = img.shape # height,width,channel
if kernel.ndim==2:
kernel = np.expand_dims(kernel, axis=-1)
kh,kw,kc = kernel.shape #kernel h, kernel w, kernel channel,
assert kc == 1 or kc == c
oh = int((h-kh+2*padding)/stride)+1 #out height
ow = int((w-kw+2*padding)/stride)+1 #out width
#out
emptyImage = np.zeros((oh, ow, c), np.float)
img = np.pad(img,((padding, padding), (padding, padding), (0, 0)), 'constant')
for i in range(oh):
for j in range(ow):
i_idx = i*stride
j_idx = j*stride
tmp_out = img[i_idx:i_idx+kh,j_idx:j_idx+kw,:]*kernel
for cc in range(c):
emptyImage[i,j,cc] = np.sum(tmp_out[:,:,cc])
return emptyImage
def add_suffix(img_file,suffix):
"""
add suffix for a given file name, and not change the file type
:param img_file: img file, e.g. "xxx.jpg"
:param suffix: "——abcde"
:return: "xxx——abcde.jpg"
"""
name = os.path.splitext(img_file)[0]
type_ = os.path.splitext(img_file)[1]
file_name = name + suffix + type_
return file_name
def get_box_filter(height,width):
"""
:param height: kernel height
:param width: kernel width
:return: the box fileter
"""
box_kernel = np.ones((height,width),np.float)
box_kernel = box_kernel/box_kernel.size
return box_kernel
if __name__ == '__main__':
    """
    """
    # Demo driver: read an image, then compare plain nearest-neighbour
    # downsampling against box-filtered (anti-aliased) downsampling.
    print("openCV_version:",cv.__version__)
    parser = argparse.ArgumentParser(description="set blue")
    parser.add_argument("--img_file",type=str,default="./5_gt.png",help="path of the img file")
    args = parser.parse_args()
    img = cv.imread(args.img_file)
    #Image Filtering
    # Downsample by nearest-neighbour alone (no pre-filtering).
    ne_img_down = nearest_resize(img, scale=0.5)
    cv.imwrite(add_suffix(args.img_file, "_nearest_img_down"), ne_img_down)
    #2.b Box filter
    box_kernel = get_box_filter(3,3)
    #2.a Convolution filter
    conv_img = conv2D(img, box_kernel, padding=1, stride=1)
    #2.c Anti-aliasing Filter
    # Box-filtering before downsampling suppresses aliasing artefacts.
    conv_down_img = nearest_resize(conv_img, scale=0.5)
    cv.imwrite(add_suffix(args.img_file, "_Anti-aliasing_img"), conv_down_img)
| [
"numpy.ones",
"argparse.ArgumentParser",
"os.path.splitext",
"numpy.sum",
"numpy.zeros",
"numpy.expand_dims",
"numpy.pad",
"cv2.imread"
] | [((359, 407), 'numpy.pad', 'np.pad', (['pic', '((0, 1), (0, 1), (0, 0))', '"""reflect"""'], {}), "(pic, ((0, 1), (0, 1), (0, 0)), 'reflect')\n", (365, 407), True, 'import numpy as np\n'), ((426, 457), 'numpy.zeros', 'np.zeros', (['(th, tw, c)', 'np.uint8'], {}), '((th, tw, c), np.uint8)\n', (434, 457), True, 'import numpy as np\n'), ((1418, 1449), 'numpy.zeros', 'np.zeros', (['(oh, ow, c)', 'np.float'], {}), '((oh, ow, c), np.float)\n', (1426, 1449), True, 'import numpy as np\n'), ((1460, 1533), 'numpy.pad', 'np.pad', (['img', '((padding, padding), (padding, padding), (0, 0))', '"""constant"""'], {}), "(img, ((padding, padding), (padding, padding), (0, 0)), 'constant')\n", (1466, 1533), True, 'import numpy as np\n'), ((2354, 2388), 'numpy.ones', 'np.ones', (['(height, width)', 'np.float'], {}), '((height, width), np.float)\n', (2361, 2388), True, 'import numpy as np\n'), ((2557, 2604), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""set blue"""'}), "(description='set blue')\n", (2580, 2604), False, 'import argparse\n'), ((2743, 2767), 'cv2.imread', 'cv.imread', (['args.img_file'], {}), '(args.img_file)\n', (2752, 2767), True, 'import cv2 as cv\n'), ((1160, 1191), 'numpy.expand_dims', 'np.expand_dims', (['kernel'], {'axis': '(-1)'}), '(kernel, axis=-1)\n', (1174, 1191), True, 'import numpy as np\n'), ((2061, 2087), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (2077, 2087), False, 'import os\n'), ((2103, 2129), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (2119, 2129), False, 'import os\n'), ((1779, 1804), 'numpy.sum', 'np.sum', (['tmp_out[:, :, cc]'], {}), '(tmp_out[:, :, cc])\n', (1785, 1804), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------
#
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# ------------------------------------------------------------------------------
"""
Tests for the ArrayOrNone TraitType.
"""
from __future__ import absolute_import
from traits.testing.unittest_tools import unittest
try:
import numpy
except ImportError:
numpy_available = False
else:
numpy_available = True
from traits.testing.unittest_tools import UnittestTools
from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError
if numpy_available:
    # Use of `ArrayOrNone` requires NumPy to be installed.
    class Foo(HasTraits):
        # Bare trait type: accepts any array or None; defaults to None.
        maybe_array = ArrayOrNone
        # Values are stored as a float array (see test_dtype below).
        maybe_float_array = ArrayOrNone(dtype=float)
        # Requires a two-dimensional array of any extents (see test_shape).
        maybe_two_d_array = ArrayOrNone(shape=(None, None))
        # Trait with a non-None default array value.
        maybe_array_with_default = ArrayOrNone(value=[1, 2, 3])
        # NO_COMPARE: notifications fire on every assignment
        # (see test_comparison_mode_override).
        maybe_array_no_compare = ArrayOrNone(comparison_mode=NO_COMPARE)
@unittest.skipUnless(numpy_available, "numpy not available")
class TestArrayOrNone(unittest.TestCase, UnittestTools):
    """
    Tests for the ArrayOrNone TraitType.
    """
    def test_default(self):
        """An unconfigured ArrayOrNone trait defaults to None."""
        foo = Foo()
        self.assertIsNone(foo.maybe_array)
    def test_explicit_default(self):
        """A trait given value=[...] defaults to an ndarray, not None."""
        foo = Foo()
        self.assertIsInstance(foo.maybe_array_with_default, numpy.ndarray)
    def test_default_validation(self):
        """An invalid default is rejected when the class is defined."""
        # CArray and Array validate the default at class creation time;
        # we do the same for ArrayOrNone.
        with self.assertRaises(TraitError):
            class Bar(HasTraits):
                bad_array = ArrayOrNone(shape=(None, None), value=[1, 2, 3])
    def test_setting_array_from_array(self):
        """Assigning an ndarray preserves dtype, shape and contents."""
        foo = Foo()
        test_array = numpy.arange(5)
        foo.maybe_array = test_array
        output_array = foo.maybe_array
        self.assertIsInstance(output_array, numpy.ndarray)
        self.assertEqual(output_array.dtype, test_array.dtype)
        self.assertEqual(output_array.shape, test_array.shape)
        self.assertTrue((output_array == test_array).all())
    def test_setting_array_from_list(self):
        """Assigning a Python list coerces it to an int ndarray."""
        foo = Foo()
        test_list = [5, 6, 7, 8, 9]
        foo.maybe_array = test_list
        output_array = foo.maybe_array
        self.assertIsInstance(output_array, numpy.ndarray)
        self.assertEqual(output_array.dtype, numpy.dtype(int))
        self.assertEqual(output_array.shape, (5,))
        self.assertTrue((output_array == test_list).all())
    def test_setting_array_from_none(self):
        """The trait can be set back to None after holding an array."""
        foo = Foo()
        test_array = numpy.arange(5)
        self.assertIsNone(foo.maybe_array)
        foo.maybe_array = test_array
        self.assertIsInstance(foo.maybe_array, numpy.ndarray)
        foo.maybe_array = None
        self.assertIsNone(foo.maybe_array)
    def test_dtype(self):
        """dtype=float coerces assigned integer values to a float array."""
        foo = Foo()
        foo.maybe_float_array = [1, 2, 3]
        array_value = foo.maybe_float_array
        self.assertIsInstance(array_value, numpy.ndarray)
        self.assertEqual(array_value.dtype, numpy.dtype(float))
    def test_shape(self):
        """A shape-constrained trait rejects arrays of the wrong rank."""
        foo = Foo()
        with self.assertRaises(TraitError):
            foo.maybe_two_d_array = [1, 2, 3]
    def test_change_notifications(self):
        """Notifications fire only when the assigned object changes."""
        foo = Foo()
        test_array = numpy.arange(-7, -2)
        different_test_array = numpy.arange(10)
        # Assigning None to something that's already None shouldn't fire.
        with self.assertTraitDoesNotChange(foo, "maybe_array"):
            foo.maybe_array = None
        # Changing from None to an array: expect an event.
        with self.assertTraitChanges(foo, "maybe_array"):
            foo.maybe_array = test_array
        # No event from assigning the same array again.
        with self.assertTraitDoesNotChange(foo, "maybe_array"):
            foo.maybe_array = test_array
        # But assigning a new array fires an event.
        with self.assertTraitChanges(foo, "maybe_array"):
            foo.maybe_array = different_test_array
        # No event even if the array is modified in place.
        different_test_array += 2
        with self.assertTraitDoesNotChange(foo, "maybe_array"):
            foo.maybe_array = different_test_array
        # Set back to None; we should get an event.
        with self.assertTraitChanges(foo, "maybe_array"):
            foo.maybe_array = None
    def test_comparison_mode_override(self):
        """With NO_COMPARE, every assignment fires, even a repeat."""
        foo = Foo()
        test_array = numpy.arange(-7, 2)
        with self.assertTraitChanges(foo, "maybe_array_no_compare"):
            foo.maybe_array_no_compare = None
        with self.assertTraitChanges(foo, "maybe_array_no_compare"):
            foo.maybe_array_no_compare = test_array
        with self.assertTraitChanges(foo, "maybe_array_no_compare"):
            foo.maybe_array_no_compare = test_array
    def test_default_value_copied(self):
        """Each trait instance gets its own copy of the default array."""
        # Check that we don't share defaults.
        test_default = numpy.arange(100.0, 110.0)
        class FooBar(HasTraits):
            foo = ArrayOrNone(value=test_default)
            bar = ArrayOrNone(value=test_default)
        foo_bar = FooBar()
        self.assertTrue((foo_bar.foo == test_default).all())
        self.assertTrue((foo_bar.bar == test_default).all())
        # Mutating the original array must not leak into the defaults.
        test_default += 2.0
        self.assertFalse((foo_bar.foo == test_default).all())
        self.assertFalse((foo_bar.bar == test_default).all())
        # Mutating one trait's array must not affect the other's.
        foo = foo_bar.foo
        foo += 1729.0
        self.assertFalse((foo_bar.foo == foo_bar.bar).all())
| [
"traits.testing.unittest_tools.unittest.skipUnless",
"traits.api.ArrayOrNone",
"numpy.dtype",
"numpy.arange"
] | [((1334, 1393), 'traits.testing.unittest_tools.unittest.skipUnless', 'unittest.skipUnless', (['numpy_available', '"""numpy not available"""'], {}), "(numpy_available, 'numpy not available')\n", (1353, 1393), False, 'from traits.testing.unittest_tools import unittest\n'), ((1106, 1130), 'traits.api.ArrayOrNone', 'ArrayOrNone', ([], {'dtype': 'float'}), '(dtype=float)\n', (1117, 1130), False, 'from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError\n'), ((1160, 1191), 'traits.api.ArrayOrNone', 'ArrayOrNone', ([], {'shape': '(None, None)'}), '(shape=(None, None))\n', (1171, 1191), False, 'from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError\n'), ((1228, 1256), 'traits.api.ArrayOrNone', 'ArrayOrNone', ([], {'value': '[1, 2, 3]'}), '(value=[1, 2, 3])\n', (1239, 1256), False, 'from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError\n'), ((1291, 1330), 'traits.api.ArrayOrNone', 'ArrayOrNone', ([], {'comparison_mode': 'NO_COMPARE'}), '(comparison_mode=NO_COMPARE)\n', (1302, 1330), False, 'from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError\n'), ((2131, 2146), 'numpy.arange', 'numpy.arange', (['(5)'], {}), '(5)\n', (2143, 2146), False, 'import numpy\n'), ((2962, 2977), 'numpy.arange', 'numpy.arange', (['(5)'], {}), '(5)\n', (2974, 2977), False, 'import numpy\n'), ((3671, 3691), 'numpy.arange', 'numpy.arange', (['(-7)', '(-2)'], {}), '(-7, -2)\n', (3683, 3691), False, 'import numpy\n'), ((3723, 3739), 'numpy.arange', 'numpy.arange', (['(10)'], {}), '(10)\n', (3735, 3739), False, 'import numpy\n'), ((4839, 4858), 'numpy.arange', 'numpy.arange', (['(-7)', '(2)'], {}), '(-7, 2)\n', (4851, 4858), False, 'import numpy\n'), ((5330, 5356), 'numpy.arange', 'numpy.arange', (['(100.0)', '(110.0)'], {}), '(100.0, 110.0)\n', (5342, 5356), False, 'import numpy\n'), ((2748, 2764), 'numpy.dtype', 'numpy.dtype', (['int'], {}), '(int)\n', (2759, 2764), False, 'import numpy\n'), ((3431, 3449), 'numpy.dtype', 'numpy.dtype', 
(['float'], {}), '(float)\n', (3442, 3449), False, 'import numpy\n'), ((5409, 5440), 'traits.api.ArrayOrNone', 'ArrayOrNone', ([], {'value': 'test_default'}), '(value=test_default)\n', (5420, 5440), False, 'from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError\n'), ((5460, 5491), 'traits.api.ArrayOrNone', 'ArrayOrNone', ([], {'value': 'test_default'}), '(value=test_default)\n', (5471, 5491), False, 'from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError\n'), ((1995, 2043), 'traits.api.ArrayOrNone', 'ArrayOrNone', ([], {'shape': '(None, None)', 'value': '[1, 2, 3]'}), '(shape=(None, None), value=[1, 2, 3])\n', (2006, 2043), False, 'from traits.api import ArrayOrNone, HasTraits, NO_COMPARE, TraitError\n')] |
import sys
import numpy as np
from hummingbird.simulation.simulator import Simulator
from hummingbird.graphics.video_writer import VideoWriter
from hummingbird.parameters.planner_parameters import PlannerParameters
from hummingbird.graphics.waypoint_viewer import WaypointViewer
from hummingbird.graphics.data_viewer import DataViewer
from hummingbird.physics.wind_simulation import WindSimulation
from hummingbird.physics.fixed_wing import FixedWing
from hummingbird.control.autopilot import Autopilot
from hummingbird.estimation.observer import Observer
from hummingbird.guidance.path_follower import PathFollower
from hummingbird.guidance.path_manager import PathManager
from hummingbird.message_types.msg_waypoints import MsgWaypoints
class PathManagerSimulator(Simulator):
    """
    Closed-loop MAV simulation exercising the path manager over a fixed
    square of four waypoints.

    config: 'straight_line' 'fillet' 'dubins'
        Waypoint-following mode stored in the waypoint message's `type`.
    """
    def __init__(self, record_video=False, display_data=True, config="dubins"):
        # record_video: if True, frames are captured to path_manager.avi.
        # display_data: if True, a DataViewer plots state traces each step.
        Simulator.__init__(self, record_video)
        if self.record_video:
            self.video = VideoWriter(video_name="path_manager.avi",
                                     bounding_box=(0, 0, 800, 600),
                                     output_rate=self.sim_p.dt_video)
        self.display_data = display_data
        self.sim_p.end_time = 50.
        self.waypoint_view = WaypointViewer()  # initialize the viewer
        self.data_view = DataViewer(800, 0)
        # Plant, wind disturbance, controller, estimator and guidance stack.
        self.mav = FixedWing()
        self.wind = WindSimulation()
        self.ctrl = Autopilot(self.sim_p.dt_controller)
        self.obsv = Observer(self.sim_p.dt_controller)
        self.measurements = self.mav.sensors.sensors
        self.path_follow = PathFollower()
        self.path_manage = PathManager()
        self.plan_p = PlannerParameters()
        # Four NED waypoints forming a 1 km square at 100 m altitude.
        self.waypoints = MsgWaypoints()
        self.waypoints.type = config
        self.waypoints.num_waypoints = 4
        Va = self.plan_p.Va0
        self.waypoints.ned[:self.waypoints.num_waypoints] = np.array([[0, 0, -100],
                                                                      [1000, 0, -100],
                                                                      [0, 1000, -100],
                                                                      [1000, 1000, -100]])
        self.waypoints.airspeed[:self.waypoints.num_waypoints] = np.array([Va, Va, Va, Va])
        self.waypoints.course[:self.waypoints.num_waypoints] = np.array([np.radians(0),
                                                                          np.radians(45),
                                                                          np.radians(45),
                                                                          np.radians(-135)])
    def simulate(self):
        """Run the sense -> estimate -> guide -> control -> propagate loop
        until end_time, updating the viewers each step."""
        while self.sim_time < self.sim_p.end_time:
            # -------observer-------------
            measurements = self.mav.sensors.update_sensors(self.mav.dynamics.true_state,
                                                           self.mav.dynamics._forces)  # get sensor measurements
            estimated_state = self.obsv.update(measurements)  # estimate states from measurements
            # -------path manager-------------
            path = self.path_manage.update(self.waypoints, self.plan_p.R_min, estimated_state)
            # -------path follower-------------
            # autopilot_commands = path_follow.update(path, estimated_state)
            # NOTE(review): follower is fed the TRUE state, not the estimate —
            # presumably intentional for this demo; confirm before reuse.
            autopilot_commands = self.path_follow.update(path, self.mav.dynamics.true_state)
            # -------controller-------------
            delta, commanded_state = self.ctrl.update(autopilot_commands, estimated_state)
            # -------physical system-------------
            current_wind = self.wind.update()  # get the new wind vector
            self.mav.dynamics.update(delta)  # propagate the MAV dynamics
            # -------update viewer-------------
            if not self.waypoint_view.plot_initialized:
                # First frame: draw once, then force a path redraw so the
                # initial path geometry is rendered.
                self.waypoint_view.update(self.waypoints, path, self.mav.dynamics.true_state)  # plot path and MAV
                path.flag_path_changed = True
                self.waypoint_view.update(self.waypoints, path, self.mav.dynamics.true_state)  # plot path and MAV
            else:
                self.waypoint_view.update(self.waypoints, path, self.mav.dynamics.true_state)  # plot path and MAV
            if self.display_data:
                self.data_view.update(self.mav.dynamics.true_state,  # true states
                                      estimated_state,  # estimated states
                                      commanded_state,  # commanded states
                                      self.sim_p.dt_simulation)
            # -------increment time-------------
            self.sim_time += self.sim_p.dt_simulation
            if self.record_video:
                self.video.update(self.sim_time)
        if self.record_video:
            self.video.close()
        # Hand control to the Qt event loop so the windows stay open.
        sys.exit(self.waypoint_view.app.exec_())
if __name__ == "__main__":
    # Entry point: run the path-manager demo with default settings.
    runner = PathManagerSimulator()
    runner.simulate()
| [
"hummingbird.control.autopilot.Autopilot",
"numpy.radians",
"hummingbird.guidance.path_follower.PathFollower",
"hummingbird.simulation.simulator.Simulator.__init__",
"hummingbird.graphics.video_writer.VideoWriter",
"hummingbird.physics.fixed_wing.FixedWing",
"hummingbird.guidance.path_manager.PathManage... | [((932, 970), 'hummingbird.simulation.simulator.Simulator.__init__', 'Simulator.__init__', (['self', 'record_video'], {}), '(self, record_video)\n', (950, 970), False, 'from hummingbird.simulation.simulator import Simulator\n'), ((1313, 1329), 'hummingbird.graphics.waypoint_viewer.WaypointViewer', 'WaypointViewer', ([], {}), '()\n', (1327, 1329), False, 'from hummingbird.graphics.waypoint_viewer import WaypointViewer\n'), ((1380, 1398), 'hummingbird.graphics.data_viewer.DataViewer', 'DataViewer', (['(800)', '(0)'], {}), '(800, 0)\n', (1390, 1398), False, 'from hummingbird.graphics.data_viewer import DataViewer\n'), ((1418, 1429), 'hummingbird.physics.fixed_wing.FixedWing', 'FixedWing', ([], {}), '()\n', (1427, 1429), False, 'from hummingbird.physics.fixed_wing import FixedWing\n'), ((1450, 1466), 'hummingbird.physics.wind_simulation.WindSimulation', 'WindSimulation', ([], {}), '()\n', (1464, 1466), False, 'from hummingbird.physics.wind_simulation import WindSimulation\n'), ((1487, 1522), 'hummingbird.control.autopilot.Autopilot', 'Autopilot', (['self.sim_p.dt_controller'], {}), '(self.sim_p.dt_controller)\n', (1496, 1522), False, 'from hummingbird.control.autopilot import Autopilot\n'), ((1543, 1577), 'hummingbird.estimation.observer.Observer', 'Observer', (['self.sim_p.dt_controller'], {}), '(self.sim_p.dt_controller)\n', (1551, 1577), False, 'from hummingbird.estimation.observer import Observer\n'), ((1659, 1673), 'hummingbird.guidance.path_follower.PathFollower', 'PathFollower', ([], {}), '()\n', (1671, 1673), False, 'from hummingbird.guidance.path_follower import PathFollower\n'), ((1701, 1714), 'hummingbird.guidance.path_manager.PathManager', 'PathManager', ([], {}), '()\n', (1712, 1714), False, 'from hummingbird.guidance.path_manager import PathManager\n'), ((1737, 1756), 'hummingbird.parameters.planner_parameters.PlannerParameters', 'PlannerParameters', ([], {}), '()\n', (1754, 1756), False, 'from 
hummingbird.parameters.planner_parameters import PlannerParameters\n'), ((1783, 1797), 'hummingbird.message_types.msg_waypoints.MsgWaypoints', 'MsgWaypoints', ([], {}), '()\n', (1795, 1797), False, 'from hummingbird.message_types.msg_waypoints import MsgWaypoints\n'), ((1965, 2043), 'numpy.array', 'np.array', (['[[0, 0, -100], [1000, 0, -100], [0, 1000, -100], [1000, 1000, -100]]'], {}), '([[0, 0, -100], [1000, 0, -100], [0, 1000, -100], [1000, 1000, -100]])\n', (1973, 2043), True, 'import numpy as np\n'), ((2289, 2315), 'numpy.array', 'np.array', (['[Va, Va, Va, Va]'], {}), '([Va, Va, Va, Va])\n', (2297, 2315), True, 'import numpy as np\n'), ((1027, 1137), 'hummingbird.graphics.video_writer.VideoWriter', 'VideoWriter', ([], {'video_name': '"""path_manager.avi"""', 'bounding_box': '(0, 0, 800, 600)', 'output_rate': 'self.sim_p.dt_video'}), "(video_name='path_manager.avi', bounding_box=(0, 0, 800, 600),\n output_rate=self.sim_p.dt_video)\n", (1038, 1137), False, 'from hummingbird.graphics.video_writer import VideoWriter\n'), ((2389, 2402), 'numpy.radians', 'np.radians', (['(0)'], {}), '(0)\n', (2399, 2402), True, 'import numpy as np\n'), ((2467, 2481), 'numpy.radians', 'np.radians', (['(45)'], {}), '(45)\n', (2477, 2481), True, 'import numpy as np\n'), ((2546, 2560), 'numpy.radians', 'np.radians', (['(45)'], {}), '(45)\n', (2556, 2560), True, 'import numpy as np\n'), ((2625, 2641), 'numpy.radians', 'np.radians', (['(-135)'], {}), '(-135)\n', (2635, 2641), True, 'import numpy as np\n')] |
import torch
import torchvision.transforms as transforms
import numpy as np
def image_transforms(mode='train', augment_parameters=[0.8, 1.2, 0.5, 2.0, 0.8, 1.2],
                     do_augmentation=True, transformations=None, size=(256, 512)):
    """
    Build the torchvision transform pipeline for the requested *mode*.

    'train'  -> resize both eyes, random flip, to-tensor, photometric jitter.
    'test'   -> resize, to-tensor, stack image with its horizontal flip.
    'custom' -> compose the caller-supplied *transformations* list.
    Any other mode prints an error and returns None.
    """
    if mode == 'train':
        pipeline = [
            ResizeImage(train=True, size=size),
            RandomFlip(do_augmentation),
            ToTensor(train=True),
            AugmentImagePair(augment_parameters, do_augmentation),
        ]
        return transforms.Compose(pipeline)
    if mode == 'test':
        pipeline = [
            ResizeImage(train=False, size=size),
            ToTensor(train=False),
            DoTest(),
        ]
        return transforms.Compose(pipeline)
    if mode == 'custom':
        return transforms.Compose(transformations)
    print('Wrong mode')
class ResizeImage(object):
    """Resize to *size*; in train mode resizes both images of a stereo pair,
    otherwise resizes the single image passed in."""

    def __init__(self, train=True, size=(256, 512)):
        self.train = train
        self.transform = transforms.Resize(size)

    def __call__(self, sample):
        resize = self.transform
        if not self.train:
            # Test mode: the sample is a single image.
            return resize(sample)
        return {'left_image': resize(sample['left_image']),
                'right_image': resize(sample['right_image'])}
class DoTest(object):
    """Stack an image tensor with its horizontal flip along a new leading
    axis, producing the two views used at test time."""

    def __call__(self, sample):
        mirrored = torch.flip(sample, [2])
        return torch.stack((sample, mirrored))
class ToTensor(object):
    """Convert images to torch tensors; in train mode converts both images
    of a stereo pair, otherwise the single image passed in."""

    def __init__(self, train):
        self.train = train
        self.transform = transforms.ToTensor()

    def __call__(self, sample):
        to_tensor = self.transform
        if not self.train:
            return to_tensor(sample)
        return {'left_image': to_tensor(sample['left_image']),
                'right_image': to_tensor(sample['right_image'])}
class RandomFlip(object):
    """With probability 0.5 (when augmentation is enabled), mirror both
    images of a stereo pair and swap left/right so the geometry stays
    consistent."""

    def __init__(self, do_augmentation):
        self.transform = transforms.RandomHorizontalFlip(p=1)
        self.do_augmentation = do_augmentation

    def __call__(self, sample):
        left = sample['left_image']
        right = sample['right_image']
        # Drawn unconditionally so the RNG sequence is independent of
        # do_augmentation (matches the original behavior).
        draw = np.random.uniform(0, 1, 1)
        if self.do_augmentation:
            if draw > 0.5:
                # Mirror each image and exchange the two eyes.
                sample = {'left_image': self.transform(right),
                          'right_image': self.transform(left)}
        else:
            sample = {'left_image': left, 'right_image': right}
        return sample
class AugmentImagePair(object):
    """With probability 0.5 (when augmentation is enabled), apply identical
    photometric jitter — gamma, brightness and per-channel color scaling —
    to both images of a stereo pair, then clamp to [0, 1]."""

    def __init__(self, augment_parameters, do_augmentation):
        self.do_augmentation = do_augmentation
        # augment_parameters = [gamma_low, gamma_high, brightness_low,
        #                       brightness_high, color_low, color_high]
        (self.gamma_low, self.gamma_high,
         self.brightness_low, self.brightness_high,
         self.color_low, self.color_high) = augment_parameters

    def __call__(self, sample):
        left = sample['left_image']
        right = sample['right_image']
        # Drawn unconditionally so the RNG sequence is independent of
        # do_augmentation (matches the original behavior).
        p = np.random.uniform(0, 1, 1)
        if self.do_augmentation and p > 0.5:
            # Shared gamma shift.
            gamma = np.random.uniform(self.gamma_low, self.gamma_high)
            left_aug = left ** gamma
            right_aug = right ** gamma
            # Shared brightness scale.
            brightness = np.random.uniform(self.brightness_low, self.brightness_high)
            left_aug = left_aug * brightness
            right_aug = right_aug * brightness
            # Shared per-channel color scale.
            colors = np.random.uniform(self.color_low, self.color_high, 3)
            for channel in range(3):
                left_aug[channel, :, :] *= colors[channel]
                right_aug[channel, :, :] *= colors[channel]
            # Keep values in the valid intensity range.
            left_aug = torch.clamp(left_aug, 0, 1)
            right_aug = torch.clamp(right_aug, 0, 1)
            sample = {'left_image': left_aug, 'right_image': right_aug}
        return sample
| [
"torchvision.transforms.RandomHorizontalFlip",
"torch.flip",
"numpy.random.uniform",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Compose",
"torch.clamp"
] | [((1057, 1080), 'torchvision.transforms.Resize', 'transforms.Resize', (['size'], {}), '(size)\n', (1074, 1080), True, 'import torchvision.transforms as transforms\n'), ((1848, 1869), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1867, 1869), True, 'import torchvision.transforms as transforms\n'), ((2450, 2486), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(1)'}), '(p=1)\n', (2481, 2486), True, 'import torchvision.transforms as transforms\n'), ((2665, 2691), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2682, 2691), True, 'import numpy as np\n'), ((3669, 3695), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3686, 3695), True, 'import numpy as np\n'), ((819, 854), 'torchvision.transforms.Compose', 'transforms.Compose', (['transformations'], {}), '(transformations)\n', (837, 854), True, 'import torchvision.transforms as transforms\n'), ((1687, 1710), 'torch.flip', 'torch.flip', (['sample', '[2]'], {}), '(sample, [2])\n', (1697, 1710), False, 'import torch\n'), ((3823, 3873), 'numpy.random.uniform', 'np.random.uniform', (['self.gamma_low', 'self.gamma_high'], {}), '(self.gamma_low, self.gamma_high)\n', (3840, 3873), True, 'import numpy as np\n'), ((4077, 4137), 'numpy.random.uniform', 'np.random.uniform', (['self.brightness_low', 'self.brightness_high'], {}), '(self.brightness_low, self.brightness_high)\n', (4094, 4137), True, 'import numpy as np\n'), ((4348, 4401), 'numpy.random.uniform', 'np.random.uniform', (['self.color_low', 'self.color_high', '(3)'], {}), '(self.color_low, self.color_high, 3)\n', (4365, 4401), True, 'import numpy as np\n'), ((4627, 4660), 'torch.clamp', 'torch.clamp', (['left_image_aug', '(0)', '(1)'], {}), '(left_image_aug, 0, 1)\n', (4638, 4660), False, 'import torch\n'), ((4695, 4729), 'torch.clamp', 'torch.clamp', (['right_image_aug', '(0)', '(1)'], {}), '(right_image_aug, 
0, 1)\n', (4706, 4729), False, 'import torch\n')] |
#
# Copyright (c) 2018, Salesforce, Inc.
# The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from json.decoder import JSONDecodeError
import logging
import os
import shutil
import random
import time
import re
import numpy as np
import torch
from .data_utils.example import Batch
from .data_utils.iterator import Iterator
logger = logging.getLogger(__name__)
# Matches placeholder entity tokens such as "DATE_0" or "QUOTED_STRING_2":
# an uppercase-initial name followed by an underscore and a numeric index.
# Group 1 captures the name without the index.
ENTITY_MATCH_REGEX = re.compile('^([A-Z].*)_[0-9]+$')
class SpecialTokenMap:
    """Reversible regex-based substitution for special tokens: `forward`
    rewrites occurrences of `pattern` in a string, and `backward` restores
    the original occurrence given the rewritten text."""
    def __init__(self, pattern, forward_func, backward_func=None):
        """
        Inputs:
            pattern: a regex pattern
            forward_func: a function with signature forward_func(str) -> str
            backward_func: a function with signature backward_func(str) -> list[str]
        """
        # A list is treated as a lookup table indexed (mod its length) by the
        # numeric parameter captured from the pattern.
        if isinstance(forward_func, list):
            self.forward_func = lambda x: forward_func[int(x)%len(forward_func)]
        else:
            self.forward_func = forward_func
        if isinstance(backward_func, list):
            self.backward_func = lambda x: backward_func[int(x)%len(backward_func)]
        else:
            self.backward_func = backward_func
        self.pattern = pattern
    def forward(self, s: str):
        """Replace every match of `pattern` in *s* via forward_func.
        Returns the rewritten string and a reverse map of
        (this SpecialTokenMap, original occurrence) pairs, in match order."""
        reverse_map = []
        # NOTE(review): re.finditer never returns None, so the check below is
        # effectively dead; kept as-is.
        matches = re.finditer(self.pattern, s)
        if matches is None:
            return s, reverse_map
        for match in matches:
            occurrence = match.group(0)
            parameter = match.group(1)
            replacement = self.forward_func(parameter)
            s = s.replace(occurrence, replacement)
            reverse_map.append((self, occurrence))
        return s, reverse_map
    def backward(self, s: str, occurrence: str):
        """Restore *occurrence* into *s* by finding its forward replacement
        (longest candidate first) and substituting it back, case-insensitively,
        trying surrounding-space variants to preserve token boundaries."""
        match = re.match(self.pattern, occurrence)
        parameter = match.group(1)
        if self.backward_func is None:
            list_of_strings_to_match = [self.forward_func(parameter)]
        else:
            list_of_strings_to_match = sorted(self.backward_func(parameter), key=lambda x:len(x), reverse=True)
        for string_to_match in list_of_strings_to_match:
            # Try " x ", "x ", " x" so a surrounding space is kept intact.
            l = [' '+string_to_match+' ', string_to_match+' ', ' '+string_to_match]
            o = [' '+occurrence+' ', occurrence+' ', ' '+occurrence]
            new_s = s
            for i in range(len(l)):
                new_s = re.sub(l[i], o[i], s, flags=re.IGNORECASE)
                if s != new_s:
                    break
            if s != new_s:
                s = new_s
                break
        return s
def find_span_type(program, begin_index, end_index):
    """
    Classify the quoted span program[begin_index:end_index] of a tokenized
    ThingTalk program as LOCATION, QUOTED_STRING, HASHTAG, USERNAME or
    GENERIC_ENTITY_<type>. Returns (span_type, end_index), where end_index
    is advanced past a consumed '^^...' type annotation.
    """
    # 'location:' immediately before the opening quote marks a location span.
    if begin_index > 1 and program[begin_index - 2] == 'location:':
        return 'LOCATION', end_index
    at_program_end = end_index == len(program) - 1
    if at_program_end or not program[end_index + 1].startswith('^^'):
        return 'QUOTED_STRING', end_index
    annotation = program[end_index + 1]
    if annotation == '^^tt:hashtag':
        span_type = 'HASHTAG'
    elif annotation == '^^tt:username':
        span_type = 'USERNAME'
    else:
        span_type = 'GENERIC_ENTITY_' + annotation[2:]
    # The type annotation token is consumed along with the span.
    return span_type, end_index + 1
def requote_program(program):
    """
    Replace quoted literals and numbered entity placeholders in a
    space-tokenized program with generic span-type tokens (QUOTED_STRING,
    HASHTAG, LOCATION, GENERIC_ENTITY_*, ...), so programs that differ only
    in literal values become identical strings.
    """
    program = program.split(' ')
    requoted = []
    in_string = False
    begin_index = 0
    i = 0
    while i < len(program):
        token = program[i]
        if token == '"':
            in_string = not in_string
            if in_string:
                begin_index = i + 1  # first token inside the quotes
            else:
                # Closing quote: classify the span; end_index skips any
                # consumed '^^...' type annotation.
                span_type, end_index = find_span_type(program, begin_index, i)
                requoted.append(span_type)
                i = end_index
        elif not in_string:
            entity_match = ENTITY_MATCH_REGEX.match(token)
            if entity_match is not None:
                # Strip the numeric suffix from placeholders like DATE_0.
                requoted.append(entity_match[1])
            elif token != 'location:':
                requoted.append(token)
        i += 1
    return ' '.join(requoted)
def tokenizer(s):
    """Split *s* on runs of whitespace and return the token list."""
    tokens = s.split()
    return tokens
def mask_special_tokens(string: str):
    """Replace each placeholder token (e.g. "DATE_0") in *string* with
    '<temp>', left to right. Returns the masked string and the list of
    masked tokens in order, for later restoration by unmask_special_tokens."""
    exceptions = re.findall('[A-Za-z:_.]+_[0-9]+', string)
    for token in exceptions:
        string = string.replace(token, '<temp>', 1)
    return string, exceptions
def unmask_special_tokens(string: str, exceptions: list):
    """Substitute the '<temp>' placeholders in *string*, left to right, with
    the tokens in *exceptions*; placeholders beyond len(exceptions) are
    left untouched (inverse of mask_special_tokens)."""
    pieces = string.split('<temp>')
    rebuilt = [pieces[0]]
    for i, piece in enumerate(pieces[1:]):
        filler = exceptions[i] if i < len(exceptions) else '<temp>'
        rebuilt.append(filler + piece)
    return ''.join(rebuilt)
def detokenize(string: str):
    """
    Undo `tokenize`: re-attach punctuation and clitic tokens (e.g. "n't",
    "'s") to the preceding word and rejoin "gon na"/"wan na", while masking
    placeholder tokens (e.g. DATE_0) so they are never modified.
    """
    string, exceptions = mask_special_tokens(string)
    tokens = ["'d", "n't", "'ve", "'m", "'re", "'ll", ".", ",", "?", "!", "'s", ")", ":"]
    for t in tokens:
        # Drop the space tokenize() inserted before each of these tokens.
        string = string.replace(' ' + t, t)
    string = string.replace("( ", "(")
    string = string.replace('gon na', 'gonna')
    string = string.replace('wan na', 'wanna')
    string = unmask_special_tokens(string, exceptions)
    return string
def tokenize(string: str):
    """
    Split punctuation and clitic tokens (e.g. "n't", "'s") off the preceding
    word and split "gonna"/"wanna", collapsing runs of whitespace, while
    masking placeholder tokens (e.g. DATE_0) so they are never modified.
    Inverse of `detokenize`.
    """
    string, exceptions = mask_special_tokens(string)
    tokens = ["'d", "n't", "'ve", "'m", "'re", "'ll", ".", ",", "?", "!", "'s", ")", ":"]
    for t in tokens:
        string = string.replace(t, ' ' + t)
    string = string.replace("(", "( ")
    string = string.replace('gonna', 'gon na')
    string = string.replace('wanna', 'wan na')
    # Raw string: '\s' is an invalid escape in a plain string literal and
    # raises a SyntaxWarning on Python 3.12+.
    string = re.sub(r'\s+', ' ', string)
    string = unmask_special_tokens(string, exceptions)
    return string.strip()
def lower_case(string):
    """Lower-case *string* while leaving placeholder tokens (e.g. DATE_0)
    untouched."""
    masked, exceptions = mask_special_tokens(string)
    return unmask_special_tokens(masked.lower(), exceptions)
def get_number_of_lines(file_path):
    """Return the number of lines in the text file at *file_path*."""
    with open(file_path) as f:
        return sum(1 for _ in f)
def get_part_path(path, part_idx):
    """Return *path* with '_part<part_idx+1>' appended, preserving a
    trailing path separator if one was present."""
    trailing_sep = path.endswith(os.path.sep)
    base = path[:-1] if trailing_sep else path
    tail = os.path.sep if trailing_sep else ''
    return '{}_part{}{}'.format(base, part_idx + 1, tail)
def split_folder_on_disk(folder_path, num_splits):
    """
    Split every file under *folder_path* (recursively) into *num_splits*
    sibling folders named '<folder_path>_part<k>', preserving the relative
    directory layout; each file's lines are distributed round-robin via
    split_file_on_disk. Returns the list of new folder paths.
    """
    new_folder_paths = [get_part_path(folder_path, part_idx) for part_idx in range(num_splits)]
    for subdir, dirs, files in os.walk(folder_path):
        for file in files:
            # Map this file's path into each of the part folders.
            new_file_paths = [os.path.join(subdir.replace(folder_path, new_folder_paths[part_idx]), file) for part_idx in range(num_splits)]
            split_file_on_disk(os.path.join(subdir, file), num_splits, output_paths=new_file_paths)
    return new_folder_paths
def split_file_on_disk(file_path, num_splits, output_paths=None):
    """
    Distribute the lines of *file_path* round-robin across *num_splits*
    output files.

    If *output_paths* is None, part paths are derived from *file_path* via
    get_part_path; otherwise the provided paths are used. Parent directories
    are created as needed. Returns the list of output paths written.
    """
    # NOTE: the original also called get_number_of_lines() here, scanning the
    # whole input once without using the result; that wasted pass is removed.
    all_output_paths = []
    all_output_files = []
    for part_idx in range(num_splits):
        if output_paths is None:
            output_path = get_part_path(file_path, part_idx)
        else:
            output_path = output_paths[part_idx]
        all_output_paths.append(output_path)
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        all_output_files.append(open(output_path, 'w'))
    try:
        with open(file_path, 'r') as input_file:
            output_file_idx = 0
            for line in input_file:
                all_output_files[output_file_idx].write(line)
                output_file_idx = (output_file_idx + 1) % len(all_output_files)
    finally:
        # Close the part files even if reading/writing fails (was leaked on
        # error in the original).
        for f in all_output_files:
            f.close()
    return all_output_paths
def combine_folders_on_disk(folder_path_prefix, num_files, line_group_size, delete=False):
    """
    Inverse of split_folder_on_disk: merge '<folder_path_prefix>_part<k>'
    folders (k = 1..num_files) back into *folder_path_prefix*.

    For each relative file path, .json parts are merged by summing values
    per key and dividing by num_files (an average); all other files are
    merged round-robin, taking *line_group_size* consecutive lines from each
    part in turn. If *delete* is True, the part folders are removed afterwards.
    """
    folder_paths = [get_part_path(folder_path_prefix, part_idx) for part_idx in range(num_files)]
    # Map each merged output path to the list of part files that feed it.
    new_to_olds_map = {}
    for i in range(num_files):
        for subdir, dirs, files in os.walk(folder_paths[i]):
            for file in files:
                new_file_path = os.path.join(subdir.replace(folder_paths[i], folder_path_prefix), file)
                if new_file_path not in new_to_olds_map:
                    new_to_olds_map[new_file_path] = []
                new_to_olds_map[new_file_path].append(os.path.join(subdir, file))
    for new, olds in new_to_olds_map.items():
        os.makedirs(os.path.dirname(new), exist_ok=True)
        with open(new, 'w') as combined_file:
            if new.endswith('.json'):
                # JSON parts: accumulate values per key, then average.
                new_json = None
                for old in olds:
                    with open(old, 'r') as f:
                        if new_json is None:
                            try:
                                new_json = json.load(f)
                            except JSONDecodeError:
                                # Malformed part: log its content and move on
                                # (best-effort merge).
                                f.seek(0)
                                logger.info('Failed to read json file %s with content:\n %s', old, f.read())
                        else:
                            for k, v in json.load(f).items():
                                new_json[k] += v
                for k, v in new_json.items():
                    new_json[k] /= float(num_files)
                json.dump(new_json, combined_file)
            else:
                # Text parts: interleave groups of line_group_size lines,
                # round-robin over the parts, restoring the original order
                # produced by the splitter.
                all_old_file_contents = []
                for old in olds:
                    with open(old, 'r') as f:
                        all_old_file_contents.append([line for line in f])
                old_file_idx = 0
                all_indices = [0] * len(all_old_file_contents)
                finished_reading = [False] * len(all_old_file_contents)
                while True:
                    if finished_reading[old_file_idx]:
                        old_file_idx = (old_file_idx + 1) % len(all_old_file_contents)
                        continue
                    for i in range(line_group_size):
                        line = all_old_file_contents[old_file_idx][all_indices[old_file_idx]]
                        combined_file.write(line)
                        all_indices[old_file_idx] += 1
                    if all_indices[old_file_idx] == len(all_old_file_contents[old_file_idx]):
                        finished_reading[old_file_idx] = True
                    if all(finished_reading):
                        break
                    old_file_idx = (old_file_idx + 1) % len(all_old_file_contents)
    if delete:
        for folder in folder_paths:
            shutil.rmtree(folder)
def combine_files_on_disk(file_path_prefix, num_files, line_group_size, delete=False):
    """Merge `num_files` sharded files (located via get_part_path) into one
    file at `file_path_prefix` by interleaving ``line_group_size``-line groups
    from each shard in round-robin order.

    When ``delete`` is True the shard files are removed after merging.

    NOTE(review): assumes each shard's line count is a multiple of
    ``line_group_size``; a shard exhausted mid-group would raise IndexError —
    confirm upstream writers guarantee this.
    """
    all_input_file_contents = []
    all_input_file_paths = []
    for i in range(num_files):
        input_file_path = get_part_path(file_path_prefix, i)
        all_input_file_paths.append(input_file_path)
        with open(input_file_path, 'r') as f:
            all_input_file_contents.append([line for line in f])
    # Per-shard cursor positions and exhaustion flags for the round-robin merge.
    all_indices = [0] * len(all_input_file_contents)
    finished_reading = [False] * len(all_input_file_contents)
    input_file_idx = 0
    with open(file_path_prefix, 'w') as combined_file:
        while True:
            if finished_reading[input_file_idx]:
                # Skip shards that are already fully copied.
                input_file_idx = (input_file_idx + 1) % len(all_input_file_contents)
                continue
            for i in range(line_group_size):
                line = all_input_file_contents[input_file_idx][all_indices[input_file_idx]]
                combined_file.write(line)
                all_indices[input_file_idx] += 1
                if all_indices[input_file_idx] == len(all_input_file_contents[input_file_idx]):
                    finished_reading[input_file_idx] = True
            if all(finished_reading):
                break
            input_file_idx = (input_file_idx + 1) % len(all_input_file_contents)
    if delete:
        for file_path in all_input_file_paths:
            os.remove(file_path)
def map_filter(callable, iterable):
    """Map *callable* over *iterable* and drop any None results."""
    return [mapped for mapped in map(callable, iterable) if mapped is not None]
def preprocess_examples(args, tasks, splits, logger=None, train=True):
    """Preprocess every example of each (task, split) pair in place.

    Delegates per-example work to ``task.preprocess_example`` (examples for
    which it returns None are dropped via ``map_filter``) and, when ``train``
    is True, additionally filters out examples whose answer or context length
    falls outside the configured bounds. When a logger is given, length
    statistics and the first few tokenized examples are logged.

    Parameters
    ----------
    args : namespace providing ``max_train_context_length``,
        ``max_val_context_length`` and ``max_answer_length``.
    tasks : iterable of task objects, paired element-wise with ``splits``.
    splits : iterable of dataset splits; each split's ``examples`` list is
        mutated in place.
    logger : logging.Logger, optional
    train : bool
        Selects the train-time context bound and enables length filtering.
    """
    min_length = 1
    max_context_length = args.max_train_context_length if train else args.max_val_context_length
    is_too_long = lambda ex: (len(ex.answer) > args.max_answer_length or
                              len(ex.context) > max_context_length)
    is_too_short = lambda ex: (len(ex.answer) < min_length or
                               len(ex.context) < min_length)
    for task, s in zip(tasks, splits):
        if logger is not None:
            logger.info(f'{task.name} has {len(s.examples)} examples')
        l = len(s.examples)
        s.examples = map_filter(
            lambda ex: task.preprocess_example(ex, train=train, max_context_length=max_context_length),
            s.examples)
        if train:
            # Filter in two passes so each drop can be logged separately.
            l = len(s.examples)
            s.examples = [ex for ex in s.examples if not is_too_long(ex)]
            if len(s.examples) < l:
                if logger is not None:
                    logger.info(f'Filtering out long {task.name} examples: {l} -> {len(s.examples)}')
            l = len(s.examples)
            s.examples = [ex for ex in s.examples if not is_too_short(ex)]
            if len(s.examples) < l:
                if logger is not None:
                    logger.info(f'Filtering out short {task.name} examples: {l} -> {len(s.examples)}')
        if logger is not None:
            context_lengths = [len(ex.context) for ex in s.examples]
            question_lengths = [len(ex.question) for ex in s.examples]
            answer_lengths = [len(ex.answer) for ex in s.examples]
            logger.info(
                f'{task.name} context lengths (min, mean, max): {np.min(context_lengths)}, {int(np.mean(context_lengths))}, {np.max(context_lengths)}')
            logger.info(
                f'{task.name} question lengths (min, mean, max): {np.min(question_lengths)}, {int(np.mean(question_lengths))}, {np.max(question_lengths)}')
            logger.info(
                f'{task.name} answer lengths (min, mean, max): {np.min(answer_lengths)}, {int(np.mean(answer_lengths))}, {np.max(answer_lengths)}')
        if logger is not None:
            logger.info('Tokenized examples:')
            for ex in s.examples[:10]:
                logger.info('Context: ' + ' '.join([token.strip() for token in ex.context]))
                logger.info('Question: ' + ' '.join([token.strip() for token in ex.question]))
                logger.info('Answer: ' + ' '.join([token.strip() for token in ex.answer]))
def init_devices(args, devices=None):
    """Resolve the list of torch devices to run on.

    Falls back to a single CPU device when CUDA is unavailable; otherwise
    uses the explicitly requested ``devices``, or every visible GPU when
    none are given.
    """
    if not torch.cuda.is_available():
        return [torch.device('cpu')]
    if devices:
        return [torch.device(ordinal) for ordinal in devices]
    gpu_count = torch.cuda.device_count()
    return [torch.device('cuda:%d' % idx) for idx in range(gpu_count)]
def set_seed(args):
    """Seed every RNG in play (numpy, stdlib random, torch CPU and CUDA)
    from ``args.seed`` for reproducible runs."""
    seed = args.seed
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def get_trainable_params(model, name=False):
    """Collect the parameters of *model* that require gradients.

    Returns ``(name, parameter)`` pairs when ``name`` is True, bare
    parameters otherwise.
    """
    if name:
        return [pair for pair in model.named_parameters() if pair[1].requires_grad]
    return [param for param in model.parameters() if param.requires_grad]
def log_model_size(logger, model, model_name):
    """Log how many trainable parameters *model* has, labelled *model_name*."""
    num_param = sum(p.nelement() for p in model.parameters() if p.requires_grad)
    logger.info(f'{model_name} has {num_param:,} parameters')
def elapsed_time(log):
    """Format the wall-clock time since ``log.start`` as 'DD:HH:MM:SS'."""
    remainder = time.time() - log.start
    days = int(remainder // (24 * 3600))
    remainder %= 24 * 3600
    hours = int(remainder // 3600)
    remainder %= 3600
    minutes = int(remainder // 60)
    seconds = int(remainder % 60)
    return f'{days:02}:{hours:02}:{minutes:02}:{seconds:02}'
def make_data_loader(dataset, numericalizer, batch_size, device=None, paired=False, max_pairs=None, train=False,
                     valid=False, append_question_to_context_too=False, override_question=None, override_context=None):
    """Wrap *dataset* in a torch DataLoader that yields already-collated batches.

    In training mode the underlying Iterator shuffles, repeats, and uses the
    data's batch/sort-key hooks. Since the Iterator produces whole minibatches
    itself, the DataLoader is created with ``batch_size=None`` and the collate
    function only turns a ready-made minibatch of examples into a Batch
    (numericalizing and applying the optional pairing and question/context
    overrides).

    NOTE(review): the ``valid`` parameter is accepted but unused in this body.
    """
    iterator = Iterator(dataset,
                        batch_size,
                        shuffle=train,
                        repeat=train,
                        use_data_batch_fn=train,
                        use_data_sort_key=train)
    # Example pairing is only active during training (paired and train).
    collate_function = lambda minibatch: Batch.from_examples(minibatch, numericalizer, device=device,
                                                            paired=paired and train, max_pairs=max_pairs, groups=iterator.groups,
                                                            append_question_to_context_too=append_question_to_context_too,
                                                            override_question=override_question, override_context=override_context)
    return torch.utils.data.DataLoader(iterator, batch_size=None, collate_fn=collate_function)
def pad(x, new_channel, dim, val=None):
    """Pad or truncate tensor *x* along *dim* to exactly *new_channel* entries.

    Parameters
    ----------
    x : torch.Tensor
    new_channel : int
        Target size along *dim*.
    dim : int
        Dimension to pad or truncate.
    val : number, optional
        Fill value for the padding. Defaults to 0.

    Returns
    -------
    torch.Tensor
        *x* itself when already the right size, otherwise a new tensor.
    """
    if x.size(dim) > new_channel:
        # Too long: truncate instead of padding.
        x = x.narrow(dim, 0, new_channel)
    channels = x.size()
    assert (new_channel >= channels[dim])
    if new_channel == channels[dim]:
        return x
    size = list(channels)
    size[dim] = new_channel - size[dim]
    # Bug fix: the previous default val=None made fill_(None) raise
    # TypeError; fall back to zero-padding when no fill value is given.
    if val is None:
        val = 0
    padding = x.new(*size).fill_(val)
    return torch.cat([x, padding], dim)
def have_multilingual(task_names):
    """Return True when any task name contains the substring 'multilingual'."""
    return any('multilingual' in task_name for task_name in task_names)
def load_config_json(args):
    """Load the saved training ``config.json`` under ``args.path`` into *args*.

    Shared train/predict arguments that are already set on *args* take
    precedence; everything else is pulled from the saved config, falling
    back to backward-compatible defaults for options that did not exist when
    the model was trained. Also forces ``args.dropout_ratio`` to 0 (inference)
    and resolves ``args.best_checkpoint``.
    """
    args.almond_type_embeddings = False
    with open(os.path.join(args.path, 'config.json')) as config_file:
        config = json.load(config_file)
    retrieve = ['model', 'seq2seq_encoder', 'seq2seq_decoder', 'transformer_layers', 'rnn_layers', 'rnn_zero_state',
                'transformer_hidden', 'dimension', 'rnn_dimension', 'load', 'max_val_context_length',
                'transformer_heads', 'max_output_length', 'max_generative_vocab', 'lower',
                'encoder_embeddings', 'context_embeddings', 'question_embeddings', 'decoder_embeddings',
                'trainable_decoder_embeddings', 'trainable_encoder_embeddings', 'train_encoder_embeddings',
                'train_context_embeddings', 'train_question_embeddings', 'locale', 'use_pretrained_bert',
                'train_context_embeddings_after', 'train_question_embeddings_after',
                'pretrain_context', 'pretrain_mlm_probability', 'force_subword_tokenize',
                'append_question_to_context_too', 'almond_preprocess_context', 'almond_lang_as_question',
                'override_question', 'override_context', 'almond_has_multiple_programs']
    if 'num_beams' in config and not isinstance(config['num_beams'], list):
        # num_beams used to be an integer in previous versions of the code
        config['num_beams'] = [config['num_beams']]
    # train and predict scripts have these arguments in common. We use the
    # values from train only if they are not provided in predict
    overwrite = ['val_batch_size', 'num_beams', 'num_outputs', 'no_repeat_ngram_size', 'top_p', 'top_k', 'repetition_penalty', 'temperature', 'reduce_metrics']
    for o in overwrite:
        if o not in args or getattr(args, o) is None:
            retrieve.append(o)
    for r in retrieve:
        if r in config:
            setattr(args, r, config[r])
        # The remaining branches provide backward-compatible defaults for
        # models trained before the corresponding argument existed.
        elif r == 'almond_has_multiple_programs':
            setattr(args, r, False)
        elif r == 'almond_lang_as_question':
            setattr(args, r, False)
        elif r == 'locale':
            setattr(args, r, 'en')
        # Bug fix: this tuple used to contain 'trainable_decoder_embedding'
        # (singular), which never matched the retrieved (plural) key, so the
        # attribute silently fell through to the final else and became None
        # instead of the intended 0.
        elif r in ('trainable_decoder_embeddings', 'trainable_encoder_embeddings', 'pretrain_context',
                   'train_context_embeddings_after', 'train_question_embeddings_after'):
            setattr(args, r, 0)
        elif r == 'pretrain_mlm_probability':
            setattr(args, r, 0.15)
        elif r == 'context_embeddings':
            if args.seq2seq_encoder == 'Coattention':
                setattr(args, r, '')
            else:
                setattr(args, r, args.encoder_embeddings)
        elif r == 'question_embeddings':
            setattr(args, r, args.encoder_embeddings)
        elif r == 'train_encoder_embeddings':
            setattr(args, r, False)
        elif r == 'train_context_embeddings':
            if args.seq2seq_encoder == 'Coattention':
                setattr(args, r, False)
            else:
                setattr(args, r, args.train_encoder_embeddings)
        elif r == 'train_question_embeddings':
            setattr(args, r, args.train_encoder_embeddings)
        elif r == 'rnn_dimension':
            setattr(args, r, args.dimension)
        elif r == 'rnn_zero_state':
            setattr(args, r, 'zero')
        elif r == 'use_pretrained_bert':
            setattr(args, r, True)
        elif r in ('append_question_to_context_too', 'almond_preprocess_context'):
            setattr(args, r, False)
        elif r == 'num_beams':
            setattr(args, r, [1])
        elif r == 'num_outputs':
            setattr(args, r, [1])
        elif r == 'no_repeat_ngram_size':
            setattr(args, r, [0])
        elif r == 'top_p':
            setattr(args, r, [1.0])
        elif r == 'top_k':
            setattr(args, r, [0])
        elif r == 'repetition_penalty':
            setattr(args, r, [1.0])
        elif r == 'temperature':
            setattr(args, r, [0.0])
        elif r == 'reduce_metrics':
            setattr(args, r, 'max')
        else:
            setattr(args, r, None)
    args.dropout_ratio = 0.0
    args.best_checkpoint = os.path.join(args.path, args.checkpoint_name)
| [
"logging.getLogger",
"re.compile",
"torch.utils.data.DataLoader",
"torch.cuda.device_count",
"torch.cuda.is_available",
"os.walk",
"os.remove",
"numpy.mean",
"numpy.max",
"numpy.random.seed",
"re.finditer",
"numpy.min",
"re.match",
"os.path.dirname",
"re.sub",
"time.time",
"torch.cat... | [((1883, 1910), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1900, 1910), False, 'import logging\n'), ((1933, 1965), 're.compile', 're.compile', (['"""^([A-Z].*)_[0-9]+$"""'], {}), "('^([A-Z].*)_[0-9]+$')\n", (1943, 1965), False, 'import re\n'), ((6738, 6765), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'string'], {}), "('\\\\s+', ' ', string)\n", (6744, 6765), False, 'import re\n'), ((7603, 7623), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (7610, 7623), False, 'import os\n'), ((16093, 16118), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (16107, 16118), True, 'import numpy as np\n'), ((16123, 16145), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (16134, 16145), False, 'import random\n'), ((16150, 16178), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (16167, 16178), False, 'import torch\n'), ((16183, 16220), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (16209, 16220), False, 'import torch\n'), ((17861, 17949), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['iterator'], {'batch_size': 'None', 'collate_fn': 'collate_function'}), '(iterator, batch_size=None, collate_fn=\n collate_function)\n', (17888, 17949), False, 'import torch\n'), ((18298, 18326), 'torch.cat', 'torch.cat', (['[x, padding]', 'dim'], {}), '([x, padding], dim)\n', (18307, 18326), False, 'import torch\n'), ((22965, 23010), 'os.path.join', 'os.path.join', (['args.path', 'args.checkpoint_name'], {}), '(args.path, args.checkpoint_name)\n', (22977, 23010), False, 'import os\n'), ((2785, 2813), 're.finditer', 're.finditer', (['self.pattern', 's'], {}), '(self.pattern, s)\n', (2796, 2813), False, 'import re\n'), ((3238, 3272), 're.match', 're.match', (['self.pattern', 'occurrence'], {}), '(self.pattern, occurrence)\n', (3246, 3272), False, 'import re\n'), ((9074, 9098), 
'os.walk', 'os.walk', (['folder_paths[i]'], {}), '(folder_paths[i])\n', (9081, 9098), False, 'import os\n'), ((15837, 15862), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15860, 15862), False, 'import torch\n'), ((16021, 16042), 'torch.device', 'torch.device', (['ordinal'], {}), '(ordinal)\n', (16033, 16042), False, 'import torch\n'), ((16677, 16688), 'time.time', 'time.time', ([], {}), '()\n', (16686, 16688), False, 'import time\n'), ((18585, 18607), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (18594, 18607), False, 'import json\n'), ((5612, 5654), 're.finditer', 're.finditer', (['"""[A-Za-z:_.]+_[0-9]+"""', 'string'], {}), "('[A-Za-z:_.]+_[0-9]+', string)\n", (5623, 5654), False, 'import re\n'), ((8372, 8400), 'os.path.dirname', 'os.path.dirname', (['output_path'], {}), '(output_path)\n', (8387, 8400), False, 'import os\n'), ((9501, 9521), 'os.path.dirname', 'os.path.dirname', (['new'], {}), '(new)\n', (9516, 9521), False, 'import os\n'), ((11601, 11622), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {}), '(folder)\n', (11614, 11622), False, 'import shutil\n'), ((13008, 13028), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (13017, 13028), False, 'import os\n'), ((15880, 15899), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (15892, 15899), False, 'import torch\n'), ((18512, 18550), 'os.path.join', 'os.path.join', (['args.path', '"""config.json"""'], {}), "(args.path, 'config.json')\n", (18524, 18550), False, 'import os\n'), ((3835, 3877), 're.sub', 're.sub', (['l[i]', 'o[i]', 's'], {'flags': 're.IGNORECASE'}), '(l[i], o[i], s, flags=re.IGNORECASE)\n', (3841, 3877), False, 'import re\n'), ((7824, 7850), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (7836, 7850), False, 'import os\n'), ((10325, 10359), 'json.dump', 'json.dump', (['new_json', 'combined_file'], {}), '(new_json, combined_file)\n', (10334, 10359), False, 'import json\n'), ((9402, 
9428), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (9414, 9428), False, 'import os\n'), ((15981, 16006), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (16004, 16006), False, 'import torch\n'), ((14948, 14971), 'numpy.min', 'np.min', (['context_lengths'], {}), '(context_lengths)\n', (14954, 14971), True, 'import numpy as np\n'), ((15008, 15031), 'numpy.max', 'np.max', (['context_lengths'], {}), '(context_lengths)\n', (15014, 15031), True, 'import numpy as np\n'), ((15126, 15150), 'numpy.min', 'np.min', (['question_lengths'], {}), '(question_lengths)\n', (15132, 15150), True, 'import numpy as np\n'), ((15188, 15212), 'numpy.max', 'np.max', (['question_lengths'], {}), '(question_lengths)\n', (15194, 15212), True, 'import numpy as np\n'), ((15305, 15327), 'numpy.min', 'np.min', (['answer_lengths'], {}), '(answer_lengths)\n', (15311, 15327), True, 'import numpy as np\n'), ((15363, 15385), 'numpy.max', 'np.max', (['answer_lengths'], {}), '(answer_lengths)\n', (15369, 15385), True, 'import numpy as np\n'), ((14979, 15003), 'numpy.mean', 'np.mean', (['context_lengths'], {}), '(context_lengths)\n', (14986, 15003), True, 'import numpy as np\n'), ((15158, 15183), 'numpy.mean', 'np.mean', (['question_lengths'], {}), '(question_lengths)\n', (15165, 15183), True, 'import numpy as np\n'), ((15335, 15358), 'numpy.mean', 'np.mean', (['answer_lengths'], {}), '(answer_lengths)\n', (15342, 15358), True, 'import numpy as np\n'), ((9854, 9866), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9863, 9866), False, 'import json\n'), ((10140, 10152), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10149, 10152), False, 'import json\n')] |
"""
Class for processing FITS files processed by DECam Community Pipelines.
These pipelines will bundle entire focal planes into a single file, which can
be successfully processed by the MultiExtensionFits class, but for which we can
create better visualisations.
Note that the focusing and guiding chips are not processed, but are usually
present in Community Pipelines products.
"""
import os
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np
from .multi_extension_fits import MultiExtensionFits
from upload.models import Thumbnails
__all__ = ["DecamFits", ]
row_layout = {
1: {"indices": ( (2, 0), (3, 0), (4, 0)),
"names": ( "S29", "S30", "S31"), "rtype": "even"},
2: {"indices": ( (1, 1), (2, 1), (3, 1), (4, 1)),
"names": ( "S25", "S26", "S27", "S28"), "rtype": "odd"},
3: {"indices": ( (1, 2), (2, 2), (3, 2), (4, 2), (5, 2)),
"names": ( "S20", "S21", "S22", "S23", "S24"), "rtype": "even"},
4: {"indices": ( (0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3)),
"names": ( "S14", "S15", "S16", "S17", "S18", "S19"), "rtype": "odd"},
5: {"indices": ( (0, 4), (1, 4), (2, 4), (3, 4), (4, 4), (5, 4)),
"names": ( "S8", "S9", "S10", "S11", "S12", "S13"), "rtype": "odd"},
6: {"indices": ( (0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (6, 5)),
"names": ( "S1", "S2", "S3", "S4", "S5", "S6", "S7"), "rtype":"even"},
7: {"indices": ( (0, 6), (1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6)),
"names": ( "N1", "N2", "N3", "N4", "N5", "N6", "N7"), "rtype":"even"},
8: {"indices": ( (0, 7), (1, 7), (2, 7), (3, 7), (4, 7), (5, 7)),
"names": ( "N8", "N9", "N10", "N11", "N12", "N13"),"rtype": "odd"},
9: {"indices": ( (0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8)),
"names": ( "N14", "N15", "N16", "N17", "N18", "N19"),"rtype": "odd"},
10: {"indices": ( (1, 9), (2, 9), (3, 9), (4, 9), (5, 9)),
"names": ( "N20", "N21", "N22", "N23", "N24"), "rtype": "even"},
11: {"indices": ((1, 10), (2, 10), (3, 10), (4, 10)),
"names": ( "N25", "N26", "N27", "N28"), "rtype": "odd"},
12: {"indices": ((2, 11), (3, 11), (4, 11)),
"names": ( "N29", "N30", "N31"), "rtype": "even"},
}
"""A row-based layour of the DECam focal plane science detectors."""
class Detector:
    """A single DECam science CCD detector.

    Attributes
    ----------
    scaling : `float`
        Detector size scaling factor. Detector's scaled size is the detector's
        physical size divided by the scaling factor.
    idx : `tuple`, optional
        Detector zero-based index (row, col), as counted from the center of a
        detector.
    label : `str`, optional
        Detector label, for example S1, S3 etc.
    xdim : `int`, optional
        Detector's physical width, in pixels. Defaults to 4096.
    ydim : `int`, optional
        Detector's physical height, in pixels. Defaults to 2048.
    """
    # Bug fix: this previously duplicated detector_index_map (name -> index),
    # so looking a tuple index up in it raised KeyError in __init__; it must
    # be the inverse mapping (index -> name).
    index_detector_map = {index: name for row in row_layout.values()
                          for name, index in zip(row["names"], row["indices"])}
    """Map between detector indices and detector labels."""

    detector_index_map = {name: index for row in row_layout.values()
                          for name, index in zip(row["names"], row["indices"])}
    """Map between detector labels and their positional indices."""

    detector_type_map = {name: row["rtype"] for row in row_layout.values()
                         for name in row["names"]}
    """Map between detector labels and their row type."""

    dimX = 4096
    """Default, assumed, width, in pixels, of a detector."""

    dimY = 2048
    """Default, assumed, height, in pixels, of a detector."""

    def __init__(self, scaling, idx=None, label=None, xdim=None, ydim=None):
        # Either the positional index or the label identifies the detector;
        # the other identifier and the row type are derived from it.
        if idx is not None:
            self.row, self.col = idx
            self.label = self.index_detector_map[idx]
            self.rowType = self.detector_type_map[self.label]
        elif label is not None:
            self.row, self.col = self.detector_index_map[label]
            self.label = label
            self.rowType = self.detector_type_map[label]
        self.scale = scaling
        self.setDimensions(xdim, ydim)

    def setDimensions(self, xdim=None, ydim=None):
        """Updates the detector dimensions using the default or provided values
        and recalculates relevant detector's scaled dimensions.

        Parameters
        ----------
        xdim : `int`
            New detector width.
        ydim : `int`
            New detector height.
        """
        self.xdim = xdim if xdim is not None else self.dimX
        self.ydim = ydim if ydim is not None else self.dimY
        self.scaledX = int(self.xdim/self.scale)
        self.scaledY = int(self.ydim/self.scale)
class DecamFocalPlane:
    """Represents the science only CCDs of the DECam focal plane.

    All CCDs are assumed to have the same size and the same scaling factor.

    Parameters
    ----------
    scaling : `float`
        Scaling factor for which the focal plane size in pixels will be reduced
        by in order to be displayed.
    detectorSize : `tuple`, optional
        Physical detector size in pixels as a tuple (width, height). Defaults
        to (4096, 2048) as set in `Detector`.
    ccdGap : `int`, optional
        Physical size of the gap between detectors, in pixels. Defaults to 208.
    rowOffset : `int`, optional
        Physical offset, in pixels, between 'even' and 'odd' rows.
    """
    detector_labels = [name for row in row_layout.values() for name in row["names"]]
    """A list of all detector labels."""

    nRows = 7
    """Number of detector rows in the focal plane."""

    nCols = 12
    """Number of columns in the focal plane."""

    ccd_gap = 208
    """Default, assumed, gap size in pixels, between two detectors."""

    row_offset = Detector.dimX/2
    """Default, assumed, offset between even and odd rows."""

    def __init__(self, scaling, detectorSize=None, ccdGap=None, rowOffset=None):
        self.scale = scaling
        self.gap = self.ccd_gap if ccdGap is None else ccdGap
        self.rowOffset = self.row_offset if rowOffset is None else rowOffset
        self.__initAssumedDetectorDimensions(detectorSize)
        self.detectors = {}
        for label in self.detector_labels:
            self.detectors[label] = Detector(scaling, label=label)
        # Lazily materialized in add_image; see that method's note.
        self.planeImage = None

    def __initAssumedDetectorDimensions(self, detectorSize=None):
        """In general there is no reason to assume all detectors have the same
        sizes, gaps or offsets. But for DECam they do and this lets us perform
        an easy and quick generation of in-focal-plane-image-array coordinate
        calculations.

        Unfortunately it also requires pre-calculating and storing a lot of
        not-very-clear quantities.
        """
        if detectorSize is None:
            xdim, ydim = Detector.dimX, Detector.dimY
        else:
            xdim, ydim = detectorSize
        self.xdim = xdim
        self.ydim = ydim
        self.scaledX = int(self.xdim/self.scale)
        self.scaledY = int(self.ydim/self.scale)
        self.scaledGap = int(self.gap/self.scale)
        self.scaledRowOffset = int(self.scaledX/2)
        # A "gapped" dimension is one detector plus one inter-detector gap.
        self.scaledGappedX = self.scaledX + self.scaledGap
        self.scaledGappedY = self.scaledY + self.scaledGap
        self.scaledGappedOffsetX = self.scaledGappedX*1.5 + self.scaledGap

    def _even_row_coords(self, i, j):
        # Even rows start flush with the edge; no horizontal offset.
        return (i*self.scaledGappedX), int(j*self.scaledGappedY)

    def _odd_row_coords(self, i, j):
        # Odd rows are shifted by half a (scaled) detector width.
        return (self.scaledRowOffset + i*self.scaledGappedX), j*self.scaledGappedY

    def get_coords(self, detectorLabel):
        """Get start and end coordinates of the scaled detector.

        Parameters
        ----------
        detectorLabel : `str`
            Label of the detector in the focal plane.

        Returns
        -------
        xCoordinates : `tuple`
            Tuple of start and end coordinates in the x axis.
        yCoordinates : `tuple`
            Tuple of start and end coordinates in the y axis.
        """
        detector = self.detectors[detectorLabel]
        if detector.rowType == "even":
            coords = self._even_row_coords(detector.row, detector.col)
        elif detector.rowType == "odd":
            coords = self._odd_row_coords(detector.row, detector.col)
        else:
            # Bug fix: the message was a plain string, so the offending row
            # type was never interpolated; it needs to be an f-string.
            raise ValueError("Unrecognized row type. Expected 'odd' or 'even' "
                             f"got {detector.rowType} instead.")
        return (coords[0], coords[0]+self.scaledX), (coords[1], coords[1]+self.scaledY)

    def get_slice(self, detectorLabel):
        """Get array slice that covers the area of the detector.

        Parameters
        ----------
        detectorLabel : `str`
            Label of the detector in the focal plane.

        Returns
        -------
        xSlice : `slice`
            An edge-to-edge slice of the detector, i.e. [start:end], in x axis.
        ySlice : `tuple`
            An edge-to-edge slice of the detector, i.e. [start:end], in y axis.
        """
        coords = self.get_coords(detectorLabel)
        return slice(*coords[0]), slice(*coords[1])

    def add_image(self, image, detectorLabel):
        """Will place the given image at the location of the given detector
        label.

        Parameters
        ----------
        image : `np.array`
            A 2D array representing the image that will be placed at the
            location of the detector
        detectorLabel : `str`
            The label of the detector.

        Note
        ----
        Depending on the scaling factor used, materializing the full focal
        plane can require a lot of memory. The plane image will not be
        materialized until the first image is placed in it.
        """
        if self.planeImage is None:
            self.planeImage = np.zeros((self.nRows*self.scaledGappedX,
                                       self.nCols*self.scaledGappedY),
                                      dtype=np.uint8)
        start, end = self.get_slice(detectorLabel)
        self.planeImage[start, end] = image
class DecamFits(MultiExtensionFits):
    """FITS processor for DECam Community Pipeline products.

    Community Pipeline files bundle the whole focal plane into one file; this
    class keeps only the science CCD extensions (skipping focus and guider
    chips, whose DETPOS contains 'F' or 'G') and renders the full focal plane
    as small and large thumbnails.
    """
    name = "DECamCommunityFits"
    priority = 2

    def __init__(self, uploadInfo, uploadedFile):
        super().__init__(uploadInfo, uploadedFile)
        # Override the default processed exts to filter only science images
        # from all image-like exts, ignoring focus and guider chips.
        self.exts = self._getScienceImages(self.exts)

    @classmethod
    def _getScienceImages(cls, hdulist):
        """Return only the HDUs whose DETPOS header marks a science CCD."""
        exts = []
        for hdu in hdulist:
            exttype = hdu.header.get("DETPOS", False)
            if exttype:
                if "G" not in exttype and "F" not in exttype:
                    exts.append(hdu)
        return exts

    @classmethod
    def canProcess(cls, uploadedFile, returnHdulist=False):
        """Check whether the uploaded file looks like a DECam CP product."""
        canProcess, hdulist = super().canProcess(uploadedFile, returnHdulist=True)
        # Data examples I have seen Community Pipelines exclusively utilize
        # the CompImageHDU headers and at any time in history there was at
        # most 1. Here, we bet that if we are near 62 science CCDs, ignoring
        # guider and focus, we are looking at a DECam CP product.
        exts = cls._getScienceImages(hdulist)
        canProcess = canProcess and 60 < len(exts) <= 62
        if returnHdulist:
            return canProcess, hdulist
        return canProcess

    @classmethod
    def normalizeImage(cls, image):
        """Clip to +-0.5 sigma around the mean, rescale to 8 bit and equalize.

        Returns a PIL Image. The astropy equalizer averages 4.3 seconds; this
        PIL approach brings that down to cca 0.13 (no normalization: 0.04s).
        """
        avg, std = image.mean(), image.std()
        image[image > (avg + 0.5*std)] = avg + 0.5*std
        image[image < (avg - 0.5*std)] = avg - 0.5*std
        image = image/image.max()
        image = (image*255).astype(np.uint8)
        image = Image.fromarray(image, "L")
        image = ImageOps.equalize(image, mask=None)
        return image

    def _createFocalPlaneImage(self, focalPlane):
        """Normalize, resize and place every science extension into focalPlane."""
        for ext in self.exts:
            # no matter how painful this is, if we don't, normalize will
            # mutate the science data in place....
            image = ext.data.copy()
            image = self.normalizeImage(image)
            # TODO: test here if the step-wise resizing is faster...
            # Bug fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is
            # its long-standing equivalent alias.
            image = image.resize((focalPlane.scaledY, focalPlane.scaledX),
                                 Image.LANCZOS)
            focalPlane.add_image(image, ext.header["DETPOS"])
        return focalPlane

    def createThumbnails(self, scaling=(4, 10)):
        """Render small and large focal-plane JPEGs and return a Thumbnails row.

        Parameters
        ----------
        scaling : `tuple`, optional
            (large, small) downscale factors for the two thumbnails.
        """
        xdim = self.exts[0].header["NAXIS2"]
        ydim = self.exts[0].header["NAXIS1"]
        largePlane = DecamFocalPlane(scaling[0], (xdim, ydim))
        smallPlane = DecamFocalPlane(scaling[1], (xdim, ydim))
        # TODO: a note to fix os.path dependency when transitioning to S3
        # and fix saving method from plt to boto3
        smallPath = os.path.join(self.media_root, self.uploadedFile.basename+'_plane_small.jpg')
        largePath = os.path.join(self.media_root, self.uploadedFile.basename+'_plane_large.jpg')
        smallThumb = self._createFocalPlaneImage(smallPlane)
        self._storeThumbnail(smallThumb.planeImage.T, savepath=smallPath)
        # due to potential size of these images immediately release memory
        del smallThumb
        largeThumb = self._createFocalPlaneImage(largePlane)
        self._storeThumbnail(largeThumb.planeImage.T, savepath=largePath)
        del largeThumb
        return Thumbnails(large=largePath, small=smallPath)
| [
"PIL.Image.fromarray",
"upload.models.Thumbnails",
"os.path.join",
"numpy.zeros",
"PIL.ImageOps.equalize"
] | [((12304, 12331), 'PIL.Image.fromarray', 'Image.fromarray', (['image', '"""L"""'], {}), "(image, 'L')\n", (12319, 12331), False, 'from PIL import Image, ImageOps\n'), ((12348, 12383), 'PIL.ImageOps.equalize', 'ImageOps.equalize', (['image'], {'mask': 'None'}), '(image, mask=None)\n', (12365, 12383), False, 'from PIL import Image, ImageOps\n'), ((13390, 13468), 'os.path.join', 'os.path.join', (['self.media_root', "(self.uploadedFile.basename + '_plane_small.jpg')"], {}), "(self.media_root, self.uploadedFile.basename + '_plane_small.jpg')\n", (13402, 13468), False, 'import os\n'), ((13487, 13565), 'os.path.join', 'os.path.join', (['self.media_root', "(self.uploadedFile.basename + '_plane_large.jpg')"], {}), "(self.media_root, self.uploadedFile.basename + '_plane_large.jpg')\n", (13499, 13565), False, 'import os\n'), ((13973, 14017), 'upload.models.Thumbnails', 'Thumbnails', ([], {'large': 'largePath', 'small': 'smallPath'}), '(large=largePath, small=smallPath)\n', (13983, 14017), False, 'from upload.models import Thumbnails\n'), ((10137, 10233), 'numpy.zeros', 'np.zeros', (['(self.nRows * self.scaledGappedX, self.nCols * self.scaledGappedY)'], {'dtype': 'np.uint8'}), '((self.nRows * self.scaledGappedX, self.nCols * self.scaledGappedY),\n dtype=np.uint8)\n', (10145, 10233), True, 'import numpy as np\n')] |
"""
Class for estimating the model distribution of an RBM with contrastive divergence algorithm.
"""
import numpy as np
from sampling.model import Model
from sampling.utils import sample, sigmoid
class ModelCD(Model):
    """Estimates the model distribution of an RBM via contrastive divergence."""

    def __init__(self, cd_iter, seed=None):
        super(ModelCD, self).__init__("model_cd")
        self.cd_iter = cd_iter
        self.generator = np.random.default_rng(seed)
        self.dataset = None

    def set_dataset(self, dataset):
        """Set the base dataset providing the initial visible states for CD."""
        self.dataset = dataset

    def estimate_model(self):
        """Run cd_iter Gibbs steps and return the final [visible, hidden] states.

        The last hidden update uses exact sigmoid activations instead of
        sampled units.
        """
        if self.dataset is None:
            raise Exception("Dataset not set for the contrastive divergence!")
        vis_state = np.copy(self.dataset)
        hid_state = self.activate_hidden(vis_state)
        for step in range(self.cd_iter):
            vis_state = self.activate_visible(hid_state, True)
            is_last_step = step + 1 == self.cd_iter
            hid_state = self.activate_hidden(vis_state, is_last_step)
        return [vis_state, hid_state]

    def activate_hidden(self, values, exact=False):
        """Hidden-unit sigmoid activations for the given visible values;
        sampled unless exact is True."""
        activations = sigmoid(np.dot(values, self.weights) + self.hidden)
        return activations if exact else sample(activations)

    def activate_visible(self, values, exact=False):
        """Visible-unit sigmoid activations for the given hidden values;
        sampled unless exact is True."""
        activations = sigmoid(np.dot(values, self.weights.transpose()) + self.visible)
        return activations if exact else sample(activations)
| [
"numpy.copy",
"numpy.dot",
"numpy.random.default_rng"
] | [((413, 440), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (434, 440), True, 'import numpy as np\n'), ((878, 899), 'numpy.copy', 'np.copy', (['self.dataset'], {}), '(self.dataset)\n', (885, 899), True, 'import numpy as np\n'), ((1455, 1483), 'numpy.dot', 'np.dot', (['values', 'self.weights'], {}), '(values, self.weights)\n', (1461, 1483), True, 'import numpy as np\n'), ((1547, 1575), 'numpy.dot', 'np.dot', (['values', 'self.weights'], {}), '(values, self.weights)\n', (1553, 1575), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
from time import time
import sys
def find_primes(maxN):
    """Return every prime number <= maxN as an int64 numpy array.

    Uses a simple sieve of Eratosthenes: all integers greater than 1 start
    marked prime, then multiples of each n up to sqrt(maxN) are struck out.
    """
    x1 = np.arange(maxN + 1, dtype=np.int64)
    # Bug fix: np.bool was deprecated and removed from NumPy; the builtin
    # bool (equivalently np.bool_) is the correct dtype.
    b1 = np.zeros(np.shape(x1), dtype=bool)
    b1[x1 > 1] = True
    maxN2 = np.int64(maxN**(0.5) + 1)
    for n in range(2, maxN2 + 1):
        # Striking multiples of composites too is redundant but harmless;
        # the slice starts at 2n so n itself stays marked.
        b1[2*n::n] = False
    return x1[b1]
def prime_factors(N):
    """Factor N into primes, returning (primes, exponents) arrays.

    As a side effect prints the factorization when it has fewer than ten
    distinct primes, or a message when N itself is prime.
    """
    candidates = find_primes(N//2 + 1)
    exponents = np.zeros(np.shape(candidates), dtype=int)
    highest_power = int(np.log(N)/np.log(2))
    for power in range(1, highest_power + 1):
        # Raise every exponent whose prime^power still divides N.
        exponents[np.mod(N, candidates**power) == 0] = power
    pN = candidates[exponents > 0]
    pE = exponents[exponents > 0]
    factor_count = np.size(pN)
    if 0 < factor_count < 10:
        disp_pf(N, pN, pE)
    elif factor_count == 0:
        print('{N} is a prime number!'.format(N=N))
    return pN, pE
def find_lcm(num_array):
    """Least common multiple of the integers in num_array.

    For every prime up to max(num_array), takes the maximum exponent that
    appears in any member's factorization; the LCM is the product of those
    prime powers. Prints the factorization (via disp_pf) when it has fewer
    than ten distinct primes.

    Returns
    -------
    (lcm, primes, exponents)
    """
    Nmax = max(num_array)
    pNums = find_primes(Nmax + 1)
    pExps = np.zeros(np.shape(pNums), dtype=int)
    for N in num_array:
        pExps2 = np.zeros(np.shape(pNums), dtype=int)
        if N in pNums:
            pExps2[pNums == N] = 1
        else:
            max_exp = int(np.log(N)/np.log(2))
            for n in range(1, max_exp + 1):
                # Record the largest power of each prime dividing N.
                pExps2[np.mod(N, pNums**n) == 0] = n
        pExps = np.maximum(pExps, pExps2)
    pN = pNums[pExps > 0]
    pE = pExps[pExps > 0]
    # Bug fix: np.product was removed in NumPy 2.0; np.prod is the
    # supported spelling.
    outN = np.prod(pN**pE)
    if 0 < np.size(pN) < 10:
        disp_pf(outN, pN, pE)
    return outN, pN, pE
def disp_pf(N, pNums, pExps):
    """Print N as a product of prime powers, e.g. '12 = 2^2 * 3'."""
    terms = []
    for prime, exponent in zip(pNums, pExps):
        if exponent > 1:
            terms.append('{n:,d}^{e}'.format(n=prime, e=exponent))
        else:
            terms.append('{n:,d}'.format(n=prime))
    print('\n{N:,d} = '.format(N=N) + ' * '.join(terms))
def test_fun1(upper_limit):
    """Benchmark find_primes up to upper_limit, print stats, return the primes."""
    started = time()
    prime_array = find_primes(upper_limit)
    elapsed = time() - started
    Nprime = np.size(prime_array)
    print('\nFound {0:,d} prime numbers in {1:.4e} sec'.format(Nprime, elapsed))
    print('\nOr, ~{0:,d} prime numbers per second'.format(int(Nprime/elapsed)))
    print('\nPython version: {0}'.format(sys.version))
    return prime_array
if __name__ == "__main__":
    # Benchmark the sieve over the first 10^8 integers when run as a script.
    out1 = test_fun1(np.int64(1e+8))
    # num_array = list(range(2, 11))
    # lcm, pN, pE = find_lcm(num_array)
| [
"numpy.product",
"numpy.int64",
"numpy.size",
"numpy.log",
"numpy.mod",
"numpy.maximum",
"numpy.shape",
"time.time",
"numpy.arange"
] | [((107, 142), 'numpy.arange', 'np.arange', (['(maxN + 1)'], {'dtype': 'np.int64'}), '(maxN + 1, dtype=np.int64)\n', (116, 142), True, 'import numpy as np\n'), ((227, 252), 'numpy.int64', 'np.int64', (['(maxN ** 0.5 + 1)'], {}), '(maxN ** 0.5 + 1)\n', (235, 252), True, 'import numpy as np\n'), ((1359, 1379), 'numpy.product', 'np.product', (['(pN ** pE)'], {}), '(pN ** pE)\n', (1369, 1379), True, 'import numpy as np\n'), ((1849, 1855), 'time.time', 'time', ([], {}), '()\n', (1853, 1855), False, 'from time import time\n'), ((1910, 1916), 'time.time', 'time', ([], {}), '()\n', (1914, 1916), False, 'from time import time\n'), ((1931, 1951), 'numpy.size', 'np.size', (['prime_array'], {}), '(prime_array)\n', (1938, 1951), True, 'import numpy as np\n'), ((162, 174), 'numpy.shape', 'np.shape', (['x1'], {}), '(x1)\n', (170, 174), True, 'import numpy as np\n'), ((419, 434), 'numpy.shape', 'np.shape', (['pNums'], {}), '(pNums)\n', (427, 434), True, 'import numpy as np\n'), ((635, 646), 'numpy.size', 'np.size', (['pN'], {}), '(pN)\n', (642, 646), True, 'import numpy as np\n'), ((920, 935), 'numpy.shape', 'np.shape', (['pNums'], {}), '(pNums)\n', (928, 935), True, 'import numpy as np\n'), ((1267, 1292), 'numpy.maximum', 'np.maximum', (['pExps', 'pExps2'], {}), '(pExps, pExps2)\n', (1277, 1292), True, 'import numpy as np\n'), ((1390, 1401), 'numpy.size', 'np.size', (['pN'], {}), '(pN)\n', (1397, 1401), True, 'import numpy as np\n'), ((2251, 2272), 'numpy.int64', 'np.int64', (['(100000000.0)'], {}), '(100000000.0)\n', (2259, 2272), True, 'import numpy as np\n'), ((466, 475), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (472, 475), True, 'import numpy as np\n'), ((476, 485), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (482, 485), True, 'import numpy as np\n'), ((691, 702), 'numpy.size', 'np.size', (['pN'], {}), '(pN)\n', (698, 702), True, 'import numpy as np\n'), ((1000, 1015), 'numpy.shape', 'np.shape', (['pNums'], {}), '(pNums)\n', (1008, 1015), True, 'import numpy as 
np\n'), ((539, 560), 'numpy.mod', 'np.mod', (['N', '(pNums ** n)'], {}), '(N, pNums ** n)\n', (545, 560), True, 'import numpy as np\n'), ((1130, 1139), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (1136, 1139), True, 'import numpy as np\n'), ((1140, 1149), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1146, 1149), True, 'import numpy as np\n'), ((1220, 1241), 'numpy.mod', 'np.mod', (['N', '(pNums ** n)'], {}), '(N, pNums ** n)\n', (1226, 1241), True, 'import numpy as np\n')] |
import numpy as np
from pygesture.pipeline import PipelineBlock
class FeatureExtractor(PipelineBlock):
    """Pipeline block that concatenates a set of per-channel features.

    Parameters
    ----------
    features : list of Feature
        Feature objects, each exposing ``dim_per_channel`` and
        ``compute(data)``.
    n_channels : int
        Number of signal channels in the input data.
    """

    def __init__(self, features, n_channels):
        super(FeatureExtractor, self).__init__()
        self.features = features
        self.n_channels = n_channels
        # total output dimensionality: each feature contributes
        # dim_per_channel values for every channel
        self.n_features = n_channels * sum(
            f.dim_per_channel for f in self.features)
        # BUG FIX: buffer was allocated as n_channels * n_features, i.e.
        # n_channels**2 * sum(dims); the concatenated output of process()
        # has exactly n_features elements.
        self.output = np.zeros(self.n_features)

    def process(self, data):
        """Compute every feature on `data` and concatenate the results."""
        # TODO use the pre-allocated self.output array instead of hstack
        return np.hstack([f.compute(data) for f in self.features])

    def __repr__(self):
        return "%s.%s(%s)" % (
            self.__class__.__module__,
            self.__class__.__name__,
            str([str(f) for f in self.features])
        )
class Feature(object):
    """Base class for signal features; subclasses implement ``compute``."""

    def __repr__(self):
        cls = self.__class__
        return "{}.{}()".format(cls.__module__, cls.__name__)
class MAV(Feature):
    """Mean absolute value (MAV) of a signal, one value per channel."""

    def __init__(self):
        self.dim_per_channel = 1

    def compute(self, x):
        """Average the rectified signal along the time axis (axis 0)."""
        rectified = np.absolute(x)
        return np.mean(rectified, axis=0)
class WL(Feature):
    """
    Waveform length of a signal: the sum of the absolute differences between
    adjacent samples, one value per channel.
    """

    def __init__(self):
        self.dim_per_channel = 1

    def compute(self, x):
        """Sum the absolute first differences along the time axis (axis 0)."""
        deltas = np.diff(x, axis=0)
        return np.absolute(deltas).sum(axis=0)
class ZC(Feature):
    """
    Calculates the number of zero crossings in a signal, subject to a threshold
    for discarding noisy fluctuations above and below zero.

    Parameters
    ----------
    thresh : float (default=0.0)
        The threshold for discriminating true zero crossings from those caused
        by noise.
    use_sm : bool (default=False)
        Specifies if spectral moments should be used for the computation. This
        is much faster, but the threshold is not taken into account, making it
        potentially affected by noise.
    """

    def __init__(self, thresh=0.0, use_sm=False):
        self.dim_per_channel = 1
        self.thresh = thresh
        self.use_sm = use_sm

    def compute(self, x):
        """Count threshold-qualified zero crossings per channel (column)."""
        if self.use_sm:
            # spectral-moment approximation (ignores self.thresh)
            y = np.sqrt(
                SpectralMoment(2).compute(x) / SpectralMoment(0).compute(x))
        else:
            # Vectorized rewrite of the original O(rows*cols) Python loop:
            # a crossing is a strict sign change between consecutive samples
            # whose amplitude jump strictly exceeds the threshold.
            prev, cur = x[:-1], x[1:]
            crossed = ((cur > 0) & (prev < 0)) | ((cur < 0) & (prev > 0))
            big_enough = np.absolute(cur - prev) > self.thresh
            y = np.sum(crossed & big_enough, axis=0).astype(float)
        return y
class SSC(Feature):
    """
    Calculates the number of slope sign changes in a signal, subject to a
    threshold for discarding noisy fluctuations.

    Parameters
    ----------
    thresh : float (default=0.0)
        The threshold for discriminating true slope sign changes from those
        caused by noise.
    use_sm : bool (default=False)
        Specifies if spectral moments should be used for the computation. This
        is much faster, but the threshold is not taken into account, making it
        potentially affected by noise.
    """

    def __init__(self, thresh=0.0, use_sm=False):
        self.dim_per_channel = 1
        self.thresh = thresh
        self.use_sm = use_sm

    def compute(self, x):
        """Count threshold-qualified slope sign changes per channel (column)."""
        if self.use_sm:
            # spectral-moment approximation (ignores self.thresh)
            y = np.sqrt(
                SpectralMoment(4).compute(x) / SpectralMoment(2).compute(x))
        else:
            # Vectorized rewrite of the original O(rows*cols) Python loop:
            # an interior sample counts if it is a strict local extremum and
            # differs from at least one neighbour by more than the threshold.
            prev, cur, nxt = x[:-2], x[1:-1], x[2:]
            extremum = ((cur > prev) & (cur > nxt)) | \
                       ((cur < prev) & (cur < nxt))
            big_enough = (np.absolute(cur - prev) > self.thresh) | \
                         (np.absolute(cur - nxt) > self.thresh)
            y = np.sum(extremum & big_enough, axis=0).astype(float)
        return y
class SpectralMoment(Feature):
    """
    Calculates the nth-order spectral moment in the time domain.

    Parameters
    ----------
    n : int
        The spectral moment order. Should be even and greater than or equal to
        zero; odd orders yield a zero vector.
    """

    def __init__(self, n):
        self.dim_per_channel = 1
        self.n = n

    def compute(self, x):
        """Return one moment value per channel (column) of `x`."""
        _, n_cols = x.shape
        # odd orders are unsupported -> all zeros
        if self.n % 2 != 0:
            return np.zeros(n_cols)
        # special case: the zeroth-order moment is just the signal power
        if self.n == 0:
            return np.sum(np.multiply(x, x), axis=0)
        # higher even orders: power of the (n/2)-th derivative
        return SpectralMoment(0).compute(np.diff(x, int(self.n / 2), axis=0))
class KhushabaSet(Feature):
    """
    Calculates a set of 5 features introduced by Khushaba et al. at ISCIT 2012
    (see reference [1]):

    1. log of the 0th order spectral moment
    2. log of normalized 2nd order spectral moment
    3. log of normalized 4th order spectral moment
    4. log of the sparseness (see paper)
    5. log of the irregularity factor / waveform length (see paper)

    NOTE(review): the original docstring claims ``u`` controls the
    normalization exponents (m2/m0^u, m4/m0^(u+2)), but the computation
    hard-codes exponents 2 and 4 and never reads ``self.u`` — confirm
    against the paper before relying on ``u``.

    Parameters
    ----------
    u : int (default=0)
        Stored but currently unused (see note above).

    References
    ----------
    .. [1] Khushaba et al., "Time-dependent spectral features for limb
       position invariant myoelectric pattern recognition," ISCIT 2012.
    """

    def __init__(self, u=0):
        self.dim_per_channel = 5
        self.u = u

    def compute(self, x):
        """Return the 5 features for every channel, concatenated."""
        m0 = SpectralMoment(0).compute(x)
        m2 = SpectralMoment(2).compute(x)
        m4 = SpectralMoment(4).compute(x)
        sparseness = m0 / np.sqrt(np.abs((m0 - m2) * (m0 - m4)))
        irregularity = np.sqrt(m2 ** 2 / (m0 * m4))
        # TODO fill a pre-allocated buffer instead of using hstack
        feats = (
            np.log(m0),
            np.log(m2 / m0 ** 2),
            np.log(m4 / m0 ** 4),
            np.log(sparseness),
            np.log(irregularity / WL().compute(x)),
        )
        return np.hstack(feats)
class SampEn(Feature):
    """
    Calculates the sample entropy of time series data. See reference [1].

    The basic idea is to take all possible m-length subsequences of the
    time series and count the number of these subsequences whose Chebyshev
    distance from all other subsequences is less than the tolerance parameter,
    r (self-matches excluded). This is repeated for (m+1)-length subsequences,
    and SampEn is given by the log of the number of m-length matches divided
    by the number of (m+1)-length matches.

    This feature can have some issues if the tolerance r is too low and/or the
    subsequence length m is too high. A typical value for r is apparently
    0.2*std(x).

    Parameters
    ----------
    m : int
        Length of sequences to compare (>1)
    r : float
        Tolerance for counting matches.

    References
    ----------
    .. [1] Richman and Moorman, "Physiological time series analysis using
       approximate entropy and sample entropy," Am J Physiol Heart Circ
       Physiol, vol. 278, no. 6, 2000.
    """

    def __init__(self, m, r):
        self.dim_per_channel = 1
        self.m = m
        self.r = r

    def compute(self, x):
        """Return the sample entropy of each channel (column) of `x`."""
        xrows, xcols = x.shape
        y = np.zeros(xcols)
        m = self.m
        N = xrows
        for c in range(xcols):
            # eps guards against log(0) / division by zero when no matches
            # occur.  BUG FIX: np.finfo(np.float) -- the np.float alias was
            # removed in NumPy 1.24 and raised AttributeError.
            correl = np.zeros(2) + np.finfo(np.float64).eps
            # rows of xmat are the candidate subsequences (as columns)
            xmat = np.zeros((m + 1, N - m + 1))
            for i in range(m):
                xmat[i, :] = x[i:N - m + i + 1, c]
            # handle last row separately
            xmat[m, :-1] = x[m:N, c]
            xmat[-1, -1] = 10 * np.max(xmat)  # something that won't get matched
            for mc in [m, m + 1]:
                count = 0
                for i in range(N - mc - 1):
                    # Chebyshev distance to all later subsequences
                    dist = np.max(
                        np.abs(xmat[:mc, i + 1:] - xmat[:mc, i][:, np.newaxis]),
                        axis=0)
                    count += np.sum(dist <= self.r)
                correl[mc - m] = count
            y[c] = np.log(correl[0] / correl[1])
        return y
| [
"numpy.abs",
"numpy.multiply",
"numpy.sqrt",
"numpy.absolute",
"numpy.log",
"numpy.diff",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.finfo"
] | [((393, 436), 'numpy.zeros', 'np.zeros', (['(self.n_channels * self.n_features)'], {}), '(self.n_channels * self.n_features)\n', (401, 436), True, 'import numpy as np\n'), ((4575, 4590), 'numpy.zeros', 'np.zeros', (['xcols'], {}), '(xcols)\n', (4583, 4590), True, 'import numpy as np\n'), ((6191, 6219), 'numpy.sqrt', 'np.sqrt', (['(m2 ** 2 / (m0 * m4))'], {}), '(m2 ** 2 / (m0 * m4))\n', (6198, 6219), True, 'import numpy as np\n'), ((7682, 7697), 'numpy.zeros', 'np.zeros', (['xcols'], {}), '(xcols)\n', (7690, 7697), True, 'import numpy as np\n'), ((1146, 1160), 'numpy.absolute', 'np.absolute', (['x'], {}), '(x)\n', (1157, 1160), True, 'import numpy as np\n'), ((2477, 2492), 'numpy.zeros', 'np.zeros', (['xcols'], {}), '(xcols)\n', (2485, 2492), True, 'import numpy as np\n'), ((3740, 3755), 'numpy.zeros', 'np.zeros', (['xcols'], {}), '(xcols)\n', (3748, 3755), True, 'import numpy as np\n'), ((7845, 7873), 'numpy.zeros', 'np.zeros', (['(m + 1, N - m + 1)'], {}), '((m + 1, N - m + 1))\n', (7853, 7873), True, 'import numpy as np\n'), ((8457, 8486), 'numpy.log', 'np.log', (['(correl[0] / correl[1])'], {}), '(correl[0] / correl[1])\n', (8463, 8486), True, 'import numpy as np\n'), ((1501, 1519), 'numpy.diff', 'np.diff', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1508, 1519), True, 'import numpy as np\n'), ((4751, 4768), 'numpy.multiply', 'np.multiply', (['x', 'x'], {}), '(x, x)\n', (4762, 4768), True, 'import numpy as np\n'), ((6153, 6182), 'numpy.abs', 'np.abs', (['((m0 - m2) * (m0 - m4))'], {}), '((m0 - m2) * (m0 - m4))\n', (6159, 6182), True, 'import numpy as np\n'), ((6256, 6266), 'numpy.log', 'np.log', (['m0'], {}), '(m0)\n', (6262, 6266), True, 'import numpy as np\n'), ((6280, 6300), 'numpy.log', 'np.log', (['(m2 / m0 ** 2)'], {}), '(m2 / m0 ** 2)\n', (6286, 6300), True, 'import numpy as np\n'), ((6312, 6332), 'numpy.log', 'np.log', (['(m4 / m0 ** 4)'], {}), '(m4 / m0 ** 4)\n', (6318, 6332), True, 'import numpy as np\n'), ((6344, 6353), 'numpy.log', 'np.log', 
(['S'], {}), '(S)\n', (6350, 6353), True, 'import numpy as np\n'), ((7788, 7799), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (7796, 7799), True, 'import numpy as np\n'), ((8052, 8064), 'numpy.max', 'np.max', (['xmat'], {}), '(xmat)\n', (8058, 8064), True, 'import numpy as np\n'), ((7802, 7820), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (7810, 7820), True, 'import numpy as np\n'), ((8376, 8398), 'numpy.sum', 'np.sum', (['(dist <= self.r)'], {}), '(dist <= self.r)\n', (8382, 8398), True, 'import numpy as np\n'), ((8259, 8314), 'numpy.abs', 'np.abs', (['(xmat[:mc, i + 1:] - xmat[:mc, i][:, np.newaxis])'], {}), '(xmat[:mc, i + 1:] - xmat[:mc, i][:, np.newaxis])\n', (8265, 8314), True, 'import numpy as np\n'), ((2718, 2752), 'numpy.absolute', 'np.absolute', (['(x[j, i] - x[j - 1, i])'], {}), '(x[j, i] - x[j - 1, i])\n', (2729, 2752), True, 'import numpy as np\n'), ((4012, 4046), 'numpy.absolute', 'np.absolute', (['(x[j, i] - x[j - 1, i])'], {}), '(x[j, i] - x[j - 1, i])\n', (4023, 4046), True, 'import numpy as np\n'), ((4092, 4126), 'numpy.absolute', 'np.absolute', (['(x[j, i] - x[j + 1, i])'], {}), '(x[j, i] - x[j + 1, i])\n', (4103, 4126), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.fft
import numpy as np
class FourierLowK(nn.Module):
    """Maps a small vector of low-frequency Fourier coefficients to a real
    field of shape ``realshape`` via an inverse real FFT.

    Only coefficients with |k|^2 <= k_cut_dimless^2 (and that are free under
    the Hermitian symmetry of a real signal's spectrum) are fed in; the rest
    are reconstructed by ``hermitian_symmetric``.
    """

    def __init__(self, realshape, k_cut_dimless=4):
        # realshape: spatial shape of the real-valued output field (2D or 3D);
        # all dimensions must be even so the half-spectrum midpoints are exact.
        super().__init__()
        assert all([d%2==0 for d in realshape]),"Only Even dimensions allowed"
        self.realshape=realshape
        # normalization factor applied to the input coefficients in forward()
        self.scale=np.prod(realshape)
        # shape of the half-spectrum used by rfftn: last axis is halved
        shape=list(realshape)
        shape[-1]=shape[-1]//2+1
        self.shape=tuple(shape)
        self.n_d=len(shape)
        # Nyquist/midpoint index along each axis of the half-spectrum
        mids=np.array(shape)//2
        mids[-1]=shape[-1]-1
        self.mids=tuple(mids)
        self.init_feed_ind(k_cut_dimless)

    def hermitian_symmetric(self, f_k_half):
        """Fill the redundant half of ``f_k_half`` (in place) so the spectrum
        satisfies the Hermitian symmetry of a real signal: F(-k) = conj(F(k)).

        Each assignment mirrors one line/plane of the half-spectrum onto its
        conjugate counterpart; only n_d == 2 or 3 is supported.
        """
        if self.n_d==2:
            f_k_half[self.mids[0]+1:,0]=torch.flip(f_k_half[1:self.mids[0],0],dims=(0,)).conj()
            f_k_half[self.mids[0]+1:,self.mids[1]]=torch.flip(f_k_half[1:self.mids[0],self.mids[1]],dims=(0,)).conj()
        elif self.n_d==3:
            # edges along axis 0 (k1, k2 fixed at 0 or Nyquist)
            f_k_half[self.mids[0]+1:,0,0]=torch.flip(f_k_half[1:self.mids[0],0,0],dims=(0,)).conj().clone()
            f_k_half[self.mids[0]+1:,0,self.mids[2]]=torch.flip(f_k_half[1:self.mids[0],0,self.mids[2]],dims=(0,)).conj().clone()
            f_k_half[self.mids[0]+1:,self.mids[1],0]=torch.flip(f_k_half[1:self.mids[0],self.mids[1],0],dims=(0,)).conj().clone()
            f_k_half[self.mids[0]+1:,self.mids[1],self.mids[2]]=torch.flip(f_k_half[1:self.mids[0],self.mids[1],self.mids[2]],dims=(0,)).conj().clone()
            # edges along axis 1 (k0 fixed at 0 or Nyquist)
            f_k_half[0,self.mids[1]+1:,0]=torch.flip(f_k_half[0,1:self.mids[1],0],dims=(0,)).conj().clone()
            f_k_half[0,self.mids[1]+1:,self.mids[2]]=torch.flip(f_k_half[0,1:self.mids[1],self.mids[2]],dims=(0,)).conj().clone()
            f_k_half[self.mids[0],self.mids[1]+1:,0]=torch.flip(f_k_half[self.mids[0],1:self.mids[1],0],dims=(0,)).conj().clone()
            f_k_half[self.mids[0],self.mids[1]+1:,self.mids[2]]=torch.flip(f_k_half[self.mids[0],1:self.mids[1],self.mids[2]],dims=(0,)).conj().clone()
            # interior planes: mirror across both axes 0 and 1
            f_k_half[self.mids[0]+1:,self.mids[1]+1:,0]=torch.flip(f_k_half[1:self.mids[0],1:self.mids[1],0],dims=(0,1)).conj().clone()
            f_k_half[self.mids[0]+1:,1:self.mids[1]:,0]=torch.flip(f_k_half[1:self.mids[0],self.mids[1]+1:,0],dims=(0,1)).conj().clone()
            f_k_half[self.mids[0]+1:,self.mids[1]+1:,self.mids[2]]=torch.flip(f_k_half[1:self.mids[0],1:self.mids[1],self.mids[2]],dims=(0,1)).conj().clone()
            f_k_half[self.mids[0]+1:,1:self.mids[1]:,self.mids[2]]=torch.flip(f_k_half[1:self.mids[0],self.mids[1]+1:,self.mids[2]],dims=(0,1)).conj().clone()
        else:
            assert False, "n_d=2 or 3"
        return f_k_half

    def init_feed_ind(self, k_cut_dimless):
        """Build ``self.feed_ind``, a boolean mask over (real, imag) x shape
        that selects the independent low-frequency coefficients fed into
        forward(); ``self.dim`` is the number of selected entries.
        """
        ks=[np.fft.fftfreq(d,1/d) for d in self.shape[:-1]]# discrete Fourier transform sample frequencies
        ks.append(np.arange(self.shape[-1]))
        self.kgrid=np.array(np.meshgrid(*ks,indexing="ij"))# grids of the fourier space
        self.ksqabs=np.sum(self.kgrid**2,axis=0)
        arrs=[torch.arange(d) for d in self.shape]
        unique_holders=torch.stack(torch.meshgrid(*arrs,indexing="ij"))# grids of the image
        # entries unchanged by the symmetrization are the independent ones
        holders_hs=torch.stack([self.hermitian_symmetric(unique_holder.clone()) for unique_holder in unique_holders])
        same=torch.all(holders_hs==unique_holders,dim=0)
        # keep only independent entries inside the low-frequency cutoff
        rem=same*(self.ksqabs<=k_cut_dimless**2)
        feed_ind=torch.stack([rem,rem])
        # drop the imaginary part of the self-conjugate (necessarily real)
        # corner/midpoint coefficients
        if self.n_d==2:
            feed_ind[1,0,0]=0
            feed_ind[1,self.mids[0],0]=0
            feed_ind[1,0,self.mids[1]]=0
            feed_ind[1,self.mids[0],self.mids[1]]=0
        elif self.n_d==3:
            feed_ind[1, 0, 0, 0]=0
            feed_ind[1, 0, 0,self.mids[2]]=0
            feed_ind[1, 0,self.mids[1], 0]=0
            feed_ind[1, 0,self.mids[1],self.mids[2]]=0
            feed_ind[1,self.mids[0], 0, 0]=0
            feed_ind[1,self.mids[0], 0,self.mids[2]]=0
            feed_ind[1,self.mids[0],self.mids[1], 0]=0
            feed_ind[1,self.mids[0],self.mids[1],self.mids[2]]=0
        else:
            assert False, "n_d=2 or 3"
        self.feed_ind=feed_ind.to(dtype=torch.bool)
        self.feed_ind.requires_grad=False
        self.dim=torch.sum(feed_ind).item()# converts the sum of all elements into a python scalar

    def forward(self, x):
        """Scatter the (batch, channels, self.dim) coefficient vector ``x``
        into a Hermitian-symmetric half-spectrum and inverse-FFT it to a real
        field of shape (batch, channels, *realshape).
        """
        x=x*self.scale
        b,c=x.size(0),x.size(1)
        x=x.reshape(b*c,-1)
        # arr holds (real, imag) planes; arrcomps the assembled complex spectrum
        arr=torch.zeros((x.size(0),2,*self.shape),dtype=torch.float32,device=x.device)
        arrcomps=torch.zeros((x.size(0),*self.shape),dtype=torch.complex64,device=x.device)
        arr[:,self.feed_ind]=x
        arrcomps.real+=arr[:,0]
        arrcomps.imag+=arr[:,1]
        outs=[]
        for arrcomp in arrcomps:
            outs.append(self.hermitian_symmetric(arrcomp.clone())[None])
        x=torch.cat(outs,dim=0)
        s=list(x.size())
        self.f_k=x.reshape(b,c,*s[1:])
        return torch.fft.irfftn(self.f_k,s=self.realshape)# computes n dimensional inverse discrete fourier transform of the real input - imaginary components ignored

    def to(self, **kwargs):
        # NOTE(review): this overrides nn.Module.to without calling super()
        # and without returning self, so `model = model.to(...)` chains
        # would yield None -- confirm callers never rely on the return value.
        self.feed_ind=self.feed_ind.to(**kwargs)
def get_deformation_lowk(ptfrom,ptto,sh,k_cut_dimless=2.5,lr=0.1,iterations=200,frac=0.5,lambda_div=1,scale=(1,1,1),at_least=8,device="cpu",verbose=False,return_losses=False,**kwargs):
    """Fit a smooth (low-frequency Fourier) displacement field that maps
    points ``ptfrom`` toward ``ptto`` by gradient descent.

    Minimizes the L1 error of the field sampled at the source points plus a
    divergence penalty weighted by ``lambda_div``.  Returns
    ``(frac * field, "success")`` (plus the loss history when
    ``return_losses``), or ``(None, "Not enough points")`` if fewer than
    ``at_least`` correspondences are NaN-free.
    """
    vecs=(ptto-ptfrom)
    # keep only correspondences with no NaN coordinate
    valids=np.nonzero(np.all(np.isnan(vecs)==0,axis=1))[0]
    if len(valids)<at_least:
        return None,"Not enough points"
    vecs=vecs[valids][:,:]
    locs=ptfrom[valids][:,:]
    W,H,D=sh
    f=FourierLowK((W,H,D),k_cut_dimless=k_cut_dimless)
    # normalize voxel coordinates to grid_sample's [-1, 1] range
    locs_gridded=2*(torch.tensor(locs)[:,None,None,None,:]/(np.array([W,H,D])[None,None,None,None,:]-1))-1# MB: I think with None we add a unit axis
    locs_gridded=locs_gridded[...,[2,1,0]].to(device=device,dtype=torch.float32)
    vecs_target=torch.tensor(vecs).to(device=device,dtype=torch.float32)
    x=torch.zeros(1,3,f.dim)
    # initialize the displacement field as the mean displacement
    x[0,:,0]=torch.tensor(np.mean(vecs,axis=0))
    x=x.to(device=device)
    x=torch.nn.Parameter(x)
    opt=torch.optim.Adam([x],lr=lr)
    if return_losses:
        losses=[]
    for iters in range(iterations):
        deformation=f(x)
        #deformation=torch.mean(deformation,dim=4,keepdim=True).repeat(1,1,1,1,deformation.size(4))#mean over z axis
        vecs_sampled=torch.nn.functional.grid_sample(deformation.repeat(locs_gridded.size(0),1,1,1,1),locs_gridded, mode='bilinear', padding_mode='border',align_corners=True)[:,:,0,0,0]
        loss=torch.nn.functional.l1_loss(vecs_sampled,vecs_target)# this is where the points that are mapped to each other are included
        # central-difference gradients of each component along its own axis
        gx=deformation[:,0,2:,1:-1,1:-1]-deformation[:,0,:-2,1:-1,1:-1]# how far is each pixel from the previous one in the direction of x
        gy=deformation[:,1,1:-1,2:,1:-1]-deformation[:,1,1:-1,:-2,1:-1]
        gz=deformation[:,2,1:-1,1:-1,2:]-deformation[:,2,1:-1,1:-1,:-2]
        divergence=scale[0]*gx+scale[1]*gy+scale[2]*gz
        # penalize non-volume-preserving fields
        loss+=lambda_div*torch.mean(torch.abs(divergence))
        #print(f.kgrid.shape)
        opt.zero_grad()
        loss.backward()
        opt.step()
        if return_losses:
            losses.append(loss.item())
        if verbose:
            print(loss.item())
    if return_losses:
        return frac*deformation[0],losses,"success"
    return frac*deformation[0],"success"
def get_deformation_tps(ptfrom,ptto,grid,scale=(1,1,1),at_least=5,epsilon=1e-8,device="cpu"):
    """Interpolate a dense displacement field from point correspondences by
    inverse-squared-distance weighting over the grid points.

    NOTE(review): despite the "tps" name, this is plain inverse-distance
    weighting, and the distance uses only the first two coordinates
    (``[:, :2]``) scaled by ``scale`` -- confirm that the z axis is
    deliberately ignored.  Returns ``(field, "success")`` with the
    coordinate axis moved to the front, or ``(None, "Not enough points")``.
    """
    vecs=(ptto-ptfrom)
    # keep only correspondences with no NaN coordinate
    valids=np.nonzero(np.all(np.isnan(vecs)==0,axis=1))[0]
    if len(valids)<at_least:
        return None,"Not enough points"
    vecs=vecs[valids][:,:]
    locs=ptfrom[valids][:,:]
    vecs=torch.tensor(vecs,device=device)
    locs=torch.tensor(locs,device=device)
    if not torch.is_tensor(grid):
        grid=torch.tensor(grid,device=device)
    gridpts=grid.reshape(-1,3)
    # weight ~ 1 / (distance + epsilon)^2; epsilon avoids division by zero
    weights=1/(torch.sqrt(torch.sum(( (gridpts[:,None,:2]-locs[None,:,:2])*torch.tensor(scale,dtype=locs.dtype,device=device)[None,None,:2] )**2,axis=2))+epsilon)**2
    norms=weights.sum(1)
    # weighted average of the correspondence vectors at every grid point
    deformation=(weights@vecs)/norms[:,None]
    return deformation.reshape(grid.shape).movedim(-1,0),"success"
def deform(image, deformation, mask=None):
    """Warp `image` (C, W, H, D) — and optionally `mask` (W, H, D) — by a
    dense displacement field `deformation` (3, W, H, D).

    Returns the warped image, or ``(warped_image, warped_mask)`` when a mask
    is given.
    """
    C, W, H, D = image.shape
    axes = [torch.arange(s) for s in (W, H, D)]
    base_grid = torch.stack(torch.meshgrid(*axes, indexing="ij"), dim=0)
    base_grid = base_grid.to(dtype=deformation.dtype, device=deformation.device)
    # displaced sampling positions, one (x, y, z) row per voxel
    sample_pts = (base_grid + deformation).reshape(3, -1).T
    warped = get_at_coords(image, sample_pts)
    warped = warped.reshape(W, H, D, C).permute(3, 0, 1, 2)
    if mask is None:
        return warped
    warped_mask = get_at_coords(mask, sample_pts, ismask=True)
    return warped, warped_mask.reshape(W, H, D)
def get_at_coords(image, coords, ismask=False):
    """Sample `image` at real-valued voxel coordinates by trilinear
    interpolation.

    `coords` is (N, 3) in voxel units. For a mask (shape (W, H, D)) the
    sampling is done in float32 and cast back to the mask dtype, returning
    shape (N,); otherwise `image` is (C, W, H, D) and the result is (N, C).
    """
    if ismask:
        image = image[None]
    C, W, H, D = image.shape
    pts = coords[:, None, None, :].clone()
    # map voxel indices [0, size-1] to grid_sample's normalized [-1, 1]
    for axis, size in enumerate((W, H, D)):
        pts[..., axis] /= (size - 1) / 2
        pts[..., axis] -= 1
    pts = pts[..., [2, 1, 0]]  # grid_sample expects (z, y, x) ordering
    if ismask:
        sampled = torch.nn.functional.grid_sample(
            image[None].to(torch.float32), pts[None], mode='bilinear',
            padding_mode="zeros", align_corners=True)[0].to(dtype=image.dtype)
        return sampled[0, :, 0, 0]
    else:
        sampled = torch.nn.functional.grid_sample(
            image[None], pts[None], mode='bilinear',
            padding_mode="zeros", align_corners=True)[0]
        return sampled[:, :, 0, 0].T
def invert_deformation(deformation, n_iter=10):
    """Approximate the inverse of a displacement field by fixed-point
    iteration: v(x) = -u(x + v(x)), starting from v = 0.
    """
    dim, W, H, D = deformation.shape
    inverse = torch.zeros_like(deformation)
    axes = [torch.arange(s) for s in (W, H, D)]
    base_grid = torch.stack(torch.meshgrid(*axes, indexing="ij"), dim=0)
    base_grid = base_grid.to(dtype=deformation.dtype, device=deformation.device)
    pts = base_grid.reshape(3, -1).T
    for _ in range(n_iter):
        # evaluate the forward field at the current inverse estimate
        inverse = -get_at_coords(deformation, pts + get_at_coords(inverse, pts))
        inverse = inverse.reshape(W, H, D, dim).permute(3, 0, 1, 2)
    return inverse
def no():
    """Intentional no-op placeholder."""
    return None
| [
"numpy.prod",
"numpy.array",
"torch.sum",
"torch.flip",
"numpy.arange",
"torch.arange",
"numpy.mean",
"torch.nn.functional.grid_sample",
"torch.fft.irfftn",
"torch.meshgrid",
"numpy.meshgrid",
"torch.zeros_like",
"torch.nn.functional.l1_loss",
"torch.abs",
"torch.is_tensor",
"numpy.isn... | [((5870, 5894), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', 'f.dim'], {}), '(1, 3, f.dim)\n', (5881, 5894), False, 'import torch\n'), ((6037, 6058), 'torch.nn.Parameter', 'torch.nn.Parameter', (['x'], {}), '(x)\n', (6055, 6058), False, 'import torch\n'), ((6067, 6095), 'torch.optim.Adam', 'torch.optim.Adam', (['[x]'], {'lr': 'lr'}), '([x], lr=lr)\n', (6083, 6095), False, 'import torch\n'), ((7670, 7703), 'torch.tensor', 'torch.tensor', (['vecs'], {'device': 'device'}), '(vecs, device=device)\n', (7682, 7703), False, 'import torch\n'), ((7712, 7745), 'torch.tensor', 'torch.tensor', (['locs'], {'device': 'device'}), '(locs, device=device)\n', (7724, 7745), False, 'import torch\n'), ((9529, 9558), 'torch.zeros_like', 'torch.zeros_like', (['deformation'], {}), '(deformation)\n', (9545, 9558), False, 'import torch\n'), ((310, 328), 'numpy.prod', 'np.prod', (['realshape'], {}), '(realshape)\n', (317, 328), True, 'import numpy as np\n'), ((2905, 2936), 'numpy.sum', 'np.sum', (['(self.kgrid ** 2)'], {'axis': '(0)'}), '(self.kgrid ** 2, axis=0)\n', (2911, 2936), True, 'import numpy as np\n'), ((3207, 3253), 'torch.all', 'torch.all', (['(holders_hs == unique_holders)'], {'dim': '(0)'}), '(holders_hs == unique_holders, dim=0)\n', (3216, 3253), False, 'import torch\n'), ((3317, 3340), 'torch.stack', 'torch.stack', (['[rem, rem]'], {}), '([rem, rem])\n', (3328, 3340), False, 'import torch\n'), ((4772, 4794), 'torch.cat', 'torch.cat', (['outs'], {'dim': '(0)'}), '(outs, dim=0)\n', (4781, 4794), False, 'import torch\n'), ((4873, 4917), 'torch.fft.irfftn', 'torch.fft.irfftn', (['self.f_k'], {'s': 'self.realshape'}), '(self.f_k, s=self.realshape)\n', (4889, 4917), False, 'import torch\n'), ((5983, 6004), 'numpy.mean', 'np.mean', (['vecs'], {'axis': '(0)'}), '(vecs, axis=0)\n', (5990, 6004), True, 'import numpy as np\n'), ((6512, 6566), 'torch.nn.functional.l1_loss', 'torch.nn.functional.l1_loss', (['vecs_sampled', 'vecs_target'], {}), '(vecs_sampled, 
vecs_target)\n', (6539, 6566), False, 'import torch\n'), ((7756, 7777), 'torch.is_tensor', 'torch.is_tensor', (['grid'], {}), '(grid)\n', (7771, 7777), False, 'import torch\n'), ((7792, 7825), 'torch.tensor', 'torch.tensor', (['grid'], {'device': 'device'}), '(grid, device=device)\n', (7804, 7825), False, 'import torch\n'), ((465, 480), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (473, 480), True, 'import numpy as np\n'), ((2659, 2683), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['d', '(1 / d)'], {}), '(d, 1 / d)\n', (2673, 2683), True, 'import numpy as np\n'), ((2771, 2796), 'numpy.arange', 'np.arange', (['self.shape[-1]'], {}), '(self.shape[-1])\n', (2780, 2796), True, 'import numpy as np\n'), ((2826, 2857), 'numpy.meshgrid', 'np.meshgrid', (['*ks'], {'indexing': '"""ij"""'}), "(*ks, indexing='ij')\n", (2837, 2857), True, 'import numpy as np\n'), ((2948, 2963), 'torch.arange', 'torch.arange', (['d'], {}), '(d)\n', (2960, 2963), False, 'import torch\n'), ((3020, 3056), 'torch.meshgrid', 'torch.meshgrid', (['*arrs'], {'indexing': '"""ij"""'}), "(*arrs, indexing='ij')\n", (3034, 3056), False, 'import torch\n'), ((5806, 5824), 'torch.tensor', 'torch.tensor', (['vecs'], {}), '(vecs)\n', (5818, 5824), False, 'import torch\n'), ((9283, 9404), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['image[None]', 'coords[None]'], {'mode': '"""bilinear"""', 'padding_mode': '"""zeros"""', 'align_corners': '(True)'}), "(image[None], coords[None], mode='bilinear',\n padding_mode='zeros', align_corners=True)\n", (9314, 9404), False, 'import torch\n'), ((4178, 4197), 'torch.sum', 'torch.sum', (['feed_ind'], {}), '(feed_ind)\n', (4187, 4197), False, 'import torch\n'), ((7007, 7028), 'torch.abs', 'torch.abs', (['divergence'], {}), '(divergence)\n', (7016, 7028), False, 'import torch\n'), ((694, 744), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], 0]'], {'dims': '(0,)'}), '(f_k_half[1:self.mids[0], 0], dims=(0,))\n', (704, 744), False, 'import 
torch\n'), ((801, 862), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], self.mids[1]]'], {'dims': '(0,)'}), '(f_k_half[1:self.mids[0], self.mids[1]], dims=(0,))\n', (811, 862), False, 'import torch\n'), ((5338, 5352), 'numpy.isnan', 'np.isnan', (['vecs'], {}), '(vecs)\n', (5346, 5352), True, 'import numpy as np\n'), ((5583, 5601), 'torch.tensor', 'torch.tensor', (['locs'], {}), '(locs)\n', (5595, 5601), False, 'import torch\n'), ((7506, 7520), 'numpy.isnan', 'np.isnan', (['vecs'], {}), '(vecs)\n', (7514, 7520), True, 'import numpy as np\n'), ((8264, 8279), 'torch.arange', 'torch.arange', (['s'], {}), '(s)\n', (8276, 8279), False, 'import torch\n'), ((9597, 9612), 'torch.arange', 'torch.arange', (['s'], {}), '(s)\n', (9609, 9612), False, 'import torch\n'), ((5623, 5642), 'numpy.array', 'np.array', (['[W, H, D]'], {}), '([W, H, D])\n', (5631, 5642), True, 'import numpy as np\n'), ((936, 989), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], 0, 0]'], {'dims': '(0,)'}), '(f_k_half[1:self.mids[0], 0, 0], dims=(0,))\n', (946, 989), False, 'import torch\n'), ((1055, 1119), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], 0, self.mids[2]]'], {'dims': '(0,)'}), '(f_k_half[1:self.mids[0], 0, self.mids[2]], dims=(0,))\n', (1065, 1119), False, 'import torch\n'), ((1185, 1249), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], self.mids[1], 0]'], {'dims': '(0,)'}), '(f_k_half[1:self.mids[0], self.mids[1], 0], dims=(0,))\n', (1195, 1249), False, 'import torch\n'), ((1326, 1401), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], self.mids[1], self.mids[2]]'], {'dims': '(0,)'}), '(f_k_half[1:self.mids[0], self.mids[1], self.mids[2]], dims=(0,))\n', (1336, 1401), False, 'import torch\n'), ((1457, 1510), 'torch.flip', 'torch.flip', (['f_k_half[0, 1:self.mids[1], 0]'], {'dims': '(0,)'}), '(f_k_half[0, 1:self.mids[1], 0], dims=(0,))\n', (1467, 1510), False, 'import torch\n'), ((1576, 1640), 'torch.flip', 'torch.flip', (['f_k_half[0, 1:self.mids[1], 
self.mids[2]]'], {'dims': '(0,)'}), '(f_k_half[0, 1:self.mids[1], self.mids[2]], dims=(0,))\n', (1586, 1640), False, 'import torch\n'), ((1706, 1770), 'torch.flip', 'torch.flip', (['f_k_half[self.mids[0], 1:self.mids[1], 0]'], {'dims': '(0,)'}), '(f_k_half[self.mids[0], 1:self.mids[1], 0], dims=(0,))\n', (1716, 1770), False, 'import torch\n'), ((1847, 1922), 'torch.flip', 'torch.flip', (['f_k_half[self.mids[0], 1:self.mids[1], self.mids[2]]'], {'dims': '(0,)'}), '(f_k_half[self.mids[0], 1:self.mids[1], self.mids[2]], dims=(0,))\n', (1857, 1922), False, 'import torch\n'), ((1992, 2060), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], 1:self.mids[1], 0]'], {'dims': '(0, 1)'}), '(f_k_half[1:self.mids[0], 1:self.mids[1], 0], dims=(0, 1))\n', (2002, 2060), False, 'import torch\n'), ((2128, 2199), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], self.mids[1] + 1:, 0]'], {'dims': '(0, 1)'}), '(f_k_half[1:self.mids[0], self.mids[1] + 1:, 0], dims=(0, 1))\n', (2138, 2199), False, 'import torch\n'), ((2276, 2355), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], 1:self.mids[1], self.mids[2]]'], {'dims': '(0, 1)'}), '(f_k_half[1:self.mids[0], 1:self.mids[1], self.mids[2]], dims=(0, 1))\n', (2286, 2355), False, 'import torch\n'), ((2434, 2521), 'torch.flip', 'torch.flip', (['f_k_half[1:self.mids[0], self.mids[1] + 1:, self.mids[2]]'], {'dims': '(0, 1)'}), '(f_k_half[1:self.mids[0], self.mids[1] + 1:, self.mids[2]], dims=\n (0, 1))\n', (2444, 2521), False, 'import torch\n'), ((7931, 7983), 'torch.tensor', 'torch.tensor', (['scale'], {'dtype': 'locs.dtype', 'device': 'device'}), '(scale, dtype=locs.dtype, device=device)\n', (7943, 7983), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2020 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from bokeh.plotting import figure, output_file, show, curdoc, ColumnDataSource
from bokeh.layouts import column
from bokeh.palettes import Spectral11, viridis
from bokeh.io import export_svgs
import numpy as np
import pandas as pd
def plot_flux_profiles(data,
                       filename='out.html',
                       min_max=None,
                       colors=None, **kwargs):
    """
    Plot flux profiles with Bokeh, optionally overlaying TFA min/max ranges.

    :param data: pd.DataFrame of flux profiles (one profile per row)
    :param filename: string, output HTML file
    :param min_max: tfa min/max output (DataFrame indexed by reaction with
        'minimum' and 'maximum' columns), or None
    :param colors: optional list of colors, one per profile
    :return: the Bokeh figure
    """
    # output to static HTML file
    output_file(filename)

    # MAKE COLORS
    if colors is None:
        num_profiles = data.shape[0]
        if num_profiles > 11:
            colors = viridis(num_profiles)
        else:
            colors = Spectral11[:num_profiles]

    source = ColumnDataSource(data)
    plot = figure(**kwargs)

    if min_max is not None:
        this_min_max = min_max.loc[data.columns]
        # BUG FIX: np.arange was called on the column Index itself, the
        # computed left/right edges were discarded in favour of hard-coded
        # lists, quad() was called on the undefined name `p` before the
        # figure existed, and top/bottom had minimum/maximum swapped.
        mid = np.arange(len(data.columns))
        left = mid - 0.2
        right = mid + 0.2
        plot.quad(top=this_min_max['maximum'], bottom=this_min_max['minimum'],
                  left=left, right=right, color="#B3DE69")

    return plot
| [
"bokeh.plotting.ColumnDataSource",
"bokeh.plotting.figure",
"numpy.arange",
"bokeh.palettes.viridis",
"bokeh.plotting.output_file"
] | [((1418, 1439), 'bokeh.plotting.output_file', 'output_file', (['filename'], {}), '(filename)\n', (1429, 1439), False, 'from bokeh.plotting import figure, output_file, show, curdoc, ColumnDataSource\n'), ((1983, 2005), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['data'], {}), '(data)\n', (1999, 2005), False, 'from bokeh.plotting import figure, output_file, show, curdoc, ColumnDataSource\n'), ((2018, 2034), 'bokeh.plotting.figure', 'figure', ([], {}), '(**kwargs)\n', (2024, 2034), False, 'from bokeh.plotting import figure, output_file, show, curdoc, ColumnDataSource\n'), ((1745, 1768), 'numpy.arange', 'np.arange', (['data.columns'], {}), '(data.columns)\n', (1754, 1768), True, 'import numpy as np\n'), ((1570, 1591), 'bokeh.palettes.viridis', 'viridis', (['num_profiles'], {}), '(num_profiles)\n', (1577, 1591), False, 'from bokeh.palettes import Spectral11, viridis\n')] |
import torch
from torch import nn
import torchvision.models
from torchvision.models.resnet import BasicBlock, Bottleneck
import argparse
import numpy as np
import json
import sys
from types import SimpleNamespace
from cdfsl import make_cdfsl_loader
from core import EvaluateFewShot
from datasets import ImagenetBasedDataset, MiniImageNet
from hebb import stage_two, hebb_rule
from res12 import resnet12
from res10 import res10
from models import conv64
from utils import prepare_meta_batch, make_task_loader
def basic_block_forward(layer, x):
    """Run a ResNet BasicBlock, appending every intermediate activation to x.

    `x` is a running list of activations whose last element is the block
    input. Returns `x` extended with the outputs of conv1/bn1/relu/conv2/bn2,
    the residual sum, and the final relu.
    """
    shortcut = x[-1]
    for op in (layer.conv1, layer.bn1, layer.relu, layer.conv2, layer.bn2):
        x.append(op(x[-1]))
    if layer.downsample is not None:
        shortcut = layer.downsample(shortcut)
    x.append(x[-1] + shortcut)
    x.append(layer.relu(x[-1]))
    return x
def bottleneck_forward(layer, x):
    """Run a ResNet Bottleneck, appending every intermediate activation to x.

    Same contract as basic_block_forward, but with the three-conv bottleneck
    stage sequence.
    """
    shortcut = x[-1]
    stages = (layer.conv1, layer.bn1, layer.relu,
              layer.conv2, layer.bn2, layer.relu,
              layer.conv3, layer.bn3)
    for op in stages:
        x.append(op(x[-1]))
    if layer.downsample is not None:
        shortcut = layer.downsample(shortcut)
    x.append(x[-1] + shortcut)
    x.append(layer.relu(x[-1]))
    return x
def recursive_forward(module, x):
    """Recursively run `module`, appending every intermediate activation to x.

    BasicBlock/Bottleneck instances are expanded via the dedicated helpers so
    their internal activations are recorded; containers recurse over their
    children; any other leaf module is applied directly to x[-1].
    """
    if isinstance(module, BasicBlock):
        return basic_block_forward(module, x)
    if isinstance(module, Bottleneck):
        return bottleneck_forward(module, x)
    if isinstance(module, nn.AdaptiveAvgPool2d):
        x.append(module.forward(x[-1]).flatten(1))
        return x
    children = getattr(module, '_modules', None)
    if children:
        for child in children.values():
            x = recursive_forward(child, x)
        return x
    x.append(module.forward(x[-1]))
    return x
class ModelWrapper(nn.Module):
    """Wraps an embedding network and records selected intermediate
    activations (indexed by ``feature_index``) on every forward pass.
    """

    def __init__(self, embed):
        super(ModelWrapper, self).__init__()
        self.embed = embed
        # indices into the activation list collected during forward()
        self.feature_index = [-1]

    def forward(self, x, output_layer=True):
        activations = recursive_forward(self.embed, [x])
        self.x = activations
        # flatten each requested intermediate activation to (batch, -1)
        self.features = [activations[idx].flatten(1)
                         for idx in self.feature_index]
        return activations[-1]
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument('config', type=str)
parser.add_argument('--dataset', type=str)
parser.add_argument('--n', type=int)
parser.add_argument('--k', type=int)
parser.add_argument('--q', type=int)
parser.add_argument('--eval-batches', type=int)
parser.add_argument('--gpu', type=int, nargs='+')
parser.add_argument('--num-workers', type=int)
parser.add_argument('--hebb-lr', type=float)
parser.add_argument('--inner-val-steps', type=int)
parser.add_argument('--meta-batch-size', type=int)
parser.add_argument('--seed', type=int)
parser.add_argument('--feature-index', type=int, nargs='+')
# res18 ablation: -1 59 52 45 38 31
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
# override config with cmd line args
config.update(vars(args))
args = SimpleNamespace(**config)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
assert(torch.cuda.is_available())
device = torch.device(args.gpu[0])
eval_few_shot_args = {
'num_tasks': args.eval_batches,
'n_shot': args.n,
'k_way': args.k,
'q_queries': args.q,
'prepare_batch': prepare_meta_batch(
args.n, args.k, args.q, args.meta_batch_size, 2, device),
'inner_train_steps': args.inner_val_steps,
'hebb_lr': args.hebb_lr,
'device': device,
'xdom': hasattr(args, 'dataset') and args.dataset not in ('mini', 'tier'),
}
model = torchvision.models.resnet18(pretrained=True)
#model = torchvision.models.resnet34(pretrained=True)
#model = torchvision.models.resnet50(pretrained=True)
#model = torchvision.models.resnet101(pretrained=True)
#model = torchvision.models.resnet152(pretrained=True)
#model_orig = model # FIXME integrity check
model = ModelWrapper(model)
model.feature_index = args.feature_index #[-1, -2, -3, -8]
model = nn.DataParallel(model, device_ids=args.gpu)
model = model.to(device, dtype=torch.double)
model.eval()
# FIXME integrity check
#model_orig = model_orig.to(device, dtype=torch.double)
#x = torch.rand(1, 3, 224, 244).to(device, dtype=torch.double)
#print((model(x)-model_orig(x)).sum())
#exit()
if (not hasattr(args, 'dataset')) or args.dataset == 'mini':
#test_loader = make_task_loader(MiniImageNet('test', small=False),
# args, train=False, meta=True)
test_loader = make_task_loader(ImagenetBasedDataset('test', small=False),
args, train=False, meta=True)
elif args.dataset == 'tier':
test_loader = make_task_loader(ImagenetBasedDataset('test', small=False, tier=True),
args, train=False, meta=True)
else:
test_loader = make_cdfsl_loader(args.dataset,
args.eval_batches,
args.n,
args.k,
args.q,
small=False)
loss_fn = nn.CrossEntropyLoss().to(device)
evaluator = EvaluateFewShot(eval_fn=hebb_rule,
taskloader=test_loader,
**eval_few_shot_args)
#logs = {'dummy': 0} # it's important to have logs be non-empty
logs = {
'dataset': args.dataset if hasattr(args, 'dataset') else 'miniImagenet',
'feature_index': args.feature_index,
}
evaluator.model = {'sys1': model}
evaluator.optimiser = None
evaluator.loss_fn = loss_fn
evaluator.on_epoch_end(0, logs)
print(logs)
feature_index = 'ensemble' if len(args.feature_index) > 1 else args.feature_index
print(f'res18,{args.dataset},{args.n},{feature_index},{logs[evaluator.metric_name]}')
| [
"torch.manual_seed",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"types.SimpleNamespace",
"torch.nn.DataParallel",
"cdfsl.make_cdfsl_loader",
"torch.cuda.is_available",
"numpy.random.seed",
"datasets.ImagenetBasedDataset",
"json.load",
"utils.prepare_meta_batch",
"core.EvaluateFewS... | [((2321, 2380), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'argument_default': 'argparse.SUPPRESS'}), '(argument_default=argparse.SUPPRESS)\n', (2344, 2380), False, 'import argparse\n'), ((3155, 3180), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**config)\n', (3170, 3180), False, 'from types import SimpleNamespace\n'), ((3182, 3207), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3196, 3207), True, 'import numpy as np\n'), ((3208, 3236), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3225, 3236), False, 'import torch\n'), ((3325, 3350), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3348, 3350), False, 'import torch\n'), ((3361, 3386), 'torch.device', 'torch.device', (['args.gpu[0]'], {}), '(args.gpu[0])\n', (3373, 3386), False, 'import torch\n'), ((4193, 4236), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {'device_ids': 'args.gpu'}), '(model, device_ids=args.gpu)\n', (4208, 4236), False, 'from torch import nn\n'), ((5345, 5430), 'core.EvaluateFewShot', 'EvaluateFewShot', ([], {'eval_fn': 'hebb_rule', 'taskloader': 'test_loader'}), '(eval_fn=hebb_rule, taskloader=test_loader, **eval_few_shot_args\n )\n', (5360, 5430), False, 'from core import EvaluateFewShot\n'), ((3071, 3083), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3080, 3083), False, 'import json\n'), ((3526, 3601), 'utils.prepare_meta_batch', 'prepare_meta_batch', (['args.n', 'args.k', 'args.q', 'args.meta_batch_size', '(2)', 'device'], {}), '(args.n, args.k, args.q, args.meta_batch_size, 2, device)\n', (3544, 3601), False, 'from utils import prepare_meta_batch, make_task_loader\n'), ((4716, 4757), 'datasets.ImagenetBasedDataset', 'ImagenetBasedDataset', (['"""test"""'], {'small': '(False)'}), "('test', small=False)\n", (4736, 4757), False, 'from datasets import ImagenetBasedDataset, MiniImageNet\n'), ((5025, 5116), 'cdfsl.make_cdfsl_loader', 
'make_cdfsl_loader', (['args.dataset', 'args.eval_batches', 'args.n', 'args.k', 'args.q'], {'small': '(False)'}), '(args.dataset, args.eval_batches, args.n, args.k, args.q,\n small=False)\n', (5042, 5116), False, 'from cdfsl import make_cdfsl_loader\n'), ((5299, 5320), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5318, 5320), False, 'from torch import nn\n'), ((4885, 4937), 'datasets.ImagenetBasedDataset', 'ImagenetBasedDataset', (['"""test"""'], {'small': '(False)', 'tier': '(True)'}), "('test', small=False, tier=True)\n", (4905, 4937), False, 'from datasets import ImagenetBasedDataset, MiniImageNet\n')] |
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
from utils.tqdm_op import tqdm_range
import numpy as np
import copy
def np_softmax(x):
'''
Args:
x - Numpy 2D array
'''
x_softmax = np.zeros_like(x)
ndata, nfeature = x.shape
for idx in range(ndata):
tmp_max = np.max(x[idx])
tmp_exp = np.exp(x[idx] - tmp_max)
x_softmax[idx] = tmp_exp/np.sum(tmp_exp)
return x_softmax
def get_ginni_variance_conti(array):
''' FactorVAE https://arxiv.org/pdf/1802.05983.pdf
Args:
array - Numpy 1D array
'''
ndata = array.shape[0]
return ndata/(ndata-1)*np.var(array)
def get_ginni_variance_discrete(array):
''' FactorVAE https://arxiv.org/pdf/1802.05983.pdf
Args: array - Numpy 1D array, argmax index
'''
array = array.astype(int)
ndata = array.shape[0]
count = np.zeros([np.max(array)+1])
for idx in range(ndata): count[array[idx]]+=1
count = count.astype(float)
return (ndata*ndata - np.sum(np.square(count)))/(2*ndata*(ndata-1))
def zero_padding2nmul(inputs, mul):
'''Add zero padding to inputs to be multiple of mul
Args:
inputs - np array
mul - int
Return:
np array (inputs + zero_padding)
int original input size
'''
input_shape = list(inputs.shape)
ndata = input_shape[0]
if ndata%mul==0: return inputs, ndata
input_shape[0] = mul-ndata%mul
return np.concatenate([inputs, np.zeros(input_shape)], axis=0), ndata
def np_random_crop_4d(imgs, size):
'''
Args:
imgs - 4d image NHWC
size - list (rh, rw)
'''
rh, rw = size
on, oh, ow, oc = imgs.shape
cropped_imgs = np.zeros([on, rh, rw, oc])
ch = np.random.randint(low=0, high=oh-rh, size=on)
cw = np.random.randint(low=0, high=ow-rw, size=on)
for idx in range(on): cropped_imgs[idx] = imgs[idx,ch[idx]:ch[idx]+rh,cw[idx]:cw[idx]+rw]
return cropped_imgs
| [
"numpy.max",
"numpy.exp",
"os.path.dirname",
"numpy.zeros",
"numpy.random.randint",
"numpy.sum",
"numpy.square",
"numpy.zeros_like",
"numpy.var"
] | [((260, 276), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (273, 276), True, 'import numpy as np\n'), ((1743, 1769), 'numpy.zeros', 'np.zeros', (['[on, rh, rw, oc]'], {}), '([on, rh, rw, oc])\n', (1751, 1769), True, 'import numpy as np\n'), ((1779, 1826), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(oh - rh)', 'size': 'on'}), '(low=0, high=oh - rh, size=on)\n', (1796, 1826), True, 'import numpy as np\n'), ((1834, 1881), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(ow - rw)', 'size': 'on'}), '(low=0, high=ow - rw, size=on)\n', (1851, 1881), True, 'import numpy as np\n'), ((356, 370), 'numpy.max', 'np.max', (['x[idx]'], {}), '(x[idx])\n', (362, 370), True, 'import numpy as np\n'), ((389, 413), 'numpy.exp', 'np.exp', (['(x[idx] - tmp_max)'], {}), '(x[idx] - tmp_max)\n', (395, 413), True, 'import numpy as np\n'), ((680, 693), 'numpy.var', 'np.var', (['array'], {}), '(array)\n', (686, 693), True, 'import numpy as np\n'), ((66, 91), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (81, 91), False, 'import os\n'), ((447, 462), 'numpy.sum', 'np.sum', (['tmp_exp'], {}), '(tmp_exp)\n', (453, 462), True, 'import numpy as np\n'), ((924, 937), 'numpy.max', 'np.max', (['array'], {}), '(array)\n', (930, 937), True, 'import numpy as np\n'), ((1057, 1073), 'numpy.square', 'np.square', (['count'], {}), '(count)\n', (1066, 1073), True, 'import numpy as np\n'), ((1513, 1534), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (1521, 1534), True, 'import numpy as np\n')] |
import multiprocessing as mp
import logging
import numpy as np
import ctypes as c
from module.config import *
"""
Global variables shared between processes
"""
global raw_image
# Loglevel
LOG_LEVEL = logging.ERROR
# Raw Image array
t_raw_image = mp.Value('f', 0.0) # timestamp of last frame
if IMAGERAW:
raw_image = mp.RawArray('d', IMG_SIZE[0] * IMG_SIZE[1] * IMG_SIZE[2])
# Raw Verts array
if VERTSRAW:
raw_verts = None
# Speed
speed = mp.Value('f', 0.0)
# Heading
steer = mp.Value('f', 0.0)
# Cpu temp
cpu_temp = mp.Value('f', 0.0)
# Battery current
I_b = mp.Value('f', 0.0)
# Battery voltage
U_b = mp.Value('f', 0.0)
# Motor current
I_m = mp.Value('f', 0.0)
#
def nparray_to_rawarray(arr):
global raw_image
#raw_arr = mp.RawArray(c.c_double, int(np.prod(arr.shape)))
np.frombuffer(raw_image).reshape(arr.shape)[...] = arr
return raw_image
def rawarray_to_nparray(raw_arr, shape):
return np.frombuffer(raw_arr).reshape(shape)
| [
"numpy.frombuffer",
"multiprocessing.Value",
"multiprocessing.RawArray"
] | [((249, 267), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (257, 267), True, 'import multiprocessing as mp\n'), ((453, 471), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (461, 471), True, 'import multiprocessing as mp\n'), ((491, 509), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (499, 509), True, 'import multiprocessing as mp\n'), ((533, 551), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (541, 551), True, 'import multiprocessing as mp\n'), ((577, 595), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (585, 595), True, 'import multiprocessing as mp\n'), ((621, 639), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (629, 639), True, 'import multiprocessing as mp\n'), ((663, 681), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (671, 681), True, 'import multiprocessing as mp\n'), ((325, 382), 'multiprocessing.RawArray', 'mp.RawArray', (['"""d"""', '(IMG_SIZE[0] * IMG_SIZE[1] * IMG_SIZE[2])'], {}), "('d', IMG_SIZE[0] * IMG_SIZE[1] * IMG_SIZE[2])\n", (336, 382), True, 'import multiprocessing as mp\n'), ((934, 956), 'numpy.frombuffer', 'np.frombuffer', (['raw_arr'], {}), '(raw_arr)\n', (947, 956), True, 'import numpy as np\n'), ((805, 829), 'numpy.frombuffer', 'np.frombuffer', (['raw_image'], {}), '(raw_image)\n', (818, 829), True, 'import numpy as np\n')] |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import nibabel as nib
import numpy as np
from monai.data import ImageDataset
from monai.transforms import Randomizable
FILENAMES = ["test1.nii.gz", "test2.nii", "test3.nii.gz"]
class RandTest(Randomizable):
"""
randomisable transform for testing.
"""
def randomize(self, data=None):
self._a = self.R.random()
def __call__(self, data):
self.randomize()
return data + self._a
class TestImageDataset(unittest.TestCase):
def test_dataset(self):
with tempfile.TemporaryDirectory() as tempdir:
full_names, ref_data = [], []
for filename in FILENAMES:
test_image = np.random.randint(0, 2, size=(4, 4, 4))
ref_data.append(test_image)
save_path = os.path.join(tempdir, filename)
full_names.append(save_path)
nib.save(nib.Nifti1Image(test_image, np.eye(4)), save_path)
# default loading no meta
dataset = ImageDataset(full_names)
for d, ref in zip(dataset, ref_data):
np.testing.assert_allclose(d, ref, atol=1e-3)
# loading no meta, int
dataset = ImageDataset(full_names, dtype=np.float16)
for d, _ in zip(dataset, ref_data):
self.assertEqual(d.dtype, np.float16)
# loading with meta, no transform
dataset = ImageDataset(full_names, image_only=False)
for d_tuple, ref in zip(dataset, ref_data):
d, meta = d_tuple
np.testing.assert_allclose(d, ref, atol=1e-3)
np.testing.assert_allclose(meta["original_affine"], np.eye(4))
# loading image/label, no meta
dataset = ImageDataset(full_names, seg_files=full_names, image_only=True)
for d_tuple, ref in zip(dataset, ref_data):
img, seg = d_tuple
np.testing.assert_allclose(img, ref, atol=1e-3)
np.testing.assert_allclose(seg, ref, atol=1e-3)
# loading image/label, no meta
dataset = ImageDataset(full_names, transform=lambda x: x + 1, image_only=True)
for d, ref in zip(dataset, ref_data):
np.testing.assert_allclose(d, ref + 1, atol=1e-3)
# set seg transform, but no seg_files
with self.assertRaises(RuntimeError):
dataset = ImageDataset(full_names, seg_transform=lambda x: x + 1, image_only=True)
_ = dataset[0]
# set seg transform, but no seg_files
with self.assertRaises(RuntimeError):
dataset = ImageDataset(full_names, seg_transform=lambda x: x + 1, image_only=True)
_ = dataset[0]
# loading image/label, with meta
dataset = ImageDataset(
full_names,
transform=lambda x: x + 1,
seg_files=full_names,
seg_transform=lambda x: x + 2,
image_only=False,
)
for d_tuple, ref in zip(dataset, ref_data):
img, seg, meta = d_tuple
np.testing.assert_allclose(img, ref + 1, atol=1e-3)
np.testing.assert_allclose(seg, ref + 2, atol=1e-3)
np.testing.assert_allclose(meta["original_affine"], np.eye(4), atol=1e-3)
# loading image/label, with meta
dataset = ImageDataset(
full_names, transform=lambda x: x + 1, seg_files=full_names, labels=[1, 2, 3], image_only=False
)
for idx, (d_tuple, ref) in enumerate(zip(dataset, ref_data)):
img, seg, label, meta = d_tuple
np.testing.assert_allclose(img, ref + 1, atol=1e-3)
np.testing.assert_allclose(seg, ref, atol=1e-3)
np.testing.assert_allclose(idx + 1, label)
np.testing.assert_allclose(meta["original_affine"], np.eye(4), atol=1e-3)
# loading image/label, with sync. transform
dataset = ImageDataset(
full_names, transform=RandTest(), seg_files=full_names, seg_transform=RandTest(), image_only=False
)
for d_tuple, ref in zip(dataset, ref_data):
img, seg, meta = d_tuple
np.testing.assert_allclose(img, seg, atol=1e-3)
self.assertTrue(not np.allclose(img, ref))
np.testing.assert_allclose(meta["original_affine"], np.eye(4), atol=1e-3)
if __name__ == "__main__":
unittest.main()
| [
"tempfile.TemporaryDirectory",
"monai.data.ImageDataset",
"numpy.eye",
"numpy.allclose",
"numpy.testing.assert_allclose",
"os.path.join",
"numpy.random.randint",
"unittest.main"
] | [((5143, 5158), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5156, 5158), False, 'import unittest\n'), ((1135, 1164), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1162, 1164), False, 'import tempfile\n'), ((1613, 1637), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {}), '(full_names)\n', (1625, 1637), False, 'from monai.data import ImageDataset\n'), ((1808, 1850), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'dtype': 'np.float16'}), '(full_names, dtype=np.float16)\n', (1820, 1850), False, 'from monai.data import ImageDataset\n'), ((2022, 2064), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'image_only': '(False)'}), '(full_names, image_only=False)\n', (2034, 2064), False, 'from monai.data import ImageDataset\n'), ((2362, 2425), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'seg_files': 'full_names', 'image_only': '(True)'}), '(full_names, seg_files=full_names, image_only=True)\n', (2374, 2425), False, 'from monai.data import ImageDataset\n'), ((2711, 2779), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'transform': '(lambda x: x + 1)', 'image_only': '(True)'}), '(full_names, transform=lambda x: x + 1, image_only=True)\n', (2723, 2779), False, 'from monai.data import ImageDataset\n'), ((3426, 3552), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'transform': '(lambda x: x + 1)', 'seg_files': 'full_names', 'seg_transform': '(lambda x: x + 2)', 'image_only': '(False)'}), '(full_names, transform=lambda x: x + 1, seg_files=full_names,\n seg_transform=lambda x: x + 2, image_only=False)\n', (3438, 3552), False, 'from monai.data import ImageDataset\n'), ((4035, 4148), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'transform': '(lambda x: x + 1)', 'seg_files': 'full_names', 'labels': '[1, 2, 3]', 'image_only': '(False)'}), '(full_names, transform=lambda x: x + 1, seg_files=full_names,\n labels=[1, 2, 3], 
image_only=False)\n', (4047, 4148), False, 'from monai.data import ImageDataset\n'), ((1287, 1326), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(4, 4, 4)'}), '(0, 2, size=(4, 4, 4))\n', (1304, 1326), True, 'import numpy as np\n'), ((1399, 1430), 'os.path.join', 'os.path.join', (['tempdir', 'filename'], {}), '(tempdir, filename)\n', (1411, 1430), False, 'import os\n'), ((1704, 1750), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['d', 'ref'], {'atol': '(0.001)'}), '(d, ref, atol=0.001)\n', (1730, 1750), True, 'import numpy as np\n'), ((2171, 2217), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['d', 'ref'], {'atol': '(0.001)'}), '(d, ref, atol=0.001)\n', (2197, 2217), True, 'import numpy as np\n'), ((2533, 2581), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['img', 'ref'], {'atol': '(0.001)'}), '(img, ref, atol=0.001)\n', (2559, 2581), True, 'import numpy as np\n'), ((2597, 2645), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['seg', 'ref'], {'atol': '(0.001)'}), '(seg, ref, atol=0.001)\n', (2623, 2645), True, 'import numpy as np\n'), ((2846, 2896), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['d', '(ref + 1)'], {'atol': '(0.001)'}), '(d, ref + 1, atol=0.001)\n', (2872, 2896), True, 'import numpy as np\n'), ((3023, 3095), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'seg_transform': '(lambda x: x + 1)', 'image_only': '(True)'}), '(full_names, seg_transform=lambda x: x + 1, image_only=True)\n', (3035, 3095), False, 'from monai.data import ImageDataset\n'), ((3254, 3326), 'monai.data.ImageDataset', 'ImageDataset', (['full_names'], {'seg_transform': '(lambda x: x + 1)', 'image_only': '(True)'}), '(full_names, seg_transform=lambda x: x + 1, image_only=True)\n', (3266, 3326), False, 'from monai.data import ImageDataset\n'), ((3757, 3809), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['img', '(ref + 1)'], {'atol': 
'(0.001)'}), '(img, ref + 1, atol=0.001)\n', (3783, 3809), True, 'import numpy as np\n'), ((3825, 3877), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['seg', '(ref + 2)'], {'atol': '(0.001)'}), '(seg, ref + 2, atol=0.001)\n', (3851, 3877), True, 'import numpy as np\n'), ((4313, 4365), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['img', '(ref + 1)'], {'atol': '(0.001)'}), '(img, ref + 1, atol=0.001)\n', (4339, 4365), True, 'import numpy as np\n'), ((4381, 4429), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['seg', 'ref'], {'atol': '(0.001)'}), '(seg, ref, atol=0.001)\n', (4407, 4429), True, 'import numpy as np\n'), ((4445, 4487), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(idx + 1)', 'label'], {}), '(idx + 1, label)\n', (4471, 4487), True, 'import numpy as np\n'), ((4913, 4961), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['img', 'seg'], {'atol': '(0.001)'}), '(img, seg, atol=0.001)\n', (4939, 4961), True, 'import numpy as np\n'), ((2285, 2294), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2291, 2294), True, 'import numpy as np\n'), ((3945, 3954), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3951, 3954), True, 'import numpy as np\n'), ((4556, 4565), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4562, 4565), True, 'import numpy as np\n'), ((5088, 5097), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5094, 5097), True, 'import numpy as np\n'), ((1529, 1538), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1535, 1538), True, 'import numpy as np\n'), ((4997, 5018), 'numpy.allclose', 'np.allclose', (['img', 'ref'], {}), '(img, ref)\n', (5008, 5018), True, 'import numpy as np\n')] |
# Beidou B3I code construction
#
# Copyright 2018 <NAME>
import numpy as np
chip_rate = 10230000
code_length = 10230
secondary_code = np.array([0,0,0,0,0,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0])
secondary_code = 1.0 - 2.0*secondary_code
b3i_g2_initial = {
1: "1010111111111", 2: "1111000101011", 3: "1011110001010", 4: "1111111111011",
5: "1100100011111", 6: "1001001100100", 7: "1111111010010", 8: "1110111111101",
9: "1010000000010", 10: "0010000011011", 11: "1110101110000", 12: "0010110011110",
13: "0110010010101", 14: "0111000100110", 15: "1000110001001", 16: "1110001111100",
17: "0010011000101", 18: "0000011101100", 19: "1000101010111", 20: "0001011011110",
21: "0010000101101", 22: "0010110001010", 23: "0001011001111", 24: "0011001100010",
25: "0011101001000", 26: "0100100101001", 27: "1011011010011", 28: "1010111100010",
29: "0001011110101", 30: "0111111111111", 31: "0110110001111", 32: "1010110001001",
33: "1001010101011", 34: "1100110100101", 35: "1101001011101", 36: "1111101110100",
37: "0010101100111", 38: "1110100010000", 39: "1101110010000", 40: "1101011001110",
41: "1000000110100", 42: "0101111011001", 43: "0110110111100", 44: "1101001110001",
45: "0011100100010", 46: "0101011000101", 47: "1001111100110", 48: "1111101001000",
49: "0000101001001", 50: "1000010101100", 51: "1111001001100", 52: "0100110001111",
53: "0000000011000", 54: "1000000000100", 55: "0011010100110", 56: "1011001000110",
57: "0111001111000", 58: "0010111001010", 59: "1100111110110", 60: "1001001000101",
61: "0111000100000", 62: "0011001000010", 63: "0010001001110",
}
def str2list(s):
x = []
for c in s:
if c=='0':
x.append(0)
else:
x.append(1)
return x
def b3i_g1_shift(x):
if x==[1,1,1,1,1,1,1,1,1,1,1,0,0]:
return [1,1,1,1,1,1,1,1,1,1,1,1,1]
else:
return [x[0]^x[2]^x[3]^x[12]] + x[0:12]
def b3i_g2_shift(x):
return [x[0]^x[4]^x[5]^x[6]^x[8]^x[9]^x[11]^x[12]] + x[0:12]
def b3i(prn):
n = code_length
g1 = [1,1,1,1,1,1,1,1,1,1,1,1,1]
g2 = str2list(b3i_g2_initial[prn])
b3i = np.zeros(n)
for i in range(n):
b3i[i] = g1[12] ^ g2[12]
g1 = b3i_g1_shift(g1)
g2 = b3i_g2_shift(g2)
return b3i
codes = {}
def b3i_code(prn):
if prn not in codes:
codes[prn] = b3i(prn)
return codes[prn]
def code(prn,chips,frac,incr,n):
c = b3i_code(prn)
idx = (chips%code_length) + frac + incr*np.arange(n)
idx = np.floor(idx).astype('int')
idx = np.mod(idx,code_length)
x = c[idx]
return 1.0 - 2.0*x
try:
from numba import jit
except:
def jit(**kwargs):
return lambda x: x
@jit(nopython=True)
def correlate(x,prn,chips,frac,incr,c):
n = len(x)
p = 0.0j
cp = (chips+frac)%code_length
for i in range(n):
p += x[i]*(1.0-2.0*c[int(cp)])
cp = (cp+incr)%code_length
return p
# test
if __name__=='__main__':
print(b3i_code(1)[0:20])
print(b3i_code(2)[0:20])
| [
"numpy.floor",
"numpy.array",
"numpy.zeros",
"numba.jit",
"numpy.mod",
"numpy.arange"
] | [((137, 207), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0]'], {}), '([0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0])\n', (145, 207), True, 'import numpy as np\n'), ((2639, 2657), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2642, 2657), False, 'from numba import jit\n'), ((2116, 2127), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2124, 2127), True, 'import numpy as np\n'), ((2497, 2521), 'numpy.mod', 'np.mod', (['idx', 'code_length'], {}), '(idx, code_length)\n', (2503, 2521), True, 'import numpy as np\n'), ((2440, 2452), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2449, 2452), True, 'import numpy as np\n'), ((2461, 2474), 'numpy.floor', 'np.floor', (['idx'], {}), '(idx)\n', (2469, 2474), True, 'import numpy as np\n')] |
from skopt import Optimizer
import numpy as np
def dbtime(p1,p2,p3,p4,p5,p6,p7,p8,p9,p10):
return np.sin(p1)*10+np.sin(p2)*5+np.sin(p3)+np.sin(p4)+np.sin(p5)+np.sin(p6)+np.sin(p7)+np.sin(p8)+np.sin(p9)+np.sin(p10)+3*8+10+5
search_space = {
'param_1': (0.0, 6.0),
'param_2': (0.0, 6.0),
'param_3': (0.0, 6.0),
'param_4': (0.0, 6.0),
'param_5': (0.0, 6.0),
'param_6': (0.0, 6.0),
'param_7': (0.0, 6.0),
'param_8': (0.0, 6.0),
'param_9': (0.0, 6.0),
'param_10': (0.0, 6.0)
}
best_runtime=1000
best_config=[]
opt = Optimizer([search_space['param_1'], search_space['param_2'], search_space['param_3'], search_space['param_4'], search_space['param_5'], search_space['param_6'], search_space['param_7'], search_space['param_8'], search_space['param_9'], search_space['param_10']], "GP", n_initial_points=3)
for iteration in range(100):
next_config = opt.ask()
print(next_config)
runtime=dbtime(next_config[0],next_config[1],next_config[2],next_config[3],next_config[4],next_config[5],next_config[6],next_config[7],next_config[8],next_config[9])
if runtime < best_runtime:
best_runtime=runtime
best_config=next_config
opt.tell(next_config, runtime)
print(best_runtime)
print(best_config)
| [
"numpy.sin",
"skopt.Optimizer"
] | [((569, 875), 'skopt.Optimizer', 'Optimizer', (["[search_space['param_1'], search_space['param_2'], search_space['param_3'],\n search_space['param_4'], search_space['param_5'], search_space[\n 'param_6'], search_space['param_7'], search_space['param_8'],\n search_space['param_9'], search_space['param_10']]", '"""GP"""'], {'n_initial_points': '(3)'}), "([search_space['param_1'], search_space['param_2'], search_space[\n 'param_3'], search_space['param_4'], search_space['param_5'],\n search_space['param_6'], search_space['param_7'], search_space[\n 'param_8'], search_space['param_9'], search_space['param_10']], 'GP',\n n_initial_points=3)\n", (578, 875), False, 'from skopt import Optimizer\n'), ((211, 222), 'numpy.sin', 'np.sin', (['p10'], {}), '(p10)\n', (217, 222), True, 'import numpy as np\n'), ((200, 210), 'numpy.sin', 'np.sin', (['p9'], {}), '(p9)\n', (206, 210), True, 'import numpy as np\n'), ((189, 199), 'numpy.sin', 'np.sin', (['p8'], {}), '(p8)\n', (195, 199), True, 'import numpy as np\n'), ((178, 188), 'numpy.sin', 'np.sin', (['p7'], {}), '(p7)\n', (184, 188), True, 'import numpy as np\n'), ((167, 177), 'numpy.sin', 'np.sin', (['p6'], {}), '(p6)\n', (173, 177), True, 'import numpy as np\n'), ((156, 166), 'numpy.sin', 'np.sin', (['p5'], {}), '(p5)\n', (162, 166), True, 'import numpy as np\n'), ((145, 155), 'numpy.sin', 'np.sin', (['p4'], {}), '(p4)\n', (151, 155), True, 'import numpy as np\n'), ((134, 144), 'numpy.sin', 'np.sin', (['p3'], {}), '(p3)\n', (140, 144), True, 'import numpy as np\n'), ((107, 117), 'numpy.sin', 'np.sin', (['p1'], {}), '(p1)\n', (113, 117), True, 'import numpy as np\n'), ((121, 131), 'numpy.sin', 'np.sin', (['p2'], {}), '(p2)\n', (127, 131), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import matplotlib.pyplot as plt
from math import hypot,sqrt
from . import PondingLoadCell
import sys
if(sys.version < '3'):
raise Exception('This script requires Python 3')
class Model:
ndof = 0
use_sparse_matrix_solver = True
def __init__(self,name):
self.name = name
self.Nodes = dict()
self.Elements = dict()
self.PondingLoadCells = dict()
def AddNode(self,id,coords,dof_types):
# possible dof_types: 'DX','DY','DZ','RX','RY','RZ'
if id in self.Nodes:
raise Exception('Input Error - node "%s" is already defined' % id)
dofs = dict()
for i in range(len(dof_types)):
dofs[dof_types[i]] = dof(self.ndof)
self.ndof += 1
self.Nodes[id] = Node(id,coords,dofs)
def AddElement(self,id,type,nodes,*args):
if id in self.Elements:
raise Exception('Input Error - element "%s" is already defined' % id)
if type.lower() == 'elasticbeam2d':
nodei = self.Nodes[nodes[0]]
nodej = self.Nodes[nodes[1]]
self.Elements[id] = ElasticBeam2d(id,nodei,nodej,*args)
elif type.lower() == 'elasticbeam3d':
nodei = self.Nodes[nodes[0]]
nodej = self.Nodes[nodes[1]]
self.Elements[id] = ElasticBeam3d(id,nodei,nodej,*args)
else:
raise Exception('Input Error - unknown element type (%s)' % type)
def AddPondingLoadCell(self,id,type,nodes,*args):
if id in self.PondingLoadCells:
raise Exception('Input Error - ponding load cell "%s" is already defined' % id)
if type.lower() == '2d':
nodei = self.Nodes[nodes[0]]
nodej = self.Nodes[nodes[1]]
self.PondingLoadCells[id] = PondingLoadCell2d_FE(id,nodei,nodej,*args)
elif type.lower() == '3d':
nodei = self.Nodes[nodes[0]]
nodej = self.Nodes[nodes[1]]
nodek = self.Nodes[nodes[2]]
nodel = self.Nodes[nodes[3]]
self.PondingLoadCells[id] = PondingLoadCell3d_FE(id,nodei,nodej,nodek,nodel,*args)
else:
raise Exception('Input Error - unknown ponding load cell type (%s)' % type)
def GetGlobalStiffnessMatrix(self):
# assemble stiffness matrix
K = np.zeros((self.ndof,self.ndof))
for iElement in self.Elements:
iK = self.Elements[iElement].StiffnessMatrix()
idofs = self.Elements[iElement].get_dof_ids()
for i in range(len(idofs)):
for j in range(len(idofs)):
K[idofs[i],idofs[j]] += iK[i,j]
return K
def GetNodalForceVector(self,load_factors):
# assemble force vector
f = np.zeros(self.ndof)
for load_pattern in load_factors:
lf = load_factors[load_pattern]
for iNode in self.Nodes:
for idof in self.Nodes[iNode].dofs:
if load_pattern in self.Nodes[iNode].dofs[idof].loads:
f[self.Nodes[iNode].dofs[idof].id] += lf*self.Nodes[iNode].dofs[idof].loads[load_pattern]
return f
def GetPondingForceVector(self,d,z):
# assemble force vector
f = np.zeros(self.ndof)
for iCell in self.PondingLoadCells:
self.PondingLoadCells[iCell].update(d)
ipf = self.PondingLoadCells[iCell].get_load_vector(z)
idofs = self.PondingLoadCells[iCell].get_dof_ids()
for i in range(len(idofs)):
f[idofs[i]] += ipf[i]
return f
def GetPondingVolume(self,d,z):
V = 0
dVdz = 0
for iCell in self.PondingLoadCells:
self.PondingLoadCells[iCell].update(d)
ires = self.PondingLoadCells[iCell].get_volume(z)
V += ires[0]
dVdz += ires[1]
return (V,dVdz)
    def SolveForDisp(self,K,f):
        """Solve K*d = f for the free dofs and return (d, r).

        Fully constrained dofs are dropped; dofs with an "equal dof" constraint
        (dof.constrained holds the id of the master dof rather than True/False)
        are condensed onto their master before solving.

        Args:
            K : full (ndof x ndof) stiffness matrix
            f : full (ndof,) force vector

        Returns:
            (d, r) : full displacement and reaction vectors, both length ndof
        """
        # identify free dofs and equal dof constraints
        free_dofs = list()
        dof_map = dict()  # global dof id -> row/column in the reduced system
        equal_dof_constraints = dict()  # slave dof id -> master dof id
        for iNode in self.Nodes:
            for idof in self.Nodes[iNode].dofs:
                if self.Nodes[iNode].dofs[idof].constrained == True:
                    pass  # fully fixed: excluded from the reduced system
                elif self.Nodes[iNode].dofs[idof].constrained == False:
                    free_dofs.append(self.Nodes[iNode].dofs[idof].id)
                    dof_map[self.Nodes[iNode].dofs[idof].id] = len(free_dofs) - 1
                else:
                    # .constrained is neither True nor False: it is the master dof id
                    equal_dof_constraints[self.Nodes[iNode].dofs[idof].id] = self.Nodes[iNode].dofs[idof].constrained
        # Assemble the free dofs
        K_free = np.zeros((len(free_dofs),len(free_dofs)))
        f_free = np.zeros(len(free_dofs))
        for i in range(len(free_dofs)):
            for j in range(len(free_dofs)):
                K_free[i,j] = K[free_dofs[i],free_dofs[j]]
            f_free[i] = f[free_dofs[i]]
        # Add in stiffness from equal dof constraints (condense slave rows/columns onto masters)
        for i in equal_dof_constraints:
            for j in free_dofs:
                K_free[dof_map[equal_dof_constraints[i]],dof_map[j]] += K[i,j]
                K_free[dof_map[j],dof_map[equal_dof_constraints[i]]] += K[j,i] # @todo - Check this.
            K_free[dof_map[equal_dof_constraints[i]],dof_map[equal_dof_constraints[i]]] += K[i,i]
            f_free[dof_map[equal_dof_constraints[i]]] += f[i]
        # Solve the system of equations
        if self.use_sparse_matrix_solver:
            K_free = scipy.sparse.csc_matrix(K_free)
            d_free = scipy.sparse.linalg.spsolve(K_free,f_free)
        else:
            d_free = np.linalg.solve(K_free,f_free)
        # assemble the entire deformation vector (constrained dofs stay at zero)
        d = np.zeros(self.ndof)
        for i in range(len(free_dofs)):
            d[free_dofs[i]] = d_free[i]
        for i in equal_dof_constraints:
            # a slave dof moves with its master
            d[i] = d_free[dof_map[equal_dof_constraints[i]]]
        # compute reaction vector
        r = np.dot(K,d) - f
        for i in equal_dof_constraints:
            # accumulate the slave reaction onto the master and zero out the slave
            r[equal_dof_constraints[i]] += r[i]
            r[i] = 0
        # Return displacement and reaction vector
        return (d,r)
    def StoreAnalysis(self):
        """Pre-factorize the reduced stiffness matrix for repeated solves.

        Builds the same reduced system as SolveForDisp, then factorizes it
        (sparse LU) or inverts it (dense) and stores everything needed by
        SolveForDispWithStored as attributes on self.
        """
        # identify free dofs and equal dof constraints
        free_dofs = list()
        dof_map = dict()  # global dof id -> row/column in the reduced system
        equal_dof_constraints = dict()  # slave dof id -> master dof id
        for iNode in self.Nodes:
            for idof in self.Nodes[iNode].dofs:
                if self.Nodes[iNode].dofs[idof].constrained == True:
                    pass  # fully fixed: excluded from the reduced system
                elif self.Nodes[iNode].dofs[idof].constrained == False:
                    free_dofs.append(self.Nodes[iNode].dofs[idof].id)
                    dof_map[self.Nodes[iNode].dofs[idof].id] = len(free_dofs) - 1
                else:
                    equal_dof_constraints[self.Nodes[iNode].dofs[idof].id] = self.Nodes[iNode].dofs[idof].constrained
        # Get Global Stiffness Matrix
        K = self.GetGlobalStiffnessMatrix()
        # Assemble the free dofs
        K_free = np.zeros((len(free_dofs),len(free_dofs)))
        for i in range(len(free_dofs)):
            for j in range(len(free_dofs)):
                K_free[i,j] = K[free_dofs[i],free_dofs[j]]
        # Add in stiffness from equal dof constraints
        for i in equal_dof_constraints:
            for j in free_dofs:
                K_free[dof_map[equal_dof_constraints[i]],dof_map[j]] += K[i,j]
                K_free[dof_map[j],dof_map[equal_dof_constraints[i]]] += K[j,i] # @todo - Check this.
            K_free[dof_map[equal_dof_constraints[i]],dof_map[equal_dof_constraints[i]]] += K[i,i]
        # Factorize (sparse) or invert (dense) the reduced stiffness matrix
        if self.use_sparse_matrix_solver:
            K_free = scipy.sparse.csc_matrix(K_free)
            inv_K_free = scipy.sparse.linalg.splu(K_free)
        else:
            inv_K_free = np.linalg.inv(K_free)
        # Store analysis matrices for SolveForDispWithStored
        self.stored_free_dofs = free_dofs
        self.stored_dof_map = dof_map
        self.stored_equal_dof_constraints = equal_dof_constraints
        self.stored_inv_K_free = inv_K_free
        self.stored_K = K
    def SolveForDispWithStored(self,f):
        """Solve for displacements using the matrices saved by StoreAnalysis.

        Args:
            f : full (ndof,) force vector

        Returns:
            (d, r) : full displacement and reaction vectors, both length ndof
        """
        # Assemble the free dofs
        f_free = np.zeros(len(self.stored_free_dofs))
        for i in range(len(self.stored_free_dofs)):
            f_free[i] = f[self.stored_free_dofs[i]]
        # Add in force from equal dof constraints (slave load goes to its master)
        for i in self.stored_equal_dof_constraints:
            f_free[self.stored_dof_map[self.stored_equal_dof_constraints[i]]] += f[i]
        # Solve the system of equations
        if self.use_sparse_matrix_solver:
            # back-substitute through the stored LU factorization
            d_free = self.stored_inv_K_free.solve(f_free)
        else:
            # multiply by the stored dense inverse
            d_free = self.stored_inv_K_free.dot(f_free)
        # assemble the entire deformation vector (constrained dofs stay at zero)
        d = np.zeros(self.ndof)
        for i in range(len(self.stored_free_dofs)):
            d[self.stored_free_dofs[i]] = d_free[i]
        for i in self.stored_equal_dof_constraints:
            d[i] = d_free[self.stored_dof_map[self.stored_equal_dof_constraints[i]]]
        # compute reaction vector
        r = self.stored_K.dot(d) - f
        for i in self.stored_equal_dof_constraints:
            r[self.stored_equal_dof_constraints[i]] += r[i]
            r[i] = 0
        # Return displacement and reaction vector
        return (d,r)
def PlotModel3d(self):
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for iNode in self.Nodes:
ax.scatter(self.Nodes[iNode].coords[0], self.Nodes[iNode].coords[1], self.Nodes[iNode].coords[2])
for iElement in self.Elements:
x = (self.Elements[iElement].nodeI.coords[0],self.Elements[iElement].nodeJ.coords[0])
y = (self.Elements[iElement].nodeI.coords[1],self.Elements[iElement].nodeJ.coords[1])
z = (self.Elements[iElement].nodeI.coords[2],self.Elements[iElement].nodeJ.coords[2])
ax.plot(x, y, z)
plt.show()
def PlotModel2d(self):
fig = plt.figure()
ax = fig.add_subplot(111)
for iNode in self.Nodes:
ax.scatter(self.Nodes[iNode].coords[0], self.Nodes[iNode].coords[1])
for iElement in self.Elements:
x = (self.Elements[iElement].nodeI.coords[0],self.Elements[iElement].nodeJ.coords[0])
y = (self.Elements[iElement].nodeI.coords[1],self.Elements[iElement].nodeJ.coords[1])
ax.plot(x, y)
plt.show()
def TotalReaction(self,results):
react = dict()
for iNode in self.Nodes:
for idof in self.Nodes[iNode].dofs:
if idof in react.keys():
react[idof] += self.Nodes[iNode].dofs[idof].react(results)
else:
react[idof] = self.Nodes[iNode].dofs[idof].react(results)
return react
def print_nodes(self,filename):
f = open(filename,'w')
f.write('ID,x,y,z\n')
for iNode in self.Nodes:
f.write('%s,%g,%g,%g\n'%(self.Nodes[iNode].id,self.Nodes[iNode].coords[0],self.Nodes[iNode].coords[1],self.Nodes[iNode].coords[2]))
f.close()
def print_dofs(self,filename,results):
f = open(filename,'w')
f.write('Node ID,dof type,dof id,constrained,dead load,disp,react\n')
for iNode in self.Nodes:
for idof in self.Nodes[iNode].dofs:
if 'DEAD' in self.Nodes[iNode].dofs[idof].loads:
p = self.Nodes[iNode].dofs[idof].loads['DEAD']
else:
p = 0
f.write('%s,%s,%s,%i,%g,%g,%g\n'%(self.Nodes[iNode].id,idof,self.Nodes[iNode].dofs[idof].id,self.Nodes[iNode].dofs[idof].constrained,p, \
self.Nodes[iNode].dofs[idof].disp(results),self.Nodes[iNode].dofs[idof].react(results)))
f.close()
class Node:
    """A finite-element node: an identifier, a coordinate sequence and a dict of dof objects."""

    def __init__(self,id,coords,dofs):
        # Store the constructor arguments verbatim; no copies are made.
        self.id, self.coords, self.dofs = id, coords, dofs
class dof:
    """A single degree of freedom: global equation id, constraint state and applied loads."""

    def __init__(self,id):
        self.id = id
        # False = free; True = fully fixed; a dof id = equal-dof constraint to that master
        self.constrained = False
        # maps load-pattern name -> load magnitude
        self.loads = {}

    def disp(self,results):
        """Displacement of this dof read from a results object."""
        return results.d[self.id]

    def react(self,results):
        """Reaction at this dof read from a results object."""
        return results.r[self.id]
class ElasticBeam2d:
    """2d Euler-Bernoulli beam element with axial stiffness (3 dofs per node: UX, UY, RZ)."""

    def __init__(self,id,nodeI,nodeJ,E,I,A):
        """
        Args:
            id    : element identifier
            nodeI : start node (Node object)
            nodeJ : end node (Node object)
            E     : elastic modulus
            I     : moment of inertia
            A     : cross-sectional area
        """
        self.id = id
        self.nodeI = nodeI
        self.nodeJ = nodeJ
        self.E = E
        self.I = I
        self.A = A
    def get_dof_ids(self):
        """Return the 6 global dof ids in element order: (UX, UY, RZ) at node I, then node J."""
        dofs = [self.nodeI.dofs['UX'].id,
                self.nodeI.dofs['UY'].id,
                self.nodeI.dofs['RZ'].id,
                self.nodeJ.dofs['UX'].id,
                self.nodeJ.dofs['UY'].id,
                self.nodeJ.dofs['RZ'].id]
        return dofs
    def StiffnessMatrix(self):
        """Return the 6x6 element stiffness matrix in global coordinates."""
        nIx = self.nodeI.coords[0]
        nIy = self.nodeI.coords[1]
        nJx = self.nodeJ.coords[0]
        nJy = self.nodeJ.coords[1]
        # element length and direction cosines
        L = hypot(nJx-nIx,nJy-nIy);
        lx = (nJx-nIx)/L;
        ly = (nJy-nIy)/L;
        # standard beam-column stiffness coefficients
        k1 = self.E*self.A/L;
        k2 = 12*self.E*self.I/(L*L*L);
        k3 = 6*self.E*self.I/(L*L);
        k4 = 4*self.E*self.I/L;
        k5 = 2*self.E*self.I/L;
        # local stiffness matrix (np.mat so '*' below is matrix multiplication)
        Kp = np.mat([[k1,0,0,-k1,0,0],
                     [0,k2,k3,0,-k2,k3],
                     [0,k3,k4,0,-k3,k5],
                     [-k1,0,0,k1,0,0],
                     [0,-k2,-k3,0,k2,-k3],
                     [0,k3,k5,0,-k3,k4]]);
        # coordinate transformation (local -> global)
        T = np.mat([[ lx,ly,0,  0, 0,0],
                    [-ly,lx,0,  0, 0,0],
                    [  0, 0,1,  0, 0,0],
                    [  0, 0,0, lx,ly,0],
                    [  0, 0,0,-ly,lx,0],
                    [  0, 0,0,  0, 0,1]]);
        K = np.transpose(T)*Kp*T
        return K
    def disp(self,results):
        """Return the 6 element displacements extracted from a results object."""
        dofs = self.get_dof_ids()
        d = np.zeros(len(dofs))
        for i in range(len(dofs)):
            d[i] = results.d[dofs[i]]
        return d
    def force(self,results):
        """Return the element end forces K_ele * d_ele for the given results."""
        K_ele = self.StiffnessMatrix()
        d_ele = self.disp(results)
        return np.dot(K_ele,d_ele)
class ElasticBeam3d:
    """3d elastic beam element (6 dofs per node) with optional end releases."""

    # End-release flags: when True, the corresponding local row/column of the
    # stiffness matrix is zeroed (moment/torsion released at that end).
    release_Mzi = False
    release_Mzj = False
    release_Myi = False
    release_Myj = False
    release_Ti  = False
    release_Tj  = False
    def __init__(self,id,nodeI,nodeJ,vec_xz,E,Iz,Iy,A,GJ):
        """
        Args:
            id     : element identifier
            nodeI  : start node (Node object)
            nodeJ  : end node (Node object)
            vec_xz : a vector in the local x-z plane (defines element orientation)
            E      : elastic modulus
            Iz     : moment of inertia about the local z axis
            Iy     : moment of inertia about the local y axis
            A      : cross-sectional area
            GJ     : torsional rigidity
        """
        self.id = id
        self.nodeI = nodeI
        self.nodeJ = nodeJ
        self.vec_xz = vec_xz # A vector in the xz plane of the local coordinate system
        self.E = E
        self.Iz = Iz
        self.Iy = Iy
        self.A = A
        self.GJ = GJ
    def get_dof_ids(self):
        """Return the 12 global dof ids: (UX,UY,UZ,RX,RY,RZ) at node I, then node J."""
        dofs = [self.nodeI.dofs['UX'].id,
                self.nodeI.dofs['UY'].id,
                self.nodeI.dofs['UZ'].id,
                self.nodeI.dofs['RX'].id,
                self.nodeI.dofs['RY'].id,
                self.nodeI.dofs['RZ'].id,
                self.nodeJ.dofs['UX'].id,
                self.nodeJ.dofs['UY'].id,
                self.nodeJ.dofs['UZ'].id,
                self.nodeJ.dofs['RX'].id,
                self.nodeJ.dofs['RY'].id,
                self.nodeJ.dofs['RZ'].id]
        return dofs
    def StiffnessMatrix(self,do_releases = True):
        """Return the 12x12 element stiffness matrix in global coordinates.

        Args:
            do_releases : when True, apply the release_* flags by zeroing rows/columns
        """
        nIx = self.nodeI.coords[0]
        nIy = self.nodeI.coords[1]
        nIz = self.nodeI.coords[2]
        nJx = self.nodeJ.coords[0]
        nJy = self.nodeJ.coords[1]
        nJz = self.nodeJ.coords[2]
        # element length
        L = sqrt((nJx-nIx)*(nJx-nIx)+(nJy-nIy)*(nJy-nIy)+(nJz-nIz)*(nJz-nIz));
        # standard 3d beam stiffness coefficients
        k1 = self.E*self.A/L
        k2 = 12*self.E*self.Iz/(L*L*L)
        k3 = 6*self.E*self.Iz/(L*L)
        k4 = 12*self.E*self.Iy/(L*L*L)
        k5 = 6*self.E*self.Iy/(L*L)
        k6 = self.GJ/L
        k7 = 4*self.E*self.Iy/L
        k8 = 2*self.E*self.Iy/L
        k9 = 4*self.E*self.Iz/L
        k10 = 2*self.E*self.Iz/L
        # local stiffness matrix (np.mat so '*' below is matrix multiplication)
        Kp = np.mat([[ k1,  0,  0,  0,  0,  0,-k1,  0,  0,  0,  0,  0],
                     [  0, k2,  0,  0,  0, k3,  0,-k2,  0,  0,  0, k3],
                     [  0,  0, k4,  0,-k5,  0,  0,  0,-k4,  0,-k5,  0],
                     [  0,  0,  0, k6,  0,  0,  0,  0,  0,-k6,  0,  0],
                     [  0,  0,-k5,  0, k7,  0,  0,  0, k5,  0, k8,  0],
                     [  0, k3,  0,  0,  0, k9,  0,-k3,  0,  0,  0,k10],
                     [-k1,  0,  0,  0,  0,  0, k1,  0,  0,  0,  0,  0],
                     [  0,-k2,  0,  0,  0,-k3,  0, k2,  0,  0,  0,-k3],
                     [  0,  0,-k4,  0, k5,  0,  0,  0, k4,  0, k5,  0],
                     [  0,  0,  0,-k6,  0,  0,  0,  0,  0, k6,  0,  0],
                     [  0,  0,-k5,  0, k8,  0,  0,  0, k5,  0, k7,  0],
                     [  0, k3,  0,  0,  0,k10,  0,-k3,  0,  0,  0, k9]]);
        if do_releases:
            # Zero the released local rows/columns before transformation.
            if self.release_Ti:
                Kp[3,:] = 0;
                Kp[:,3] = 0;
            if self.release_Myi:
                Kp[4,:] = 0;
                Kp[:,4] = 0;
            if self.release_Mzi:
                Kp[5,:] = 0;
                Kp[:,5] = 0;
            if self.release_Tj:
                Kp[9,:] = 0;
                Kp[:,9] = 0;
            if self.release_Myj:
                Kp[10,:] = 0;
                Kp[:,10] = 0;
            if self.release_Mzj:
                Kp[11,:] = 0;
                Kp[:,11] = 0;
        # Build the local axes: x along the element, y = x cross vec_xz, z = x cross y.
        local_x = np.array([(nJx-nIx)/L, (nJy-nIy)/L, (nJz-nIz)/L])
        local_xz = np.array([self.vec_xz[0], self.vec_xz[1], self.vec_xz[2]])
        local_y = np.cross(local_x,local_xz)
        local_y = local_y/np.linalg.norm(local_y)
        local_z = np.cross(local_x,local_y)
        local_z = local_z/np.linalg.norm(local_z)
        # direction cosines of the local axes
        lx = local_x[0]
        mx = local_x[1]
        nx = local_x[2]
        ly = local_y[0]
        my = local_y[1]
        ny = local_y[2]
        lz = local_z[0]
        mz = local_z[1]
        nz = local_z[2]
        # coordinate transformation (local -> global)
        T = np.mat([[lx,mx,nx, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [ly,my,ny, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [lz,mz,nz, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [ 0, 0, 0,lx,mx,nx, 0, 0, 0, 0, 0, 0],
                    [ 0, 0, 0,ly,my,ny, 0, 0, 0, 0, 0, 0],
                    [ 0, 0, 0,lz,mz,nz, 0, 0, 0, 0, 0, 0],
                    [ 0, 0, 0, 0, 0, 0,lx,mx,nx, 0, 0, 0],
                    [ 0, 0, 0, 0, 0, 0,ly,my,ny, 0, 0, 0],
                    [ 0, 0, 0, 0, 0, 0,lz,mz,nz, 0, 0, 0],
                    [ 0, 0, 0, 0, 0, 0, 0, 0, 0,lx,mx,nx],
                    [ 0, 0, 0, 0, 0, 0, 0, 0, 0,ly,my,ny],
                    [ 0, 0, 0, 0, 0, 0, 0, 0, 0,lz,mz,nz]]);
        K = np.transpose(T)*Kp*T
        return K
    def disp(self,results):
        """Return the 12 element displacements extracted from a results object."""
        dofs = self.get_dof_ids()
        d = np.zeros(12)
        for i in range(len(dofs)):
            d[i] = results.d[dofs[i]]
        return d
    def force(self,results):
        """Return the element end forces (with releases applied) for the given results."""
        K_ele = self.StiffnessMatrix(True)
        d_ele = self.disp(results)
        return np.dot(K_ele,d_ele)
class PondingLoadCell2d_FE(PondingLoadCell.PondingLoadCell2d):
    """2d ponding load cell tied to two FE nodes.

    Caches nodal data under the attribute names (xI, yI, xJ, yJ, dyI, dyJ,
    gamma, tw) that the base class works with.
    """

    def __init__(self,id,nodeI,nodeJ,gamma,tw):
        self.id = id
        self.nodeI = nodeI
        self.nodeJ = nodeJ
        # cache nodal coordinates under the names the base class expects
        self.xI, self.yI = nodeI.coords[0], nodeI.coords[1]
        self.xJ, self.yJ = nodeJ.coords[0], nodeJ.coords[1]
        self.gamma = gamma  # fluid density
        self.tw = tw

    def update(self,d):
        """Pull the current vertical ('UY') displacements of both nodes from *d*."""
        self.dyI = d[self.nodeI.dofs['UY'].id]
        self.dyJ = d[self.nodeJ.dofs['UY'].id]

    def get_dof_ids(self):
        """Return the global dof ids this cell loads: UY at node I, then node J."""
        return [self.nodeI.dofs['UY'].id, self.nodeJ.dofs['UY'].id]
class PondingLoadCell3d_FE(PondingLoadCell.PondingLoadCell3d):
    """3d ponding load cell tied to four FE nodes (defined counterclockwise).

    Caches nodal data under the attribute names the base class works with.
    """

    def __init__(self,id,nodeI,nodeJ,nodeK,nodeL,gamma,na=1,nb=1):
        self.id = id
        # Define nodes counterclockwise; cache coordinates under base-class names.
        self.nodeI = nodeI
        self.xI, self.yI, self.zI = nodeI.coords[0], nodeI.coords[1], nodeI.coords[2]
        self.nodeJ = nodeJ
        self.xJ, self.yJ, self.zJ = nodeJ.coords[0], nodeJ.coords[1], nodeJ.coords[2]
        self.nodeK = nodeK
        self.xK, self.yK, self.zK = nodeK.coords[0], nodeK.coords[1], nodeK.coords[2]
        self.nodeL = nodeL
        self.xL, self.yL, self.zL = nodeL.coords[0], nodeL.coords[1], nodeL.coords[2]
        self.gamma = gamma  # fluid density
        self.na = na  # number of sub-cells along IJ
        self.nb = nb  # number of sub-cells along JK

    def update(self,d):
        """Pull the current vertical ('UZ') displacements of all four nodes from *d*."""
        self.dzI = d[self.nodeI.dofs['UZ'].id]
        self.dzJ = d[self.nodeJ.dofs['UZ'].id]
        self.dzK = d[self.nodeK.dofs['UZ'].id]
        self.dzL = d[self.nodeL.dofs['UZ'].id]

    def get_dof_ids(self):
        """Return the global dof ids this cell loads: UZ at nodes I, J, K, L."""
        return [self.nodeI.dofs['UZ'].id,
                self.nodeJ.dofs['UZ'].id,
                self.nodeK.dofs['UZ'].id,
                self.nodeL.dofs['UZ'].id]
class LinearAnalysis:
    """One-shot linear analysis: assemble K and f, solve, and keep the results on self."""

    def __init__(self,model):
        self.model = model

    def run(self,load_factors):
        """Solve the model for the given {load_pattern: factor} map.

        Stores the displacement vector (self.d), reaction vector (self.r)
        and the load factors used (self.load_factors).
        """
        stiffness = self.model.GetGlobalStiffnessMatrix()
        forces = self.model.GetNodalForceVector(load_factors)
        d, r = self.model.SolveForDisp(stiffness, forces)
        # Results are exposed as attributes so dof.disp/react can read them.
        self.load_factors = load_factors
        self.d = d
        self.r = r
class PondingAnalysis:
    """Iterative ponding analysis driver.

    Repeatedly solves the model under combined nodal and fluid loads until the
    displacement increment between iterations falls below tol_z.
    """

    max_iterations_z = 20   # max fixed-point iterations at constant water level
    max_iter_const_V = 20   # max outer iterations for the constant-volume analysis
    max_iter_find_z = 10    # max Newton iterations to find z for a given volume
    tol_z = 1e-3            # convergence tolerance on the displacement increment
    tol_V = 1e-4            # relative tolerance on the ponded volume
    output_level = 0        # > 0 prints per-iteration diagnostics
    use_stored_analysis = False  # use matrices pre-factorized by StoreAnalysis
    def __init__(self,model,type='Constant_Level'):
        """
        Args:
            model : the FE model to analyze
            type  : 'Constant_Level', 'Modified_Rain_Load', 'Constant_Volume'
                    or 'No_Ponding_Effect'
        """
        self.model = model
        self.type = type
    def run(self,load_factors,x):
        """Run the analysis; returns 0 on convergence, -1 on failure.

        Args:
            load_factors : {load_pattern: factor} map for the nodal loads
            x            : water level z, or the water volume V for 'Constant_Volume'

        NOTE(review): the 'Constant_Level' and 'Modified_Rain_Load' branches fall
        through and implicitly return None when the iteration does not converge.
        """
        if self.type == 'Constant_Level':
            z = x
            f_nodal = self.model.GetNodalForceVector(load_factors)
            # Run Initial Analysis (without ponding load)
            if self.use_stored_analysis:
                (d,r) = self.model.SolveForDispWithStored(f_nodal)
            else:
                K = self.model.GetGlobalStiffnessMatrix()
                (d,r) = self.model.SolveForDisp(K,f_nodal)
            # Iterate: the ponding load depends on d, so solve until d stops changing
            for i in range(self.max_iterations_z):
                d_last = d
                f_ponding = self.model.GetPondingForceVector(d,z)
                if self.output_level > 0:
                    print('Iteration %3i, Total Ponding Load = %.6f' % (i,-np.sum(f_ponding)))
                f = f_nodal + f_ponding
                if self.use_stored_analysis:
                    (d,r) = self.model.SolveForDispWithStored(f)
                else:
                    (d,r) = self.model.SolveForDisp(K,f)
                if self.output_level > 0:
                    print('Min Deflection: %.3f \tNode Disp Incr. %.6f' % (min(d),np.linalg.norm(d-d_last)))
                if np.linalg.norm(d-d_last) < self.tol_z:
                    if self.output_level > 0:
                        print('Converged')
                    # store results
                    self.load_factors = load_factors
                    self.z = z
                    self.d = d
                    self.r = r
                    return 0
        elif self.type == 'Modified_Rain_Load':
            z = x
            # Converge the ponding load under dead load only
            f_nodal = self.model.GetNodalForceVector({'DEAD':1.0})
            # Run Initial Analysis
            if self.use_stored_analysis:
                (d,r) = self.model.SolveForDispWithStored(f_nodal)
            else:
                K = self.model.GetGlobalStiffnessMatrix()
                (d,r) = self.model.SolveForDisp(K,f_nodal)
            # Iterate
            for i in range(self.max_iterations_z):
                d_last = d
                f = f_nodal + self.model.GetPondingForceVector(d,z)
                if self.use_stored_analysis:
                    (d,r) = self.model.SolveForDispWithStored(f)
                else:
                    (d,r) = self.model.SolveForDisp(K,f)
                if self.output_level > 0:
                    print('Min Deflection: %.3f \tNode Disp Incr. %.6f' % (min(d),np.linalg.norm(d-d_last)))
                if np.linalg.norm(d-d_last) < self.tol_z:
                    if self.output_level > 0:
                        print('Converged')
                    # subtract out dead load and add back in defined load combination
                    f_nodal_1 = self.model.GetNodalForceVector({'DEAD':-1.0})
                    f_nodal_2 = self.model.GetNodalForceVector(load_factors)
                    if self.use_stored_analysis:
                        (d1,r1) = self.model.SolveForDispWithStored(f_nodal_1)
                        (d2,r2) = self.model.SolveForDispWithStored(f_nodal_2)
                    else:
                        K = self.model.GetGlobalStiffnessMatrix()
                        (d1,r1) = self.model.SolveForDisp(K,f_nodal_1)
                        (d2,r2) = self.model.SolveForDisp(K,f_nodal_2)
                    # store results (factored rain load plus load combination)
                    self.load_factors = load_factors
                    self.z = z
                    self.d = 1.6*(d+d1)+d2 # @todo make option for other rain load factors
                    self.r = 1.6*(r+r1)+r2
                    return 0
        elif self.type == 'Constant_Volume':
            V = x
            f_nodal = self.model.GetNodalForceVector(load_factors)
            # Run Initial Analysis
            if self.use_stored_analysis:
                (d,r) = self.model.SolveForDispWithStored(f_nodal)
            else:
                K = self.model.GetGlobalStiffnessMatrix()
                (d,r) = self.model.SolveForDisp(K,f_nodal)
            # Iterate: outer loop converges d, inner loop finds z for the given V
            z = 1
            for i in range(self.max_iter_const_V):
                d_last = d
                # determine z given V (Newton iteration using dV/dz)
                for j in range(self.max_iter_find_z):
                    z_last = z
                    res = self.model.GetPondingVolume(d,z)
                    V_calc = res[0]
                    dVdz = res[1]
                    if self.output_level > 0:
                        print('Iteration = %i' % j)
                        print('V = %g' % V)
                        print('V_calc = %g' % V_calc)
                        print('z = %g' % (z + (V-V_calc)/dVdz))
                        print('z_last = %g' % z_last)
                    if abs(V-V_calc)/V < self.tol_V:
                        if self.output_level > 0:
                            print('found z')
                        break
                    z = z + (V-V_calc)/dVdz
                    if j == self.max_iter_find_z-1:
                        if self.output_level > 0:
                            print('Could not find z')
                        return -1
                # solve for d given z
                f = f_nodal + self.model.GetPondingForceVector(d,z)
                if self.use_stored_analysis:
                    (d,r) = self.model.SolveForDispWithStored(f)
                else:
                    (d,r) = self.model.SolveForDisp(K,f)
                if self.output_level > 0:
                    print('Min Deflection: %.3f \tNode Disp Incr. %.6f' % (min(d),np.linalg.norm(d-d_last)))
                if np.linalg.norm(d-d_last) < self.tol_z:
                    if self.output_level > 0:
                        print('Converged')
                    # store results
                    self.load_factors = load_factors
                    self.z = z
                    self.d = d
                    self.r = r
                    return 0
                if i == self.max_iter_const_V-1:
                    if self.output_level > 0:
                        print('Could not find a solution')
                    return -1
        elif self.type == 'No_Ponding_Effect':
            z = x
            # Single linear solve: ponding load evaluated on the undeformed structure
            f = self.model.GetNodalForceVector(load_factors) + self.model.GetPondingForceVector(np.zeros(self.model.ndof),z)
            if self.use_stored_analysis:
                (d,r) = self.model.SolveForDispWithStored(f)
            else:
                K = self.model.GetGlobalStiffnessMatrix()
                (d,r) = self.model.SolveForDisp(K,f)
            # store results
            self.load_factors = load_factors
            self.z = z
            self.d = d
            self.r = r
            return 0
| [
"numpy.mat",
"numpy.linalg.solve",
"numpy.cross",
"math.sqrt",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.linalg.inv",
"numpy.sum",
"numpy.linalg.norm",
"math.hypot",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((2400, 2432), 'numpy.zeros', 'np.zeros', (['(self.ndof, self.ndof)'], {}), '((self.ndof, self.ndof))\n', (2408, 2432), True, 'import numpy as np\n'), ((2845, 2864), 'numpy.zeros', 'np.zeros', (['self.ndof'], {}), '(self.ndof)\n', (2853, 2864), True, 'import numpy as np\n'), ((3333, 3352), 'numpy.zeros', 'np.zeros', (['self.ndof'], {}), '(self.ndof)\n', (3341, 3352), True, 'import numpy as np\n'), ((5880, 5899), 'numpy.zeros', 'np.zeros', (['self.ndof'], {}), '(self.ndof)\n', (5888, 5899), True, 'import numpy as np\n'), ((9123, 9142), 'numpy.zeros', 'np.zeros', (['self.ndof'], {}), '(self.ndof)\n', (9131, 9142), True, 'import numpy as np\n'), ((9783, 9795), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9793, 9795), True, 'import matplotlib.pyplot as plt\n'), ((10361, 10371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10369, 10371), True, 'import matplotlib.pyplot as plt\n'), ((10414, 10426), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10424, 10426), True, 'import matplotlib.pyplot as plt\n'), ((10845, 10855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10853, 10855), True, 'import matplotlib.pyplot as plt\n'), ((13359, 13386), 'math.hypot', 'hypot', (['(nJx - nIx)', '(nJy - nIy)'], {}), '(nJx - nIx, nJy - nIy)\n', (13364, 13386), False, 'from math import hypot, sqrt\n'), ((13641, 13806), 'numpy.mat', 'np.mat', (['[[k1, 0, 0, -k1, 0, 0], [0, k2, k3, 0, -k2, k3], [0, k3, k4, 0, -k3, k5], [\n -k1, 0, 0, k1, 0, 0], [0, -k2, -k3, 0, k2, -k3], [0, k3, k5, 0, -k3, k4]]'], {}), '([[k1, 0, 0, -k1, 0, 0], [0, k2, k3, 0, -k2, k3], [0, k3, k4, 0, -k3,\n k5], [-k1, 0, 0, k1, 0, 0], [0, -k2, -k3, 0, k2, -k3], [0, k3, k5, 0, -\n k3, k4]])\n', (13647, 13806), True, 'import numpy as np\n'), ((13888, 14030), 'numpy.mat', 'np.mat', (['[[lx, ly, 0, 0, 0, 0], [-ly, lx, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0,\n lx, ly, 0], [0, 0, 0, -ly, lx, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[lx, ly, 0, 0, 0, 0], [-ly, lx, 0, 0, 0, 0], 
[0, 0, 1, 0, 0, 0], [0,\n 0, 0, lx, ly, 0], [0, 0, 0, -ly, lx, 0], [0, 0, 0, 0, 0, 1]])\n', (13894, 14030), True, 'import numpy as np\n'), ((14516, 14536), 'numpy.dot', 'np.dot', (['K_ele', 'd_ele'], {}), '(K_ele, d_ele)\n', (14522, 14536), True, 'import numpy as np\n'), ((15885, 15976), 'math.sqrt', 'sqrt', (['((nJx - nIx) * (nJx - nIx) + (nJy - nIy) * (nJy - nIy) + (nJz - nIz) * (nJz -\n nIz))'], {}), '((nJx - nIx) * (nJx - nIx) + (nJy - nIy) * (nJy - nIy) + (nJz - nIz) *\n (nJz - nIz))\n', (15889, 15976), False, 'from math import hypot, sqrt\n'), ((16335, 16887), 'numpy.mat', 'np.mat', (['[[k1, 0, 0, 0, 0, 0, -k1, 0, 0, 0, 0, 0], [0, k2, 0, 0, 0, k3, 0, -k2, 0, 0,\n 0, k3], [0, 0, k4, 0, -k5, 0, 0, 0, -k4, 0, -k5, 0], [0, 0, 0, k6, 0, 0,\n 0, 0, 0, -k6, 0, 0], [0, 0, -k5, 0, k7, 0, 0, 0, k5, 0, k8, 0], [0, k3,\n 0, 0, 0, k9, 0, -k3, 0, 0, 0, k10], [-k1, 0, 0, 0, 0, 0, k1, 0, 0, 0, 0,\n 0], [0, -k2, 0, 0, 0, -k3, 0, k2, 0, 0, 0, -k3], [0, 0, -k4, 0, k5, 0, \n 0, 0, k4, 0, k5, 0], [0, 0, 0, -k6, 0, 0, 0, 0, 0, k6, 0, 0], [0, 0, -\n k5, 0, k8, 0, 0, 0, k5, 0, k7, 0], [0, k3, 0, 0, 0, k10, 0, -k3, 0, 0, \n 0, k9]]'], {}), '([[k1, 0, 0, 0, 0, 0, -k1, 0, 0, 0, 0, 0], [0, k2, 0, 0, 0, k3, 0, -\n k2, 0, 0, 0, k3], [0, 0, k4, 0, -k5, 0, 0, 0, -k4, 0, -k5, 0], [0, 0, 0,\n k6, 0, 0, 0, 0, 0, -k6, 0, 0], [0, 0, -k5, 0, k7, 0, 0, 0, k5, 0, k8, 0\n ], [0, k3, 0, 0, 0, k9, 0, -k3, 0, 0, 0, k10], [-k1, 0, 0, 0, 0, 0, k1,\n 0, 0, 0, 0, 0], [0, -k2, 0, 0, 0, -k3, 0, k2, 0, 0, 0, -k3], [0, 0, -k4,\n 0, k5, 0, 0, 0, k4, 0, k5, 0], [0, 0, 0, -k6, 0, 0, 0, 0, 0, k6, 0, 0],\n [0, 0, -k5, 0, k8, 0, 0, 0, k5, 0, k7, 0], [0, k3, 0, 0, 0, k10, 0, -k3,\n 0, 0, 0, k9]])\n', (16341, 16887), True, 'import numpy as np\n'), ((17808, 17869), 'numpy.array', 'np.array', (['[(nJx - nIx) / L, (nJy - nIy) / L, (nJz - nIz) / L]'], {}), '([(nJx - nIx) / L, (nJy - nIy) / L, (nJz - nIz) / L])\n', (17816, 17869), True, 'import numpy as np\n'), ((17877, 17935), 'numpy.array', 'np.array', 
(['[self.vec_xz[0], self.vec_xz[1], self.vec_xz[2]]'], {}), '([self.vec_xz[0], self.vec_xz[1], self.vec_xz[2]])\n', (17885, 17935), True, 'import numpy as np\n'), ((17955, 17982), 'numpy.cross', 'np.cross', (['local_x', 'local_xz'], {}), '(local_x, local_xz)\n', (17963, 17982), True, 'import numpy as np\n'), ((18052, 18078), 'numpy.cross', 'np.cross', (['local_x', 'local_y'], {}), '(local_x, local_y)\n', (18060, 18078), True, 'import numpy as np\n'), ((18378, 18905), 'numpy.mat', 'np.mat', (['[[lx, mx, nx, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ly, my, ny, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [lz, mz, nz, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, lx, mx, nx, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, ly, my, ny, 0, 0, 0, 0, 0, 0], [0, 0, 0, lz,\n mz, nz, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, lx, mx, nx, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, ly, my, ny, 0, 0, 0], [0, 0, 0, 0, 0, 0, lz, mz, nz, 0, \n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, lx, mx, nx], [0, 0, 0, 0, 0, 0, 0, 0,\n 0, ly, my, ny], [0, 0, 0, 0, 0, 0, 0, 0, 0, lz, mz, nz]]'], {}), '([[lx, mx, nx, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ly, my, ny, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [lz, mz, nz, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, lx, mx,\n nx, 0, 0, 0, 0, 0, 0], [0, 0, 0, ly, my, ny, 0, 0, 0, 0, 0, 0], [0, 0, \n 0, lz, mz, nz, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, lx, mx, nx, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, ly, my, ny, 0, 0, 0], [0, 0, 0, 0, 0, 0, lz, mz,\n nz, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, lx, mx, nx], [0, 0, 0, 0, 0, \n 0, 0, 0, 0, ly, my, ny], [0, 0, 0, 0, 0, 0, 0, 0, 0, lz, mz, nz]])\n', (18384, 18905), True, 'import numpy as np\n'), ((19228, 19240), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (19236, 19240), True, 'import numpy as np\n'), ((19463, 19483), 'numpy.dot', 'np.dot', (['K_ele', 'd_ele'], {}), '(K_ele, d_ele)\n', (19469, 19483), True, 'import numpy as np\n'), ((5779, 5810), 'numpy.linalg.solve', 'np.linalg.solve', (['K_free', 'f_free'], {}), '(K_free, f_free)\n', (5794, 5810), True, 'import numpy as np\n'), ((6136, 6148), 
'numpy.dot', 'np.dot', (['K', 'd'], {}), '(K, d)\n', (6142, 6148), True, 'import numpy as np\n'), ((8094, 8115), 'numpy.linalg.inv', 'np.linalg.inv', (['K_free'], {}), '(K_free)\n', (8107, 8115), True, 'import numpy as np\n'), ((18009, 18032), 'numpy.linalg.norm', 'np.linalg.norm', (['local_y'], {}), '(local_y)\n', (18023, 18032), True, 'import numpy as np\n'), ((18105, 18128), 'numpy.linalg.norm', 'np.linalg.norm', (['local_z'], {}), '(local_z)\n', (18119, 18128), True, 'import numpy as np\n'), ((14150, 14165), 'numpy.transpose', 'np.transpose', (['T'], {}), '(T)\n', (14162, 14165), True, 'import numpy as np\n'), ((19107, 19122), 'numpy.transpose', 'np.transpose', (['T'], {}), '(T)\n', (19119, 19122), True, 'import numpy as np\n'), ((23611, 23637), 'numpy.linalg.norm', 'np.linalg.norm', (['(d - d_last)'], {}), '(d - d_last)\n', (23625, 23637), True, 'import numpy as np\n'), ((24986, 25012), 'numpy.linalg.norm', 'np.linalg.norm', (['(d - d_last)'], {}), '(d - d_last)\n', (25000, 25012), True, 'import numpy as np\n'), ((28260, 28286), 'numpy.linalg.norm', 'np.linalg.norm', (['(d - d_last)'], {}), '(d - d_last)\n', (28274, 28286), True, 'import numpy as np\n'), ((23565, 23591), 'numpy.linalg.norm', 'np.linalg.norm', (['(d - d_last)'], {}), '(d - d_last)\n', (23579, 23591), True, 'import numpy as np\n'), ((28997, 29022), 'numpy.zeros', 'np.zeros', (['self.model.ndof'], {}), '(self.model.ndof)\n', (29005, 29022), True, 'import numpy as np\n'), ((23158, 23175), 'numpy.sum', 'np.sum', (['f_ponding'], {}), '(f_ponding)\n', (23164, 23175), True, 'import numpy as np\n'), ((24940, 24966), 'numpy.linalg.norm', 'np.linalg.norm', (['(d - d_last)'], {}), '(d - d_last)\n', (24954, 24966), True, 'import numpy as np\n'), ((28214, 28240), 'numpy.linalg.norm', 'np.linalg.norm', (['(d - d_last)'], {}), '(d - d_last)\n', (28228, 28240), True, 'import numpy as np\n')] |
"""
This file contains the LPSolver part of the algorithm
The LPSolver uses the symbolic bounds and the Gurobi solver to verify properties as Safe, or to produce candidates for
counter examples.
Author: <NAME> <<EMAIL>>
"""
import os
import numpy as np
import gurobipy as grb
from src.algorithm.esip import ESIP
class LPSolver:
    """
    The LPSolver class combines the symbolic bounds from ESIP and the Gurobi LP solver to verify
    properties as safe or to produce candidates for counter examples.
    """

    def __init__(self, input_size: int, output_size: int):
        """
        Args:
            input_size  : The number of input nodes
            output_size : The number of output nodes
        """
        self._disable_log()
        self._grb_solver = grb.Model("NN")
        self._input_size = input_size
        self._output_size = output_size

        self._input_variables = None
        self._output_variables = None

        self._init_variables()

    @property
    def grb_solver(self):
        return self._grb_solver

    @property
    def input_variables(self):
        return self._input_variables

    @property
    def output_variables(self):
        return self._output_variables

    def _init_variables(self):
        """
        Initializes the Gurobi input and output variables

        The variables are created unbounded; set_variable_bounds() tightens them later
        using the symbolic bounds from ESIP (and, optionally, given output bounds).
        """
        self._remove_variables()

        self._input_variables = (self._grb_solver.addVars(range(self._input_size), lb=-grb.GRB.INFINITY,
                                                          ub=grb.GRB.INFINITY,
                                                          vtype=grb.GRB.CONTINUOUS, name="Input"))
        self._output_variables = (self._grb_solver.addVars(range(self._output_size), lb=-grb.GRB.INFINITY,
                                                           ub=grb.GRB.INFINITY,
                                                           vtype=grb.GRB.CONTINUOUS, name="Output"))
        self._grb_solver.update()

    # noinspection PyArgumentList
    def _remove_variables(self):
        """
        Removes all input and output variables from the Gurobi model
        """
        if self._input_variables is not None:
            self.grb_solver.remove(self._input_variables)
            self._input_variables = None
        if self._output_variables is not None:
            self.grb_solver.remove(self._output_variables)
            # Bugfix: this used to clear the unused attribute _node_variables, leaving
            # _output_variables pointing at variables already removed from the model.
            self._output_variables = None
        self._grb_solver.update()

    def solve(self) -> bool:
        """
        Solves the system with current constraints

        All variables are initialized to the given _input_bounds and _output_bounds. Symbolic interval propagation
        is used to further refine the output bounds and to add the linear constraints on the output resulting from
        the symbolic intervals.

        Returns:
            True if the system is feasible, else False

        Raises:
            UnexpectedGurobiStatusException : if Gurobi terminates with a status other
                                              than OPTIMAL (2) or INFEASIBLE (3)
        """
        # Uncommenting this line avoids gurobi status code 4 (INF_OR_UNBD)
        # self._grb_solver.setParam("DualReductions", 0)

        # Using dual simplex as it is numerically stable and the fastest for our tests
        self._grb_solver.setParam("Method", 0)
        self._grb_solver.optimize()

        if self._grb_solver.status == 2:  # Found an assignment
            return True
        elif self._grb_solver.status == 3:  # Infeasible system
            return False
        else:
            # Bugfix: the message used the non-existent attribute '_status', which raised
            # AttributeError here instead of reporting the actual Gurobi status.
            raise UnexpectedGurobiStatusException(f"Gurobi _status: {self._grb_solver.status}")

    # noinspection PyArgumentList
    def set_variable_bounds(self, bounds: ESIP, output_bounds: np.ndarray = None, set_input: bool = True):
        """
        Sets the variable bounds using bounds from ESIP, and possibly output_bounds

        Args:
            bounds        : The ESIP object
            output_bounds : A Nx2 array-like structure with the lower output bounds in the first column
                            and upper in the second. The tightest bounds from ESIP and this array will
                            be used
            set_input     : If False, the input variables aren't adjusted
        """
        if self._input_variables is None or self._output_variables is None:
            raise VariablesNotInitializedException("set_input_bounds() called before input variables where initialized")

        if set_input:
            input_bounds_lower = bounds.bounds_concrete[0][:, 0]
            input_bounds_upper = bounds.bounds_concrete[0][:, 1]
            for node_num, var in enumerate(self._input_variables.select()):
                var.lb, var.ub = input_bounds_lower[node_num], input_bounds_upper[node_num]

        output_bounds_lower = bounds.bounds_concrete[-1][:, 0].copy()
        output_bounds_upper = bounds.bounds_concrete[-1][:, 1].copy()

        if output_bounds is not None:
            # Refine the output bounds using the given output_bounds array
            better_lower_idx = output_bounds[:, 0] > output_bounds_lower
            better_upper_idx = output_bounds[:, 1] < output_bounds_upper
            output_bounds_lower[better_lower_idx] = output_bounds[better_lower_idx, 0]
            output_bounds_upper[better_upper_idx] = output_bounds[better_upper_idx, 1]

        for node_num, var in enumerate(self._output_variables.select()):
            var.lb, var.ub = output_bounds_lower[node_num], output_bounds_upper[node_num]

        self._grb_solver.update()

    # noinspection PyArgumentList
    def get_assigned_values(self) -> tuple:
        """
        Returns the currently assigned input and output values

        Returns None entries if no values are assigned (e.g. solve() hasn't been called).

        Returns:
            (input_values, output_values)
        """
        try:
            input_values = [var.x for var in self._input_variables.select()]
        except AttributeError:
            # Values not assigned, solve() probably hasn't been called
            input_values = None

        try:
            output_values = [var.x for var in self._output_variables.select()]
        except AttributeError:
            # Values not assigned, solve() probably hasn't been called
            output_values = None

        return np.array(input_values), np.array(output_values)

    # noinspection PyMethodMayBeStatic
    def _disable_log(self):
        """
        Remove the automatically created Gurobi log file and disable future logging.
        """
        grb.setParam("OutputFlag", 0)
        grb.setParam("LogFile", "")
        try:
            os.remove("./gurobi.log")
        except FileNotFoundError:
            pass
class LPSolverException(Exception):
    """Base class for all exceptions raised by LPSolver."""
class VariablesNotInitializedException(LPSolverException):
    """Raised when bounds are set before the Gurobi variables have been created."""
class UnexpectedGurobiStatusException(LPSolverException):
pass
| [
"gurobipy.setParam",
"numpy.array",
"os.remove",
"gurobipy.Model"
] | [((778, 793), 'gurobipy.Model', 'grb.Model', (['"""NN"""'], {}), "('NN')\n", (787, 793), True, 'import gurobipy as grb\n'), ((6580, 6609), 'gurobipy.setParam', 'grb.setParam', (['"""OutputFlag"""', '(0)'], {}), "('OutputFlag', 0)\n", (6592, 6609), True, 'import gurobipy as grb\n'), ((6618, 6645), 'gurobipy.setParam', 'grb.setParam', (['"""LogFile"""', '""""""'], {}), "('LogFile', '')\n", (6630, 6645), True, 'import gurobipy as grb\n'), ((6315, 6337), 'numpy.array', 'np.array', (['input_values'], {}), '(input_values)\n', (6323, 6337), True, 'import numpy as np\n'), ((6339, 6362), 'numpy.array', 'np.array', (['output_values'], {}), '(output_values)\n', (6347, 6362), True, 'import numpy as np\n'), ((6671, 6696), 'os.remove', 'os.remove', (['"""./gurobi.log"""'], {}), "('./gurobi.log')\n", (6680, 6696), False, 'import os\n')] |
# Copyright 2018 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomobar_recon
:platform: Unix
:synopsis: A wrapper around TOmographic MOdel-BAsed Reconstruction (ToMoBAR) software \
for advanced iterative image reconstruction
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from savu.plugins.reconstructions.base_recon import BaseRecon
from savu.data.plugin_list import CitationInformation
from savu.plugins.driver.gpu_plugin import GpuPlugin
import numpy as np
from tomobar.methodsIR import RecToolsIR
from savu.plugins.utils import register_plugin
from scipy import ndimage
@register_plugin
class TomobarRecon(BaseRecon, GpuPlugin):
    # NOTE(review): the Savu framework appears to parse this class docstring to
    # define the plugin's parameters and defaults -- keep the ':param' lines
    # byte-for-byte intact; verify before editing any of them.
    """
    A Plugin to reconstruct full-field tomographic projection data using state-of-the-art regularised iterative algorithms from \
    the ToMoBAR package. ToMoBAR includes FISTA and ADMM iterative methods and depends on the ASTRA toolbox and the CCPi RGL toolkit: \
    https://github.com/vais-ral/CCPi-Regularisation-Toolkit.

    :param output_size: Number of rows and columns in the \
        reconstruction. Default: 'auto'.
    :param data_fidelity: Data fidelity, chosoe Least Squares only at the moment. Default: 'LS'.
    :param data_Huber_thresh: Threshold parameter for __Huber__ data fidelity . Default: None.
    :param data_any_rings: a parameter to suppress various artifacts including rings and streaks. Default: None.
    :param data_any_rings_winsizes: half window sizes to collect background information [detector, angles, num of projections]. Default: (9,7,0).
    :param data_any_rings_power: a power parameter for Huber model. Default: 1.5.
    :param data_full_ring_GH: Regularisation variable for full constant ring removal (GH model). Default: None.
    :param data_full_ring_accelerator_GH: Acceleration constant for GH ring removal. Default: 10.0.
    :param algorithm_iterations: Number of outer iterations for FISTA (default) or ADMM methods. Default: 20.
    :param algorithm_verbose: print iterations number and other messages ('off' by default). Default: 'off'.
    :param algorithm_ordersubsets: The number of ordered-subsets to accelerate reconstruction. Default: 6.
    :param algorithm_nonnegativity: ENABLE or DISABLE nonnegativity constraint. Default: 'ENABLE'.
    :param regularisation_method: To regularise choose methods ROF_TV, FGP_TV, PD_TV, SB_TV, LLT_ROF,\
     NDF, TGV, NLTV, Diff4th. Default: 'FGP_TV'.
    :param regularisation_parameter: Regularisation (smoothing) value, higher \
                    the value stronger the smoothing effect. Default: 0.00001.
    :param regularisation_iterations: The number of regularisation iterations. Default: 80.
    :param regularisation_device: The number of regularisation iterations. Default: 'gpu'.
    :param regularisation_PD_lip: Primal-dual parameter for convergence. Default: 8.
    :param regularisation_methodTV: 0/1 - TV specific isotropic/anisotropic choice. Default: 0.
    :param regularisation_timestep: Time marching parameter, relevant for \
                    (ROF_TV, LLT_ROF, NDF, Diff4th) penalties. Default: 0.003.
    :param regularisation_edge_thresh: Edge (noise) related parameter, relevant for NDF and Diff4th. Default: 0.01.
    :param regularisation_parameter2: Regularisation (smoothing) value for LLT_ROF method. Default: 0.005.
    :param regularisation_NDF_penalty: NDF specific penalty type Huber, Perona, Tukey. Default: 'Huber'.
    """

    def __init__(self):
        super(TomobarRecon, self).__init__("TomobarRecon")

    def _shift(self, sinogram, centre_of_rotation):
        # Shift the sinogram rows so the centre of rotation sits at the
        # geometric centre of the first axis.
        centre_of_rotation_shift = (sinogram.shape[0]/2) - centre_of_rotation
        result = ndimage.interpolation.shift(sinogram,
                                            (centre_of_rotation_shift, 0))
        return result

    def pre_process(self):
        # extract given parameters into dictionaries suitable for ToMoBAR input
        # Data-term options: ordered subsets, Huber thresholding and the
        # ring-artifact suppression parameters.
        self._data_ = {'OS_number' : self.parameters['algorithm_ordersubsets'],
                       'huber_threshold' : self.parameters['data_Huber_thresh'],
                       'ring_weights_threshold' : self.parameters['data_any_rings'],
                       'ring_tuple_halfsizes' : self.parameters['data_any_rings_winsizes'],
                       'ring_huber_power' : self.parameters['data_any_rings_power'],
                       'ringGH_lambda' : self.parameters['data_full_ring_GH'],
                       'ringGH_accelerate' : self.parameters['data_full_ring_accelerator_GH']}
        # Outer-iteration options for the FISTA/ADMM solver.
        self._algorithm_ = {'iterations' : self.parameters['algorithm_iterations'],
                            'nonnegativity' : self.parameters['algorithm_nonnegativity'],
                            'verbose' : self.parameters['algorithm_verbose']}
        # Regulariser options forwarded to the CCPi regularisation toolkit.
        # NOTE(review): the key spelling 'edge_threhsold' looks misspelled but
        # must match what ToMoBAR reads -- confirm against the installed
        # ToMoBAR version before "fixing" it.
        self._regularisation_ = {'method' : self.parameters['regularisation_method'],
                                 'regul_param' : self.parameters['regularisation_parameter'],
                                 'iterations' : self.parameters['regularisation_iterations'],
                                 'device_regulariser' : self.parameters['regularisation_device'],
                                 'edge_threhsold' : self.parameters['regularisation_edge_thresh'],
                                 'time_marching_step' : self.parameters['regularisation_timestep'],
                                 'regul_param2' : self.parameters['regularisation_parameter2'],
                                 'PD_LipschitzConstant' : self.parameters['regularisation_PD_lip'],
                                 'NDF_penalty' : self.parameters['regularisation_NDF_penalty'],
                                 'methodTV' : self.parameters['regularisation_methodTV']}

    def process_frames(self, data):
        # Reconstruct a single sinogram frame with FISTA.
        centre_of_rotations, angles, self.vol_shape, init = self.get_frame_params()
        sinogram = data[0].astype(np.float32)
        # Detector width is taken from the sinogram's second dimension.
        anglesTot, self.DetectorsDimH = np.shape(sinogram)
        self.anglesRAD = np.deg2rad(angles.astype(np.float32))
        self._data_.update({'projection_norm_data' : sinogram})
        """
        # if one selects PWLS model and provides raw input data
        if (self.parameters['data_fidelity'] == 'PWLS'):
            rawdata = data[1].astype(np.float32)
            rawdata /= np.max(rawdata)
            self._data_.update({'projection_raw_data' : rawdata})
        """
        # set parameters and initiate the ToMoBAR class object
        self.Rectools = RecToolsIR(DetectorsDimH = self.DetectorsDimH,  # DetectorsDimH # detector dimension (horizontal)
                    DetectorsDimV = None, # DetectorsDimV # detector dimension (vertical) for 3D case only
                    CenterRotOffset = None, # Center of Rotation (CoR) scalar (for 3D case only)
                    AnglesVec = self.anglesRAD, # array of angles in radians
                    ObjSize = self.vol_shape[0] , # a scalar to define the reconstructed object dimensions
                    datafidelity=self.parameters['data_fidelity'],# data fidelity, choose LS, PWLS
                    device_projector='gpu')
        # Run FISTA reconstrucion algorithm here
        recon = self.Rectools.FISTA(self._data_, self._algorithm_, self._regularisation_)
        return recon

    def get_max_frames(self):
        # One sinogram is processed per call.
        return 'single'

    def get_citation_information(self):
        # Citation metadata for the FISTA algorithm (Beck & Teboulle, 2009).
        cite_info1 = CitationInformation()
        cite_info1.name = 'citation1'
        cite_info1.description = \
            ("First-order optimisation algorithm for linear inverse problems.")
        cite_info1.bibtex = \
            ("@article{beck2009,\n" +
             "title={A fast iterative shrinkage-thresholding algorithm for linear inverse problems},\n" +
             "author={<NAME> Beck, Mark and Teboulle},\n" +
             "journal={SIAM Journal on Imaging Sciences},\n" +
             "volume={2},\n" +
             "number={1},\n" +
             "pages={183--202},\n" +
             "year={2009},\n" +
             "publisher={SIAM}\n" +
             "}")
        cite_info1.endnote = \
            ("%0 Journal Article\n" +
             "%T A fast iterative shrinkage-thresholding algorithm for linear inverse problems\n" +
             "%A Beck, Amir\n" +
             "%A Teboulle, Mark\n" +
             "%J SIAM Journal on Imaging Sciences\n" +
             "%V 2\n" +
             "%N 1\n" +
             "%P 183--202\n" +
             "%@ --\n" +
             "%D 2009\n" +
             "%I SIAM\n")
        cite_info1.doi = "doi: "
        return cite_info1
| [
"scipy.ndimage.interpolation.shift",
"numpy.shape",
"savu.data.plugin_list.CitationInformation",
"tomobar.methodsIR.RecToolsIR"
] | [((4221, 4289), 'scipy.ndimage.interpolation.shift', 'ndimage.interpolation.shift', (['sinogram', '(centre_of_rotation_shift, 0)'], {}), '(sinogram, (centre_of_rotation_shift, 0))\n', (4248, 4289), False, 'from scipy import ndimage\n'), ((6452, 6470), 'numpy.shape', 'np.shape', (['sinogram'], {}), '(sinogram)\n', (6460, 6470), True, 'import numpy as np\n'), ((6973, 7192), 'tomobar.methodsIR.RecToolsIR', 'RecToolsIR', ([], {'DetectorsDimH': 'self.DetectorsDimH', 'DetectorsDimV': 'None', 'CenterRotOffset': 'None', 'AnglesVec': 'self.anglesRAD', 'ObjSize': 'self.vol_shape[0]', 'datafidelity': "self.parameters['data_fidelity']", 'device_projector': '"""gpu"""'}), "(DetectorsDimH=self.DetectorsDimH, DetectorsDimV=None,\n CenterRotOffset=None, AnglesVec=self.anglesRAD, ObjSize=self.vol_shape[\n 0], datafidelity=self.parameters['data_fidelity'], device_projector='gpu')\n", (6983, 7192), False, 'from tomobar.methodsIR import RecToolsIR\n'), ((7881, 7902), 'savu.data.plugin_list.CitationInformation', 'CitationInformation', ([], {}), '()\n', (7900, 7902), False, 'from savu.data.plugin_list import CitationInformation\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import numpy as np
import network.resnet38d
class Net(network.resnet38d.Net):
    """ResNet-38 based multi-label classifier.

    In round 0 a single 20-class head (``fc8``) is trained; in later rounds a
    second head with ``k * 20`` outputs (``fc8_200``) is added alongside the
    20-class head (``fc8_20``).
    """

    def __init__(self, k_cluster, from_round_nb):
        super().__init__()
        # Number of sub-clusters per class for the fine-grained head.
        self.k = k_cluster
        self.from_round_nb = from_round_nb
        print('k_cluster: {}'.format(self.k))
        print('Round: {}'.format(self.from_round_nb))
        self.dropout7 = torch.nn.Dropout2d(0.5)

        # class 20
        if self.from_round_nb == 0:
            # Single 1x1-conv classification head over the 4096-d features.
            self.fc8 = nn.Conv2d(4096, 20, 1, bias=False)
            torch.nn.init.xavier_uniform_(self.fc8.weight)
            # Early backbone layers are frozen; only fc8 is trained from scratch.
            self.not_training = [self.conv1a, self.b2, self.b2_1, self.b2_2]
            self.from_scratch_layers = [self.fc8]
        # class 20 + class 200
        else:
            # Two heads: coarse 20-class and fine k*20-class classification.
            self.fc8_20 = nn.Conv2d(4096, 20, 1, bias=False)
            self.fc8_200 = nn.Conv2d(4096, self.k*20, 1, bias=False)
            torch.nn.init.xavier_uniform_(self.fc8_20.weight)
            torch.nn.init.xavier_uniform_(self.fc8_200.weight)
            self.not_training = [self.conv1a, self.b2, self.b2_1, self.b2_2]
            self.from_scratch_layers = [self.fc8_20, self.fc8_200]

    def forward(self, x, from_round_nb):
        """Return logits, pooled features and sigmoid scores.

        Round 0 returns ``(x, feature, y)``; later rounds return
        ``(x_20, feature, y_20, x_200, y_200)`` for both heads.
        """
        x = super().forward(x)
        x = self.dropout7(x)
        # Global average pooling over the full spatial extent.
        x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)

        feature = x
        feature = feature.view(feature.size(0), -1)

        # class 20
        if from_round_nb == 0:
            x = self.fc8(x)
            x = x.view(x.size(0), -1)
            y = torch.sigmoid(x)
            return x, feature, y
        # class 20 + class 200
        else:
            x_20 = self.fc8_20(x)
            x_20 = x_20.view(x_20.size(0), -1)
            y_20 = torch.sigmoid(x_20)

            x_200 = self.fc8_200(x)
            x_200 = x_200.view(x_200.size(0), -1)
            y_200 = torch.sigmoid(x_200)

            return x_20, feature, y_20, x_200, y_200

    def multi_label(self, x):
        # Threshold sigmoid scores at 0.5 to obtain the predicted label set.
        # NOTE(review): np.where on a 2-D array mixes batch rows together --
        # presumably this is only called with batch size 1; verify at call sites.
        x = torch.sigmoid(x)
        tmp = x.cpu()
        tmp = tmp.data.numpy()
        _, cls = np.where(tmp>0.5)

        return cls, tmp

    def forward_cam(self, x):
        # Class activation map from the single round-0 head (no pooling).
        x = super().forward(x)
        x = F.conv2d(x, self.fc8.weight)
        x = F.relu(x)

        return x

    def forward_two_cam(self, x):
        # Class activation maps from both the coarse and fine heads.
        x_ = super().forward(x)

        x_20 = F.conv2d(x_, self.fc8_20.weight)
        cam_20 = F.relu(x_20)

        x_200 = F.conv2d(x_, self.fc8_200.weight)
        cam_200 = F.relu(x_200)

        return cam_20, cam_200

    def get_parameter_groups(self):
        """Split trainable parameters into four optimizer groups:
        (pretrained weights, pretrained biases, scratch weights, scratch biases).
        """
        groups = ([], [], [], [])

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.weight.requires_grad:
                    if m in self.from_scratch_layers:
                        groups[2].append(m.weight)
                    else:
                        groups[0].append(m.weight)
                if m.bias is not None and m.bias.requires_grad:
                    if m in self.from_scratch_layers:
                        groups[3].append(m.bias)
                    else:
                        groups[1].append(m.bias)

        return groups
| [
"torch.nn.functional.conv2d",
"numpy.where",
"torch.nn.init.xavier_uniform_",
"torch.nn.Dropout2d",
"torch.sigmoid",
"torch.nn.Conv2d",
"torch.nn.functional.relu"
] | [((432, 455), 'torch.nn.Dropout2d', 'torch.nn.Dropout2d', (['(0.5)'], {}), '(0.5)\n', (450, 455), False, 'import torch\n'), ((2035, 2051), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (2048, 2051), False, 'import torch\n'), ((2122, 2141), 'numpy.where', 'np.where', (['(tmp > 0.5)'], {}), '(tmp > 0.5)\n', (2130, 2141), True, 'import numpy as np\n'), ((2240, 2268), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', 'self.fc8.weight'], {}), '(x, self.fc8.weight)\n', (2248, 2268), True, 'import torch.nn.functional as F\n'), ((2281, 2290), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2287, 2290), True, 'import torch.nn.functional as F\n'), ((2393, 2425), 'torch.nn.functional.conv2d', 'F.conv2d', (['x_', 'self.fc8_20.weight'], {}), '(x_, self.fc8_20.weight)\n', (2401, 2425), True, 'import torch.nn.functional as F\n'), ((2443, 2455), 'torch.nn.functional.relu', 'F.relu', (['x_20'], {}), '(x_20)\n', (2449, 2455), True, 'import torch.nn.functional as F\n'), ((2473, 2506), 'torch.nn.functional.conv2d', 'F.conv2d', (['x_', 'self.fc8_200.weight'], {}), '(x_, self.fc8_200.weight)\n', (2481, 2506), True, 'import torch.nn.functional as F\n'), ((2525, 2538), 'torch.nn.functional.relu', 'F.relu', (['x_200'], {}), '(x_200)\n', (2531, 2538), True, 'import torch.nn.functional as F\n'), ((535, 569), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4096)', '(20)', '(1)'], {'bias': '(False)'}), '(4096, 20, 1, bias=False)\n', (544, 569), True, 'import torch.nn as nn\n'), ((583, 629), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.fc8.weight'], {}), '(self.fc8.weight)\n', (612, 629), False, 'import torch\n'), ((831, 865), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4096)', '(20)', '(1)'], {'bias': '(False)'}), '(4096, 20, 1, bias=False)\n', (840, 865), True, 'import torch.nn as nn\n'), ((893, 936), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4096)', '(self.k * 20)', '(1)'], {'bias': '(False)'}), '(4096, self.k * 20, 1, bias=False)\n', (902, 936), True, 'import 
torch.nn as nn\n'), ((948, 997), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.fc8_20.weight'], {}), '(self.fc8_20.weight)\n', (977, 997), False, 'import torch\n'), ((1010, 1060), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.fc8_200.weight'], {}), '(self.fc8_200.weight)\n', (1039, 1060), False, 'import torch\n'), ((1591, 1607), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (1604, 1607), False, 'import torch\n'), ((1787, 1806), 'torch.sigmoid', 'torch.sigmoid', (['x_20'], {}), '(x_20)\n', (1800, 1806), False, 'import torch\n'), ((1915, 1935), 'torch.sigmoid', 'torch.sigmoid', (['x_200'], {}), '(x_200)\n', (1928, 1935), False, 'import torch\n')] |
# MIT License
#
# Copyright (c) 2020 WGCN Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import json
import os
import time
import scipy.sparse as sp
import dgl.init
import numpy as np
import torch
import torch as th
import torch.nn.functional as F
import utils_data
from utils_layers import WGCNNet
from utils_structural import generate_dijkstra, load_adj, compute_structural_infot
if __name__ == '__main__':
    # ---- Command-line configuration -------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default="WGCN_TwoLayers")
    parser.add_argument('--dataset', type=str, default="cora_ml")
    parser.add_argument('--directed', type=bool, default=True)
    parser.add_argument('--dataset_embedding', type=str, default="isomap")
    parser.add_argument('--num_hidden', type=int, default=48)
    parser.add_argument('--num_heads_layer_one', type=int, default=1)
    parser.add_argument('--num_heads_layer_two', type=int, default=1)
    parser.add_argument('--layer_one_ggcn_merge', type=str, default='cat')
    parser.add_argument('--layer_two_ggcn_merge', type=str, default='cat')
    parser.add_argument('--layer_one_channel_merge', type=str, default='cat')
    parser.add_argument('--layer_two_channel_merge', type=str, default='mean')
    parser.add_argument('--dropout_rate', type=float, default=0.5)
    parser.add_argument('--weight_decay_layer_one', type=float, default=5e-6)
    parser.add_argument('--weight_decay_layer_two', type=float, default=5e-6)
    parser.add_argument('--num_epochs_max', type=int, default=1000)
    parser.add_argument('--run_id', type=str, default="000")
    parser.add_argument('--dataset_split', type=str, default="random")
    parser.add_argument('--attention', type=bool, default=False)
    parser.add_argument('--learning_rate_decay_patience', type=int, default=50)
    parser.add_argument('--learning_rate_decay_factor', type=float, default=0.8)
    parser.add_argument('--num_epochs_patience', type=int, default=1000)
    parser.add_argument('--learning_rate', type=float, default=0.05)
    parser.add_argument('--nfold', type=int, default=4)
    parser.add_argument('--in_out_ratio', type=float, default=3)
    parser.add_argument('--restart_rate', type=float, default=0.0)
    parser.add_argument('--in_out_peak', type=float, default=0.4)
    parser.add_argument('--dijkstra_k', type=int, default=1)
    parser.add_argument('--latent', type=bool, default=True)
    parser.add_argument('--ng', type=bool, default=False)
    parser.add_argument('--layers', type=int, default=2)
    args = parser.parse_args()
    # args.dataset_embedding='poincare'

    # ---- Structural information: load the cached .npz if present, else
    # compute it from scratch (Dijkstra distances + structural statistics).
    if os.path.exists('data/{}/{}_structural_{}_{}_{}_{}.npz'.format(args.dataset,args.dataset,args.in_out_ratio,args.restart_rate,args.in_out_peak,args.dijkstra_k)):
        structural_info = sp.load_npz('data/{}/{}_structural_{}_{}_{}_{}.npz'.format(args.dataset,args.dataset,args.in_out_ratio,args.restart_rate,args.in_out_peak,args.dijkstra_k))
        structural_info = structural_info.toarray()
    else:
        if not os.path.exists("data/{}/{}_{}_dijkstra.npz".format(args.dataset,args.dataset,args.dijkstra_k)):
            generate_dijkstra(args.dataset, args.dijkstra_k)
        origin_adj = load_adj(args.dataset)
        structural_info = compute_structural_infot(args.dataset, args.directed, args.dijkstra_k, args.in_out_ratio,args.restart_rate,args.in_out_peak)
        #structural_info = compute_structural_info(args.dataset,origin_adj, args.directed, args.dijkstra_k, args.in_out_ratio,args.restart_rate,args.in_out_peak)
        structural_info = structural_info.toarray()

    # ---- Dataset loading: random 60/20/20 split or a pre-defined split file.
    if args.dataset_split == 'random':
        # g, features, labels, train_mask, val_mask, test_mask, num_features, num_labels, num_devisions, pairs = utils_data.load_data_custome(
        #     args.dataset, args.dijkstra_k, args.nfold, None, 20,500, 'WGCN', args.dataset_embedding, structural_info,args.latent,args.ng)
        g, features, labels, train_mask, val_mask, test_mask, num_features, num_labels, num_devisions, pairs = utils_data.load_data(
            args.dataset, args.dijkstra_k, args.nfold, None, 0.6, 0.2, 'WGCN', args.dataset_embedding, structural_info,args.latent,args.ng)
    else:
        g, features, labels, train_mask, val_mask, test_mask, num_features, num_labels, num_devisions, pairs = utils_data.load_data(
            args.dataset, args.dijkstra_k, args.nfold, args.dataset_split, None, None, 'WGCN', args.dataset_embedding, structural_info,args.latent,args.ng)

    g.set_n_initializer(dgl.init.zero_initializer)
    g.set_e_initializer(dgl.init.zero_initializer)

    # ---- Model, optimizer (per-layer weight decay) and LR scheduler.
    net = WGCNNet(g=g, num_input_features=num_features, num_output_classes=num_labels, num_hidden=args.num_hidden,
                  num_divisions=num_devisions, pairs = pairs, dropout_rate=args.dropout_rate,
                  num_heads_layer_one=args.num_heads_layer_one, num_heads_layer_two=args.num_heads_layer_two,
                  layer_one_ggcn_merge=args.layer_one_ggcn_merge,
                  layer_one_channel_merge=args.layer_one_channel_merge,
                  layer_two_ggcn_merge=args.layer_two_ggcn_merge,
                  layer_two_channel_merge=args.layer_two_channel_merge, attention=args.attention,layers=args.layers)
    optimizer = th.optim.Adam([{'params': net.wgcn1.parameters(), 'weight_decay': args.weight_decay_layer_one},
                            {'params': net.wgcn2.parameters(), 'weight_decay': args.weight_decay_layer_two}],
                           lr=args.learning_rate)
    learning_rate_scheduler = th.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                             factor=args.learning_rate_decay_factor,
                                                             patience=args.learning_rate_decay_patience)

    # Move the model and all tensors onto the GPU.
    net.cuda()
    features = features.cuda()
    structural_info = torch.tensor(structural_info)
    structural_info = structural_info.cuda()
    labels = labels.cuda()
    train_mask = train_mask.cuda()
    val_mask = val_mask.cuda()
    test_mask = test_mask.cuda()

    # Early-stopping bookkeeping.
    # Adapted from https://github.com/PetarV-/GAT/blob/master/execute_cora.py
    vlss_mn = np.inf
    vacc_mx = 0.0
    vacc_early_model = None
    vlss_early_model = None
    tacc_early_model = None
    tlss_early_model = None
    state_dict_early_model = None
    curr_step = 0
    result_epoch = 0

    # ---- Training loop ---------------------------------------------------
    # Adapted from https://docs.dgl.ai/tutorials/models/1_gnn/1_gcn.html
    dur = []
    results = ''
    sum_dur = 0
    for epoch in range(args.num_epochs_max):
        t0 = time.time()

        net.train()
        train_logits = net(features)
        train_logp = F.log_softmax(train_logits, 1)
        train_loss = F.nll_loss(train_logp[train_mask], labels[train_mask])
        train_pred = train_logp.argmax(dim=1)
        train_acc = th.eq(train_pred[train_mask], labels[train_mask]).float().mean().item()

        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()

        # Evaluate on the validation and test splits with a single forward pass
        # (test metrics reuse the same logits as validation).
        net.eval()
        with th.no_grad():
            val_logits = net(features)
            val_logp = F.log_softmax(val_logits, 1)
            val_loss = F.nll_loss(val_logp[val_mask], labels[val_mask]).item()
            val_pred = val_logp.argmax(dim=1)
            val_acc = th.eq(val_pred[val_mask], labels[val_mask]).float().mean().item()

            test_logp = F.log_softmax(val_logits, 1)
            test_loss = F.nll_loss(test_logp[test_mask], labels[test_mask]).item()
            test_pred = test_logp.argmax(dim=1)
            test_acc = th.eq(test_pred[test_mask], labels[test_mask]).float().mean().item()

        # Reduce the learning rate when the validation loss plateaus.
        learning_rate_scheduler.step(val_loss)

        dur.append(time.time() - t0)
        print(
            "Epoch {:05d} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f} | Time(s) {:.4f}".format(
                epoch, train_loss.item(), train_acc, val_loss, val_acc, sum(dur) / len(dur)))
        sum_dur = sum_dur + (sum(dur)/len(dur))

        # Record the test accuracy every 100 epochs for the summary file.
        if (epoch+1)%100 == 0:
            results+= 'epoch: ' + str(epoch+1) + ' (accuracy: ' + str(test_acc) +') '

        # Early stopping: keep the best model by validation accuracy/loss.
        # Adapted from https://github.com/PetarV-/GAT/blob/master/execute_cora.py
        if (val_acc >= vacc_mx or val_loss <= vlss_mn):
            if val_acc >= vacc_mx and val_loss <= vlss_mn:
                vacc_early_model = val_acc
                vlss_early_model = val_loss
                tacc_early_model = test_acc
                tlss_early_model = test_loss
                state_dict_early_model = net.state_dict()
                result_epoch = epoch
            vacc_mx = np.max((val_acc, vacc_mx))
            vlss_mn = np.min((val_loss, vlss_mn))
            curr_step = 0
        else:
            curr_step += 1
            if curr_step >= args.num_epochs_patience:
                break

    # ---- Write the final summary (best epoch + periodic test accuracies).
    printres = 'Epoch: ' + str(result_epoch+1) + ' (accuracy: ' + str(tacc_early_model) + ')' + ' ' + results + ' ' + str(sum_dur/args.num_epochs_max)
    with open(os.path.join('results', '{}_{}_{}_results.txt'.format(args.dataset,args.dataset_embedding, args.run_id)), 'w') as outfile:
        outfile.write(json.dumps(printres) + '\n')
| [
"utils_layers.WGCNNet",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"utils_structural.compute_structural_infot",
"argparse.ArgumentParser",
"utils_structural.load_adj",
"torch.nn.functional.nll_loss",
"utils_structural.generate_dijkstra",
"json.dumps",
"numpy.min",
"numpy.max",
"torch.eq",
"... | [((1476, 1501), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1499, 1501), False, 'import argparse\n'), ((5637, 6189), 'utils_layers.WGCNNet', 'WGCNNet', ([], {'g': 'g', 'num_input_features': 'num_features', 'num_output_classes': 'num_labels', 'num_hidden': 'args.num_hidden', 'num_divisions': 'num_devisions', 'pairs': 'pairs', 'dropout_rate': 'args.dropout_rate', 'num_heads_layer_one': 'args.num_heads_layer_one', 'num_heads_layer_two': 'args.num_heads_layer_two', 'layer_one_ggcn_merge': 'args.layer_one_ggcn_merge', 'layer_one_channel_merge': 'args.layer_one_channel_merge', 'layer_two_ggcn_merge': 'args.layer_two_ggcn_merge', 'layer_two_channel_merge': 'args.layer_two_channel_merge', 'attention': 'args.attention', 'layers': 'args.layers'}), '(g=g, num_input_features=num_features, num_output_classes=num_labels,\n num_hidden=args.num_hidden, num_divisions=num_devisions, pairs=pairs,\n dropout_rate=args.dropout_rate, num_heads_layer_one=args.\n num_heads_layer_one, num_heads_layer_two=args.num_heads_layer_two,\n layer_one_ggcn_merge=args.layer_one_ggcn_merge, layer_one_channel_merge\n =args.layer_one_channel_merge, layer_two_ggcn_merge=args.\n layer_two_ggcn_merge, layer_two_channel_merge=args.\n layer_two_channel_merge, attention=args.attention, layers=args.layers)\n', (5644, 6189), False, 'from utils_layers import WGCNNet\n'), ((6594, 6743), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'th.optim.lr_scheduler.ReduceLROnPlateau', ([], {'optimizer': 'optimizer', 'factor': 'args.learning_rate_decay_factor', 'patience': 'args.learning_rate_decay_patience'}), '(optimizer=optimizer, factor=args.\n learning_rate_decay_factor, patience=args.learning_rate_decay_patience)\n', (6633, 6743), True, 'import torch as th\n'), ((6948, 6977), 'torch.tensor', 'torch.tensor', (['structural_info'], {}), '(structural_info)\n', (6960, 6977), False, 'import torch\n'), ((4242, 4264), 'utils_structural.load_adj', 'load_adj', (['args.dataset'], {}), 
'(args.dataset)\n', (4250, 4264), False, 'from utils_structural import generate_dijkstra, load_adj, compute_structural_infot\n'), ((4291, 4422), 'utils_structural.compute_structural_infot', 'compute_structural_infot', (['args.dataset', 'args.directed', 'args.dijkstra_k', 'args.in_out_ratio', 'args.restart_rate', 'args.in_out_peak'], {}), '(args.dataset, args.directed, args.dijkstra_k, args\n .in_out_ratio, args.restart_rate, args.in_out_peak)\n', (4315, 4422), False, 'from utils_structural import generate_dijkstra, load_adj, compute_structural_infot\n'), ((5063, 5218), 'utils_data.load_data', 'utils_data.load_data', (['args.dataset', 'args.dijkstra_k', 'args.nfold', 'None', '(0.6)', '(0.2)', '"""WGCN"""', 'args.dataset_embedding', 'structural_info', 'args.latent', 'args.ng'], {}), "(args.dataset, args.dijkstra_k, args.nfold, None, 0.6, \n 0.2, 'WGCN', args.dataset_embedding, structural_info, args.latent, args.ng)\n", (5083, 5218), False, 'import utils_data\n'), ((5346, 5521), 'utils_data.load_data', 'utils_data.load_data', (['args.dataset', 'args.dijkstra_k', 'args.nfold', 'args.dataset_split', 'None', 'None', '"""WGCN"""', 'args.dataset_embedding', 'structural_info', 'args.latent', 'args.ng'], {}), "(args.dataset, args.dijkstra_k, args.nfold, args.\n dataset_split, None, None, 'WGCN', args.dataset_embedding,\n structural_info, args.latent, args.ng)\n", (5366, 5521), False, 'import utils_data\n'), ((7629, 7640), 'time.time', 'time.time', ([], {}), '()\n', (7638, 7640), False, 'import time\n'), ((7720, 7750), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['train_logits', '(1)'], {}), '(train_logits, 1)\n', (7733, 7750), True, 'import torch.nn.functional as F\n'), ((7772, 7826), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['train_logp[train_mask]', 'labels[train_mask]'], {}), '(train_logp[train_mask], labels[train_mask])\n', (7782, 7826), True, 'import torch.nn.functional as F\n'), ((4172, 4220), 'utils_structural.generate_dijkstra', 'generate_dijkstra', 
(['args.dataset', 'args.dijkstra_k'], {}), '(args.dataset, args.dijkstra_k)\n', (4189, 4220), False, 'from utils_structural import generate_dijkstra, load_adj, compute_structural_infot\n'), ((8084, 8096), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (8094, 8096), True, 'import torch as th\n'), ((8160, 8188), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['val_logits', '(1)'], {}), '(val_logits, 1)\n', (8173, 8188), True, 'import torch.nn.functional as F\n'), ((8426, 8454), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['val_logits', '(1)'], {}), '(val_logits, 1)\n', (8439, 8454), True, 'import torch.nn.functional as F\n'), ((9657, 9683), 'numpy.max', 'np.max', (['(val_acc, vacc_mx)'], {}), '((val_acc, vacc_mx))\n', (9663, 9683), True, 'import numpy as np\n'), ((9706, 9733), 'numpy.min', 'np.min', (['(val_loss, vlss_mn)'], {}), '((val_loss, vlss_mn))\n', (9712, 9733), True, 'import numpy as np\n'), ((8745, 8756), 'time.time', 'time.time', ([], {}), '()\n', (8754, 8756), False, 'import time\n'), ((10193, 10213), 'json.dumps', 'json.dumps', (['printres'], {}), '(printres)\n', (10203, 10213), False, 'import json\n'), ((8212, 8260), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['val_logp[val_mask]', 'labels[val_mask]'], {}), '(val_logp[val_mask], labels[val_mask])\n', (8222, 8260), True, 'import torch.nn.functional as F\n'), ((8479, 8530), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['test_logp[test_mask]', 'labels[test_mask]'], {}), '(test_logp[test_mask], labels[test_mask])\n', (8489, 8530), True, 'import torch.nn.functional as F\n'), ((7893, 7942), 'torch.eq', 'th.eq', (['train_pred[train_mask]', 'labels[train_mask]'], {}), '(train_pred[train_mask], labels[train_mask])\n', (7898, 7942), True, 'import torch as th\n'), ((8336, 8379), 'torch.eq', 'th.eq', (['val_pred[val_mask]', 'labels[val_mask]'], {}), '(val_pred[val_mask], labels[val_mask])\n', (8341, 8379), True, 'import torch as th\n'), ((8609, 8655), 'torch.eq', 'th.eq', 
(['test_pred[test_mask]', 'labels[test_mask]'], {}), '(test_pred[test_mask], labels[test_mask])\n', (8614, 8655), True, 'import torch as th\n')] |
# run_hadamard
#
# Plot mean contours of the 2-D Rosenbrock function under normally
# distributed inputs, estimated with stochastic collocation.
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib
import matplotlib.pyplot as plt
import os
import sys
import errno
sys.path.insert(0, '../../src')

import chaospy as cp
from stochastic_collocation import StochasticCollocation
from quantity_of_interest import QuantityOfInterest
from dimension_reduction import DimensionReduction
from stochastic_arnoldi.arnoldi_sample import ArnoldiSampling
import examples

# Grid of mean values over which the collocated mean of the QoI is evaluated.
nx = 100
x1 = np.linspace(-5, 5, num=nx)
x2 = np.linspace(-5, 5, num=nx)
val = np.zeros([nx, nx])

QoI = examples.Rosenbrock(2)
xi = np.zeros(2)
std_dev_xi = np.ones(2)
collocation_QoI = StochasticCollocation(3, "Normal")

# BUG FIX: `xrange` does not exist in Python 3 -- use `range`, which is
# equivalent here (and also works under Python 2).
for i in range(0, nx):
    for j in range(0, nx):
        mu = np.array([x1[i], x2[j]])
        QoI_func = QoI.eval_QoI
        # val is indexed [row=j, col=i] so the plot axes match (x1, x2).
        val[j, i] = collocation_QoI.normal.mean(mu, std_dev_xi, QoI_func)
        # val[j,i] = QoI.eval_QoI(mu, xi)

fname = "rosenbrock_exact.pdf"
plt.rc('text', usetex=True)
matplotlib.rcParams['mathtext.fontset'] = 'cm'
fig = plt.figure("rosenbrock", figsize=(6, 6))
ax = plt.axes()
# Renamed the contour handle from `cp` to avoid clobbering the `chaospy as cp`
# import alias.
contour_set = ax.contour(x1, x2, val,
                        cmap="coolwarm",
                        levels=[500, 1000, 5000, 10000, 15000, 30000, 45000, 90000],
                        linewidths=0.5)
ax.clabel(contour_set, inline=1, fmt='%1.1f', fontsize=8)
ax.set_xlabel(r'$\xi_1$', fontsize=16)
ax.set_ylabel(r'$\xi_2$', fontsize=16)
plt.tight_layout()
plt.show()
# fig.savefig(fname, format="pdf")
| [
"sys.path.insert",
"stochastic_collocation.StochasticCollocation",
"numpy.ones",
"numpy.array",
"examples.Rosenbrock",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] | [((152, 183), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../src"""'], {}), "(0, '../../src')\n", (167, 183), False, 'import sys\n'), ((461, 487), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)'], {'num': 'nx'}), '(-5, 5, num=nx)\n', (472, 487), True, 'import numpy as np\n'), ((492, 518), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)'], {'num': 'nx'}), '(-5, 5, num=nx)\n', (503, 518), True, 'import numpy as np\n'), ((524, 542), 'numpy.zeros', 'np.zeros', (['[nx, nx]'], {}), '([nx, nx])\n', (532, 542), True, 'import numpy as np\n'), ((548, 570), 'examples.Rosenbrock', 'examples.Rosenbrock', (['(2)'], {}), '(2)\n', (567, 570), False, 'import examples\n'), ((576, 587), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (584, 587), True, 'import numpy as np\n'), ((601, 611), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (608, 611), True, 'import numpy as np\n'), ((630, 664), 'stochastic_collocation.StochasticCollocation', 'StochasticCollocation', (['(3)', '"""Normal"""'], {}), "(3, 'Normal')\n", (651, 664), False, 'from stochastic_collocation import StochasticCollocation\n'), ((935, 962), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (941, 962), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1056), 'matplotlib.pyplot.figure', 'plt.figure', (['"""rosenbrock"""'], {'figsize': '(6, 6)'}), "('rosenbrock', figsize=(6, 6))\n", (1026, 1056), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1071), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1069, 1071), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1442), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1440, 1442), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1453), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1451, 1453), True, 'import matplotlib.pyplot as plt\n'), ((730, 754), 'numpy.array', 'np.array', (['[x1[i], x2[j]]'], {}), '([x1[i], x2[j]])\n', (738, 754), True, 'import numpy 
as np\n')] |
import torch
from torch import nn
import numpy as np
import cv2
### FB Global Reasoning Block ###
# From: https://github.com/facebookresearch/GloRe
class GCN(nn.Module):
    """Single-layer graph convolution.

    Operates on node-state tensors of shape (n, num_state, num_node):
    first mixes information across nodes with a residual 1x1 Conv1d,
    then mixes across state channels with a second 1x1 Conv1d.
    """
    def __init__(self, num_state, num_node, bias=False):
        super(GCN, self).__init__()
        # Node-wise mixing (applied in (n, num_node, num_state) layout).
        self.conv1 = nn.Conv1d(num_node, num_node, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        # Channel-wise mixing of the relu-activated residual.
        self.conv2 = nn.Conv1d(num_state, num_state, kernel_size=1, bias=bias)
    def forward(self, x):
        """x: (n, num_state, num_node) -> tensor of the same shape."""
        # Swap to (n, num_node, num_state) so conv1 mixes across nodes,
        # then swap back and add the residual connection.
        node_mixed = self.conv1(x.permute(0, 2, 1).contiguous())
        residual = node_mixed.permute(0, 2, 1) + x
        return self.conv2(self.relu(residual))
############### GloRe ################################
class GloRe_Unit(nn.Module):
    """
    Graph-based Global Reasoning Unit with an Inception-style front end.

    Projects the input into a small fully-connected graph via four parallel
    branches (1x1, 3x3, 5x5 convolutions and a max-pool path), reasons over
    each branch's graph with its own GCN, maps each result back to
    coordinate space, and adds all four reasoned maps onto the input.

    Parameter:
        'normalize' is not necessary if the input size is fixed
    """
    def __init__(self, num_in, num_mid,
                 ConvNd=nn.Conv3d,
                 BatchNormNd=nn.BatchNorm3d,
                 normalize=False):
        super(GloRe_Unit, self).__init__()
        self.normalize = normalize
        # num_s: channels of the node-state space; num_n: number of graph nodes.
        self.num_s = int(2 * num_mid)
        self.num_n = int(1 * num_mid)
        # reduce dim: four parallel branches producing node states from x
        self.conv_state1 = ConvNd(num_in, self.num_s, kernel_size=1)
        self.conv_state3 = ConvNd(num_in, self.num_s, kernel_size=3, padding=1)
        self.conv_state5 = ConvNd(num_in, self.num_s, kernel_size=5, padding=2)
        self.maxpool_state = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)
        self.conv_statem = ConvNd(num_in, self.num_s, kernel_size=1)
        # projection map: four parallel branches computed from x_proj
        # NOTE(review): these expect x_proj to carry num_in/2 channels, i.e.
        # half the channels of x -- confirm against the caller.
        self.conv_proj1 = ConvNd(int(num_in/2), self.num_n, kernel_size=1)
        self.conv_proj3 = ConvNd(int(num_in/2), self.num_n, kernel_size=3, padding=1)
        self.conv_proj5 = ConvNd(int(num_in/2), self.num_n, kernel_size=5, padding=2)
        self.maxpool_proj = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)
        self.conv_projm = ConvNd(int(num_in/2), self.num_n, kernel_size=1)
        # ----------
        # reasoning via graph convolution, one GCN per branch
        self.gcn1 = GCN(num_state=self.num_s, num_node=self.num_n)
        self.gcn3 = GCN(num_state=self.num_s, num_node=self.num_n)
        self.gcn5 = GCN(num_state=self.num_s, num_node=self.num_n)
        self.gcnm = GCN(num_state=self.num_s, num_node=self.num_n)
        # ----------
        # extend dimension: map reasoned node states back to num_in channels
        self.conv_extend1 = ConvNd(self.num_s, num_in, kernel_size=1, bias=False)
        self.conv_extend3 = ConvNd(self.num_s, num_in, kernel_size=3, padding=1, bias=False)
        self.conv_extend5 = ConvNd(self.num_s, num_in, kernel_size=5, padding=2, bias=False)
        self.conv_extendm = ConvNd(self.num_s, num_in, kernel_size=1, bias=False)
        # Concatenation and reduction -- only used by the commented-out
        # concat path in forward(); the active path sums the branches.
        self.original_size = ConvNd(5*num_in, num_in, kernel_size=1, bias=False)
        self.blocker = BatchNormNd(num_in, eps=1e-04) # should be zero initialized
    def forward(self, x, x_proj):
        '''
        :param x: input feature map (n, num_in, spatial...)
        :param x_proj: feature map the projection weights are computed
            from; presumably has num_in/2 channels -- TODO confirm
        :return: x plus the four reasoned feature maps (same shape as x)
        '''
        n = x.size(0)
        #print(x.shape)
        # (n, num_in, h, w) --> (n, num_state, h, w)
        #                   --> (n, num_state, h*w)
        x_state_reshaped1 = self.conv_state1(x).view(n, self.num_s, -1)
        x_state_reshaped3 = self.conv_state3(x).view(n, self.num_s, -1)
        x_state_reshaped5 = self.conv_state5(x).view(n, self.num_s, -1)
        x_state_reshapedm = self.conv_statem(self.maxpool_state(x)).view(n, self.num_s, -1)
        # (n, num_in, h, w) --> (n, num_node, h, w)
        #                   --> (n, num_node, h*w)
        x_proj_reshaped1 = self.conv_proj1(x_proj).view(n, self.num_n, -1)
        x_proj_reshaped3 = self.conv_proj3(x_proj).view(n, self.num_n, -1)
        x_proj_reshaped5 = self.conv_proj5(x_proj).view(n, self.num_n, -1)
        x_proj_reshapedm = self.conv_projm(self.maxpool_proj(x_proj)).view(n, self.num_n, -1)
        # (n, num_in, h, w) --> (n, num_node, h, w)
        #                   --> (n, num_node, h*w)
        # Reverse projection reuses the forward projection weights.
        x_rproj_reshaped1 = x_proj_reshaped1
        x_rproj_reshaped3 = x_proj_reshaped3
        x_rproj_reshaped5 = x_proj_reshaped5
        x_rproj_reshapedm = x_proj_reshapedm
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # projection: coordinate space -> interaction space
        # (n, num_state, h*w) x (n, num_node, h*w)T --> (n, num_state, num_node)
        x_n_state1 = torch.matmul(x_state_reshaped1, x_proj_reshaped1.permute(0, 2, 1))
        if self.normalize:
            x_n_state1 = x_n_state1 * (1. / x_state_reshaped1.size(2))
        x_n_state3 = torch.matmul(x_state_reshaped3, x_proj_reshaped3.permute(0, 2, 1))
        if self.normalize:
            x_n_state3 = x_n_state3 * (1. / x_state_reshaped3.size(2))
        x_n_state5 = torch.matmul(x_state_reshaped5, x_proj_reshaped5.permute(0, 2, 1))
        if self.normalize:
            x_n_state5 = x_n_state5 * (1. / x_state_reshaped5.size(2))
        x_n_statem = torch.matmul(x_state_reshapedm, x_proj_reshapedm.permute(0, 2, 1))
        if self.normalize:
            x_n_statem = x_n_statem * (1. / x_state_reshapedm.size(2))
        # reasoning: (n, num_state, num_node) -> (n, num_state, num_node)
        x_n_rel1 = self.gcn1(x_n_state1)
        x_n_rel3 = self.gcn3(x_n_state3)
        x_n_rel5 = self.gcn5(x_n_state5)
        x_n_relm = self.gcnm(x_n_statem)
        # reverse projection: interaction space -> coordinate space
        # (n, num_state, num_node) x (n, num_node, h*w) --> (n, num_state, h*w)
        x_state_reshaped1 = torch.matmul(x_n_rel1, x_rproj_reshaped1)
        x_state_reshaped3 = torch.matmul(x_n_rel3, x_rproj_reshaped3)
        x_state_reshaped5 = torch.matmul(x_n_rel5, x_rproj_reshaped5)
        x_state_reshapedm = torch.matmul(x_n_relm, x_rproj_reshapedm)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # (n, num_state, h*w) --> (n, num_state, h, w)
        x_state1 = x_state_reshaped1.view(n, self.num_s, *x.size()[2:])
        x_state3 = x_state_reshaped3.view(n, self.num_s, *x.size()[2:])
        x_state5 = x_state_reshaped5.view(n, self.num_s, *x.size()[2:])
        x_statem = x_state_reshapedm.view(n, self.num_s, *x.size()[2:])
        # -----------------
        # (n, num_state, h, w) -> (n, num_in, h, w)
        x_reasoned1 = self.blocker(self.conv_extend1(x_state1))
        x_reasoned3 = self.blocker(self.conv_extend3(x_state3))
        x_reasoned5 = self.blocker(self.conv_extend5(x_state5))
        x_reasonedm = self.blocker(self.conv_extendm(x_statem))
        # Residual sum of all four reasoned branches onto the input.
        out = x + x_reasoned1 + x_reasoned3 + x_reasoned5 + x_reasonedm
        #out = torch.cat((x, x_reasoned1, x_reasoned3, x_reasoned5, x_reasonedm),1)
        #out = self.original_size(out)
        # Debug dumps of the projection maps (disabled):
        # for i in range(3):
        #     img = np.asarray(x_proj_reshaped1[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
        #     img = ((255.0*(img-img.min()))/(img.max()-img.min()))
        #     cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_1_{}.jpg".format(i),np.asarray(img))
        #     img = np.asarray(x_proj_reshaped3[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
        #     img = ((255.0*(img-img.min()))/(img.max()-img.min()))
        #     cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_3_{}.jpg".format(i),np.asarray(img))
        #     img = np.asarray(x_proj_reshaped5[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
        #     img = ((255.0*(img-img.min()))/(img.max()-img.min()))
        #     cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_5_{}.jpg".format(i),np.asarray(img))
        #     img = np.asarray(x_proj_reshapedm[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
        #     img = ((255.0*(img-img.min()))/(img.max()-img.min()))
        #     cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_max_{}.jpg".format(i),np.asarray(img))
        #     img = np.asarray(x_proj[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
        #     img = ((255.0*(img-img.min()))/(img.max()-img.min()))
        #     cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/x_proj_in_{}.jpg".format(i),np.asarray(img))
        #     img = np.asarray(x[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
        #     img = ((255.0*(img-img.min()))/(img.max()-img.min()))
        #     cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/x_{}.jpg".format(i),np.asarray(img))
        #     img = np.asarray(out[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
        #     img = ((255.0*(img-img.min()))/(img.max()-img.min()))
        #     cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/out_{}.jpg".format(i),np.asarray(img))
        return out
class Inception_GloRe_Unit_2D(GloRe_Unit):
    """2D specialisation of GloRe_Unit, bound to Conv2d / BatchNorm2d."""
    def __init__(self, num_in, num_mid, normalize=False):
        # Pass 'normalize=True' if the input size is not fixed.
        super(Inception_GloRe_Unit_2D, self).__init__(
            num_in,
            num_mid,
            ConvNd=nn.Conv2d,
            BatchNormNd=nn.BatchNorm2d,
            normalize=normalize,
        )
############### GloRe ################################
class GloRe_Unit_v2(nn.Module):
    """
    Graph-based Global Reasoning Unit, v2.

    Unlike GloRe_Unit, the four Inception-style branches (1x1, 3x3, 5x5
    convolutions and a max-pool path) are fused with a 1x1 convolution
    over the stacked branch outputs, and a single GCN reasons over the
    resulting graph.

    Parameter:
        'normalize' is not necessary if the input size is fixed
    """
    def __init__(self, num_in, num_mid,
                 ConvNd=nn.Conv3d,
                 BatchNormNd=nn.BatchNorm3d,
                 normalize=False):
        super(GloRe_Unit_v2, self).__init__()
        self.normalize = normalize
        # num_s: channels of the node-state space; num_n: number of graph nodes.
        self.num_s = int(2 * num_mid)
        self.num_n = int(1 * num_mid)
        # reduce dim
        self.conv1_state = ConvNd(num_in, self.num_s, kernel_size=1) #1x1 Convolutional layer (reduce dim)
        self.conv3_state = ConvNd(num_in, self.num_s, kernel_size=3, padding=1) #3x3 Convolutional layer (reduce dim)
        self.conv5_state = ConvNd(num_in, self.num_s, kernel_size=5, padding=2) #5x5 Convolutional layer (reduce dim)
        self.maxpool_state = nn.MaxPool2d(kernel_size=3, padding=1, stride=1) #Max pooling layer (reduce dim)
        self.maxconv1_state = ConvNd(num_in, self.num_s, kernel_size=1) #max pooling 1x1 conv layer (reduce dim)
        self.concat1_state = ConvNd(4, 1, kernel_size=1) #concat 1x1 conv layer (reduce dim)
        # projection map
        self.conv1_proj = ConvNd(num_in, self.num_n, kernel_size=1) #1x1 Convolutional layer (proj)
        self.conv3_proj = ConvNd(num_in, self.num_n, kernel_size=3, padding=1) #3x3 Convolutional layer (proj)
        self.conv5_proj = ConvNd(num_in, self.num_n, kernel_size=5, padding=2) #5x5 Convolutional layer (proj)
        self.maxpool_proj = nn.MaxPool2d(kernel_size=3, padding=1, stride=1) #Max pooling layer (proj)
        self.maxconv1_proj = ConvNd(num_in, self.num_n, kernel_size=1) #max pooling 1x1 conv layer (proj)
        self.concat1_proj = ConvNd(4, 1, kernel_size=1) #concat 1x1 conv layer (proj)
        # ----------
        # reasoning via graph convolution
        self.gcn = GCN(num_state=self.num_s, num_node=self.num_n)
        # ----------
        # extend dimension
        self.conv_extend = ConvNd(self.num_s, num_in, kernel_size=1, bias=False)
        self.blocker = BatchNormNd(num_in, eps=1e-04) # should be zero initialized
    def forward(self, x, print_features=False):
        '''
        :param x: input feature map (n, num_in, spatial...)
        :param print_features: if True, dump a few intermediate channels
            to disk with cv2 (debugging aid)
        :return: x plus the reasoned feature map (same shape as x)
        '''
        n = x.size(0)
        #print(x.shape)
        # (n, num_in, h, w) --> (n, num_state, h, w)
        #                   --> (n, num_state, h*w)
        # Each branch keeps a singleton dim so the four can be stacked.
        x_state_reshaped1 = self.conv1_state(x).view(n, 1, self.num_s, -1)
        x_state_reshaped3 = self.conv3_state(x).view(n, 1, self.num_s, -1)
        x_state_reshaped5 = self.conv5_state(x).view(n, 1, self.num_s, -1)
        x_state_reshapedm = self.maxconv1_state(self.maxpool_state(x)).view(n, 1, self.num_s, -1)
        x_state_concat = torch.cat((x_state_reshaped1, x_state_reshaped3, x_state_reshaped5, x_state_reshapedm), 1)
        # Fuse the four stacked branches with a 1x1 conv over the stack dim.
        x_state_reshaped = self.concat1_state(x_state_concat).view(n, self.num_s, -1)
        # (n, num_in, h, w) --> (n, num_node, h, w)
        #                   --> (n, num_node, h*w)
        x_proj_reshaped1 = self.conv1_proj(x).view(n, 1, self.num_n, -1)
        x_proj_reshaped3 = self.conv3_proj(x).view(n, 1, self.num_n, -1)
        x_proj_reshaped5 = self.conv5_proj(x).view(n, 1, self.num_n, -1)
        x_proj_reshapedm = self.maxconv1_proj(self.maxpool_proj(x)).view(n, 1, self.num_n, -1)
        x_proj_concat = torch.cat((x_proj_reshaped1, x_proj_reshaped3, x_proj_reshaped5, x_proj_reshapedm), 1)
        x_proj_reshaped = self.concat1_proj(x_proj_concat).view(n, self.num_n, -1)
        # (n, num_in, h, w) --> (n, num_node, h, w)
        #                   --> (n, num_node, h*w)
        # Reverse projection reuses the forward projection weights.
        x_rproj_reshaped = x_proj_reshaped
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # projection: coordinate space -> interaction space
        # (n, num_state, h*w) x (n, num_node, h*w)T --> (n, num_state, num_node)
        x_n_state = torch.matmul(x_state_reshaped, x_proj_reshaped.permute(0, 2, 1))
        if self.normalize:
            x_n_state = x_n_state * (1. / x_state_reshaped.size(2))
        # reasoning: (n, num_state, num_node) -> (n, num_state, num_node)
        x_n_rel = self.gcn(x_n_state)
        # reverse projection: interaction space -> coordinate space
        # (n, num_state, num_node) x (n, num_node, h*w) --> (n, num_state, h*w)
        x_state_reshaped = torch.matmul(x_n_rel, x_rproj_reshaped)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # (n, num_state, h*w) --> (n, num_state, h, w)
        x_state = x_state_reshaped.view(n, self.num_s, *x.size()[2:])
        # -----------------
        # (n, num_state, h, w) -> (n, num_in, h, w)
        x_reasoned = self.blocker(self.conv_extend(x_state))
        # Residual connection back onto the input.
        out = x + x_reasoned
        if print_features:
            # Debug only: normalise a few channels to [0, 255] and save.
            for i in range(4):
                img = np.asarray(x_state_reshaped[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
                img = ((255.0*(img-img.min()))/(img.max()-img.min()))
                cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/x_state_{}.jpg".format(i),np.asarray(img))
                img = np.asarray(x_state_reshaped[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
                img = ((255.0*(img-img.min()))/(img.max()-img.min()))
                cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/x_proj_{}.jpg".format(i),np.asarray(img))
                img = np.asarray(x[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
                img = ((255.0*(img-img.min()))/(img.max()-img.min()))
                cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/x_{}.jpg".format(i),np.asarray(img))
                img = np.asarray(out[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
                img = ((255.0*(img-img.min()))/(img.max()-img.min()))
                cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/out_{}.jpg".format(i),np.asarray(img))
        return out
class Inception_GloRe_Unit_2D_v2(GloRe_Unit_v2):
    """2D specialisation of GloRe_Unit_v2, bound to Conv2d / BatchNorm2d."""
    def __init__(self, num_in, num_mid, normalize=False):
        """
        Set 'normalize = True' if the input size is not fixed
        """
        super(Inception_GloRe_Unit_2D_v2, self).__init__(num_in, num_mid,
                                                         ConvNd=nn.Conv2d,
                                                         BatchNormNd=nn.BatchNorm2d,
normalize=normalize) | [
"torch.nn.ReLU",
"numpy.asarray",
"torch.nn.MaxPool2d",
"torch.matmul",
"torch.nn.Conv1d",
"torch.cat"
] | [((340, 384), 'torch.nn.Conv1d', 'nn.Conv1d', (['num_node', 'num_node'], {'kernel_size': '(1)'}), '(num_node, num_node, kernel_size=1)\n', (349, 384), False, 'from torch import nn\n'), ((405, 426), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (412, 426), False, 'from torch import nn\n'), ((448, 505), 'torch.nn.Conv1d', 'nn.Conv1d', (['num_state', 'num_state'], {'kernel_size': '(1)', 'bias': 'bias'}), '(num_state, num_state, kernel_size=1, bias=bias)\n', (457, 505), False, 'from torch import nn\n'), ((1685, 1733), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)'}), '(kernel_size=3, padding=1, stride=1)\n', (1697, 1733), False, 'from torch import nn\n'), ((2104, 2152), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)'}), '(kernel_size=3, padding=1, stride=1)\n', (2116, 2152), False, 'from torch import nn\n'), ((5806, 5847), 'torch.matmul', 'torch.matmul', (['x_n_rel1', 'x_rproj_reshaped1'], {}), '(x_n_rel1, x_rproj_reshaped1)\n', (5818, 5847), False, 'import torch\n'), ((5876, 5917), 'torch.matmul', 'torch.matmul', (['x_n_rel3', 'x_rproj_reshaped3'], {}), '(x_n_rel3, x_rproj_reshaped3)\n', (5888, 5917), False, 'import torch\n'), ((5946, 5987), 'torch.matmul', 'torch.matmul', (['x_n_rel5', 'x_rproj_reshaped5'], {}), '(x_n_rel5, x_rproj_reshaped5)\n', (5958, 5987), False, 'import torch\n'), ((6016, 6057), 'torch.matmul', 'torch.matmul', (['x_n_relm', 'x_rproj_reshapedm'], {}), '(x_n_relm, x_rproj_reshapedm)\n', (6028, 6057), False, 'import torch\n'), ((10457, 10505), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)'}), '(kernel_size=3, padding=1, stride=1)\n', (10469, 10505), False, 'from torch import nn\n'), ((11192, 11240), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)'}), '(kernel_size=3, padding=1, stride=1)\n', (11204, 11240), 
False, 'from torch import nn\n'), ((12457, 12551), 'torch.cat', 'torch.cat', (['(x_state_reshaped1, x_state_reshaped3, x_state_reshaped5, x_state_reshapedm)', '(1)'], {}), '((x_state_reshaped1, x_state_reshaped3, x_state_reshaped5,\n x_state_reshapedm), 1)\n', (12466, 12551), False, 'import torch\n'), ((13083, 13173), 'torch.cat', 'torch.cat', (['(x_proj_reshaped1, x_proj_reshaped3, x_proj_reshaped5, x_proj_reshapedm)', '(1)'], {}), '((x_proj_reshaped1, x_proj_reshaped3, x_proj_reshaped5,\n x_proj_reshapedm), 1)\n', (13092, 13173), False, 'import torch\n'), ((14106, 14145), 'torch.matmul', 'torch.matmul', (['x_n_rel', 'x_rproj_reshaped'], {}), '(x_n_rel, x_rproj_reshaped)\n', (14118, 14145), False, 'import torch\n'), ((14868, 14883), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (14878, 14883), True, 'import numpy as np\n'), ((15160, 15175), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (15170, 15175), True, 'import numpy as np\n'), ((15432, 15447), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (15442, 15447), True, 'import numpy as np\n'), ((15708, 15723), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (15718, 15723), True, 'import numpy as np\n')] |
'''
Aggregate per-run QUIC client logs into JSON summaries of per-object
completion times and total page-load times.
'''
from collections import OrderedDict
import os
import json
import numpy as np
import matplotlib.pyplot as plt
# Placeholders: fill in per-experiment paths before running.
ROOT_FOLDER = ['', '','']  # base directories containing the numbered run folders
PATH_SUFFIX = ['', '','']  # per-root suffix appended after the run number
OUTPUT_FN = ['', '','']  # per-root output JSON file path
def retrieve_clogs(folder):
    """Collect quic_client.log paths for every https_quic_* run under folder.

    Run directories are visited in modification-time order. When the
    expected log file is missing, the sentinel string "error" is appended
    instead so the result stays aligned with the run list.
    """
    quic_subdir = 'quic' + '/' + '1'
    client_log = 'quic_client.log'
    run_dirs = [entry.path for entry in
                sorted(os.scandir(folder), key=os.path.getmtime)
                if entry.is_dir()]
    collected = []
    for run_dir in run_dirs:
        if 'https_quic_' not in run_dir:
            continue
        for entry_name in os.listdir(run_dir):
            if '0_d' not in entry_name:
                continue
            log_path = run_dir + '/' + entry_name + '/' + quic_subdir + '/' + client_log
            if os.path.isfile(log_path):
                collected.append(log_path)
            else:
                print ("Error in finding client_log file")
                print (log_path)
                collected.append("error")
    return collected
def load_data(filename: str, client_logs: list):
    """Parse per-run QUIC client logs into completion-time records.

    Reads one description line per run from ./batch_run_rl.txt, aligned
    by index with client_logs.

    Args:
        filename: label used only in error reporting.
        client_logs: paths to quic_client.log files (entries that are not
            existing files, e.g. the "error" sentinel, are recorded as
            missing data).

    Returns:
        One dict per entry of client_logs: either
        {'graph', 'c_times', 'avg_c_times', 'total_c_time'} or, when the
        log is missing, {'graph', 'error': 'no data'}. 'avg_c_times' and
        'total_c_time' are None when the log held no matching lines.
    """
    import re
    import numpy as np
    # Lines such as "obj r12 ..." carry per-object completion times.
    obj_line = re.compile(r'^obj[ \t]r[0-9]{1,2}')
    # open batch_run and store each run info
    batch_run_info = open('./batch_run_rl.txt').read().splitlines()
    all_data = []
    for idx, cl in enumerate(client_logs):
        completion_times = []
        # Reset per file: previously this leaked across iterations, so a
        # log without a "Page Load Time" line raised NameError on the
        # first file or silently reused the previous file's value.
        total_completion_time = None
        if not os.path.isfile(cl):
            all_data.append({
                'graph': batch_run_info[idx],
                'error': 'no data'
            })
            continue
        with open(cl, 'r') as fp:
            for line in fp:
                if obj_line.match(line):
                    # 5th whitespace token looks like "12.3ms"; drop the unit.
                    completion_times.append(float(line.split()[4][:-2]))
                elif 'Page Load Time' in line:
                    total_completion_time = float(line.split()[-1])
        # load into memory
        try:
            all_data.append({
                'graph': batch_run_info[idx],
                'c_times': completion_times,
                # None (not NaN) for empty logs keeps the output valid JSON.
                'avg_c_times': np.mean(completion_times) if completion_times else None,
                'total_c_time': total_completion_time
            })
        except Exception as ex:
            print(filename)
            print(ex)
    return all_data
def main():
    """Aggregate client logs for every configured root folder into JSON files."""
    for root_idx, root in enumerate(ROOT_FOLDER):
        collected = []
        # Runs are numbered 1..10 under each root folder.
        for run in range(1, 11):
            run_path = root + str(run) + PATH_SUFFIX[root_idx]
            run_logs = retrieve_clogs(run_path)
            collected.append(load_data('', run_logs))
        with open(OUTPUT_FN[root_idx], 'w') as out_file:
            out_file.write(json.dumps(collected, indent=4))
if __name__ == "__main__":
main() | [
"numpy.mean",
"os.listdir",
"re.compile",
"json.dumps",
"os.scandir",
"os.path.isfile"
] | [((1125, 1142), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (1135, 1142), False, 'import re\n'), ((532, 545), 'os.listdir', 'os.listdir', (['s'], {}), '(s)\n', (542, 545), False, 'import os\n'), ((1364, 1382), 'os.path.isfile', 'os.path.isfile', (['cl'], {}), '(cl)\n', (1378, 1382), False, 'import os\n'), ((370, 388), 'os.scandir', 'os.scandir', (['folder'], {}), '(folder)\n', (380, 388), False, 'import os\n'), ((2614, 2647), 'json.dumps', 'json.dumps', (['output_data'], {'indent': '(4)'}), '(output_data, indent=4)\n', (2624, 2647), False, 'import json\n'), ((705, 723), 'os.path.isfile', 'os.path.isfile', (['fp'], {}), '(fp)\n', (719, 723), False, 'import os\n'), ((2007, 2032), 'numpy.mean', 'np.mean', (['completion_times'], {}), '(completion_times)\n', (2014, 2032), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
TopK for text generation
"""
import numpy as np
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
def generate(model, origin_inputs, seq_length, end_token=50256):
    """
    TopK for text generation

    Inputs:
        model: the model for inferencing
        origin_inputs: the original inputs based on which the model will continue writing
        seq_length: seq_length for the model
        end_token: end of sentence token id

    Returns:
        outputs: the ids for the generated text
    """
    TOPK = 5
    bs, valid_length = origin_inputs.shape
    # Right-pad the prompt with zeros up to the model's fixed sequence length.
    pad_length = seq_length - origin_inputs.shape[-1]
    input_ids = np.pad(origin_inputs, ((0, 0), (0, pad_length)), 'constant', constant_values=(0, 0))
    print("input_ids is ", input_ids)
    # Fix: initialise outputs so it is defined even when the prompt already
    # fills the sequence and the loop body never runs (was a NameError).
    outputs = input_ids
    while valid_length < seq_length:
        inputs = Tensor(input_ids, mstype.int32)
        logits = model(inputs).asnumpy()
        logits = logits.reshape(bs, seq_length, -1)
        # Distribution over the vocabulary at the last valid position.
        probs = logits[0, valid_length - 1, :]
        # Sample among the TOPK most likely tokens, renormalised.
        p_args = probs.argsort()[::-1][:TOPK]
        p = probs[p_args]
        p = p / sum(p)
        target_index = np.random.choice(len(p), p=p)
        # Stop on end-of-sentence or when the sequence is full.
        if p_args[target_index] == end_token or valid_length == seq_length - 1:
            outputs = input_ids
            break
        input_ids[0][valid_length] = p_args[target_index]
        valid_length += 1
    # Trim trailing padding and return the single generated sequence.
    length = np.sum(outputs != 0)
    outputs = outputs[0][:length]
    return outputs
| [
"numpy.sum",
"numpy.pad",
"mindspore.common.tensor.Tensor"
] | [((1364, 1452), 'numpy.pad', 'np.pad', (['origin_inputs', '((0, 0), (0, pad_length))', '"""constant"""'], {'constant_values': '(0, 0)'}), "(origin_inputs, ((0, 0), (0, pad_length)), 'constant',\n constant_values=(0, 0))\n", (1370, 1452), True, 'import numpy as np\n'), ((2085, 2105), 'numpy.sum', 'np.sum', (['(outputs != 0)'], {}), '(outputs != 0)\n', (2091, 2105), True, 'import numpy as np\n'), ((1541, 1572), 'mindspore.common.tensor.Tensor', 'Tensor', (['input_ids', 'mstype.int32'], {}), '(input_ids, mstype.int32)\n', (1547, 1572), False, 'from mindspore.common.tensor import Tensor\n')] |
import tensorflow as tf
import numpy as np
from tfmonopoles.theories import GeorgiGlashowRadialTheory
from tfmonopoles import FieldTools
import argparse
# Command-line options: theory parameters, I/O paths and the
# gradient-descent schedule.
parser = argparse.ArgumentParser(description="Generate a monopole ring")
parser.add_argument("--vev", "-v", default=1.0, type=float)
parser.add_argument("--gaugeCoupling", "-g", default=1.0, type=float)
parser.add_argument("--selfCoupling", "-l", default=0.5, type=float)
parser.add_argument("--tol", "-t", default=1e-3, type=float)
parser.add_argument("--outputPath", "-o", default="", type=str)
parser.add_argument("--inputPath", "-i", default="", type=str)
parser.add_argument("--numCores", "-n", default=0, type=int)
parser.add_argument("--externalField", "-B", default=0, type=int)
parser.add_argument("--maxSteps", "-M", default=100000, type=int)
parser.add_argument("--momentum", "-p", default=0.95, type=float)

args = parser.parse_args()

# Optionally pin TensorFlow's thread pools (0 keeps TF's default).
if args.numCores != 0:
    tf.config.threading.set_intra_op_parallelism_threads(args.numCores)
    tf.config.threading.set_inter_op_parallelism_threads(args.numCores)
# Load data from input path: lattice coordinates, fields and the
# parameters the input configuration was produced with.
inputPath = args.inputPath
R = tf.constant(np.load(inputPath + "/R.npy", allow_pickle=True))
Y = tf.constant(np.load(inputPath + "/Y.npy", allow_pickle=True))
Z = tf.constant(np.load(inputPath + "/Z.npy", allow_pickle=True))
scalarField = np.load(inputPath + "/scalarField.npy", allow_pickle=True)
gaugeField = np.load(inputPath + "/gaugeField.npy", allow_pickle=True)
inputParams = np.load(inputPath + "/params.npy", allow_pickle=True).item()

latShape = tf.shape(R)

# Add magnetic field if required (numFluxQuanta = 0 leaves the gauge
# field unchanged).
numFluxQuanta = args.externalField
magField = FieldTools.constantMagneticField(R, Y, Z, 0, numFluxQuanta)
gaugeField = FieldTools.linearSuperpose(gaugeField, magField)

# Theory parameters
params = {
    "vev" : args.vev,
    "selfCoupling" : args.selfCoupling,
    "gaugeCoupling" : args.gaugeCoupling,
    "latShape" : latShape
}

# Rescale the loaded scalar field from the input vev to the requested one.
scalarField = scalarField * tf.cast(params["vev"] / inputParams["vev"], tf.complex128)
theory = GeorgiGlashowRadialTheory(params)

# Optimisation variables: the scalar and gauge field configurations.
scalarFieldVar = tf.Variable(scalarField)
gaugeFieldVar = tf.Variable(gaugeField)

@tf.function
def lossFn():
    """Return the field energy of the current configuration (descent loss)."""
    return theory.energy(scalarFieldVar, gaugeFieldVar)

energy = lossFn()
tf.print(energy)
# Stopping criterion on RSS (root-sum-square) gradient
tol = args.tol
# Just need to satisfy rssGrad < rssGradOld to start the loop
rssGrad = 1e6
rssGradOld = 1e7
numSteps = 0
maxSteps = args.maxSteps
printIncrement = 10
minSteps = 100

# First perform standard gradient descent to get close to the saddle point.
# Runs at least minSteps, then stops once the gradient norm rises,
# converges below tol, or maxSteps is reached.
opt = tf.keras.optimizers.SGD(learning_rate=0.01*args.gaugeCoupling*args.vev)
while numSteps < minSteps or (rssGrad < rssGradOld and numSteps < maxSteps and rssGrad > tol):
    # Compute the field energy, with tf watching the variables
    with tf.GradientTape() as tape:
        energy = lossFn()
    vars = [scalarFieldVar, gaugeFieldVar]
    # Compute the gradients using automatic differentiation
    grads = tape.gradient(energy, vars)
    # Postprocess the gauge field gradients
    grads = theory.processGradients(grads, vars)
    # Compute RSS gradient for stopping criterion
    gradSq = FieldTools.innerProduct(grads[0], grads[0], tr=True)
    gradSq += FieldTools.innerProduct(grads[1], grads[1], tr=True, adj=True)
    rssGradOld = rssGrad
    rssGrad = tf.math.sqrt(gradSq)
    # rssGrad = tf.reduce_max(tf.abs(grads[1]))
    if (numSteps % printIncrement == 0):
        print("Energy after " + str(numSteps) + " iterations: " +\
            str(energy.numpy()))
        print("RSS gradient after " + str(numSteps) + " iterations: " +\
            str(rssGrad.numpy()))
    # Perform the gradient descent step
    opt.apply_gradients(zip(grads, vars))
    numSteps += 1
    # Postprocess the fields: keep the scalar field real and project the
    # gauge field back onto SU(2) to avoid numerical drift.
    scalarFieldVar.assign(0.5*(scalarFieldVar + tf.math.conj(scalarFieldVar)))
    gaugeFieldVar.assign(FieldTools.projectToSu2(gaugeFieldVar))

print("First gradient descent completed in " + str(numSteps) + " iterations")
print("Energy reached: " + str(energy.numpy()))

# Intermediate save (skipped when no output path is given)
outputPath = args.outputPath
if outputPath != "":
    np.save(outputPath + "/R", R.numpy())
    np.save(outputPath + "/Y", Y.numpy())
    np.save(outputPath + "/Z", Z.numpy())
    np.save(outputPath + "/scalarField", scalarFieldVar.numpy())
    np.save(outputPath + "/gaugeField", gaugeFieldVar.numpy())
    np.save(outputPath + "/params", params)
# Now minimise the RSS gradient summed over all sites: descending on
# |grad E|^2 (rather than E) converges to the saddle point itself.
opt = tf.keras.optimizers.SGD(learning_rate=1e-5, momentum=args.momentum)
numSteps = 0
while rssGrad > tol and numSteps < maxSteps:
    vars = [scalarFieldVar, gaugeFieldVar]
    # Compute the field energy, with tf watching the variables.
    # The outer tape records the gradient-squared computation so it can be
    # differentiated a second time below.
    with tf.GradientTape() as outterTape:
        with tf.GradientTape() as innerTape:
            energy = lossFn()
        # Compute the gradients using automatic differentiation
        grads = innerTape.gradient(energy, vars)
        # Postprocess the gauge field gradients
        grads = theory.processGradients(grads, vars)
        # Compute squared gradients (note that as this is being tracked we can't
        # use the innerProduct function due to passing by value)
        gradSq = tf.math.real(
            tf.reduce_sum(tf.linalg.adjoint(grads[0]) @ grads[0])
        )
        gradSq += tf.math.real(
            tf.reduce_sum(
                tf.linalg.trace(tf.linalg.adjoint(grads[1]) @ grads[1])
            )
        )
    rssGrad = tf.sqrt(gradSq)
    # Compute the second-level gradients (gradient of gradient squared)
    ggrads = outterTape.gradient(gradSq, vars)
    ggrads = theory.processGradients(ggrads, vars)
    # Normalise second-level gradients on a field-by-field basis
    scalarGGradSq = FieldTools.innerProduct(ggrads[0], ggrads[0], adj=True)
    gaugeGGradSq = FieldTools.innerProduct(
        ggrads[1], ggrads[1], tr=True, adj=True
    )
    # Small epsilon guards against division by zero at convergence.
    ggrads[0] /= tf.cast(tf.math.sqrt(scalarGGradSq) + 1e-6, tf.complex128)
    ggrads[1] /= tf.cast(tf.math.sqrt(gaugeGGradSq) + 1e-6, tf.complex128)
    if (numSteps % printIncrement == 0):
        print("Energy after " + str(numSteps) + " iterations: " +\
            str(energy.numpy()))
        print("RSS gradient after " + str(numSteps) + " iterations: " +\
            str(rssGrad.numpy()))
    # Perform the gradient descent step
    opt.apply_gradients(zip(ggrads, vars))
    numSteps += 1
    # Postprocess the fields to avoid drift away from SU(2)/its Lie algebra
    scalarFieldVar.assign(0.5*(scalarFieldVar + tf.math.conj(scalarFieldVar)))
    gaugeFieldVar.assign(FieldTools.projectToSu2(gaugeFieldVar))

print("Gradient descent finished in " + str(numSteps) + " iterations")
print("Final energy: " + str(energy.numpy()))

# Save fields as .npy files for plotting and further analysis
if outputPath != "":
    np.save(outputPath + "/R", R.numpy())
    np.save(outputPath + "/Y", Y.numpy())
    np.save(outputPath + "/Z", Z.numpy())
    np.save(outputPath + "/scalarField", scalarFieldVar.numpy())
    np.save(outputPath + "/gaugeField", gaugeFieldVar.numpy())
    np.save(outputPath + "/params", params)
| [
"tensorflow.shape",
"tensorflow.math.conj",
"tensorflow.GradientTape",
"tfmonopoles.FieldTools.projectToSu2",
"tfmonopoles.FieldTools.linearSuperpose",
"tensorflow.cast",
"numpy.save",
"argparse.ArgumentParser",
"tensorflow.math.sqrt",
"tensorflow.keras.optimizers.SGD",
"tensorflow.config.thread... | [((163, 226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate a monopole ring"""'}), "(description='Generate a monopole ring')\n", (186, 226), False, 'import argparse\n'), ((1337, 1395), 'numpy.load', 'np.load', (["(inputPath + '/scalarField.npy')"], {'allow_pickle': '(True)'}), "(inputPath + '/scalarField.npy', allow_pickle=True)\n", (1344, 1395), True, 'import numpy as np\n'), ((1409, 1466), 'numpy.load', 'np.load', (["(inputPath + '/gaugeField.npy')"], {'allow_pickle': '(True)'}), "(inputPath + '/gaugeField.npy', allow_pickle=True)\n", (1416, 1466), True, 'import numpy as np\n'), ((1553, 1564), 'tensorflow.shape', 'tf.shape', (['R'], {}), '(R)\n', (1561, 1564), True, 'import tensorflow as tf\n'), ((1645, 1704), 'tfmonopoles.FieldTools.constantMagneticField', 'FieldTools.constantMagneticField', (['R', 'Y', 'Z', '(0)', 'numFluxQuanta'], {}), '(R, Y, Z, 0, numFluxQuanta)\n', (1677, 1704), False, 'from tfmonopoles import FieldTools\n'), ((1718, 1766), 'tfmonopoles.FieldTools.linearSuperpose', 'FieldTools.linearSuperpose', (['gaugeField', 'magField'], {}), '(gaugeField, magField)\n', (1744, 1766), False, 'from tfmonopoles import FieldTools\n'), ((2029, 2062), 'tfmonopoles.theories.GeorgiGlashowRadialTheory', 'GeorgiGlashowRadialTheory', (['params'], {}), '(params)\n', (2054, 2062), False, 'from tfmonopoles.theories import GeorgiGlashowRadialTheory\n'), ((2081, 2105), 'tensorflow.Variable', 'tf.Variable', (['scalarField'], {}), '(scalarField)\n', (2092, 2105), True, 'import tensorflow as tf\n'), ((2122, 2145), 'tensorflow.Variable', 'tf.Variable', (['gaugeField'], {}), '(gaugeField)\n', (2133, 2145), True, 'import tensorflow as tf\n'), ((2249, 2265), 'tensorflow.print', 'tf.print', (['energy'], {}), '(energy)\n', (2257, 2265), True, 'import tensorflow as tf\n'), ((2569, 2644), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01 * args.gaugeCoupling * 
args.vev)'}), '(learning_rate=0.01 * args.gaugeCoupling * args.vev)\n', (2592, 2644), True, 'import tensorflow as tf\n'), ((4491, 4559), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(1e-05)', 'momentum': 'args.momentum'}), '(learning_rate=1e-05, momentum=args.momentum)\n', (4514, 4559), True, 'import tensorflow as tf\n'), ((929, 996), 'tensorflow.config.threading.set_intra_op_parallelism_threads', 'tf.config.threading.set_intra_op_parallelism_threads', (['args.numCores'], {}), '(args.numCores)\n', (981, 996), True, 'import tensorflow as tf\n'), ((1001, 1068), 'tensorflow.config.threading.set_inter_op_parallelism_threads', 'tf.config.threading.set_inter_op_parallelism_threads', (['args.numCores'], {}), '(args.numCores)\n', (1053, 1068), True, 'import tensorflow as tf\n'), ((1141, 1189), 'numpy.load', 'np.load', (["(inputPath + '/R.npy')"], {'allow_pickle': '(True)'}), "(inputPath + '/R.npy', allow_pickle=True)\n", (1148, 1189), True, 'import numpy as np\n'), ((1207, 1255), 'numpy.load', 'np.load', (["(inputPath + '/Y.npy')"], {'allow_pickle': '(True)'}), "(inputPath + '/Y.npy', allow_pickle=True)\n", (1214, 1255), True, 'import numpy as np\n'), ((1273, 1321), 'numpy.load', 'np.load', (["(inputPath + '/Z.npy')"], {'allow_pickle': '(True)'}), "(inputPath + '/Z.npy', allow_pickle=True)\n", (1280, 1321), True, 'import numpy as np\n'), ((1960, 2018), 'tensorflow.cast', 'tf.cast', (["(params['vev'] / inputParams['vev'])", 'tf.complex128'], {}), "(params['vev'] / inputParams['vev'], tf.complex128)\n", (1967, 2018), True, 'import tensorflow as tf\n'), ((3164, 3216), 'tfmonopoles.FieldTools.innerProduct', 'FieldTools.innerProduct', (['grads[0]', 'grads[0]'], {'tr': '(True)'}), '(grads[0], grads[0], tr=True)\n', (3187, 3216), False, 'from tfmonopoles import FieldTools\n'), ((3231, 3293), 'tfmonopoles.FieldTools.innerProduct', 'FieldTools.innerProduct', (['grads[1]', 'grads[1]'], {'tr': '(True)', 'adj': '(True)'}), '(grads[1], grads[1], 
tr=True, adj=True)\n', (3254, 3293), False, 'from tfmonopoles import FieldTools\n'), ((3334, 3354), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['gradSq'], {}), '(gradSq)\n', (3346, 3354), True, 'import tensorflow as tf\n'), ((4389, 4428), 'numpy.save', 'np.save', (["(outputPath + '/params')", 'params'], {}), "(outputPath + '/params', params)\n", (4396, 4428), True, 'import numpy as np\n'), ((5769, 5824), 'tfmonopoles.FieldTools.innerProduct', 'FieldTools.innerProduct', (['ggrads[0]', 'ggrads[0]'], {'adj': '(True)'}), '(ggrads[0], ggrads[0], adj=True)\n', (5792, 5824), False, 'from tfmonopoles import FieldTools\n'), ((5844, 5908), 'tfmonopoles.FieldTools.innerProduct', 'FieldTools.innerProduct', (['ggrads[1]', 'ggrads[1]'], {'tr': '(True)', 'adj': '(True)'}), '(ggrads[1], ggrads[1], tr=True, adj=True)\n', (5867, 5908), False, 'from tfmonopoles import FieldTools\n'), ((7117, 7156), 'numpy.save', 'np.save', (["(outputPath + '/params')", 'params'], {}), "(outputPath + '/params', params)\n", (7124, 7156), True, 'import numpy as np\n'), ((1481, 1534), 'numpy.load', 'np.load', (["(inputPath + '/params.npy')"], {'allow_pickle': '(True)'}), "(inputPath + '/params.npy', allow_pickle=True)\n", (1488, 1534), True, 'import numpy as np\n'), ((2808, 2825), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2823, 2825), True, 'import tensorflow as tf\n'), ((3893, 3931), 'tfmonopoles.FieldTools.projectToSu2', 'FieldTools.projectToSu2', (['gaugeFieldVar'], {}), '(gaugeFieldVar)\n', (3916, 3931), False, 'from tfmonopoles import FieldTools\n'), ((4733, 4750), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4748, 4750), True, 'import tensorflow as tf\n'), ((5496, 5511), 'tensorflow.sqrt', 'tf.sqrt', (['gradSq'], {}), '(gradSq)\n', (5503, 5511), True, 'import tensorflow as tf\n'), ((6617, 6655), 'tfmonopoles.FieldTools.projectToSu2', 'FieldTools.projectToSu2', (['gaugeFieldVar'], {}), '(gaugeFieldVar)\n', (6640, 6655), False, 'from tfmonopoles import 
FieldTools\n'), ((4779, 4796), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4794, 4796), True, 'import tensorflow as tf\n'), ((5953, 5980), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['scalarGGradSq'], {}), '(scalarGGradSq)\n', (5965, 5980), True, 'import tensorflow as tf\n'), ((6029, 6055), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['gaugeGGradSq'], {}), '(gaugeGGradSq)\n', (6041, 6055), True, 'import tensorflow as tf\n'), ((3837, 3865), 'tensorflow.math.conj', 'tf.math.conj', (['scalarFieldVar'], {}), '(scalarFieldVar)\n', (3849, 3865), True, 'import tensorflow as tf\n'), ((6561, 6589), 'tensorflow.math.conj', 'tf.math.conj', (['scalarFieldVar'], {}), '(scalarFieldVar)\n', (6573, 6589), True, 'import tensorflow as tf\n'), ((5261, 5288), 'tensorflow.linalg.adjoint', 'tf.linalg.adjoint', (['grads[0]'], {}), '(grads[0])\n', (5278, 5288), True, 'import tensorflow as tf\n'), ((5406, 5433), 'tensorflow.linalg.adjoint', 'tf.linalg.adjoint', (['grads[1]'], {}), '(grads[1])\n', (5423, 5433), True, 'import tensorflow as tf\n')] |
import sys
import importlib
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--SRN', required=True)
args = parser.parse_args()
subname = args.SRN
try:
mymodule = importlib.import_module(subname)
except Exception as e:
print(e)
print("Rename your written program as YOUR_SRN.py and run python3.7 SampleTest.py --SRN YOUR_SRN ")
sys.exit()
Tensor = mymodule.Tensor
a = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]))
b = Tensor(np.array([[3.0, 2.0], [1.0, 5.0]]), requires_grad=False)
c = Tensor(np.array([[3.2, 4.5], [6.1, 4.2]]))
z = np.array([[0.0, 0.0], [0.0, 0.0]])
sans = a+b
sans2 = a+a
mulans = a@b
mulans2 = (a+b)@c
d = a + c + a + c + a + a
sgrad = np.array([[1.0, 1.0], [1.0, 1.0]])
sgrad2 = np.array([[2.0, 2.0], [2.0, 2.0]])
sgrad3 = np.array([])
mulgrad = np.array([[5.0, 6.0], [5.0, 6.0]])
mulgrad2 = np.array([[4.0, 4.0], [6.0, 6.0]])
mulgrad3 = np.array([[7.7, 10.29], [7.7, 10.29]])
mulgrad4 = np.array([[8.0, 8.0], [13.0, 13.0]])
x_tensor = Tensor(np.array([[ 0.5606, -0.7506, -0.9142, -1.1527, 0.0224],
[-2.4439, 1.5957, -0.2165, -0.2130, 1.0369],
[ 0.3105, -0.7681, -0.7885, -0.4243, -0.3060],
[ 0.6828, -0.3238, -1.9912, 1.5819, -1.5010],
[ 0.2657, 1.1983, 0.3082, -0.6292, -1.2506]]))
y_tensor = Tensor(np.array([[-1.0242, -2.2796, -1.0042, -0.6544, -0.0104],
[-2.4508, 1.0339, 0.3305, 1.0350, -0.9931],
[ 1.1692, -0.8511, 0.9211, 2.3074, 0.3165],
[ 0.3364, -0.3250, 1.0391, 0.0773, -0.3774],
[ 0.7861, -1.4565, 0.2544, 1.1455, -0.1651]]))
i_tensor = Tensor(np.array([[ 0.7377, -0.0076, -0.6924, 0.7849, -0.3795],
[ 1.2860, 0.4247, 0.5646, -0.1582, 0.5034],
[ 1.1832, -0.7087, -2.2267, 0.0550, 0.7731],
[ 1.6035, 1.0802, 1.1341, 0.3498, 1.5319],
[-1.3249, 0.6984, 0.3353, 0.5496, -0.1019]]))
j_tensor = Tensor(np.array([[-0.7789, -1.9501, -0.0298, 0.2694, 0.9825],
[ 0.9122, 0.0995, 0.1650, 0.1503, 0.2796],
[-1.1105, -0.1824, 0.4612, -1.6591, -0.9712],
[-0.1923, -1.7089, -1.2546, 0.5906, 0.0530],
[-0.7918, -0.1133, -0.7857, -0.6806, -0.8384]]))
a_tensor = x_tensor@y_tensor + x_tensor + y_tensor
b_tensor = i_tensor + j_tensor + j_tensor
c_tensor = a_tensor@b_tensor + b_tensor@a_tensor
d_tensor = c_tensor + a_tensor + b_tensor
e_tensor = d_tensor@c_tensor@a_tensor@b_tensor
f_tensor = a_tensor + b_tensor + c_tensor@d_tensor
xgrad = np.array([[ -259.0974, 1300.5018, -1644.5557, -558.8008, -1131.4475],
[ 1383.8822, -224.8970, -1311.9617, -272.2783, -500.5319],
[ 360.7891, 177.9477, -728.4351, -139.5216, -434.6699],
[ 429.7754, -537.6080, 35.7628, 18.1170, 166.5874],
[ -253.5349, 26.9809, -8.5875, -69.4854, -36.2280]])
ygrad = np.array([[ -240.5982, 1250.8086, 429.9676, 241.3056, 1259.6229],
[ 131.6015, -927.0339, -718.6907, -337.8886, -1314.1227],
[ -5.1307, 110.0491, 542.3163, 180.9906, 631.1237],
[ 255.5849, -847.7955, 912.8635, -179.5526, 701.6783],
[ -80.7825, -93.6348, -510.9658, -2.0664, -710.0487]])
igrad = np.array([[-445.9666, 650.2254, -533.0087, -543.5874, -947.6356],
[-674.1143, 497.9639, -117.9936, -520.1741, 306.8812],
[-340.8250, 173.8932, -570.5732, -440.7168, -787.2073],
[ 253.8853, -175.9523, -463.7092, 47.2973, -713.4709],
[ 256.3283, 328.8215, -148.9896, 127.7384, -307.9315]])
jgrad = np.array([[ -891.9332, 1300.4508, -1066.0175, -1087.1748, -1895.2711],
[-1348.2286, 995.9277, -235.9871, -1040.3481, 613.7623],
[ -681.6499, 347.7864, -1141.1464, -881.4337, -1574.4147],
[ 507.7706, -351.9045, -927.4185, 94.5947, -1426.9418],
[ 512.6566, 657.6430, -297.9792, 255.4767, -615.8631]])
a_backward_xgrad = np.array([[-3.9728, -0.0445, 4.8631, 1.7504, 1.5644],
[-3.9728, -0.0445, 4.8631, 1.7504, 1.5644],
[-3.9728, -0.0445, 4.8631, 1.7504, 1.5644],
[-3.9728, -0.0445, 4.8631, 1.7504, 1.5644],
[-3.9728, -0.0445, 4.8631, 1.7504, 1.5644]])
a_backward_ygrad = np.array([[ 0.3757, 0.3757, 0.3757, 0.3757, 0.3757],
[ 1.9515, 1.9515, 1.9515, 1.9515, 1.9515],
[-2.6022, -2.6022, -2.6022, -2.6022, -2.6022],
[ 0.1627, 0.1627, 0.1627, 0.1627, 0.1627],
[-0.9983, -0.9983, -0.9983, -0.9983, -0.9983]])
# >>> x.grad
c_backward_xgrad = np.array([[ -3.9954, 22.5097, -25.5979, -8.5898, -17.6537],
[ 18.9924, 22.7672, -53.7373, -18.7182, -26.7058],
[ 9.2571, 22.6581, -41.8202, -14.4288, -22.8722],
[ -1.4504, 22.5382, -28.7132, -9.7111, -18.6559],
[-11.0475, 22.4307, -16.9654, -5.4827, -14.8767]])
# >>> y.grad
c_backward_ygrad = np.array([[ 12.0096, 15.1671, 10.0269, 13.2290, 10.6223],
[-15.9925, 0.4087, -26.2913, -9.6583, -23.1982],
[ 10.1968, -11.6732, 23.9296, 1.7505, 19.8051],
[ -0.6124, 0.7550, -1.4710, -0.0843, -1.2131],
[ -1.4597, -9.8498, 3.8088, -4.7000, 2.2264]])
# >>> i.grad
c_backward_igrad = np.array([[-23.1594, -0.7743, -11.9566, -20.7748, -11.6678],
[ -7.8664, 14.5186, 3.3364, -5.4818, 3.6251],
[-19.4113, 2.9737, -8.2085, -17.0267, -7.9198],
[-19.7937, 2.5914, -8.5909, -17.4091, -8.3021],
[-18.2555, 4.1296, -7.0527, -15.8708, -6.7639]])
# >>> j.grad
c_backward_jgrad = np.array([[-46.3188, -1.5486, -23.9132, -41.5496, -23.3357],
[-15.7329, 29.0373, 6.6727, -10.9637, 7.2502],
[-38.8227, 5.9475, -16.4171, -34.0535, -15.8396],
[-39.5874, 5.1828, -17.1818, -34.8181, -16.6043],
[-36.5109, 8.2593, -14.1053, -31.7417, -13.5278]])
def test_case():
try:
sans.backward()
np.testing.assert_array_almost_equal(a.grad, sgrad, decimal=2)
print("Test Case 1 for the function Add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 1 for the function Add Grad FAILED")
try:
np.testing.assert_array_almost_equal(b.grad, z, decimal=2)
print("Test Case 2 for the function Add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 2 for the function Add Grad FAILED")
a.zero_grad()
b.zero_grad()
try:
sans2.backward()
np.testing.assert_array_almost_equal(a.grad, sgrad2, decimal=2)
print("Test Case 3 for the function Add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 3 for the function Add Grad FAILED")
a.zero_grad()
b.zero_grad()
try:
mulans.backward()
np.testing.assert_array_almost_equal(a.grad, mulgrad, decimal=2)
print("Test Case 4 for the function Matmul Grad PASSED")
except Exception as e:
print(e)
print("Test Case 4 for the function Matmul Grad FAILED")
try:
np.testing.assert_array_almost_equal(b.grad, z, decimal=2)
print("Test Case 5 for the function Matmul Grad PASSED")
except Exception as e:
print(e)
print("Test Case 5 for the function Matmul Grad FAILED")
a.zero_grad()
b.zero_grad()
b.requires_grad = True
try:
mulans.backward()
np.testing.assert_array_almost_equal(b.grad, mulgrad2, decimal=2)
print("Test Case 6 for the function Matmul Grad PASSED")
except Exception as e:
print(e)
print("Test Case 6 for the function Matmul Grad FAILED")
a.zero_grad()
b.zero_grad()
c.zero_grad()
d.zero_grad()
try:
mulans2.backward()
np.testing.assert_array_almost_equal(a.grad, mulgrad3, decimal=2)
np.testing.assert_array_almost_equal(b.grad, mulgrad3, decimal=2)
print("Test Case 7 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 7 for the function Matmul and add Grad FAILED")
try:
np.testing.assert_array_almost_equal(c.grad, mulgrad4, decimal=2)
print("Test Case 8 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 8 for the function Matmul and add Grad FAILED")
a.zero_grad()
b.zero_grad()
c.zero_grad()
# d.zero_grad()
try:
d.backward()
np.testing.assert_array_almost_equal(a.grad, sgrad*4, decimal=2)
print("Test Case 9 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 9 for the function Matmul and add Grad FAILED")
try:
np.testing.assert_array_almost_equal(c.grad, sgrad*2, decimal=2)
print("Test Case 10 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 10 for the function Matmul and add Grad FAILED")
x_tensor.zero_grad()
y_tensor.zero_grad()
i_tensor.zero_grad()
j_tensor.zero_grad()
try:
a_tensor.backward()
np.testing.assert_array_almost_equal(x_tensor.grad, a_backward_xgrad, decimal=2)
print("Test Case 11 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 11 for the function Matmul and add Grad FAILED")
try:
np.testing.assert_array_almost_equal(y_tensor.grad, a_backward_ygrad, decimal=2)
print("Test Case 12 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 12 for the function Matmul and add Grad FAILED")
try:
b_tensor.backward()
np.testing.assert_array_almost_equal(i_tensor.grad, np.ones_like(i_tensor), decimal=2)
np.testing.assert_array_almost_equal(j_tensor.grad, np.ones_like(j_tensor)*2, decimal=2)
print("Test Case 13 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 13 for the function Matmul and add Grad FAILED")
x_tensor.zero_grad()
y_tensor.zero_grad()
i_tensor.zero_grad()
j_tensor.zero_grad()
a_tensor.zero_grad()
b_tensor.zero_grad()
c_tensor.zero_grad()
try:
c_tensor.backward()
np.testing.assert_array_almost_equal(x_tensor.grad, c_backward_xgrad, decimal=2)
print("Test Case 14 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 14 for the function Matmul and add Grad FAILED")
try:
np.testing.assert_array_almost_equal(y_tensor.grad, c_backward_ygrad, decimal=2)
print("Test Case 15 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 15 for the function Matmul and add Grad FAILED")
try:
np.testing.assert_array_almost_equal(i_tensor.grad, c_backward_igrad, decimal=2)
print("Test Case 16 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 16 for the function Matmul and add Grad FAILED")
try:
np.testing.assert_array_almost_equal(j_tensor.grad, c_backward_jgrad, decimal=2)
print("Test Case 17 for the function Matmul and add Grad PASSED")
except Exception as e:
print(e)
print("Test Case 17 for the function Matmul and add Grad FAILED")
x_tensor.zero_grad()
y_tensor.zero_grad()
i_tensor.zero_grad()
j_tensor.zero_grad()
a_tensor.zero_grad()
b_tensor.zero_grad()
c_tensor.zero_grad()
d_tensor.zero_grad()
e_tensor.zero_grad()
f_tensor.zero_grad()
# try:
# f_tensor.backward()
# np.testing.assert_array_almost_equal(x_tensor.grad, xgrad, decimal=2)
# print("Test Case 18 for the function Matmul and add Grad PASSED")
# except Exception as e:
# print(e)
# print("Test Case 18 for the function Matmul and add Grad FAILED")
# try:
# np.testing.assert_array_almost_equal(y_tensor.grad, ygrad, decimal=2)
# print("Test Case 19 for the function Matmul and add Grad PASSED")
# except Exception as e:
# print(e)
# print("Test Case 19 for the function Matmul and add Grad FAILED")
# try:
# np.testing.assert_array_almost_equal(i_tensor.grad, igrad, decimal=2)
# print("Test Case 20 for the function Matmul and add Grad PASSED")
# except Exception as e:
# print(e)
# print("Test Case 20 for the function Matmul and add Grad FAILED")
# try:
# np.testing.assert_array_almost_equal(j_tensor.grad, jgrad, decimal=2)
# print("Test Case 21 for the function Matmul and add Grad PASSED")
# except Exception as e:
# print(e)
# print("Test Case 21 for the function Matmul and add Grad FAILED")
if __name__ == "__main__":
test_case()
| [
"numpy.ones_like",
"numpy.testing.assert_array_almost_equal",
"importlib.import_module",
"argparse.ArgumentParser",
"numpy.array",
"sys.exit"
] | [((80, 105), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (103, 105), False, 'import argparse\n'), ((621, 655), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 0.0]]'], {}), '([[0.0, 0.0], [0.0, 0.0]])\n', (629, 655), True, 'import numpy as np\n'), ((750, 784), 'numpy.array', 'np.array', (['[[1.0, 1.0], [1.0, 1.0]]'], {}), '([[1.0, 1.0], [1.0, 1.0]])\n', (758, 784), True, 'import numpy as np\n'), ((795, 829), 'numpy.array', 'np.array', (['[[2.0, 2.0], [2.0, 2.0]]'], {}), '([[2.0, 2.0], [2.0, 2.0]])\n', (803, 829), True, 'import numpy as np\n'), ((840, 852), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (848, 852), True, 'import numpy as np\n'), ((864, 898), 'numpy.array', 'np.array', (['[[5.0, 6.0], [5.0, 6.0]]'], {}), '([[5.0, 6.0], [5.0, 6.0]])\n', (872, 898), True, 'import numpy as np\n'), ((911, 945), 'numpy.array', 'np.array', (['[[4.0, 4.0], [6.0, 6.0]]'], {}), '([[4.0, 4.0], [6.0, 6.0]])\n', (919, 945), True, 'import numpy as np\n'), ((958, 996), 'numpy.array', 'np.array', (['[[7.7, 10.29], [7.7, 10.29]]'], {}), '([[7.7, 10.29], [7.7, 10.29]])\n', (966, 996), True, 'import numpy as np\n'), ((1009, 1045), 'numpy.array', 'np.array', (['[[8.0, 8.0], [13.0, 13.0]]'], {}), '([[8.0, 8.0], [13.0, 13.0]])\n', (1017, 1045), True, 'import numpy as np\n'), ((2555, 2855), 'numpy.array', 'np.array', (['[[-259.0974, 1300.5018, -1644.5557, -558.8008, -1131.4475], [1383.8822, -\n 224.897, -1311.9617, -272.2783, -500.5319], [360.7891, 177.9477, -\n 728.4351, -139.5216, -434.6699], [429.7754, -537.608, 35.7628, 18.117, \n 166.5874], [-253.5349, 26.9809, -8.5875, -69.4854, -36.228]]'], {}), '([[-259.0974, 1300.5018, -1644.5557, -558.8008, -1131.4475], [\n 1383.8822, -224.897, -1311.9617, -272.2783, -500.5319], [360.7891, \n 177.9477, -728.4351, -139.5216, -434.6699], [429.7754, -537.608, \n 35.7628, 18.117, 166.5874], [-253.5349, 26.9809, -8.5875, -69.4854, -\n 36.228]])\n', (2563, 2855), True, 'import numpy as np\n'), ((2921, 3215), 
'numpy.array', 'np.array', (['[[-240.5982, 1250.8086, 429.9676, 241.3056, 1259.6229], [131.6015, -\n 927.0339, -718.6907, -337.8886, -1314.1227], [-5.1307, 110.0491, \n 542.3163, 180.9906, 631.1237], [255.5849, -847.7955, 912.8635, -\n 179.5526, 701.6783], [-80.7825, -93.6348, -510.9658, -2.0664, -710.0487]]'], {}), '([[-240.5982, 1250.8086, 429.9676, 241.3056, 1259.6229], [131.6015,\n -927.0339, -718.6907, -337.8886, -1314.1227], [-5.1307, 110.0491, \n 542.3163, 180.9906, 631.1237], [255.5849, -847.7955, 912.8635, -\n 179.5526, 701.6783], [-80.7825, -93.6348, -510.9658, -2.0664, -710.0487]])\n', (2929, 3215), True, 'import numpy as np\n'), ((3287, 3590), 'numpy.array', 'np.array', (['[[-445.9666, 650.2254, -533.0087, -543.5874, -947.6356], [-674.1143, \n 497.9639, -117.9936, -520.1741, 306.8812], [-340.825, 173.8932, -\n 570.5732, -440.7168, -787.2073], [253.8853, -175.9523, -463.7092, \n 47.2973, -713.4709], [256.3283, 328.8215, -148.9896, 127.7384, -307.9315]]'], {}), '([[-445.9666, 650.2254, -533.0087, -543.5874, -947.6356], [-\n 674.1143, 497.9639, -117.9936, -520.1741, 306.8812], [-340.825, \n 173.8932, -570.5732, -440.7168, -787.2073], [253.8853, -175.9523, -\n 463.7092, 47.2973, -713.4709], [256.3283, 328.8215, -148.9896, 127.7384,\n -307.9315]])\n', (3295, 3590), True, 'import numpy as np\n'), ((3628, 3940), 'numpy.array', 'np.array', (['[[-891.9332, 1300.4508, -1066.0175, -1087.1748, -1895.2711], [-1348.2286, \n 995.9277, -235.9871, -1040.3481, 613.7623], [-681.6499, 347.7864, -\n 1141.1464, -881.4337, -1574.4147], [507.7706, -351.9045, -927.4185, \n 94.5947, -1426.9418], [512.6566, 657.643, -297.9792, 255.4767, -615.8631]]'], {}), '([[-891.9332, 1300.4508, -1066.0175, -1087.1748, -1895.2711], [-\n 1348.2286, 995.9277, -235.9871, -1040.3481, 613.7623], [-681.6499, \n 347.7864, -1141.1464, -881.4337, -1574.4147], [507.7706, -351.9045, -\n 927.4185, 94.5947, -1426.9418], [512.6566, 657.643, -297.9792, 255.4767,\n -615.8631]])\n', (3636, 3940), True, 'import 
numpy as np\n'), ((4007, 4252), 'numpy.array', 'np.array', (['[[-3.9728, -0.0445, 4.8631, 1.7504, 1.5644], [-3.9728, -0.0445, 4.8631, \n 1.7504, 1.5644], [-3.9728, -0.0445, 4.8631, 1.7504, 1.5644], [-3.9728, \n -0.0445, 4.8631, 1.7504, 1.5644], [-3.9728, -0.0445, 4.8631, 1.7504, \n 1.5644]]'], {}), '([[-3.9728, -0.0445, 4.8631, 1.7504, 1.5644], [-3.9728, -0.0445, \n 4.8631, 1.7504, 1.5644], [-3.9728, -0.0445, 4.8631, 1.7504, 1.5644], [-\n 3.9728, -0.0445, 4.8631, 1.7504, 1.5644], [-3.9728, -0.0445, 4.8631, \n 1.7504, 1.5644]])\n', (4015, 4252), True, 'import numpy as np\n'), ((4311, 4554), 'numpy.array', 'np.array', (['[[0.3757, 0.3757, 0.3757, 0.3757, 0.3757], [1.9515, 1.9515, 1.9515, 1.9515,\n 1.9515], [-2.6022, -2.6022, -2.6022, -2.6022, -2.6022], [0.1627, 0.1627,\n 0.1627, 0.1627, 0.1627], [-0.9983, -0.9983, -0.9983, -0.9983, -0.9983]]'], {}), '([[0.3757, 0.3757, 0.3757, 0.3757, 0.3757], [1.9515, 1.9515, 1.9515,\n 1.9515, 1.9515], [-2.6022, -2.6022, -2.6022, -2.6022, -2.6022], [0.1627,\n 0.1627, 0.1627, 0.1627, 0.1627], [-0.9983, -0.9983, -0.9983, -0.9983, -\n 0.9983]])\n', (4319, 4554), True, 'import numpy as np\n'), ((4629, 4901), 'numpy.array', 'np.array', (['[[-3.9954, 22.5097, -25.5979, -8.5898, -17.6537], [18.9924, 22.7672, -\n 53.7373, -18.7182, -26.7058], [9.2571, 22.6581, -41.8202, -14.4288, -\n 22.8722], [-1.4504, 22.5382, -28.7132, -9.7111, -18.6559], [-11.0475, \n 22.4307, -16.9654, -5.4827, -14.8767]]'], {}), '([[-3.9954, 22.5097, -25.5979, -8.5898, -17.6537], [18.9924, \n 22.7672, -53.7373, -18.7182, -26.7058], [9.2571, 22.6581, -41.8202, -\n 14.4288, -22.8722], [-1.4504, 22.5382, -28.7132, -9.7111, -18.6559], [-\n 11.0475, 22.4307, -16.9654, -5.4827, -14.8767]])\n', (4637, 4901), True, 'import numpy as np\n'), ((4970, 5222), 'numpy.array', 'np.array', (['[[12.0096, 15.1671, 10.0269, 13.229, 10.6223], [-15.9925, 0.4087, -26.2913,\n -9.6583, -23.1982], [10.1968, -11.6732, 23.9296, 1.7505, 19.8051], [-\n 0.6124, 0.755, -1.471, -0.0843, -1.2131], 
[-1.4597, -9.8498, 3.8088, -\n 4.7, 2.2264]]'], {}), '([[12.0096, 15.1671, 10.0269, 13.229, 10.6223], [-15.9925, 0.4087, \n -26.2913, -9.6583, -23.1982], [10.1968, -11.6732, 23.9296, 1.7505, \n 19.8051], [-0.6124, 0.755, -1.471, -0.0843, -1.2131], [-1.4597, -9.8498,\n 3.8088, -4.7, 2.2264]])\n', (4978, 5222), True, 'import numpy as np\n'), ((5311, 5575), 'numpy.array', 'np.array', (['[[-23.1594, -0.7743, -11.9566, -20.7748, -11.6678], [-7.8664, 14.5186, \n 3.3364, -5.4818, 3.6251], [-19.4113, 2.9737, -8.2085, -17.0267, -7.9198\n ], [-19.7937, 2.5914, -8.5909, -17.4091, -8.3021], [-18.2555, 4.1296, -\n 7.0527, -15.8708, -6.7639]]'], {}), '([[-23.1594, -0.7743, -11.9566, -20.7748, -11.6678], [-7.8664, \n 14.5186, 3.3364, -5.4818, 3.6251], [-19.4113, 2.9737, -8.2085, -17.0267,\n -7.9198], [-19.7937, 2.5914, -8.5909, -17.4091, -8.3021], [-18.2555, \n 4.1296, -7.0527, -15.8708, -6.7639]])\n', (5319, 5575), True, 'import numpy as np\n'), ((5652, 5925), 'numpy.array', 'np.array', (['[[-46.3188, -1.5486, -23.9132, -41.5496, -23.3357], [-15.7329, 29.0373, \n 6.6727, -10.9637, 7.2502], [-38.8227, 5.9475, -16.4171, -34.0535, -\n 15.8396], [-39.5874, 5.1828, -17.1818, -34.8181, -16.6043], [-36.5109, \n 8.2593, -14.1053, -31.7417, -13.5278]]'], {}), '([[-46.3188, -1.5486, -23.9132, -41.5496, -23.3357], [-15.7329, \n 29.0373, 6.6727, -10.9637, 7.2502], [-38.8227, 5.9475, -16.4171, -\n 34.0535, -15.8396], [-39.5874, 5.1828, -17.1818, -34.8181, -16.6043], [\n -36.5109, 8.2593, -14.1053, -31.7417, -13.5278]])\n', (5660, 5925), True, 'import numpy as np\n'), ((227, 259), 'importlib.import_module', 'importlib.import_module', (['subname'], {}), '(subname)\n', (250, 259), False, 'import importlib\n'), ((463, 497), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0]])\n', (471, 497), True, 'import numpy as np\n'), ((511, 545), 'numpy.array', 'np.array', (['[[3.0, 2.0], [1.0, 5.0]]'], {}), '([[3.0, 2.0], [1.0, 5.0]])\n', (519, 545), True, 'import 
numpy as np\n'), ((580, 614), 'numpy.array', 'np.array', (['[[3.2, 4.5], [6.1, 4.2]]'], {}), '([[3.2, 4.5], [6.1, 4.2]])\n', (588, 614), True, 'import numpy as np\n'), ((1067, 1314), 'numpy.array', 'np.array', (['[[0.5606, -0.7506, -0.9142, -1.1527, 0.0224], [-2.4439, 1.5957, -0.2165, -\n 0.213, 1.0369], [0.3105, -0.7681, -0.7885, -0.4243, -0.306], [0.6828, -\n 0.3238, -1.9912, 1.5819, -1.501], [0.2657, 1.1983, 0.3082, -0.6292, -\n 1.2506]]'], {}), '([[0.5606, -0.7506, -0.9142, -1.1527, 0.0224], [-2.4439, 1.5957, -\n 0.2165, -0.213, 1.0369], [0.3105, -0.7681, -0.7885, -0.4243, -0.306], [\n 0.6828, -0.3238, -1.9912, 1.5819, -1.501], [0.2657, 1.1983, 0.3082, -\n 0.6292, -1.2506]])\n', (1075, 1314), True, 'import numpy as np\n'), ((1369, 1614), 'numpy.array', 'np.array', (['[[-1.0242, -2.2796, -1.0042, -0.6544, -0.0104], [-2.4508, 1.0339, 0.3305, \n 1.035, -0.9931], [1.1692, -0.8511, 0.9211, 2.3074, 0.3165], [0.3364, -\n 0.325, 1.0391, 0.0773, -0.3774], [0.7861, -1.4565, 0.2544, 1.1455, -0.1651]\n ]'], {}), '([[-1.0242, -2.2796, -1.0042, -0.6544, -0.0104], [-2.4508, 1.0339, \n 0.3305, 1.035, -0.9931], [1.1692, -0.8511, 0.9211, 2.3074, 0.3165], [\n 0.3364, -0.325, 1.0391, 0.0773, -0.3774], [0.7861, -1.4565, 0.2544, \n 1.1455, -0.1651]])\n', (1377, 1614), True, 'import numpy as np\n'), ((1671, 1912), 'numpy.array', 'np.array', (['[[0.7377, -0.0076, -0.6924, 0.7849, -0.3795], [1.286, 0.4247, 0.5646, -\n 0.1582, 0.5034], [1.1832, -0.7087, -2.2267, 0.055, 0.7731], [1.6035, \n 1.0802, 1.1341, 0.3498, 1.5319], [-1.3249, 0.6984, 0.3353, 0.5496, -0.1019]\n ]'], {}), '([[0.7377, -0.0076, -0.6924, 0.7849, -0.3795], [1.286, 0.4247, \n 0.5646, -0.1582, 0.5034], [1.1832, -0.7087, -2.2267, 0.055, 0.7731], [\n 1.6035, 1.0802, 1.1341, 0.3498, 1.5319], [-1.3249, 0.6984, 0.3353, \n 0.5496, -0.1019]])\n', (1679, 1912), True, 'import numpy as np\n'), ((1973, 2221), 'numpy.array', 'np.array', (['[[-0.7789, -1.9501, -0.0298, 0.2694, 0.9825], [0.9122, 0.0995, 0.165, \n 0.1503, 0.2796], 
[-1.1105, -0.1824, 0.4612, -1.6591, -0.9712], [-0.1923,\n -1.7089, -1.2546, 0.5906, 0.053], [-0.7918, -0.1133, -0.7857, -0.6806, \n -0.8384]]'], {}), '([[-0.7789, -1.9501, -0.0298, 0.2694, 0.9825], [0.9122, 0.0995, \n 0.165, 0.1503, 0.2796], [-1.1105, -0.1824, 0.4612, -1.6591, -0.9712], [\n -0.1923, -1.7089, -1.2546, 0.5906, 0.053], [-0.7918, -0.1133, -0.7857, \n -0.6806, -0.8384]])\n', (1981, 2221), True, 'import numpy as np\n'), ((408, 418), 'sys.exit', 'sys.exit', ([], {}), '()\n', (416, 418), False, 'import sys\n'), ((6027, 6089), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['a.grad', 'sgrad'], {'decimal': '(2)'}), '(a.grad, sgrad, decimal=2)\n', (6063, 6089), True, 'import numpy as np\n'), ((6283, 6341), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['b.grad', 'z'], {'decimal': '(2)'}), '(b.grad, z, decimal=2)\n', (6319, 6341), True, 'import numpy as np\n'), ((6601, 6664), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['a.grad', 'sgrad2'], {'decimal': '(2)'}), '(a.grad, sgrad2, decimal=2)\n', (6637, 6664), True, 'import numpy as np\n'), ((6925, 6989), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['a.grad', 'mulgrad'], {'decimal': '(2)'}), '(a.grad, mulgrad, decimal=2)\n', (6961, 6989), True, 'import numpy as np\n'), ((7189, 7247), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['b.grad', 'z'], {'decimal': '(2)'}), '(b.grad, z, decimal=2)\n', (7225, 7247), True, 'import numpy as np\n'), ((7542, 7607), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['b.grad', 'mulgrad2'], {'decimal': '(2)'}), '(b.grad, mulgrad2, decimal=2)\n', (7578, 7607), True, 'import numpy as np\n'), ((7913, 7978), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['a.grad', 'mulgrad3'], {'decimal': '(2)'}), '(a.grad, mulgrad3, 
decimal=2)\n', (7949, 7978), True, 'import numpy as np\n'), ((7988, 8053), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['b.grad', 'mulgrad3'], {'decimal': '(2)'}), '(b.grad, mulgrad3, decimal=2)\n', (8024, 8053), True, 'import numpy as np\n'), ((8269, 8334), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['c.grad', 'mulgrad4'], {'decimal': '(2)'}), '(c.grad, mulgrad4, decimal=2)\n', (8305, 8334), True, 'import numpy as np\n'), ((8652, 8718), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['a.grad', '(sgrad * 4)'], {'decimal': '(2)'}), '(a.grad, sgrad * 4, decimal=2)\n', (8688, 8718), True, 'import numpy as np\n'), ((8932, 8998), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['c.grad', '(sgrad * 2)'], {'decimal': '(2)'}), '(c.grad, sgrad * 2, decimal=2)\n', (8968, 8998), True, 'import numpy as np\n'), ((9349, 9434), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x_tensor.grad', 'a_backward_xgrad'], {'decimal': '(2)'}), '(x_tensor.grad, a_backward_xgrad, decimal=2\n )\n', (9385, 9434), True, 'import numpy as np\n'), ((9647, 9732), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y_tensor.grad', 'a_backward_ygrad'], {'decimal': '(2)'}), '(y_tensor.grad, a_backward_ygrad, decimal=2\n )\n', (9683, 9732), True, 'import numpy as np\n'), ((10589, 10674), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x_tensor.grad', 'c_backward_xgrad'], {'decimal': '(2)'}), '(x_tensor.grad, c_backward_xgrad, decimal=2\n )\n', (10625, 10674), True, 'import numpy as np\n'), ((10887, 10972), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y_tensor.grad', 'c_backward_ygrad'], {'decimal': '(2)'}), '(y_tensor.grad, c_backward_ygrad, decimal=2\n )\n', (10923, 10972), True, 'import numpy as np\n'), 
((11185, 11270), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['i_tensor.grad', 'c_backward_igrad'], {'decimal': '(2)'}), '(i_tensor.grad, c_backward_igrad, decimal=2\n )\n', (11221, 11270), True, 'import numpy as np\n'), ((11483, 11568), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['j_tensor.grad', 'c_backward_jgrad'], {'decimal': '(2)'}), '(j_tensor.grad, c_backward_jgrad, decimal=2\n )\n', (11519, 11568), True, 'import numpy as np\n'), ((10026, 10048), 'numpy.ones_like', 'np.ones_like', (['i_tensor'], {}), '(i_tensor)\n', (10038, 10048), True, 'import numpy as np\n'), ((10122, 10144), 'numpy.ones_like', 'np.ones_like', (['j_tensor'], {}), '(j_tensor)\n', (10134, 10144), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 12 18:42:40 2017
@author: gianni
"""
from pythonradex import helpers
import numpy as np
from scipy import constants
def test_relative_difference_scalar():
for x in (0,1):
assert helpers.relative_difference(x,x) == 0
def test_relative_difference_arrays():
x = np.array((0,4,2,10,0,2, -1,-1))
y = np.array((0,0,4,10,2,2.1,1,-1))
relative_difference = helpers.relative_difference(x,y)
expected_relative_difference = np.array((0,1,1,0,1,0.05,2,0))
assert np.allclose(relative_difference,expected_relative_difference,atol=0,
rtol=1e-10)
def test_zero_background():
assert helpers.zero_background(10) == 0
assert np.all(helpers.zero_background(np.random.rand(10)) == 0)
def test_CMB_background():
test_nu = np.logspace(np.log10(1),np.log10(1000),20)*constants.giga
test_z = (0,2)
for z in test_z:
CMB = helpers.generate_CMB_background(z=z)
assert np.all(CMB(test_nu) == helpers.B_nu(nu=test_nu,T=2.73*(1+z))) | [
"pythonradex.helpers.relative_difference",
"numpy.allclose",
"numpy.log10",
"numpy.random.rand",
"pythonradex.helpers.B_nu",
"numpy.array",
"pythonradex.helpers.generate_CMB_background",
"pythonradex.helpers.zero_background"
] | [((326, 363), 'numpy.array', 'np.array', (['(0, 4, 2, 10, 0, 2, -1, -1)'], {}), '((0, 4, 2, 10, 0, 2, -1, -1))\n', (334, 363), True, 'import numpy as np\n'), ((366, 404), 'numpy.array', 'np.array', (['(0, 0, 4, 10, 2, 2.1, 1, -1)'], {}), '((0, 0, 4, 10, 2, 2.1, 1, -1))\n', (374, 404), True, 'import numpy as np\n'), ((424, 457), 'pythonradex.helpers.relative_difference', 'helpers.relative_difference', (['x', 'y'], {}), '(x, y)\n', (451, 457), False, 'from pythonradex import helpers\n'), ((492, 529), 'numpy.array', 'np.array', (['(0, 1, 1, 0, 1, 0.05, 2, 0)'], {}), '((0, 1, 1, 0, 1, 0.05, 2, 0))\n', (500, 529), True, 'import numpy as np\n'), ((534, 621), 'numpy.allclose', 'np.allclose', (['relative_difference', 'expected_relative_difference'], {'atol': '(0)', 'rtol': '(1e-10)'}), '(relative_difference, expected_relative_difference, atol=0, rtol\n =1e-10)\n', (545, 621), True, 'import numpy as np\n'), ((678, 705), 'pythonradex.helpers.zero_background', 'helpers.zero_background', (['(10)'], {}), '(10)\n', (701, 705), False, 'from pythonradex import helpers\n'), ((933, 969), 'pythonradex.helpers.generate_CMB_background', 'helpers.generate_CMB_background', ([], {'z': 'z'}), '(z=z)\n', (964, 969), False, 'from pythonradex import helpers\n'), ((240, 273), 'pythonradex.helpers.relative_difference', 'helpers.relative_difference', (['x', 'x'], {}), '(x, x)\n', (267, 273), False, 'from pythonradex import helpers\n'), ((833, 844), 'numpy.log10', 'np.log10', (['(1)'], {}), '(1)\n', (841, 844), True, 'import numpy as np\n'), ((845, 859), 'numpy.log10', 'np.log10', (['(1000)'], {}), '(1000)\n', (853, 859), True, 'import numpy as np\n'), ((753, 771), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (767, 771), True, 'import numpy as np\n'), ((1008, 1050), 'pythonradex.helpers.B_nu', 'helpers.B_nu', ([], {'nu': 'test_nu', 'T': '(2.73 * (1 + z))'}), '(nu=test_nu, T=2.73 * (1 + z))\n', (1020, 1050), False, 'from pythonradex import helpers\n')] |
import os
import time
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import ray
from ray.util.sgd import TorchTrainer
from ray.util.sgd.utils import AverageMeterCollection
from ray.util.sgd.torch import TrainingOperator
import dgl
from dgl.data import RedditDataset
from dgl.nn.pytorch import GATConv
from torch.utils.data import DataLoader
from dgl.dataloading import NodeCollator
print("Current Path: " + os.getcwd())
torch.manual_seed(42)
# define the model class
class GAT(nn.Module):
def __init__(self, in_feats, n_hidden, n_classes, n_layers, n_heads,
activation, feat_drop, attn_drop, negative_slope, residual):
super().__init__()
self.n_layers = n_layers
self.activation = activation
self.n_hidden = n_hidden
self.n_heads = n_heads
self.n_classes = n_classes
self.convs = nn.ModuleList()
# input layer
self.convs.append(
GATConv((in_feats, in_feats), n_hidden, n_heads, feat_drop,
attn_drop, negative_slope, residual, self.activation))
# hidden layer
for _ in range(1, n_layers - 1):
# due to multi-head, the in_dim = num_hidden * num_heads
self.convs.append(
GATConv((n_hidden * n_heads, n_hidden * n_heads), n_hidden,
n_heads, feat_drop, attn_drop, negative_slope,
residual, self.activation))
# output layer
self.convs.append(
GATConv((n_hidden * n_heads, n_hidden * n_heads), n_classes,
n_heads, feat_drop, attn_drop, negative_slope, residual,
None))
def forward(self, blocks, x):
h = x
for i, (layer, block) in enumerate(zip(self.convs, blocks)):
h_dst = h[:block.number_of_dst_nodes()]
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
h = F.dropout(h, p=0.5, training=self.training)
else:
h = layer(block, (h, h_dst))
h = h.mean(1)
return h.log_softmax(dim=-1)
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred)
class CustomTrainingOperator(TrainingOperator):
def setup(self, config):
# load reddit data
data = RedditDataset()
g = data[0]
g.ndata["features"] = g.ndata["feat"]
g.ndata["labels"] = g.ndata["label"]
self.in_feats = g.ndata["features"].shape[1]
self.n_classes = data.num_classes
# add self loop,
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
# Create csr/coo/csc formats before launching training processes
g.create_formats_()
self.g = g
train_nid = torch.nonzero(g.ndata["train_mask"], as_tuple=True)[0]
val_nid = torch.nonzero(g.ndata["val_mask"], as_tuple=True)[0]
test_nid = torch.nonzero(g.ndata["test_mask"], as_tuple=True)[0]
self.train_nid = train_nid
self.val_nid = val_nid
self.test_nid = test_nid
# Create sampler
sampler = dgl.dataloading.MultiLayerNeighborSampler(
[int(fanout) for fanout in config["fan_out"].split(",")])
# Create PyTorch DataLoader for constructing blocks
collator = NodeCollator(g, train_nid, sampler)
train_dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=config["batch_size"],
shuffle=False,
drop_last=False,
num_workers=config["sampling_num_workers"])
# Define model and optimizer, residual is set to True
model = GAT(self.in_feats, config["n_hidden"], self.n_classes,
config["n_layers"], config["n_heads"], F.elu,
config["feat_drop"], config["attn_drop"],
config["negative_slope"], True)
self.convs = model.convs
# Define optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
# Register model, optimizer, and loss.
self.model, self.optimizer = self.register(
models=model, optimizers=optimizer)
# Register data loaders.
self.register_data(train_loader=train_dataloader)
def train_epoch(self, iterator, info):
meter_collection = AverageMeterCollection()
iter_tput = []
model = self.model
# for batch_idx,batch in enumerate(iterator):
for step, (input_nodes, seeds, blocks) in enumerate(iterator):
tic_step = time.time()
# do some train
optimizer = self.optimizer
device = 0
if self.use_gpu:
blocks = [block.int().to(device) for block in blocks]
batch_inputs = blocks[0].srcdata["features"]
batch_labels = blocks[-1].dstdata["labels"]
batch_pred = model(blocks, batch_inputs)
loss = F.nll_loss(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_tput.append(len(seeds) / (time.time() - tic_step))
if step % 20 == 0:
acc = compute_acc(batch_pred, batch_labels)
gpu_mem_alloc = torch.cuda.max_memory_allocated(
) / 1000000 if torch.cuda.is_available() else 0
print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | "
"Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU "
"{:.1f} MB".format(info["epoch_idx"] + 1, step,
loss.item(), acc.item(),
np.mean(iter_tput[3:]),
gpu_mem_alloc))
status = meter_collection.summary()
return status
def validate(self, validation_loader, info):
meter_collection = AverageMeterCollection()
model = self.model
n_layers = self.config["n_layers"]
n_hidden = self.config["n_hidden"]
n_heads = self.config["n_heads"]
batch_size = self.config["batch_size"]
num_workers = self.config["sampling_num_workers"]
g = self.g
train_nid = self.train_nid
val_nid = self.val_nid
test_nid = self.test_nid
device = 0
model.eval()
with torch.no_grad():
x = g.ndata["features"]
for i, layer in enumerate(self.convs):
if i < n_layers - 1:
y = torch.zeros(
g.number_of_nodes(), n_hidden * n_heads
if i != len(self.convs) - 1 else self.n_classes)
else:
y = torch.zeros(
g.number_of_nodes(), n_hidden
if i != len(self.convs) - 1 else self.n_classes)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
collator = NodeCollator(g, torch.arange(g.number_of_nodes()),
sampler)
dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers)
for input_nodes, output_nodes, blocks in dataloader:
block = blocks[0]
# print("block:",block)
block = block.int().to(device)
h = x[input_nodes].to(device)
h_dst = x[output_nodes].to(device)
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
else:
h = layer(block, (h, h_dst)).mean(1)
h = h.log_softmax(dim=-1)
y[output_nodes] = h.cpu()
x = y
pred = y
labels = g.ndata["labels"]
_, val_acc, test_acc = compute_acc(pred[train_nid], labels[
train_nid]), compute_acc(pred[val_nid], labels[val_nid]), \
compute_acc(pred[test_nid], labels[test_nid])
metrics = {
"num_samples": pred.size(0),
"val_acc": val_acc.item(),
"test_acc": test_acc.item()
}
meter_collection.update(metrics, n=metrics.pop("num_samples", 1))
status = meter_collection.summary()
return status
def run(num_workers, use_gpu, num_epochs, lr, batch_size, n_hidden, n_layers,
n_heads, fan_out, feat_drop, attn_drop, negative_slope,
sampling_num_workers):
trainer = TorchTrainer(
training_operator_cls=CustomTrainingOperator,
num_workers=num_workers,
use_gpu=use_gpu,
backend="nccl",
config={
"lr": lr,
"batch_size": batch_size,
"n_hidden": n_hidden,
"n_layers": n_layers,
"n_heads": n_heads,
"fan_out": fan_out,
"feat_drop": feat_drop,
"attn_drop": attn_drop,
"negative_slope": negative_slope,
"sampling_num_workers": sampling_num_workers
})
for i in range(num_epochs):
trainer.train()
validation_results = trainer.validate()
trainer.shutdown()
print(validation_results)
print("success!")
# Use ray.init(address="auto") if running on a Ray cluster.
if __name__ == "__main__":
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument("--num-workers", type=int, default=2)
argparser.add_argument("--use-gpu", type=bool, default=True)
argparser.add_argument("--num-epochs", type=int, default=2)
argparser.add_argument("--lr", type=float, default=0.001)
argparser.add_argument("--batch-size", type=int, default=1024)
argparser.add_argument("--n-hidden", type=int, default=128)
argparser.add_argument("--n-layers", type=int, default=2)
argparser.add_argument("--n-heads", type=int, default=4)
argparser.add_argument("--fan-out", type=str, default="10,25")
argparser.add_argument("--feat-drop", type=float, default=0.)
argparser.add_argument("--attn-drop", type=float, default=0.)
argparser.add_argument("--negative-slope", type=float, default=0.2)
argparser.add_argument(
"--sampling-num-workers",
type=int,
default=0,
help="Number of sampling processes. Use 0 for no extra process.")
argparser.add_argument(
"--address",
required=False,
type=str,
help="The address to use for ray")
args = argparser.parse_args()
ray.init(address=args.address)
run(num_workers=args.num_workers,
use_gpu=args.use_gpu,
num_epochs=args.num_epochs,
lr=args.lr,
batch_size=args.batch_size,
n_hidden=args.n_hidden,
n_layers=args.n_layers,
n_heads=args.n_heads,
fan_out=args.fan_out,
feat_drop=args.feat_drop,
attn_drop=args.attn_drop,
negative_slope=args.negative_slope,
sampling_num_workers=args.sampling_num_workers)
| [
"dgl.add_self_loop",
"torch.cuda.is_available",
"dgl.data.RedditDataset",
"ray.init",
"numpy.mean",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"torch.nn.ModuleList",
"dgl.dataloading.MultiLayerFullNeighborSampler",
"torch.argmax",
"dgl.dataloading.NodeCollator",
"dgl.remove_sel... | [((479, 500), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (496, 500), False, 'import torch\n'), ((8903, 9297), 'ray.util.sgd.TorchTrainer', 'TorchTrainer', ([], {'training_operator_cls': 'CustomTrainingOperator', 'num_workers': 'num_workers', 'use_gpu': 'use_gpu', 'backend': '"""nccl"""', 'config': "{'lr': lr, 'batch_size': batch_size, 'n_hidden': n_hidden, 'n_layers':\n n_layers, 'n_heads': n_heads, 'fan_out': fan_out, 'feat_drop':\n feat_drop, 'attn_drop': attn_drop, 'negative_slope': negative_slope,\n 'sampling_num_workers': sampling_num_workers}"}), "(training_operator_cls=CustomTrainingOperator, num_workers=\n num_workers, use_gpu=use_gpu, backend='nccl', config={'lr': lr,\n 'batch_size': batch_size, 'n_hidden': n_hidden, 'n_layers': n_layers,\n 'n_heads': n_heads, 'fan_out': fan_out, 'feat_drop': feat_drop,\n 'attn_drop': attn_drop, 'negative_slope': negative_slope,\n 'sampling_num_workers': sampling_num_workers})\n", (8915, 9297), False, 'from ray.util.sgd import TorchTrainer\n'), ((9729, 9774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""multi-gpu training"""'], {}), "('multi-gpu training')\n", (9752, 9774), False, 'import argparse\n'), ((10902, 10932), 'ray.init', 'ray.init', ([], {'address': 'args.address'}), '(address=args.address)\n', (10910, 10932), False, 'import ray\n'), ((466, 477), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (475, 477), False, 'import os\n'), ((919, 934), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (932, 934), True, 'import torch.nn as nn\n'), ((2477, 2492), 'dgl.data.RedditDataset', 'RedditDataset', ([], {}), '()\n', (2490, 2492), False, 'from dgl.data import RedditDataset\n'), ((2736, 2759), 'dgl.remove_self_loop', 'dgl.remove_self_loop', (['g'], {}), '(g)\n', (2756, 2759), False, 'import dgl\n'), ((2772, 2792), 'dgl.add_self_loop', 'dgl.add_self_loop', (['g'], {}), '(g)\n', (2789, 2792), False, 'import dgl\n'), ((3467, 3502), 
'dgl.dataloading.NodeCollator', 'NodeCollator', (['g', 'train_nid', 'sampler'], {}), '(g, train_nid, sampler)\n', (3479, 3502), False, 'from dgl.dataloading import NodeCollator\n'), ((3530, 3706), 'torch.utils.data.DataLoader', 'DataLoader', (['collator.dataset'], {'collate_fn': 'collator.collate', 'batch_size': "config['batch_size']", 'shuffle': '(False)', 'drop_last': '(False)', 'num_workers': "config['sampling_num_workers']"}), "(collator.dataset, collate_fn=collator.collate, batch_size=config\n ['batch_size'], shuffle=False, drop_last=False, num_workers=config[\n 'sampling_num_workers'])\n", (3540, 3706), False, 'from torch.utils.data import DataLoader\n'), ((4527, 4551), 'ray.util.sgd.utils.AverageMeterCollection', 'AverageMeterCollection', ([], {}), '()\n', (4549, 4551), False, 'from ray.util.sgd.utils import AverageMeterCollection\n'), ((6098, 6122), 'ray.util.sgd.utils.AverageMeterCollection', 'AverageMeterCollection', ([], {}), '()\n', (6120, 6122), False, 'from ray.util.sgd.utils import AverageMeterCollection\n'), ((997, 1114), 'dgl.nn.pytorch.GATConv', 'GATConv', (['(in_feats, in_feats)', 'n_hidden', 'n_heads', 'feat_drop', 'attn_drop', 'negative_slope', 'residual', 'self.activation'], {}), '((in_feats, in_feats), n_hidden, n_heads, feat_drop, attn_drop,\n negative_slope, residual, self.activation)\n', (1004, 1114), False, 'from dgl.nn.pytorch import GATConv\n'), ((1557, 1684), 'dgl.nn.pytorch.GATConv', 'GATConv', (['(n_hidden * n_heads, n_hidden * n_heads)', 'n_classes', 'n_heads', 'feat_drop', 'attn_drop', 'negative_slope', 'residual', 'None'], {}), '((n_hidden * n_heads, n_hidden * n_heads), n_classes, n_heads,\n feat_drop, attn_drop, negative_slope, residual, None)\n', (1564, 1684), False, 'from dgl.nn.pytorch import GATConv\n'), ((2933, 2984), 'torch.nonzero', 'torch.nonzero', (["g.ndata['train_mask']"], {'as_tuple': '(True)'}), "(g.ndata['train_mask'], as_tuple=True)\n", (2946, 2984), False, 'import torch\n'), ((3006, 3055), 'torch.nonzero', 
'torch.nonzero', (["g.ndata['val_mask']"], {'as_tuple': '(True)'}), "(g.ndata['val_mask'], as_tuple=True)\n", (3019, 3055), False, 'import torch\n'), ((3078, 3128), 'torch.nonzero', 'torch.nonzero', (["g.ndata['test_mask']"], {'as_tuple': '(True)'}), "(g.ndata['test_mask'], as_tuple=True)\n", (3091, 3128), False, 'import torch\n'), ((4750, 4761), 'time.time', 'time.time', ([], {}), '()\n', (4759, 4761), False, 'import time\n'), ((5136, 5172), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['batch_pred', 'batch_labels'], {}), '(batch_pred, batch_labels)\n', (5146, 5172), True, 'import torch.nn.functional as F\n'), ((6553, 6568), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6566, 6568), False, 'import torch\n'), ((1312, 1449), 'dgl.nn.pytorch.GATConv', 'GATConv', (['(n_hidden * n_heads, n_hidden * n_heads)', 'n_hidden', 'n_heads', 'feat_drop', 'attn_drop', 'negative_slope', 'residual', 'self.activation'], {}), '((n_hidden * n_heads, n_hidden * n_heads), n_hidden, n_heads,\n feat_drop, attn_drop, negative_slope, residual, self.activation)\n', (1319, 1449), False, 'from dgl.nn.pytorch import GATConv\n'), ((2009, 2052), 'torch.nn.functional.dropout', 'F.dropout', (['h'], {'p': '(0.5)', 'training': 'self.training'}), '(h, p=0.5, training=self.training)\n', (2018, 2052), True, 'import torch.nn.functional as F\n'), ((7080, 7128), 'dgl.dataloading.MultiLayerFullNeighborSampler', 'dgl.dataloading.MultiLayerFullNeighborSampler', (['(1)'], {}), '(1)\n', (7125, 7128), False, 'import dgl\n'), ((7285, 7427), 'torch.utils.data.DataLoader', 'DataLoader', (['collator.dataset'], {'collate_fn': 'collator.collate', 'batch_size': 'batch_size', 'shuffle': '(False)', 'drop_last': '(False)', 'num_workers': 'num_workers'}), '(collator.dataset, collate_fn=collator.collate, batch_size=\n batch_size, shuffle=False, drop_last=False, num_workers=num_workers)\n', (7295, 7427), False, 'from torch.utils.data import DataLoader\n'), ((5519, 5544), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (5542, 5544), False, 'import torch\n'), ((5307, 5318), 'time.time', 'time.time', ([], {}), '()\n', (5316, 5318), False, 'import time\n'), ((5455, 5488), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (5486, 5488), False, 'import torch\n'), ((5874, 5896), 'numpy.mean', 'np.mean', (['iter_tput[3:]'], {}), '(iter_tput[3:])\n', (5881, 5896), True, 'import numpy as np\n'), ((2293, 2318), 'torch.argmax', 'torch.argmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (2305, 2318), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 15:06:02 2019
@author: Titus
"""
import pandas as pd
import numpy as np
pi=np.pi
df=pd.read_csv(r"C:\Users\tq220\Documents\Tits things\2018-2019\Data Science\Final-project-data\Data\well_survey_from_earth_model.csv")
dips=pi/2-df[df['Well ID']=='58-32']['Dip'].values/180*pi
azs=df[df['Well ID']=='58-32']['Azimuth'].values/180*pi
depths=df[df['Well ID']=='58-32']['Depth (m)'].values
max_depth=2296.9
z0=1681.616587
x=[335380.766]
y=[4263040.829]
z=np.append(depths,max_depth)
xys=[(z[i+1]-z[i])*np.tan(dips[i]) for i in range(len(z)-1)]
for ind,xy in enumerate(xys):
x.append(x[-1]+xy*np.sin(azs[ind]))
y.append(y[-1]+xy*np.cos(azs[ind]))
z=z0-z
pd.DataFrame({'x':x,'y':y,'z':z}).to_csv(r"C:\Users\tq220\Documents\Tits things\2018-2019\Data Science\Final-project-data\Data\58-32_pts.csv") | [
"numpy.tan",
"pandas.DataFrame",
"pandas.read_csv",
"numpy.append",
"numpy.cos",
"numpy.sin"
] | [((136, 286), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\tq220\\\\Documents\\\\Tits things\\\\2018-2019\\\\Data Science\\\\Final-project-data\\\\Data\\\\well_survey_from_earth_model.csv"""'], {}), "(\n 'C:\\\\Users\\\\tq220\\\\Documents\\\\Tits things\\\\2018-2019\\\\Data Science\\\\Final-project-data\\\\Data\\\\well_survey_from_earth_model.csv'\n )\n", (147, 286), True, 'import pandas as pd\n'), ((505, 533), 'numpy.append', 'np.append', (['depths', 'max_depth'], {}), '(depths, max_depth)\n', (514, 533), True, 'import numpy as np\n'), ((554, 569), 'numpy.tan', 'np.tan', (['dips[i]'], {}), '(dips[i])\n', (560, 569), True, 'import numpy as np\n'), ((710, 748), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'z': z}"], {}), "({'x': x, 'y': y, 'z': z})\n", (722, 748), True, 'import pandas as pd\n'), ((646, 662), 'numpy.sin', 'np.sin', (['azs[ind]'], {}), '(azs[ind])\n', (652, 662), True, 'import numpy as np\n'), ((684, 700), 'numpy.cos', 'np.cos', (['azs[ind]'], {}), '(azs[ind])\n', (690, 700), True, 'import numpy as np\n')] |
"""
Grab bag of tests implemented for the various CASA routines. This
isn't a systematic unit test, but if you write something useful put it
here. This collection for be for tests that can be run with only the
pipeline itself in place. There are other test files in the scripts/
directory.
"""
#region Imports and definitions
import os
import glob
import logging
import numpy as np
from scipy.special import erfc
import pyfits # CASA has pyfits, not astropy
# Analysis utilities
import analysisUtils as au
# Pipeline versionining
from .pipelineVersion import version as pipeVer
# CASA stuff
from . import casaStuff
# Pipeline CASA routines
from . import casaCubeRoutines as ccr
from . import casaMaskingRoutines as cma
from . import casaMosaicRoutines as cmr
from . import casaFeatherRoutines as cfr
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#endregion
def test_estimate_noise(
):
"""
Test the noise estimation routine.
"""
tol = 1e-2
vec = np.random.randn(1e5)
mad_est = cma.estimate_noise(vec, method='mad')
std_est = cma.estimate_noise(vec, method='std')
chauv_est = cma.estimate_noise(vec, method='chauv')
chauvmad_est = cma.estimate_noise(vec, method='chauvmad')
logger.info("mad estimate accuracy: "+str(np.abs(mad_est-1.0)))
if np.abs(mad_est - 1.0) > tol:
logger.error("mad estimate exceeds tolerance.")
logger.info("std estimate accuracy: "+str(np.abs(std_est-1.0)))
if np.abs(std_est - 1.0) > tol:
logger.error("std estimate exceeds tolerance.")
logger.info("chauv estimate accuracy: "+str(np.abs(chauv_est-1.0)))
if np.abs(chauv_est - 1.0) > tol:
logger.error("chauv estimate exceeds tolerance.")
logger.info("chauvmad estimate accuracy: "+str(np.abs(chauvmad_est-1.0)))
if np.abs(chauvmad_est - 1.0) > tol:
logger.error("chauvmad estimate exceeds tolerance.")
return(None)
| [
"logging.getLogger",
"numpy.abs",
"numpy.random.randn"
] | [((817, 844), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (834, 844), False, 'import logging\n'), ((1007, 1032), 'numpy.random.randn', 'np.random.randn', (['(100000.0)'], {}), '(100000.0)\n', (1022, 1032), True, 'import numpy as np\n'), ((1330, 1351), 'numpy.abs', 'np.abs', (['(mad_est - 1.0)'], {}), '(mad_est - 1.0)\n', (1336, 1351), True, 'import numpy as np\n'), ((1491, 1512), 'numpy.abs', 'np.abs', (['(std_est - 1.0)'], {}), '(std_est - 1.0)\n', (1497, 1512), True, 'import numpy as np\n'), ((1656, 1679), 'numpy.abs', 'np.abs', (['(chauv_est - 1.0)'], {}), '(chauv_est - 1.0)\n', (1662, 1679), True, 'import numpy as np\n'), ((1831, 1857), 'numpy.abs', 'np.abs', (['(chauvmad_est - 1.0)'], {}), '(chauvmad_est - 1.0)\n', (1837, 1857), True, 'import numpy as np\n'), ((1301, 1322), 'numpy.abs', 'np.abs', (['(mad_est - 1.0)'], {}), '(mad_est - 1.0)\n', (1307, 1322), True, 'import numpy as np\n'), ((1462, 1483), 'numpy.abs', 'np.abs', (['(std_est - 1.0)'], {}), '(std_est - 1.0)\n', (1468, 1483), True, 'import numpy as np\n'), ((1625, 1648), 'numpy.abs', 'np.abs', (['(chauv_est - 1.0)'], {}), '(chauv_est - 1.0)\n', (1631, 1648), True, 'import numpy as np\n'), ((1797, 1823), 'numpy.abs', 'np.abs', (['(chauvmad_est - 1.0)'], {}), '(chauvmad_est - 1.0)\n', (1803, 1823), True, 'import numpy as np\n')] |
import json
import sys
sys.path.append('..')
from torch.utils.data.dataset import Dataset
from pathlib import Path
import pickle
import pdb
import torch
import numpy as np
import argparse
import os
import sys
import librosa
import numpy as np
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
def Audio_Collate(batch):
data, angles = list(zip(*batch))
data_len = torch.LongTensor(np.array([x.size(1) for x in data if x.size(1)!=1]))
if len(data_len) == 0:
return -1
max_len = max(data_len)
wrong_indices = []
for i, a_ in enumerate(angles):
if a_[0] == -1:
wrong_indices.append(i)
B = len(data)
inputs = torch.zeros(B-len(wrong_indices), 6, max_len, 257)
labels = torch.zeros(B-len(wrong_indices), 10)
j = 0
'''zero pad'''
# for i in range(B):
# if i in wrong_indices:
# continue
# inputs[j, :, :data[i].size(1),:] = data[i]
# labels[j, angles[i]] = 1.0
# j += 1
'''replica'''
for i in range(B):
if i in wrong_indices:
continue
inputs[j, :, :data[i].size(1),:] = data[i]
labels[j, angles[i]] = 1.0
num_pad = max_len - data[i].size(1) # To be padded
idx_ = data[i].size(1)
while num_pad > 0:
if num_pad > data[i].size(1):
inputs[j, :, idx_:idx_+data[i].size(1),:] = data[i]
idx_ += data[i].size(1)
num_pad -= data[i].size(1)
else:
inputs[j, :, idx_:idx_+num_pad,:] = data[i][:,:num_pad,:]
num_pad = 0
j += 1
data = (inputs, labels, data_len)
return data
class Audio_Reader(Dataset):
def __init__(self, datalist):
super(Audio_Reader, self).__init__()
self.datalist = datalist
self.nfft = 512
self.hopsize = self.nfft // 4
self.window = 'hann'
def __len__(self):
return len(self.datalist)
def FeatureExtractor(self, sig):
def mag(sig):
S = np.abs(librosa.stft(y=sig,
n_fft=self.nfft,
hop_length=self.hopsize,
center=True,
window=self.window,
pad_mode='reflect'))**2
S[:10, :] = 0.0
return S
def vad(spec):
S3 = (spec[0] + spec[1])
indices = S3.sum(0) > S3.sum(0).mean()/50
return np.stack([spec[0][:,indices], spec[1][:,indices]])
def transform(audio):
channel_num = audio.shape[0]
feature = []
for n in range(channel_num):
feature.append(mag(audio[n]))
feature = vad(feature)
return feature
return transform(sig)
def vad(self, audio):
s1 = abs(audio[0]).mean()
s2 = abs(audio[1]).mean()
if s1 > s2:
gizun = s1 / 50
indices = abs(audio[0]) > gizun
else:
gizun = s2 / 50
indices = abs(audio[1]) > gizun
return audio[:, indices], indices
def vad_sanghoon(self, audio, time):
# fname = fname.replace('1_enhanced', '2_VAD')
audio_list = []
for t_ in time:
start, end = t_
audio_list.append(audio[:, int(16000*start)+512:int(16000*end)-512])
if len(audio_list) > 1:
return np.concatenate(audio_list, 1)
else:
return audio_list[0]
def __getitem__(self, idx):
with open(self.datalist[idx], 'rb') as f:
data = pickle.load(f)
audio_path, angle, time, LR = data['audio_path'], data['angle'], data['time'], data['LR']
audio_path = audio_path.replace('/home/jungwook/AOSE_Unet/', './')
audio_enhanced, _ = librosa.load(audio_path, sr=16000 , mono=False, dtype=np.float32)
audio_path = audio_path.replace('/home/nas/DB/AI_grand_challenge_2020/jungwook_test/wind_train_wav2minsuk', '/home/nas/DB/AI_grand_challenge_2020/jungwook_wind_drone_18_20_rec')
audio_path = audio_path.replace('db_d', 'db/d')
audio_path = audio_path.replace('./jungwook8//enhance_wav', './data')
audio_noisy, _ = librosa.load(audio_path, sr=16000 , mono=False, dtype=np.float32)
#[C, T]
if audio_enhanced.sum() == 0.0 or len(time) == 0:
return torch.FloatTensor(3,1,1), np.array([-1])
else:
audio_enhanced = self.vad_sanghoon(audio_enhanced, time)
audio_noisy = self.vad_sanghoon(audio_noisy, time)
audio_enhanced, indices = self.vad(audio_enhanced)
audio_noisy = audio_noisy[:, indices]
feature_enhanced = self.FeatureExtractor(audio_enhanced)
feature_noisy = self.FeatureExtractor(audio_noisy)
'''(channels, seq_len, mel_bins)'''
# pdb.set_trace()
return torch.FloatTensor(np.concatenate([feature_enhanced, feature_noisy], axis=0)).transpose(1,2), np.array([int(x)//20 for x in angle])
# return feature, np.array([int(x)//20 for x in angle])
| [
"pickle.load",
"torch.FloatTensor",
"numpy.stack",
"numpy.array",
"numpy.concatenate",
"librosa.stft",
"sys.path.append",
"warnings.filterwarnings",
"librosa.load"
] | [((23, 44), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (38, 44), False, 'import sys\n'), ((281, 314), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (304, 314), False, 'import warnings\n'), ((3977, 4041), 'librosa.load', 'librosa.load', (['audio_path'], {'sr': '(16000)', 'mono': '(False)', 'dtype': 'np.float32'}), '(audio_path, sr=16000, mono=False, dtype=np.float32)\n', (3989, 4041), False, 'import librosa\n'), ((4389, 4453), 'librosa.load', 'librosa.load', (['audio_path'], {'sr': '(16000)', 'mono': '(False)', 'dtype': 'np.float32'}), '(audio_path, sr=16000, mono=False, dtype=np.float32)\n', (4401, 4453), False, 'import librosa\n'), ((2601, 2653), 'numpy.stack', 'np.stack', (['[spec[0][:, indices], spec[1][:, indices]]'], {}), '([spec[0][:, indices], spec[1][:, indices]])\n', (2609, 2653), True, 'import numpy as np\n'), ((3571, 3600), 'numpy.concatenate', 'np.concatenate', (['audio_list', '(1)'], {}), '(audio_list, 1)\n', (3585, 3600), True, 'import numpy as np\n'), ((3752, 3766), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3763, 3766), False, 'import pickle\n'), ((4558, 4584), 'torch.FloatTensor', 'torch.FloatTensor', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (4575, 4584), False, 'import torch\n'), ((4584, 4598), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (4592, 4598), True, 'import numpy as np\n'), ((2088, 2206), 'librosa.stft', 'librosa.stft', ([], {'y': 'sig', 'n_fft': 'self.nfft', 'hop_length': 'self.hopsize', 'center': '(True)', 'window': 'self.window', 'pad_mode': '"""reflect"""'}), "(y=sig, n_fft=self.nfft, hop_length=self.hopsize, center=True,\n window=self.window, pad_mode='reflect')\n", (2100, 2206), False, 'import librosa\n'), ((5119, 5176), 'numpy.concatenate', 'np.concatenate', (['[feature_enhanced, feature_noisy]'], {'axis': '(0)'}), '([feature_enhanced, feature_noisy], axis=0)\n', (5133, 5176), True, 'import numpy as np\n')] |
import pyedflib
import numpy as np
import pandas as pd
import sys
import mne
from pywt import wavedec
pathDataSet = "C:\\xampp\\htdocs\\klasifikasi\\public\\uploaded\\"
def data_load(FILE, selected_channels=[]):
fullNm = pathDataSet + FILE
f = pyedflib.EdfReader(fullNm )
n = f.signals_in_file
signal_labels = f.getSignalLabels()
channel_freq = f.getSampleFrequencies()
sigbufs = np.zeros((n, f.getNSamples()[0]))
for i in np.arange(n):
sigbufs[i, :] = f.readSignal(i)
f.close()
# and load the data into a DataFrame
df_signals = pd.DataFrame(sigbufs)
df_signals = df_signals.transpose()
df_signals.columns = signal_labels
df_signals = df_signals.loc[:,~df_signals.columns.duplicated()]
df_signals = df_signals[selected_channels].astype('float32')
return df_signals,channel_freq[0]
def mne_object(data, freq, events = None):
info = mne.create_info(ch_names=list(data.columns),
sfreq=freq,
ch_types=['eeg']*data.shape[-1])
data_T = data.transpose()
raw = mne.io.RawArray(data_T, info,verbose=False)
if events:
start_times = np.array(events[::2])
end_times = np.array(events[1::2])
anno_length = end_times-start_times
event_name = np.array(['Ictal']*len(anno_length))
raw.set_annotations(mne.Annotations(start_times,
anno_length,
event_name))
return raw
def loadAndFiltering(FILE,channel_keeps):
raw_data, freq = data_load(FILE, channel_keeps)
if len(raw_data) ==0:
print("no data ")
return raw_data
mne_data = mne_object(raw_data, freq)
raw=mne_data.copy()
return raw
def extract_windows(array, start, max_time, sub_window_size,
stride_size):
sub_windows = (
start +
np.expand_dims(np.arange(sub_window_size), 0) +
np.expand_dims(np.arange(max_time + 1- sub_window_size-start, step=stride_size), 0).T
)
return array[:,sub_windows]
def Crop(raw):
cropS = 3
strides = 1
tMin=0
tMax=raw.get_data().shape[1]#18*256*cropS
sub_window_size,stride_size = 256*cropS,256*strides
cropData = extract_windows(raw.get_data(), tMin, tMax , sub_window_size,stride_size)
cropData = cropData.reshape(cropData.shape[1],cropData.shape[0],cropData.shape[2])
return cropData
def create_modelCNN(input_shape, num_class,flatten=False):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.backend import clear_session
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Conv1D#, Input
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.layers import GlobalAveragePooling1D#, GlobalMaxPooling1D
from keras.layers import Activation,Flatten, Dropout
clear_session()
model = Sequential()
def add_conv_block(model, num_filters, input_shape=None):
if input_shape:
model.add(Conv1D(num_filters, kernel_size=3, activation='relu', padding='same', input_shape=input_shape))
else:
model.add(Conv1D(num_filters, kernel_size=3, activation='relu', padding='same'))
return model
model = add_conv_block(model, 128, input_shape=input_shape[1:])
model = add_conv_block(model, 128)
model.add(Dropout(0.3))
model.add(MaxPooling1D(pool_size=3, # size of the window
strides=2, # factor to downsample
padding='same'))
model.add(Dropout(0.1))
for i in range(2):
model.add(Conv1D(filters=256,kernel_size=3,padding="same",activation='relu'))
model.add(Dropout(0.1))
if flatten:
model.add(Flatten())
else:
model.add(GlobalAveragePooling1D())
model.add(Dense(units=128,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(num_class))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(0.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
return [n5,n25,n75,n95, median]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
# return [ no_mean_crossings]
def get_featuresStat(list_values):
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return crossings + statistics
def getFeatureStatWithWavelet(signal,numCH,waveName,level):
ftrStat=[]
for x in range(numCH):
list_coeff = wavedec(signal[x], waveName,level=level)
features = []
for coeff in list_coeff:
features += get_featuresStat(coeff)
ftrStat.append(features)
return np.array(ftrStat)
if __name__ == '__main__':
    # Entry point: load one EDF recording, crop it into segments and run
    # the pre-trained CNN classifier on the first two segments.
    FILE = sys.argv[1]
    print('haha')  # NOTE(review): leftover debug print
    # FILE = 'chb01_03.edf'
    # Channel subset kept during training; presumably must match the
    # channels the saved model was trained on -- TODO confirm.
    loaded = np.load("channel_keeps.npz")
    selected_channels = loaded['channel_keeps']
    raw = loadAndFiltering(FILE, selected_channels)
    cropData = Crop(raw)
    # Wavelet family and decomposition level used for feature extraction.
    waveName, level = 'bior3.1', 4
    numCH = cropData[0].shape[0]
    signal = cropData[0]
    # Features for the first segment, used only to derive the input
    # shape the model expects.
    oneData = getFeatureStatWithWavelet(
        signal, numCH,
        waveName, level)
    oneData = oneData.reshape(1, oneData.shape[0], oneData.shape[1])
    KELAS = 3  # number of output classes (Normal / Interictal / Kejang)
    model = create_modelCNN(oneData.shape, KELAS)  # ,False)
    nmModel = 'modelCNN_fold_1.h5'
    model.load_weights(nmModel)
    # print(oneData.shape)
    cnt = 0
    for idx in range(cropData.shape[0]):
        numCH = cropData[idx].shape[0]
        signal = cropData[idx]
        oneData = getFeatureStatWithWavelet(
            signal, numCH,
            waveName, level)
        oneData = oneData.reshape(1, oneData.shape[0], oneData.shape[1])
        yPred = model.predict(oneData)
        # argmax over the softmax output gives the predicted class index.
        yPred = np.argmax(yPred, axis=1)
        if yPred[0] == 0:
            hasil = "Normal"
        elif yPred[0] == 1:
            hasil = "Interictal"
        else:
            hasil = "Kejang"
        print("segment=%d prediksi=%s " % (idx, hasil))
        print(FILE)
        cnt += 1
        # Only the first two segments are classified (early exit cap).
        if cnt > 1:
            break
| [
"numpy.nanpercentile",
"numpy.array",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.layers.Dense",
"numpy.nanmean",
"keras.layers.Activation",
"tensorflow.keras.layers.MaxPooling1D",
"mne.io.RawArray",
"numpy.arange",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"pandas.DataFr... | [((256, 282), 'pyedflib.EdfReader', 'pyedflib.EdfReader', (['fullNm'], {}), '(fullNm)\n', (274, 282), False, 'import pyedflib\n'), ((456, 468), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (465, 468), True, 'import numpy as np\n'), ((583, 604), 'pandas.DataFrame', 'pd.DataFrame', (['sigbufs'], {}), '(sigbufs)\n', (595, 604), True, 'import pandas as pd\n'), ((1084, 1128), 'mne.io.RawArray', 'mne.io.RawArray', (['data_T', 'info'], {'verbose': '(False)'}), '(data_T, info, verbose=False)\n', (1099, 1128), False, 'import mne\n'), ((2919, 2934), 'tensorflow.keras.backend.clear_session', 'clear_session', ([], {}), '()\n', (2932, 2934), False, 'from tensorflow.keras.backend import clear_session\n'), ((2945, 2957), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2955, 2957), False, 'from tensorflow.keras.models import Sequential\n'), ((4151, 4183), 'numpy.nanpercentile', 'np.nanpercentile', (['list_values', '(5)'], {}), '(list_values, 5)\n', (4167, 4183), True, 'import numpy as np\n'), ((4194, 4227), 'numpy.nanpercentile', 'np.nanpercentile', (['list_values', '(25)'], {}), '(list_values, 25)\n', (4210, 4227), True, 'import numpy as np\n'), ((4238, 4271), 'numpy.nanpercentile', 'np.nanpercentile', (['list_values', '(75)'], {}), '(list_values, 75)\n', (4254, 4271), True, 'import numpy as np\n'), ((4282, 4315), 'numpy.nanpercentile', 'np.nanpercentile', (['list_values', '(95)'], {}), '(list_values, 95)\n', (4298, 4315), True, 'import numpy as np\n'), ((4329, 4362), 'numpy.nanpercentile', 'np.nanpercentile', (['list_values', '(50)'], {}), '(list_values, 50)\n', (4345, 4362), True, 'import numpy as np\n'), ((5285, 5302), 'numpy.array', 'np.array', (['ftrStat'], {}), '(ftrStat)\n', (5293, 5302), True, 'import numpy as np\n'), ((5411, 5439), 'numpy.load', 'np.load', (['"""channel_keeps.npz"""'], {}), "('channel_keeps.npz')\n", (5418, 5439), True, 'import numpy as np\n'), ((1159, 1180), 'numpy.array', 'np.array', (['events[::2]'], 
{}), '(events[::2])\n', (1167, 1180), True, 'import numpy as np\n'), ((1197, 1219), 'numpy.array', 'np.array', (['events[1::2]'], {}), '(events[1::2])\n', (1205, 1219), True, 'import numpy as np\n'), ((3403, 3415), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (3410, 3415), False, 'from keras.layers import Activation, Flatten, Dropout\n'), ((3429, 3481), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=3, strides=2, padding='same')\n", (3441, 3481), False, 'from tensorflow.keras.layers import MaxPooling1D\n'), ((3589, 3601), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3596, 3601), False, 'from keras.layers import Activation, Flatten, Dropout\n'), ((3833, 3868), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(128)', 'activation': '"""relu"""'}), "(units=128, activation='relu')\n", (3838, 3868), False, 'from tensorflow.keras.layers import Dense\n'), ((3881, 3893), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3888, 3893), False, 'from keras.layers import Activation, Flatten, Dropout\n'), ((3907, 3923), 'tensorflow.keras.layers.Dense', 'Dense', (['num_class'], {}), '(num_class)\n', (3912, 3923), False, 'from tensorflow.keras.layers import Dense\n'), ((3937, 3958), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3947, 3958), False, 'from keras.layers import Activation, Flatten, Dropout\n'), ((5097, 5138), 'pywt.wavedec', 'wavedec', (['signal[x]', 'waveName'], {'level': 'level'}), '(signal[x], waveName, level=level)\n', (5104, 5138), False, 'from pywt import wavedec\n'), ((6448, 6472), 'numpy.argmax', 'np.argmax', (['yPred'], {'axis': '(1)'}), '(yPred, axis=1)\n', (6457, 6472), True, 'import numpy as np\n'), ((1338, 1391), 'mne.Annotations', 'mne.Annotations', (['start_times', 'anno_length', 'event_name'], {}), '(start_times, anno_length, event_name)\n', (1353, 1391), False, 
'import mne\n'), ((3638, 3707), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(256)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=256, kernel_size=3, padding='same', activation='relu')\n", (3644, 3707), False, 'from tensorflow.keras.layers import Conv1D\n'), ((3720, 3732), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3727, 3732), False, 'from keras.layers import Activation, Flatten, Dropout\n'), ((3762, 3771), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3769, 3771), False, 'from keras.layers import Activation, Flatten, Dropout\n'), ((3795, 3819), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (3817, 3819), False, 'from tensorflow.keras.layers import GlobalAveragePooling1D\n'), ((3986, 3998), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.0001)'], {}), '(0.0001)\n', (3990, 3998), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1895, 1921), 'numpy.arange', 'np.arange', (['sub_window_size'], {}), '(sub_window_size)\n', (1904, 1921), True, 'import numpy as np\n'), ((1951, 2018), 'numpy.arange', 'np.arange', (['(max_time + 1 - sub_window_size - start)'], {'step': 'stride_size'}), '(max_time + 1 - sub_window_size - start, step=stride_size)\n', (1960, 2018), True, 'import numpy as np\n'), ((3064, 3162), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['num_filters'], {'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""', 'input_shape': 'input_shape'}), "(num_filters, kernel_size=3, activation='relu', padding='same',\n input_shape=input_shape)\n", (3070, 3162), False, 'from tensorflow.keras.layers import Conv1D\n'), ((3196, 3265), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['num_filters'], {'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(num_filters, kernel_size=3, activation='relu', padding='same')\n", (3202, 3265), False, 'from tensorflow.keras.layers import Conv1D\n'), 
((4485, 4506), 'numpy.array', 'np.array', (['list_values'], {}), '(list_values)\n', (4493, 4506), True, 'import numpy as np\n'), ((4614, 4635), 'numpy.array', 'np.array', (['list_values'], {}), '(list_values)\n', (4622, 4635), True, 'import numpy as np\n'), ((4638, 4661), 'numpy.nanmean', 'np.nanmean', (['list_values'], {}), '(list_values)\n', (4648, 4661), True, 'import numpy as np\n')] |
# coding: utf-8
"""TV-L1 optical flow algorithm implementation.
"""
from functools import partial
import numpy as np
from scipy import ndimage as ndi
from skimage.transform import warp
from ._optical_flow_utils import coarse_to_fine
def _tvl1(reference_image, moving_image, flow0, attachment, tightness,
          num_warp, num_iter, tol, prefilter):
    """TV-L1 solver for optical flow estimation.

    Parameters
    ----------
    reference_image : ndarray, shape (M, N[, P[, ...]])
        The first gray scale image of the sequence.
    moving_image : ndarray, shape (M, N[, P[, ...]])
        The second gray scale image of the sequence.
    flow0 : ndarray, shape (image0.ndim, M, N[, P[, ...]])
        Initialization for the vector field.
    attachment : float
        Attachment parameter. The smaller this parameter is,
        the smoother is the solutions.
    tightness : float
        Tightness parameter. It should have a small value in order to
        maintain attachement and regularization parts in
        correspondence.
    num_warp : int
        Number of times image1 is warped.
    num_iter : int
        Number of fixed point iteration.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : ndarray, shape ((image0.ndim, M, N[, P[, ...]])
        The estimated optical flow components for each axis.
    """
    dtype = reference_image.dtype
    # Pixel coordinate grid, one coordinate array per image axis.
    grid = np.meshgrid(*[np.arange(n, dtype=dtype)
                  for n in reference_image.shape],
                  indexing='ij')

    dt = 0.5 / reference_image.ndim
    reg_num_iter = 2
    f0 = attachment * tightness
    f1 = dt / tightness
    # Scale the tolerance by the pixel count so the stopping criterion
    # does not depend on image size.
    tol *= reference_image.size

    flow_current = flow_previous = flow0

    # Dual variables: g holds per-axis finite differences, proj the
    # projection field used by the regularization iterations.
    g = np.zeros((reference_image.ndim,) + reference_image.shape, dtype=dtype)
    proj = np.zeros((reference_image.ndim, reference_image.ndim,)
                    + reference_image.shape, dtype=dtype)

    # Reusable slice templates for axis-wise indexing below.
    s_g = [slice(None), ] * g.ndim
    s_p = [slice(None), ] * proj.ndim
    s_d = [slice(None), ] * (proj.ndim - 2)

    for _ in range(num_warp):
        if prefilter:
            # Median-filter each flow component (window of 3 along every
            # image axis) to remove outliers before warping.
            flow_current = ndi.median_filter(flow_current,
                                             [1] + reference_image.ndim * [3])

        # Warp the moving image with the current flow estimate and
        # linearize the brightness constancy term around it.
        image1_warp = warp(moving_image, grid + flow_current, mode='nearest')
        grad = np.array(np.gradient(image1_warp))
        NI = (grad * grad).sum(0)
        NI[NI == 0] = 1  # avoid division by zero where the gradient vanishes

        rho_0 = image1_warp - reference_image - (grad * flow_current).sum(0)

        for _ in range(num_iter):

            # Data term: thresholding step on the residual rho.

            rho = rho_0 + (grad * flow_current).sum(0)

            idx = abs(rho) <= f0 * NI

            flow_auxiliary = flow_current

            flow_auxiliary[:, idx] -= rho[idx] * grad[:, idx] / NI[idx]

            idx = ~idx
            srho = f0 * np.sign(rho[idx])
            flow_auxiliary[:, idx] -= srho * grad[:, idx]

            # Regularization term: TV denoising of each flow component.
            flow_current = flow_auxiliary.copy()

            for idx in range(reference_image.ndim):
                s_p[0] = idx
                for _ in range(reg_num_iter):
                    # Forward differences of the current component.
                    for ax in range(reference_image.ndim):
                        s_g[0] = ax
                        s_g[ax + 1] = slice(0, -1)
                        g[tuple(s_g)] = np.diff(flow_current[idx], axis=ax)
                        s_g[ax + 1] = slice(None)

                    norm = np.sqrt((g ** 2).sum(0))[np.newaxis, ...]
                    norm *= f1
                    norm += 1.
                    proj[idx] -= dt * g
                    proj[idx] /= norm

                    # d will be the (negative) divergence of proj[idx]
                    d = -proj[idx].sum(0)
                    for ax in range(reference_image.ndim):
                        s_p[1] = ax
                        s_p[ax + 2] = slice(0, -1)
                        s_d[ax] = slice(1, None)
                        d[tuple(s_d)] += proj[tuple(s_p)]
                        s_p[ax + 2] = slice(None)
                        s_d[ax] = slice(None)

                    flow_current[idx] = flow_auxiliary[idx] + d

        flow_previous -= flow_current  # The difference as stopping criteria
        if (flow_previous * flow_previous).sum() < tol:
            break

        flow_previous = flow_current

    return flow_current
def optical_flow_tvl1(reference_image, moving_image,
                      *,
                      attachment=15, tightness=0.3, num_warp=5, num_iter=10,
                      tol=1e-4, prefilter=False, dtype=np.float32):
    r"""Coarse to fine optical flow estimator.

    The TV-L1 solver is applied at each level of an image pyramid.
    TV-L1 is a popular optical flow algorithm introduced by Zack et
    al. [1]_, improved in [2]_ and detailed in [3]_.

    Parameters
    ----------
    reference_image : ndarray, shape (M, N[, P[, ...]])
        The first gray scale image of the sequence.
    moving_image : ndarray, shape (M, N[, P[, ...]])
        The second gray scale image of the sequence.
    attachment : float, optional
        Attachment parameter (:math:`\lambda` in [1]_). The smaller
        this parameter is, the smoother the returned result will be.
    tightness : float, optional
        Tightness parameter (:math:`\tau` in [1]_). It should have a
        small value in order to maintain attachement and
        regularization parts in correspondence.
    num_warp : int, optional
        Number of times image1 is warped.
    num_iter : int, optional
        Number of fixed point iteration.
    tol : float, optional
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool, optional
        Whether to prefilter the estimated optical flow before each
        image warp; helps to remove potential outliers.
    dtype : dtype, optional
        Output data type; must be floating point. Single precision
        provides good results and saves memory and computation time
        compared to double precision.

    Returns
    -------
    flow : ndarray, shape ((image0.ndim, M, N[, P[, ...]])
        The estimated optical flow components for each axis.

    Notes
    -----
    Color images are not supported.

    References
    ----------
    .. [1] A duality based approach for realtime TV-L1 optical flow.
           Joint pattern recognition symposium (2007), pp. 214-223.
           :DOI:`10.1007/978-3-540-74936-3_22`
    .. [2] An improved algorithm for TV-L1 optical flow. Statistical
           and geometrical approaches to visual motion analysis
           (2009), pp. 23-45. :DOI:`10.1007/978-3-642-03061-1_2`
    .. [3] TV-L1 optical flow estimation. Image Processing On Line,
           2013, 137-150. :DOI:`10.5201/ipol.2013.26`

    Examples
    --------
    >>> from skimage.color import rgb2gray
    >>> from skimage.data import stereo_motorcycle
    >>> from skimage.registration import optical_flow_tvl1
    >>> image0, image1, disp = stereo_motorcycle()
    >>> # --- Convert the images to gray level: color is not supported.
    >>> image0 = rgb2gray(image0)
    >>> image1 = rgb2gray(image1)
    >>> flow = optical_flow_tvl1(image1, image0)
    """
    # Bind the per-level solver parameters once, then let the pyramid
    # driver call it at every resolution.
    tvl1_params = dict(
        attachment=attachment,
        tightness=tightness,
        num_warp=num_warp,
        num_iter=num_iter,
        tol=tol,
        prefilter=prefilter,
    )
    solver = partial(_tvl1, **tvl1_params)
    return coarse_to_fine(reference_image, moving_image, solver, dtype=dtype)
| [
"skimage.transform.warp",
"numpy.diff",
"numpy.zeros",
"functools.partial",
"numpy.sign",
"scipy.ndimage.median_filter",
"numpy.gradient",
"numpy.arange"
] | [((1949, 2019), 'numpy.zeros', 'np.zeros', (['((reference_image.ndim,) + reference_image.shape)'], {'dtype': 'dtype'}), '((reference_image.ndim,) + reference_image.shape, dtype=dtype)\n', (1957, 2019), True, 'import numpy as np\n'), ((2031, 2127), 'numpy.zeros', 'np.zeros', (['((reference_image.ndim, reference_image.ndim) + reference_image.shape)'], {'dtype': 'dtype'}), '((reference_image.ndim, reference_image.ndim) + reference_image.\n shape, dtype=dtype)\n', (2039, 2127), True, 'import numpy as np\n'), ((7674, 7805), 'functools.partial', 'partial', (['_tvl1'], {'attachment': 'attachment', 'tightness': 'tightness', 'num_warp': 'num_warp', 'num_iter': 'num_iter', 'tol': 'tol', 'prefilter': 'prefilter'}), '(_tvl1, attachment=attachment, tightness=tightness, num_warp=\n num_warp, num_iter=num_iter, tol=tol, prefilter=prefilter)\n', (7681, 7805), False, 'from functools import partial\n'), ((2474, 2529), 'skimage.transform.warp', 'warp', (['moving_image', '(grid + flow_current)'], {'mode': '"""nearest"""'}), "(moving_image, grid + flow_current, mode='nearest')\n", (2478, 2529), False, 'from skimage.transform import warp\n'), ((2340, 2405), 'scipy.ndimage.median_filter', 'ndi.median_filter', (['flow_current', '([1] + reference_image.ndim * [3])'], {}), '(flow_current, [1] + reference_image.ndim * [3])\n', (2357, 2405), True, 'from scipy import ndimage as ndi\n'), ((2554, 2578), 'numpy.gradient', 'np.gradient', (['image1_warp'], {}), '(image1_warp)\n', (2565, 2578), True, 'import numpy as np\n'), ((1630, 1655), 'numpy.arange', 'np.arange', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (1639, 1655), True, 'import numpy as np\n'), ((3027, 3044), 'numpy.sign', 'np.sign', (['rho[idx]'], {}), '(rho[idx])\n', (3034, 3044), True, 'import numpy as np\n'), ((3497, 3532), 'numpy.diff', 'np.diff', (['flow_current[idx]'], {'axis': 'ax'}), '(flow_current[idx], axis=ax)\n', (3504, 3532), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import os
def sort_list(list1, list2):
    """Return the elements of ``list1`` reordered by the keys in ``list2``.

    Parameters
    ----------
    list1 : sequence
        Values to reorder.
    list2 : sequence
        Sort keys; ``list2[i]`` is the key for ``list1[i]``.

    Returns
    -------
    list
        Elements of ``list1`` sorted by their keys; order is stable
        for equal keys.

    Notes
    -----
    Sorting compares only the keys.  The previous tuple-based sort
    compared the paired values on key ties, which raised ``TypeError``
    for unorderable values such as dicts or NumPy arrays.
    """
    zipped_pairs = zip(list2, list1)
    return [value for _, value in sorted(zipped_pairs,
                                         key=lambda pair: pair[0])]
def plot_loss(DIR):
    """Plot the training-loss curve stored under ``<DIR>/loss``.

    Every 100th ``loss_<step>.npy`` file is loaded (the step number is
    parsed from the file name), the series is sorted by step, plotted,
    and the process exits after the figure window is closed.

    Parameters
    ----------
    DIR : str
        Run directory containing a ``loss`` sub-directory.

    Notes
    -----
    Removed the unused locals of the original version
    (``loss_first``, ``loss_increment`` and ``DIR_big_matrix_loss``);
    behavior is otherwise unchanged.
    """
    DIR = DIR + "/loss"
    # Number of regular files, used only for progress logging below.
    file_no = len([name for name in os.listdir(DIR)
                   if os.path.isfile(os.path.join(DIR, name))])
    print(file_no)
    big_matrix_loss = []
    loss_labels = []
    # Load every 100th loss snapshot to keep the plot light-weight.
    counter = 1
    for filename in os.listdir(DIR):
        if counter % 100 == 0:
            y = np.load(DIR + '/' + filename)
            # File names look like 'loss_<step>.npy'; strip both parts.
            label = int(filename[5:-4])
            big_matrix_loss.append(y)
            loss_labels.append(label)
        print('loading file {0}'.format(counter) + ' of {0}'.format(file_no))
        counter = counter + 1
    plt.figure(30)
    # Sort the loaded losses by their step label before plotting.
    big_matrix_loss = sort_list(big_matrix_loss, loss_labels)
    loss_labels.sort()
    plt.plot(loss_labels, big_matrix_loss)
    axes = plt.gca()
    axes.set_ylim([0, 0.3])
    plt.title("loss")
    plt.xlabel('episodes')
    plt.ylabel('loss')
    plt.show()
    # NOTE(review): terminates the whole process after the plot --
    # kept to preserve the original script's behavior.
    exit()
def combine_results(big_matrix, file_no, DIR_file):
    """Concatenate per-episode result files onto ``big_matrix``.

    Loads ``DIR_file.format(i)`` for ``i`` = 2 .. ``file_no`` and
    stacks the arrays after the initial one.  Also builds x-tick
    positions (cumulative row offsets) and labels (file indices) for
    plotting.

    Parameters
    ----------
    big_matrix : ndarray
        Contents of file 1, already loaded by the caller.
    file_no : int
        Total number of files (1-based, inclusive).
    DIR_file : str
        Format string with one ``{0}`` placeholder for the file index.

    Returns
    -------
    tuple
        ``(stacked_matrix, episode_xtick_position, episode_xtick_labels)``.
    """
    episode_xtick_position = [1]
    episode_xtick_labels = [1]
    # Collect the chunks and concatenate once at the end: the original
    # np.concatenate inside the loop re-copied the whole accumulated
    # array each iteration (quadratic in total size).
    chunks = [big_matrix]
    for i in range(2, int(file_no) + 1):
        y = np.load(DIR_file.format(i))
        chunks.append(y)
        # Tick marks the first row contributed by file i.
        episode_xtick_position.append(episode_xtick_position[-1] + y.shape[0])
        episode_xtick_labels.append(i)
    return np.concatenate(chunks), episode_xtick_position, episode_xtick_labels
def lineplotCI(x_data, y_data, sorted_x, low_CI, upper_CI, x_label, y_label, title):
    """Draw a mean line with a shaded confidence band on a new figure.

    ``x_data``/``y_data`` give the mean curve; ``sorted_x`` together
    with ``low_CI``/``upper_CI`` bound the shaded interval.  Title,
    axis labels and legend are applied before returning.
    """
    fig, axis = plt.subplots()
    # Mean curve and the confidence band share one colour.
    axis.plot(x_data, y_data,
              lw=1, color='#539caf', alpha=1,
              label='Mean of weights')
    axis.fill_between(sorted_x, low_CI, upper_CI,
                      color='#539caf', alpha=0.4,
                      label='1 standard deviation of weights')
    axis.set_title(title)
    axis.set_xlabel(x_label)
    axis.set_ylabel(y_label)
    axis.legend(loc='best')
# Global matplotlib font configuration for every figure below.
font = {'family': 'normal',
        'size': 22}
matplotlib.rc('font', **font)
# define directory for log file
DIR = ""
# Eight files are written per logging step, hence the division by 8
# to obtain the number of steps -- presumably; TODO confirm.
file_no = int(len([name for name in os.listdir(DIR)
                   if os.path.isfile(os.path.join(DIR, name))]) / 8)
# initialise matrix for loading: load the first chunk of each series.
DIR_big_matrix_y_apnn_in = DIR + "/y_apnn_in_stack_{0}00.npy"
big_matrix_y_apnn_in = np.load(DIR_big_matrix_y_apnn_in.format(1))
DIR_big_matrix_y_apnn_out = DIR + "/y_apnn_out_stack_{0}00.npy"
big_matrix_y_apnn_out = np.load(DIR_big_matrix_y_apnn_out.format(1))
DIR_big_matrix_y_dqn_out = DIR + "/y_dqn_out_stack_{0}00.npy"
big_matrix_y_dqn_out = np.load(DIR_big_matrix_y_dqn_out.format(1))
DIR_big_matrix_apnn_weights = DIR + "/apnn_weights_{0}00.npy"
big_matrix_apnn_weights = np.load(DIR_big_matrix_apnn_weights.format(1))
DIR_big_matrix_conv_layers_out = DIR + "/y_conv_layers_stack_{0}00.npy"
big_matrix_conv_layers_out = np.load(DIR_big_matrix_conv_layers_out.format(1))
DIR_big_matrix_elgibility_traces = DIR + "/Eligbility_traces_stack_{0}00.npy"
DIR_big_matrix_state_types = DIR + "/state_type_{0}00.npy"
DIR_big_matrix_reward = DIR + "/reward_{0}00.npy"
big_matrix_reward = np.load(DIR_big_matrix_reward.format(1))
print(big_matrix_reward)
# load each big_matrix; only the first call's tick positions/labels
# are kept, the others are discarded into throwaway names.
big_matrix_y_apnn_in, episode_xtick_position, episode_xtick_labels = combine_results(big_matrix_y_apnn_in, file_no, DIR_big_matrix_y_apnn_in)
big_matrix_y_apnn_out, destroy, destroy_2 = combine_results(big_matrix_y_apnn_out, file_no, DIR_big_matrix_y_apnn_out)
big_matrix_y_dqn_out, destroy, destroy_2 = combine_results(big_matrix_y_dqn_out, file_no, DIR_big_matrix_y_dqn_out)
# big_matrix_conv_layers_out,destroy,destroy_2 = combine_results(big_matrix_conv_layers_out,file_no,DIR_big_matrix_conv_layers_out)
# v-stack weightes: weights and rewards are stacked depth-wise instead.
for i in range(2, int(file_no) + 1):
    y = np.load(DIR_big_matrix_apnn_weights.format(i))
    big_matrix_apnn_weights = np.dstack((big_matrix_apnn_weights, y))
    y2 = np.load(DIR_big_matrix_reward.format(i))
    big_matrix_reward = np.dstack((big_matrix_reward, y2))
# set_xticks: keep only every 50th tick so the x-axis stays readable.
episode_xtick_position = np.asarray(episode_xtick_position)[0::50]
x_values = episode_xtick_labels
episode_xtick_labels = np.asarray(episode_xtick_labels)[0::50]
print(file_no)
# Collect episodes whose eligibility-trace stack has more than 10
# entries along its last axis.
file_list = []
for i in range(1, file_no):
    episode_number = i  # in thousands of episodes
    single_episode_y_apnn_in = np.load(DIR_big_matrix_y_apnn_in.format(episode_number))
    single_episode_y_apnn_out = np.load(DIR_big_matrix_y_apnn_out.format(episode_number))
    single_episode_y_dqn_out = np.load(DIR_big_matrix_y_dqn_out.format(episode_number))
    single_elgibility_traces = np.load(DIR_big_matrix_elgibility_traces.format(episode_number))
    single_episode_conv_layers_out = np.load(DIR_big_matrix_conv_layers_out.format(episode_number))
    if single_elgibility_traces.shape[2] > 10:
        file_list.append(i)
print(file_list)
# Interactive plotting loop: aggregate plots first, then plots for a
# single episode chosen by the user; repeats until interrupted.
while True:
    # plot all episodes
    f = plt.figure(1)
    imgplot = plt.imshow(big_matrix_y_apnn_in.transpose(), interpolation='none', aspect=150)
    plt.title("y_apnn_in")
    plt.colorbar(imgplot)
    plt.xticks(episode_xtick_position, episode_xtick_labels)
    plt.xlabel('thousands of epsiodes')
    plt.ylabel('apnn input')
    g = plt.figure(2)
    imgplot = plt.imshow(big_matrix_y_apnn_out.transpose(), interpolation='none', aspect=150)
    plt.title("y_apnn_out")
    plt.colorbar(imgplot)
    plt.xticks(episode_xtick_position, episode_xtick_labels)
    plt.xlabel('thousands of epsiodes')
    plt.ylabel('apnn output')
    e = plt.figure(3)
    imgplot = plt.imshow(big_matrix_y_dqn_out.transpose(), interpolation='none', aspect=150)
    plt.title("y_dqn_out")
    plt.colorbar(imgplot)
    plt.xticks(episode_xtick_position, episode_xtick_labels)
    plt.xlabel('thousands of epsiodes')
    plt.ylabel('dqn output')
    # d = plt.figure(22)
    # imgplot = plt.imshow(big_matrix_conv_layers_out.transpose(), interpolation = 'none',aspect = 1)
    # plt.title("y_dqn_out")
    # plt.colorbar(imgplot)
    # plt.xticks(episode_xtick_position,episode_xtick_labels)
    # plt.xlabel('thousands of epsiodes')
    # plt.ylabel('dqn output')
    # weights plots: mean over the first two axes, variance as band width.
    apnn_weights_mean = np.mean(big_matrix_apnn_weights, axis=0)
    apnn_weights_var = np.var(apnn_weights_mean, axis=0)
    apnn_weights_mean = np.mean(apnn_weights_mean, axis=0)
    # NOTE(review): the names look swapped -- lower_CI is mean + var and
    # upper_CI is mean - var; confirm the intended band orientation.
    lower_CI = apnn_weights_mean + apnn_weights_var
    upper_CI = apnn_weights_mean - apnn_weights_var
    z = plt.figure(8)
    # imgplot = plt.plot(apnn_weights_mean)
    lineplotCI(x_values, apnn_weights_mean, x_values, lower_CI, upper_CI, 'thousands of epsiodes', 'mean weight value', "apnn weights")
    # plt.xticks(episode_xtick_position,episode_xtick_labels)
    # show weights per output
    for i in range(0, 3):
        plt.figure(10 + i)
        plot = plt.plot(x_values, np.transpose(big_matrix_apnn_weights[i, :, :]))
        plot = plt.plot(x_values, np.transpose(big_matrix_reward[0, :, :]))
        plt.title("y_apnn_out")
        plt.xlabel('houndreds of epsiodes')
        plt.ylabel('weights for action {0}'.format(i))
    # plot single episodes, chosen interactively on stdin.
    episode_number = int(input("enter episode number for plotting "))
    single_episode_y_apnn_in = np.load(DIR_big_matrix_y_apnn_in.format(episode_number))
    single_episode_y_apnn_out = np.load(DIR_big_matrix_y_apnn_out.format(episode_number))
    single_episode_y_dqn_out = np.load(DIR_big_matrix_y_dqn_out.format(episode_number))
    single_elgibility_traces = np.load(DIR_big_matrix_elgibility_traces.format(episode_number))
    single_episode_conv_layers_out = np.load(DIR_big_matrix_conv_layers_out.format(episode_number))
    single_episode_state_types = np.load(DIR_big_matrix_state_types.format(episode_number))
    print(single_episode_state_types)
    f = plt.figure(4)
    imgplot = plt.imshow(single_episode_y_apnn_in.transpose(), interpolation='none', aspect=0.25)
    plt.title("y_apnn_in_single_episode")
    plt.colorbar(imgplot)
    plt.xlabel('steps')
    plt.ylabel('apnn input')
    g = plt.figure(5)
    imgplot = plt.imshow(single_episode_y_apnn_out.transpose(), interpolation='none', aspect=1.5)
    plt.title("y_apnn_out_single_episode")
    plt.colorbar(imgplot)
    plt.xlabel('steps')
    plt.ylabel('apnn output')
    print(single_episode_y_apnn_out.transpose())
    e = plt.figure(6)
    imgplot = plt.imshow(single_episode_y_dqn_out.transpose(), interpolation='none', aspect=1.5)
    plt.title("y_dqn_out_single_episode")
    plt.colorbar(imgplot)
    plt.xlabel('steps')
    plt.ylabel('dqn output')
    e = plt.figure(20)
    imgplot = plt.imshow(single_episode_conv_layers_out.transpose(), interpolation='none', aspect=0.02)
    plt.title("conv_layers_out_single_episode")
    plt.colorbar(imgplot)
    plt.xlabel('steps')
    plt.ylabel('dqn output')
    g = plt.figure(7)
    # Element-wise sum of apnn and dqn outputs, shown as "taken action".
    imgplot = plt.imshow(single_episode_y_apnn_out.transpose() + single_episode_y_dqn_out.transpose(), interpolation='none', aspect=1.5)
    plt.title("taken action")
    plt.colorbar(imgplot)
    plt.xlabel('steps')
    # plot Eligbility_traces_singles, one figure per action.
    for i in range(0, 3):
        plt.figure(14 + i)
        plot = plt.plot(np.transpose(single_elgibility_traces[i, :, :]))
        plt.title("elgibility traces")
        plt.xlabel('steps')
        plt.ylabel('trace for action {0}'.format(i))
    plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.rc",
"numpy.mean",
"os.listdir",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.concatenate",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.show",
... | [((2459, 2488), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (2472, 2488), False, 'import matplotlib\n'), ((573, 588), 'os.listdir', 'os.listdir', (['DIR'], {}), '(DIR)\n', (583, 588), False, 'import os\n'), ((892, 906), 'matplotlib.pyplot.figure', 'plt.figure', (['(30)'], {}), '(30)\n', (902, 906), True, 'from matplotlib import pyplot as plt\n'), ((1002, 1040), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_labels', 'big_matrix_loss'], {}), '(loss_labels, big_matrix_loss)\n', (1010, 1040), True, 'from matplotlib import pyplot as plt\n'), ((1051, 1060), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1058, 1060), True, 'from matplotlib import pyplot as plt\n'), ((1092, 1109), 'matplotlib.pyplot.title', 'plt.title', (['"""loss"""'], {}), "('loss')\n", (1101, 1109), True, 'from matplotlib import pyplot as plt\n'), ((1114, 1136), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""episodes"""'], {}), "('episodes')\n", (1124, 1136), True, 'from matplotlib import pyplot as plt\n'), ((1141, 1159), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (1151, 1159), True, 'from matplotlib import pyplot as plt\n'), ((1164, 1174), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1172, 1174), True, 'from matplotlib import pyplot as plt\n'), ((1857, 1871), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1869, 1871), True, 'from matplotlib import pyplot as plt\n'), ((4257, 4296), 'numpy.dstack', 'np.dstack', (['(big_matrix_apnn_weights, y)'], {}), '((big_matrix_apnn_weights, y))\n', (4266, 4296), True, 'import numpy as np\n'), ((4370, 4404), 'numpy.dstack', 'np.dstack', (['(big_matrix_reward, y2)'], {}), '((big_matrix_reward, y2))\n', (4379, 4404), True, 'import numpy as np\n'), ((4442, 4476), 'numpy.asarray', 'np.asarray', (['episode_xtick_position'], {}), '(episode_xtick_position)\n', (4452, 4476), True, 'import numpy as np\n'), ((4539, 4571), 'numpy.asarray', 'np.asarray', 
(['episode_xtick_labels'], {}), '(episode_xtick_labels)\n', (4549, 4571), True, 'import numpy as np\n'), ((5284, 5297), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5294, 5297), True, 'from matplotlib import pyplot as plt\n'), ((5398, 5420), 'matplotlib.pyplot.title', 'plt.title', (['"""y_apnn_in"""'], {}), "('y_apnn_in')\n", (5407, 5420), True, 'from matplotlib import pyplot as plt\n'), ((5425, 5446), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (5437, 5446), True, 'from matplotlib import pyplot as plt\n'), ((5451, 5507), 'matplotlib.pyplot.xticks', 'plt.xticks', (['episode_xtick_position', 'episode_xtick_labels'], {}), '(episode_xtick_position, episode_xtick_labels)\n', (5461, 5507), True, 'from matplotlib import pyplot as plt\n'), ((5511, 5546), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""thousands of epsiodes"""'], {}), "('thousands of epsiodes')\n", (5521, 5546), True, 'from matplotlib import pyplot as plt\n'), ((5551, 5575), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""apnn input"""'], {}), "('apnn input')\n", (5561, 5575), True, 'from matplotlib import pyplot as plt\n'), ((5584, 5597), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5594, 5597), True, 'from matplotlib import pyplot as plt\n'), ((5699, 5722), 'matplotlib.pyplot.title', 'plt.title', (['"""y_apnn_out"""'], {}), "('y_apnn_out')\n", (5708, 5722), True, 'from matplotlib import pyplot as plt\n'), ((5727, 5748), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (5739, 5748), True, 'from matplotlib import pyplot as plt\n'), ((5753, 5809), 'matplotlib.pyplot.xticks', 'plt.xticks', (['episode_xtick_position', 'episode_xtick_labels'], {}), '(episode_xtick_position, episode_xtick_labels)\n', (5763, 5809), True, 'from matplotlib import pyplot as plt\n'), ((5813, 5848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""thousands of epsiodes"""'], {}), "('thousands of epsiodes')\n", (5823, 5848), 
True, 'from matplotlib import pyplot as plt\n'), ((5853, 5878), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""apnn output"""'], {}), "('apnn output')\n", (5863, 5878), True, 'from matplotlib import pyplot as plt\n'), ((5887, 5900), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (5897, 5900), True, 'from matplotlib import pyplot as plt\n'), ((6001, 6023), 'matplotlib.pyplot.title', 'plt.title', (['"""y_dqn_out"""'], {}), "('y_dqn_out')\n", (6010, 6023), True, 'from matplotlib import pyplot as plt\n'), ((6028, 6049), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (6040, 6049), True, 'from matplotlib import pyplot as plt\n'), ((6054, 6110), 'matplotlib.pyplot.xticks', 'plt.xticks', (['episode_xtick_position', 'episode_xtick_labels'], {}), '(episode_xtick_position, episode_xtick_labels)\n', (6064, 6110), True, 'from matplotlib import pyplot as plt\n'), ((6114, 6149), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""thousands of epsiodes"""'], {}), "('thousands of epsiodes')\n", (6124, 6149), True, 'from matplotlib import pyplot as plt\n'), ((6154, 6178), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dqn output"""'], {}), "('dqn output')\n", (6164, 6178), True, 'from matplotlib import pyplot as plt\n'), ((6536, 6576), 'numpy.mean', 'np.mean', (['big_matrix_apnn_weights'], {'axis': '(0)'}), '(big_matrix_apnn_weights, axis=0)\n', (6543, 6576), True, 'import numpy as np\n'), ((6601, 6634), 'numpy.var', 'np.var', (['apnn_weights_mean'], {'axis': '(0)'}), '(apnn_weights_mean, axis=0)\n', (6607, 6634), True, 'import numpy as np\n'), ((6660, 6694), 'numpy.mean', 'np.mean', (['apnn_weights_mean'], {'axis': '(0)'}), '(apnn_weights_mean, axis=0)\n', (6667, 6694), True, 'import numpy as np\n'), ((6804, 6817), 'matplotlib.pyplot.figure', 'plt.figure', (['(8)'], {}), '(8)\n', (6814, 6817), True, 'from matplotlib import pyplot as plt\n'), ((8118, 8131), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (8128, 
8131), True, 'from matplotlib import pyplot as plt\n'), ((8237, 8274), 'matplotlib.pyplot.title', 'plt.title', (['"""y_apnn_in_single_episode"""'], {}), "('y_apnn_in_single_episode')\n", (8246, 8274), True, 'from matplotlib import pyplot as plt\n'), ((8279, 8300), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (8291, 8300), True, 'from matplotlib import pyplot as plt\n'), ((8305, 8324), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (8315, 8324), True, 'from matplotlib import pyplot as plt\n'), ((8329, 8353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""apnn input"""'], {}), "('apnn input')\n", (8339, 8353), True, 'from matplotlib import pyplot as plt\n'), ((8362, 8375), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (8372, 8375), True, 'from matplotlib import pyplot as plt\n'), ((8481, 8519), 'matplotlib.pyplot.title', 'plt.title', (['"""y_apnn_out_single_episode"""'], {}), "('y_apnn_out_single_episode')\n", (8490, 8519), True, 'from matplotlib import pyplot as plt\n'), ((8524, 8545), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (8536, 8545), True, 'from matplotlib import pyplot as plt\n'), ((8550, 8569), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (8560, 8569), True, 'from matplotlib import pyplot as plt\n'), ((8574, 8599), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""apnn output"""'], {}), "('apnn output')\n", (8584, 8599), True, 'from matplotlib import pyplot as plt\n'), ((8657, 8670), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (8667, 8670), True, 'from matplotlib import pyplot as plt\n'), ((8775, 8812), 'matplotlib.pyplot.title', 'plt.title', (['"""y_dqn_out_single_episode"""'], {}), "('y_dqn_out_single_episode')\n", (8784, 8812), True, 'from matplotlib import pyplot as plt\n'), ((8817, 8838), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (8829, 
8838), True, 'from matplotlib import pyplot as plt\n'), ((8843, 8862), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (8853, 8862), True, 'from matplotlib import pyplot as plt\n'), ((8867, 8891), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dqn output"""'], {}), "('dqn output')\n", (8877, 8891), True, 'from matplotlib import pyplot as plt\n'), ((8900, 8914), 'matplotlib.pyplot.figure', 'plt.figure', (['(20)'], {}), '(20)\n', (8910, 8914), True, 'from matplotlib import pyplot as plt\n'), ((9026, 9069), 'matplotlib.pyplot.title', 'plt.title', (['"""conv_layers_out_single_episode"""'], {}), "('conv_layers_out_single_episode')\n", (9035, 9069), True, 'from matplotlib import pyplot as plt\n'), ((9074, 9095), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (9086, 9095), True, 'from matplotlib import pyplot as plt\n'), ((9100, 9119), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (9110, 9119), True, 'from matplotlib import pyplot as plt\n'), ((9124, 9148), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dqn output"""'], {}), "('dqn output')\n", (9134, 9148), True, 'from matplotlib import pyplot as plt\n'), ((9157, 9170), 'matplotlib.pyplot.figure', 'plt.figure', (['(7)'], {}), '(7)\n', (9167, 9170), True, 'from matplotlib import pyplot as plt\n'), ((9313, 9338), 'matplotlib.pyplot.title', 'plt.title', (['"""taken action"""'], {}), "('taken action')\n", (9322, 9338), True, 'from matplotlib import pyplot as plt\n'), ((9343, 9364), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplot'], {}), '(imgplot)\n', (9355, 9364), True, 'from matplotlib import pyplot as plt\n'), ((9369, 9388), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (9379, 9388), True, 'from matplotlib import pyplot as plt\n'), ((9670, 9680), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9678, 9680), True, 'from matplotlib import pyplot as plt\n'), ((1472, 1503), 
'numpy.concatenate', 'np.concatenate', (['(big_matrix, y)'], {}), '((big_matrix, y))\n', (1486, 1503), True, 'import numpy as np\n'), ((7121, 7139), 'matplotlib.pyplot.figure', 'plt.figure', (['(10 + i)'], {}), '(10 + i)\n', (7131, 7139), True, 'from matplotlib import pyplot as plt\n'), ((7298, 7321), 'matplotlib.pyplot.title', 'plt.title', (['"""y_apnn_out"""'], {}), "('y_apnn_out')\n", (7307, 7321), True, 'from matplotlib import pyplot as plt\n'), ((7330, 7365), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""houndreds of epsiodes"""'], {}), "('houndreds of epsiodes')\n", (7340, 7365), True, 'from matplotlib import pyplot as plt\n'), ((9458, 9476), 'matplotlib.pyplot.figure', 'plt.figure', (['(14 + i)'], {}), '(14 + i)\n', (9468, 9476), True, 'from matplotlib import pyplot as plt\n'), ((9554, 9584), 'matplotlib.pyplot.title', 'plt.title', (['"""elgibility traces"""'], {}), "('elgibility traces')\n", (9563, 9584), True, 'from matplotlib import pyplot as plt\n'), ((9593, 9612), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (9603, 9612), True, 'from matplotlib import pyplot as plt\n'), ((637, 666), 'numpy.load', 'np.load', (["(DIR + '/' + filename)"], {}), "(DIR + '/' + filename)\n", (644, 666), True, 'import numpy as np\n'), ((7171, 7217), 'numpy.transpose', 'np.transpose', (['big_matrix_apnn_weights[i, :, :]'], {}), '(big_matrix_apnn_weights[i, :, :])\n', (7183, 7217), True, 'import numpy as np\n'), ((7250, 7290), 'numpy.transpose', 'np.transpose', (['big_matrix_reward[0, :, :]'], {}), '(big_matrix_reward[0, :, :])\n', (7262, 7290), True, 'import numpy as np\n'), ((9499, 9546), 'numpy.transpose', 'np.transpose', (['single_elgibility_traces[i, :, :]'], {}), '(single_elgibility_traces[i, :, :])\n', (9511, 9546), True, 'import numpy as np\n'), ((318, 333), 'os.listdir', 'os.listdir', (['DIR'], {}), '(DIR)\n', (328, 333), False, 'import os\n'), ((2566, 2581), 'os.listdir', 'os.listdir', (['DIR'], {}), '(DIR)\n', (2576, 2581), False, 
'import os\n'), ((352, 375), 'os.path.join', 'os.path.join', (['DIR', 'name'], {}), '(DIR, name)\n', (364, 375), False, 'import os\n'), ((2600, 2623), 'os.path.join', 'os.path.join', (['DIR', 'name'], {}), '(DIR, name)\n', (2612, 2623), False, 'import os\n')] |
#!/usr/bin/env python
# integeral.py
import numpy as num
def integral(x,y):
    """Integrate tabulated data using Simpson's rule with 3-point
    Lagrangian interpolation of the panel midpoints.

    Works for regularly or irregularly sampled abscissae.

    Parameters
    ----------
    x : numpy array
        Abscissae; elements must be unique and monotonically increasing.
    y : numpy array
        Ordinates evaluated at ``x``.

    Returns
    -------
    float
        Approximation of the integral of y dx over [x[0], x[-1]].

    Example
    -------
    X = [0.0, .12, .22, .32, .36, .40, .44, .54, .64, .70, .80]
    F = [0.200000, 1.30973, 1.30524, 1.74339, 2.07490, 2.45600,
         2.84299, 3.50730, 3.18194, 2.36302, 0.231964]
    integral(X, F) yields ~1.6274 (exact solution is 1.6405).
    """
    npts = x.size
    # Vectorised pass over the first npts-2 panels: for panel
    # [x_i, x_{i+1}] interpolate y at the midpoint from the three
    # surrounding samples, then apply Simpson's (1 4 1)/6 weights.
    xa, xb, xc = x[0:npts - 2], x[1:npts - 1], x[2:npts]
    ya, yb, yc = y[0:npts - 2], y[1:npts - 1], y[2:npts]
    width = xb - xa
    mid = 0.5 * (xb + xa)
    # 3-point Lagrange basis polynomials evaluated at the midpoints.
    la = ((mid - xb) / (xa - xb)) * ((mid - xc) / (xa - xc))
    lb = ((mid - xa) / (xb - xa)) * ((mid - xc) / (xb - xc))
    lc = ((mid - xa) / (xc - xa)) * ((mid - xb) / (xc - xb))
    y_mid = ya * la + yb * lb + yc * lc
    total = (width * (ya + 4.0 * y_mid + yb) / 6.0).sum()
    # The final panel [x[n-2], x[n-1]] has no sample beyond its right
    # edge, so interpolate its midpoint from the LAST three samples.
    xa, xb, xc = x[npts - 3], x[npts - 2], x[npts - 1]
    ya, yb, yc = y[npts - 3], y[npts - 2], y[npts - 1]
    width = xc - xb
    mid = 0.5 * (xc + xb)
    la = ((mid - xb) / (xa - xb)) * ((mid - xc) / (xa - xc))
    lb = ((mid - xa) / (xb - xa)) * ((mid - xc) / (xb - xc))
    lc = ((mid - xa) / (xc - xa)) * ((mid - xb) / (xc - xb))
    y_mid = ya * la + yb * lb + yc * lc
    total = total + width * (yb + 4.0 * y_mid + yc) / 6.0
    return total
if __name__ == '__main__':
    # Self-test: print the routine's documentation, then integrate the
    # tabulated example quintic from the docstring (expected ~1.6274;
    # the exact value of the underlying polynomial integral is 1.6405).
    print(integral.__doc__)
    X = num.array((0.0, .12, .22, .32, .36, .40, .44, .54, .64, .70, .80))
    Y = num.array((0.200000, 1.30973, 1.30524, 1.74339, 2.07490, 2.45600,
                   2.84299, 3.50730, 3.18194, 2.36302, 0.231964))
    i = integral(X,Y)
    print(i)
| [
"numpy.array"
] | [((2625, 2698), 'numpy.array', 'num.array', (['(0.0, 0.12, 0.22, 0.32, 0.36, 0.4, 0.44, 0.54, 0.64, 0.7, 0.8)'], {}), '((0.0, 0.12, 0.22, 0.32, 0.36, 0.4, 0.44, 0.54, 0.64, 0.7, 0.8))\n', (2634, 2698), True, 'import numpy as num\n'), ((2698, 2806), 'numpy.array', 'num.array', (['(0.2, 1.30973, 1.30524, 1.74339, 2.0749, 2.456, 2.84299, 3.5073, 3.18194, \n 2.36302, 0.231964)'], {}), '((0.2, 1.30973, 1.30524, 1.74339, 2.0749, 2.456, 2.84299, 3.5073, \n 3.18194, 2.36302, 0.231964))\n', (2707, 2806), True, 'import numpy as num\n')] |
import numpy as np
import pandas as pd
import os
class polar_h10_running_wrangler:
    '''
    Wrangles a running session exported from a Polar H10 as CSV.

    Note that it considers anything slower than 60 min/mi as still.
    (NOTE(review): ``wrangle_data_df`` actually defaults its threshold
    to 75 min/mi -- confirm which value is intended.)
    '''
    def __init__(self, filepath):
        # filepath: raw Polar CSV export. Row 0 holds session metadata;
        # the per-sample records start at row 3 (header=2 below).
        self.filepath = filepath
        self.meta_df = self.wrangle_meta_df()
        self.data_df = self.wrangle_data_df()
    def wrangle_meta_df(self):
        """
        Extracts and wrangles the session metadata (first data row of
        the file) into a one-row DataFrame with parsed datetime,
        timedelta, and title-cased text columns.
        """
        # Only the first row is metadata; drop columns empty there.
        meta_df = pd.read_csv(self.filepath)[:1]
        meta_df.dropna(axis=1, inplace=True)
        meta_df['Date'] = pd.to_datetime(meta_df['Date'], format='%d-%m-%Y')
        meta_df['Start time'] = pd.to_datetime(
            meta_df['Start time'], infer_datetime_format=True)
        meta_df['Duration'] = pd.to_timedelta(meta_df['Duration'])
        # 'Start time' already encodes the date, so 'Date' is redundant.
        meta_df.drop(columns=['Date'], inplace=True)
        renaming_dict = {'Start time': 'Start Datetime'}
        meta_df.rename(columns=renaming_dict, inplace=True)
        # Normalize capitalization of the sport and athlete name.
        meta_df.loc[0, 'Sport'] = meta_df.loc[0, 'Sport'].title()
        meta_df.loc[0, 'Name'] = meta_df.loc[0, 'Name'].title()
        return meta_df
    def wrangle_data_df(self, pace_threshold=75):
        '''
        Extracts and wrangles the per-sample session data.

        Parameters
        ----------
        pace_threshold : numeric, optional
            Pace (min/mi) above which a sample counts as "still";
            such rows are zeroed out.
        '''
        # Per-sample records start on the third line of the file.
        data_df = pd.read_csv(self.filepath, header=2)
        data_df.dropna(axis=1, inplace=True)
        # Pace is exported as 'MM:SS'; prefix '00:' so it parses as a
        # timedelta, then convert to decimal minutes per mile.
        data_df['Pace (min/mi)'] = '00:' + data_df['Pace (min/mi)']
        data_df['Pace (min/mi)'] = pd.to_timedelta(
            data_df['Pace (min/mi)']
        ).dt.total_seconds() / 60
        data_df['Pace (min/mi)'] = np.round(
            data_df['Pace (min/mi)'], decimals=1
        )
        # NOTE(review): this zeroes the WHOLE row (all columns) of every
        # sample slower than the threshold -- confirm that is intended.
        data_df[data_df['Pace (min/mi)'] > pace_threshold] = 0
        # Turn the relative 'Time' column into absolute timestamps by
        # offsetting each sample from the session start datetime.
        data = np.full(shape=data_df.index.shape,
                fill_value=self.get_start_datetime())
        start_datetime_series = pd.Series(data=data, index=data_df.index)
        data_df['Time'] = pd.to_timedelta(
            data_df['Time']) + start_datetime_series
        data_df.set_index('Time', inplace=True)
        return data_df
    def get_activity(self):
        # Lower-cased sport name taken from the metadata row.
        activity = self.meta_df.loc[0, 'Sport'].lower()
        return activity
    def get_name(self):
        # Athlete name with spaces replaced by underscores, lower case,
        # suitable for building file names.
        name = self.meta_df.loc[0, 'Name'].replace(' ', '_').lower()
        return name
    def get_start_datetime(self):
        # Session start timestamp parsed by wrangle_meta_df.
        start_datetime = self.meta_df.loc[0, 'Start Datetime']
        return start_datetime
    def save_wrangled_data(self):
        '''
        Saves the session data. Filename format is:
        <date>_<start_time>_<activity>_<name>.csv
        written into ../data/wrangled_data relative to the CWD.
        '''
        start_dt_str = self.get_start_datetime().strftime('%Y-%m-%d_%H:%M')
        activity = self.get_activity()
        name = self.get_name()
        save_filename = '{}_{}_{}.csv'.format(start_dt_str, activity, name)
        filepath = os.path.join('..', 'data', 'wrangled_data', save_filename)
        self.data_df.to_csv(filepath)
| [
"pandas.Series",
"pandas.to_timedelta",
"pandas.read_csv",
"pandas.to_datetime",
"os.path.join",
"numpy.round"
] | [((579, 629), 'pandas.to_datetime', 'pd.to_datetime', (["meta_df['Date']"], {'format': '"""%d-%m-%Y"""'}), "(meta_df['Date'], format='%d-%m-%Y')\n", (593, 629), True, 'import pandas as pd\n'), ((662, 727), 'pandas.to_datetime', 'pd.to_datetime', (["meta_df['Start time']"], {'infer_datetime_format': '(True)'}), "(meta_df['Start time'], infer_datetime_format=True)\n", (676, 727), True, 'import pandas as pd\n'), ((771, 807), 'pandas.to_timedelta', 'pd.to_timedelta', (["meta_df['Duration']"], {}), "(meta_df['Duration'])\n", (786, 807), True, 'import pandas as pd\n'), ((1275, 1311), 'pandas.read_csv', 'pd.read_csv', (['self.filepath'], {'header': '(2)'}), '(self.filepath, header=2)\n', (1286, 1311), True, 'import pandas as pd\n'), ((1586, 1632), 'numpy.round', 'np.round', (["data_df['Pace (min/mi)']"], {'decimals': '(1)'}), "(data_df['Pace (min/mi)'], decimals=1)\n", (1594, 1632), True, 'import numpy as np\n'), ((1864, 1905), 'pandas.Series', 'pd.Series', ([], {'data': 'data', 'index': 'data_df.index'}), '(data=data, index=data_df.index)\n', (1873, 1905), True, 'import pandas as pd\n'), ((2835, 2893), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""wrangled_data"""', 'save_filename'], {}), "('..', 'data', 'wrangled_data', save_filename)\n", (2847, 2893), False, 'import os\n'), ((475, 501), 'pandas.read_csv', 'pd.read_csv', (['self.filepath'], {}), '(self.filepath)\n', (486, 501), True, 'import pandas as pd\n'), ((1933, 1965), 'pandas.to_timedelta', 'pd.to_timedelta', (["data_df['Time']"], {}), "(data_df['Time'])\n", (1948, 1965), True, 'import pandas as pd\n'), ((1462, 1503), 'pandas.to_timedelta', 'pd.to_timedelta', (["data_df['Pace (min/mi)']"], {}), "(data_df['Pace (min/mi)'])\n", (1477, 1503), True, 'import pandas as pd\n')] |
""" keywords
Code to check convergence.
P.g_convergence = uod.get('G_CONVERGENCE', 'QCHEM')
P.max_force_g_convergence = uod.get('MAX_FORCE_G_CONVERGENCE', 3.0e-4)
P.rms_force_g_convergence = uod.get('RMS_FORCE_G_CONVERGENCE', 3.0e-4)
P.max_energy_g_convergence = uod.get('MAX_ENERGY_G_CONVERGENCE', 1.0e-6)
P.max_disp_g_convergence = uod.get('MAX_DISP_G_CONVERGENCE', 1.2e-3)
P.rms_disp_g_convergence = uod.get('RMS_DISP_G_CONVERGENCE', 1.2e-3)
P.flexible_g_convergence = uod.get('FLEXIBLE_G_CONVERGENCE', False)
"""
from .printTools import print_opt, printArray, printMat
import numpy as np
from . import optParams as op
from math import fabs
from .linearAlgebra import absMax, rms
from .intcosMisc import Gmat, Bmat, qValues
# Check convergence criteria and print status to output file.
# return True, if geometry is optimized
# By default, checks maximum force and (Delta(E) or maximum disp)
def convCheck(iterNum, Molsys, dq, f, energies, qPivot=None, masses=None):
    """Check convergence criteria and print the status table.

    By default convergence requires the maximum force plus either the
    energy change or the maximum displacement to be met; the precise
    rule is selected by ``op.Params.g_convergence``.

    Parameters
    ----------
    iterNum : int
        Zero-based optimization step counter (reported as iterNum + 1).
    Molsys : molecular system object
        Supplies internal coordinates (``Molsys.intcos``) and geometry.
    dq : ndarray
        Step displacements in internal coordinates.
    f : ndarray
        Forces in internal coordinates.  Temporarily modified in place
        (fixed coordinates zeroed; IRC hypersphere projection applied)
        and restored before returning.
    energies : list of float
        Energy history; Delta(E) is taken from the last two entries.
    qPivot : ndarray, optional
        Pivot point of the IRC hypersphere (IRC optimizations only).
    masses : ndarray, optional
        Atomic masses for mass-weighting (IRC optimizations only).

    Returns
    -------
    bool
        True if the geometry is optimized.
    """
    max_disp = absMax(dq)
    rms_disp = rms(dq)
    Nintco = len(Molsys.intcos)
    has_fixed = any(ints.fixedEqVal for ints in Molsys.intcos)
    energy = energies[-1]
    last_energy = energies[-2] if len(energies) > 1 else 0.0

    # Save original forces; they are put back before returning.
    if op.Params.opt_type == 'IRC' or has_fixed:
        f_backup = np.copy(f)
    DE = energy - last_energy

    # Remove arbitrary forces for user-specified equilibrium values.
    if has_fixed:
        print_opt(
            "\tForces used to impose fixed constraints are not included in convergence check.\n"
        )
        for i, ints in enumerate(Molsys.intcos):
            if ints.fixedEqVal:
                f[i] = 0

    if op.Params.opt_type == 'IRC':
        G = Gmat(Molsys.intcos, Molsys.geom, masses)
        B = Bmat(Molsys.intcos, Molsys.geom, masses)
        Ginv = np.linalg.inv(G)
        # p is the displacement from the pivot point on the
        # mass-weighted hypersphere.
        q_pivot = qPivot
        x = Molsys.geom
        # NOTE: the following prints are debug output to stdout, not
        # routed through print_opt.
        print("B matrix")
        print(B)
        print("geom")
        print(x)
        q = qValues(Molsys.intcos, Molsys.geom)
        print("q")
        print(q)
        print("q-pivot")
        print(q_pivot)
        p = np.subtract(q, q_pivot)
        # Project out the gradient component along p; what remains is
        # tangent to the hypersphere:
        #   g' = g - (g^t p / (p^t G^-1 p)) G^-1 p
        # BUGFIX: Ginv_p was previously recomputed Nintco times inside a
        # loop whose index was never used; compute it once.
        Ginv_p = np.dot(Ginv, p)
        overlap = np.dot(f, p) / np.dot(p, Ginv_p)
        f -= overlap * Ginv_p
        if op.Params.print_lvl > 1:
            print_opt("Forces perpendicular to hypersphere.\n")
            printArray(f)

    # Compute force measures after projection and removal above.
    max_force = absMax(f)
    rms_force = rms(f)

    # Standard (non-IRC) convergence report table.
    if op.Params.opt_type != 'IRC':
        print_opt("\n  ==> Convergence Check <==\n\n")
        print_opt("  Measures of convergence in internal coordinates in au.\n")
        print_opt(
            "  Criteria marked as inactive (o), active & met (*), and active & unmet ( ).\n"
        )
        print_opt(
            "  ---------------------------------------------------------------------------------------------"
        )
        if iterNum == 0: print_opt("  ~\n")
        else: print_opt("\n")
        print_opt(
            "   Step     Total Energy     Delta E     MAX Force     RMS Force      MAX Disp      RMS Disp   "
        )
        if iterNum == 0: print_opt("  ~\n")
        else: print_opt("\n")
        print_opt(
            "  ---------------------------------------------------------------------------------------------"
        )
        if iterNum == 0: print_opt("  ~\n")
        else: print_opt("\n")
        print_opt("  Convergence Criteria")
        if op.Params.i_max_DE: print_opt("  %10.2e %1s" % (op.Params.conv_max_DE, "*"))
        else: print_opt("             %1s" % "o")
        if op.Params.i_max_force:
            print_opt("  %10.2e %1s" % (op.Params.conv_max_force, "*"))
        else:
            print_opt("             %1s" % "o")
        if op.Params.i_rms_force:
            print_opt("  %10.2e %1s" % (op.Params.conv_rms_force, "*"))
        else:
            print_opt("             %1s" % "o")
        if op.Params.i_max_disp:
            print_opt("  %10.2e %1s" % (op.Params.conv_max_disp, "*"))
        else:
            print_opt("             %1s" % "o")
        if op.Params.i_rms_disp:
            print_opt("  %10.2e %1s" % (op.Params.conv_rms_disp, "*"))
        else:
            print_opt("             %1s" % "o")
        if iterNum == 0: print_opt("  ~\n")
        else: print_opt("\n")
        print_opt(
            "  ---------------------------------------------------------------------------------------------"
        )
        if iterNum == 0: print_opt("  ~\n")
        else: print_opt("\n")
        print_opt(
            "   %4d %16.8f  %10.2e %1s  %10.2e %1s  %10.2e %1s  %10.2e %1s  %10.2e %1s  ~\n"
            % (iterNum + 1, energy, DE, ('*' if fabs(DE) < op.Params.conv_max_DE else "")
               if op.Params.i_max_DE else 'o', max_force,
               ('*' if fabs(max_force) < op.Params.conv_max_force else "")
               if op.Params.i_max_force else 'o', rms_force,
               ('*' if fabs(rms_force) < op.Params.conv_rms_force else "")
               if op.Params.i_rms_force else 'o', max_disp,
               ('*' if fabs(max_disp) < op.Params.conv_max_disp else "")
               if op.Params.i_max_disp else 'o', rms_disp,
               ('*' if fabs(rms_disp) < op.Params.conv_rms_disp else "")
               if op.Params.i_rms_disp else 'o'))
        print_opt(
            "  ---------------------------------------------------------------------------------------------\n\n"
        )

    # Return forces to what they were when conv_check was called.
    if op.Params.opt_type == 'IRC' or has_fixed:
        f[:] = f_backup

    # The requirement of i_untampered means that if a user explicitly adds any of the
    # 5 indiv. criteria on top of G_CONVERGENCE, it is required to be met.
    # Forces and either energy change or displacement met -> convergence!
    if op.Params.i_untampered and \
       ( op.Params.g_convergence == 'QCHEM' or op.Params.g_convergence == "MOLPRO" ) and \
       max_force < op.Params.conv_max_force and \
       ( fabs(DE) < op.Params.conv_max_DE or max_disp < op.Params.conv_max_disp):
        return True

    # Max/rms forces/disp met, or flat-potential forces met -> convergence!
    if op.Params.i_untampered and \
        ( op.Params.g_convergence == "GAU" or op.Params.g_convergence == "GAU_TIGHT" or
          op.Params.g_convergence == 'GAU_VERYTIGHT' or op.Params.g_convergence == 'GAU_LOOSE') and \
        ((max_force < op.Params.conv_max_force and
          rms_force < op.Params.conv_rms_force and
          max_disp  < op.Params.conv_max_disp and
          rms_disp  < op.Params.conv_rms_disp) or
          100 * rms_force < op.Params.conv_rms_force):
        return True

    # Every criterion either inactive or met -> convergence!
    if (not op.Params.i_max_DE or fabs(DE) < op.Params.conv_max_DE) and \
       (not op.Params.i_max_force or max_force < op.Params.conv_max_force) and \
       (not op.Params.i_rms_force or rms_force < op.Params.conv_rms_force) and \
       (not op.Params.i_max_disp or max_disp < op.Params.conv_max_disp) and \
       (not op.Params.i_rms_disp or rms_disp < op.Params.conv_rms_disp) :
        return True

    # IRC steps only require the energy/displacement criteria.
    if op.Params.opt_type == 'IRC' and \
       (not op.Params.i_max_DE or fabs(DE) < op.Params.conv_max_DE) and \
       (not op.Params.i_max_disp or max_disp < op.Params.conv_max_disp) and \
       (not op.Params.i_rms_disp or rms_disp < op.Params.conv_rms_disp):
        return True

    return False
| [
"numpy.copy",
"numpy.subtract",
"numpy.dot",
"numpy.linalg.inv",
"math.fabs"
] | [((1407, 1417), 'numpy.copy', 'np.copy', (['f'], {}), '(f)\n', (1414, 1417), True, 'import numpy as np\n'), ((1927, 1943), 'numpy.linalg.inv', 'np.linalg.inv', (['G'], {}), '(G)\n', (1940, 1943), True, 'import numpy as np\n'), ((2355, 2378), 'numpy.subtract', 'np.subtract', (['q', 'q_pivot'], {}), '(q, q_pivot)\n', (2366, 2378), True, 'import numpy as np\n'), ((2652, 2667), 'numpy.dot', 'np.dot', (['Ginv', 'p'], {}), '(Ginv, p)\n', (2658, 2667), True, 'import numpy as np\n'), ((2687, 2699), 'numpy.dot', 'np.dot', (['f', 'p'], {}), '(f, p)\n', (2693, 2699), True, 'import numpy as np\n'), ((2702, 2719), 'numpy.dot', 'np.dot', (['p', 'Ginv_p'], {}), '(p, Ginv_p)\n', (2708, 2719), True, 'import numpy as np\n'), ((6588, 6596), 'math.fabs', 'fabs', (['DE'], {}), '(DE)\n', (6592, 6596), False, 'from math import fabs\n'), ((7387, 7395), 'math.fabs', 'fabs', (['DE'], {}), '(DE)\n', (7391, 7395), False, 'from math import fabs\n'), ((7844, 7852), 'math.fabs', 'fabs', (['DE'], {}), '(DE)\n', (7848, 7852), False, 'from math import fabs\n'), ((5255, 5263), 'math.fabs', 'fabs', (['DE'], {}), '(DE)\n', (5259, 5263), False, 'from math import fabs\n'), ((5378, 5393), 'math.fabs', 'fabs', (['max_force'], {}), '(max_force)\n', (5382, 5393), False, 'from math import fabs\n'), ((5514, 5529), 'math.fabs', 'fabs', (['rms_force'], {}), '(rms_force)\n', (5518, 5529), False, 'from math import fabs\n'), ((5649, 5663), 'math.fabs', 'fabs', (['max_disp'], {}), '(max_disp)\n', (5653, 5663), False, 'from math import fabs\n'), ((5781, 5795), 'math.fabs', 'fabs', (['rms_disp'], {}), '(rms_disp)\n', (5785, 5795), False, 'from math import fabs\n')] |
import numpy as np
from aoc.challenge_base import ChallengeBase
class Challenge(ChallengeBase):
    """
    Day 18 challenges: lumber-collection cellular automaton.
    """

    def parse_input(self):
        """
        Parse the puzzle input into a padded 52x52 acre map.
        """
        # Cell encoding: 1 open ground, 2 trees, 3 lumberyard;
        # a 1-cell zero border lets every cell's 3x3 neighbourhood be
        # sliced without bounds checks.
        codes = {".": 1, "|": 2, "#": 3}
        self.acres = np.zeros((52, 52), dtype=np.int8)
        for row, text in enumerate(self.lines):
            for col, ch in enumerate(text.strip()):
                self.acres[row + 1, col + 1] = codes[ch]

    def run_time_step(self):
        """
        Advance the map by one minute; all cells update simultaneously
        from the previous state.
        """
        updated = np.copy(self.acres)
        height, width = self.acres.shape
        for y in range(height - 2):
            for x in range(width - 2):
                window = self.acres[y:y + 3, x:x + 3]
                cell = self.acres[y + 1, x + 1]
                # The counts include the centre cell itself; the
                # thresholds below account for that, matching the
                # puzzle's adjacency rules.
                trees = np.count_nonzero(window == 2)
                yards = np.count_nonzero(window == 3)
                if cell == 1:
                    # Open ground grows trees next to >= 3 trees.
                    if trees >= 3:
                        updated[y + 1, x + 1] = 2
                elif cell == 2:
                    # Trees become a lumberyard next to >= 3 lumberyards.
                    if yards >= 3:
                        updated[y + 1, x + 1] = 3
                else:
                    # Lumberyard reverts to open unless adjacent to at
                    # least one other lumberyard (centre counts one of
                    # the yards, hence <= 1) and at least one tree.
                    if trees == 0 or yards <= 1:
                        updated[y + 1, x + 1] = 1
        self.acres = updated

    def challenge1(self):
        """
        Day 18 challenge 1
        """
        self.parse_input()
        # Evolve for ten minutes, then score wooded acres * lumberyards.
        for _ in range(10):
            self.run_time_step()
        wooded = np.count_nonzero(self.acres == 2)
        yards = np.count_nonzero(self.acres == 3)
        resource_value = wooded * yards
        print(f"Final resource value: {resource_value}")

    def challenge2(self):
        """
        Day 18 challenge 2
        """
        self.parse_input()
        # Dump the resource value at every minute so the repeating cycle
        # can be identified offline.
        with open("out.txt", "wt") as f:
            for minute in range(1000):
                self.run_time_step()
                wooded = np.count_nonzero(self.acres == 2)
                yards = np.count_nonzero(self.acres == 3)
                print(f"{minute},{wooded * yards}", file=f)
        # The dumped series settles into a repeating pattern:
        #   970,205900  971,208080  972,207000  973,202842 ...
        #   996,202650  997,205545  then back to the start,
        # i.e. a cycle of period 28 with known values.
        # (1000000000 - 970) % 28 == 2, so the answer is 208080.
        print("Resource value after 1000000000 time steps: 208080")
| [
"numpy.count_nonzero",
"numpy.copy",
"numpy.zeros"
] | [((333, 366), 'numpy.zeros', 'np.zeros', (['(52, 52)'], {'dtype': 'np.int8'}), '((52, 52), dtype=np.int8)\n', (341, 366), True, 'import numpy as np\n'), ((745, 764), 'numpy.copy', 'np.copy', (['self.acres'], {}), '(self.acres)\n', (752, 764), True, 'import numpy as np\n'), ((2038, 2071), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.acres == 2)'], {}), '(self.acres == 2)\n', (2054, 2071), True, 'import numpy as np\n'), ((2074, 2107), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.acres == 3)'], {}), '(self.acres == 3)\n', (2090, 2107), True, 'import numpy as np\n'), ((2484, 2517), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.acres == 2)'], {}), '(self.acres == 2)\n', (2500, 2517), True, 'import numpy as np\n'), ((2520, 2553), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.acres == 3)'], {}), '(self.acres == 3)\n', (2536, 2553), True, 'import numpy as np\n'), ((1111, 1145), 'numpy.count_nonzero', 'np.count_nonzero', (['(local_patch == 2)'], {}), '(local_patch == 2)\n', (1127, 1145), True, 'import numpy as np\n'), ((1341, 1375), 'numpy.count_nonzero', 'np.count_nonzero', (['(local_patch == 3)'], {}), '(local_patch == 3)\n', (1357, 1375), True, 'import numpy as np\n'), ((1571, 1605), 'numpy.count_nonzero', 'np.count_nonzero', (['(local_patch == 2)'], {}), '(local_patch == 2)\n', (1587, 1605), True, 'import numpy as np\n'), ((1614, 1648), 'numpy.count_nonzero', 'np.count_nonzero', (['(local_patch == 3)'], {}), '(local_patch == 3)\n', (1630, 1648), True, 'import numpy as np\n')] |
#from POPS_lib.fileIO import read_Calibration_fromFile,read_Calibration_fromString,save_Calibration
#import fileIO
from scipy.interpolate import UnivariateSpline
import numpy as np
import pylab as plt
from io import StringIO as io
import pandas as pd
import warnings
#read_fromFile = fileIO.read_Calibration_fromFile
#read_fromString = fileIO.read_Calibration_fromString
def _msg(txt, save, out_file, verbose):
    """Report *txt* to stdout and/or append it as a line to *out_file*.

    Parameters
    ----------
    txt : object
        Message to report; stringified before being written to file.
    save : bool
        When truthy, write ``str(txt)`` followed by a newline to
        ``out_file``.
    out_file : file-like
        Open writable handle; only used when ``save`` is truthy.
    verbose : bool
        When truthy, echo ``txt`` to stdout.
    """
    if verbose:
        print(txt)
    if not save:
        return
    out_file.write('{}\n'.format(str(txt)))
def get_interface_bins(fname, n_bins, imin=1.4, imax=4.8, save=False, verbose = True):
    """Prints the bins associated with what is seen on the POPS user interface and the serial output, respectively.
    Parameters
    ----------
    fname: string or calibration instance
        name of file containing a calibration or a calibration instance it self
    n_bins: int
        number of bins
    imin: float [1.4], optional
        log10 of the minimum value considered (digitizer bins)
    imax: float [4.8], optional
        log10 of the maximum value considered (digitizer bins)
    save: bool or string, optional
        If a string, the report is also written to the file it names.
    verbose: bool, optional
        If True, print the report and draw the bin-center plot.

    Returns
    -------
    dict with keys:
        'axes': matplotlib axes instance (None when verbose is False)
        'bincenters_v_int': pandas DataFrame of bin centers (nm) indexed
            by the log-midpoint digitizer-bin values
        'binedges_v_int': pandas DataFrame of bin edges (nm) indexed by
            the digitizer-bin edge values
    """
    if isinstance(fname, str):
        cal = read_csv(fname)
    else:
        cal = fname
    # Bins are spaced uniformly in log10(digitizer bins); centers are
    # computed both as log-midpoints and as linear midpoints.
    bin_ed = np.linspace(imin, imax, n_bins + 1)
    bin_center_log = 10 ** ((bin_ed[:-1] + bin_ed[1:]) / 2.)
    bin_center_lin = ((10 ** bin_ed[:-1] + 10 ** bin_ed[1:]) / 2.)
    bin_ed = 10 ** bin_ed
    # Map amplitudes (digitizer bins) to particle diameters (nm).
    bin_ed_cal = cal.calibrationFunction(bin_ed)
    bin_center_lin_cal = cal.calibrationFunction(bin_center_lin)
    bin_center_log_cal = cal.calibrationFunction(bin_center_log)
    # save doubles as the output file name when it is a string.
    if save:
        save_file = open(save, 'w')
    else:
        save_file = False
    txt = '''
bin edges (digitizer bins)
--------------------------'''
    _msg(txt, save, save_file, verbose)
    for e, i in enumerate(bin_ed):
        _msg(i, save, save_file, verbose)
    # bin_center_cal = cal.calibrationFunction(bin_center)
    txt = '''
bin centers (digitizer bins)
----------------------------'''
    _msg(txt, save, save_file, verbose)
    for e, i in enumerate(bin_center_lin):
        _msg(i, save, save_file, verbose)
    txt = '''
bin centers of logarithms (digitizer bins)
----------------------------'''
    _msg(txt, save, save_file, verbose)
    for e, i in enumerate(bin_center_log):
        _msg(i, save, save_file, verbose)
    txt = '''
bin edges (nm)
--------------'''
    _msg(txt, save, save_file, verbose)
    for e, i in enumerate(bin_ed_cal):
        _msg(i, save, save_file, verbose)
    # bin_center_cal = cal.calibrationFunction(bin_center)
    txt = '''
bin centers (nm)
----------------'''
    _msg(txt, save, save_file, verbose)
    for e, i in enumerate(bin_center_lin_cal):
        _msg(i, save, save_file, verbose)
    txt = '''
bin centers of logarithms (nm)
----------------'''
    _msg(txt, save, save_file, verbose)
    for e, i in enumerate(bin_center_log_cal):
        _msg(i, save, save_file, verbose)
    out = {}
    df_bin_c = pd.DataFrame(bin_center_lin_cal, index=bin_center_log, columns=['Bin_centers'])
    df_bin_e = pd.DataFrame(bin_ed_cal, index = bin_ed, columns = ['Bin_edges'])
    # a = df.Bin_centers.plot()
    if verbose:
        # Plot the bin centers (first and last bins excluded) against
        # the POPS bin number (starting at 2).
        f, a = plt.subplots()
        d = df_bin_c.Bin_centers.values[1:-1]
        g, = a.plot(np.arange(len(d)) + 2, d)
        g.set_linestyle('')
        g.set_marker('o')
        #         g.set_label('')
        a.set_yscale('log')
        a.set_xlim((1, 16))
        a.set_ylim((100, 3000))
        a.set_ylabel('Bin center (nm)')
        a.grid(which='both')
        a.set_xlabel('POPS bin')
        out['axes'] = a
    else:
        out['axes'] = None
    # a.set_title('Bin')
    out['bincenters_v_int'] = df_bin_c
    out['binedges_v_int'] = df_bin_e
    return out
def _string2Dataframe(data, log=True):
    """Parse a two-column calibration table from a string.

    Parameters
    ----------
    data : str
        Multiline text; each line holds a ``diameter amplitude`` pair
        separated by a space. Diameter in nm; amplitude in digitizer
        bins, or log10(digitizer bins) when ``log`` is True.
    log : bool, optional
        When True (default), amplitudes are de-logged via ``10 ** amp``.

    Returns
    -------
    pandas.DataFrame
        Columns ``d`` and ``amp``, sorted by diameter.
    """
    sb = io(data)
    # BUGFIX: DataFrame.sort() was removed in pandas 0.20;
    # sort_values() is the supported replacement.
    dataFrame = pd.read_csv(sb, sep = ' ', names = ('d','amp')).sort_values('d')
    if log:
        dataFrame.amp = 10 ** dataFrame.amp
    return dataFrame
def read_str(data, log=True):
    '''Build a calibration instance from a table given as text.

    Arguments
    ---------
    data: string.
        Multiline string with one diameter-intensity pair per line,
        separated by a space. Diameter in nm, intensity in digitizer
        bins or log_10(digitizer bins).
    log: bool, optional.
        Set True if the intensity values are given in
        log_10(digitizer bins).

    Returns
    -------
    calibration instance.

    Example
    -------
    data = """140 88
    150 102
    173 175
    200 295
    233 480
    270 740
    315 880
    365 1130
    420 1350
    490 1930
    570 3050
    660 4200
    770 5100
    890 6300
    1040 8000
    1200 8300
    1400 10000
    1600 11500
    1880 16000
    2180 21000
    2500 28000
    3000 37000"""
    read_str(data, log = False)
    '''
    table = _string2Dataframe(data, log=log)
    return calibration(table)
def read_csv(fname):
    """Load a calibration table from a CSV file and wrap it in a
    calibration instance (tables conventionally live in
    ~/data/POPS_calibrations/)."""
    return calibration(pd.read_csv(fname))
def save_Calibration(calibrationInstance, fname):
    """Write the instance's calibration table to *fname* as CSV without
    the index (files conventionally live in ~/data/POPS_calibrations/)."""
    calibrationInstance.data.to_csv(fname, index = False)
class calibration:
def __init__(self,dataTabel):
self.data = dataTabel
self.calibrationFunction = self.get_calibrationFunctionSpline()
def get_interface_bins(self, n_bins, imin=1.4, imax=4.8, save=False, verbose = False):
out = get_interface_bins(self, n_bins, imin=imin, imax=imax, save=save, verbose = verbose)
return out
def save_csv(self,fname):
save_Calibration(self,fname)
return
def get_calibrationFunctionSpline(self, fitOrder = 1):# = 1, noOfPts = 500, plot = False):
"""
Performes a spline fit/smoothening (scipy.interpolate.UnivariateSpline) of d over amp (yes this way not the other way around).
Returns (generates): creates a function self.spline which can later be used to calculate d from amp
Optional Parameters:
\t s: int - oder of the spline function
\t noOfPts: int - length of generated graph
\t plot: boolean - if result is supposed to be plotted
"""
# The following two step method is necessary to get a smooth curve.
#When I only do the second step on the cal_curve I get some wired whiggles
##### First Step
if (self.data.amp.values[1:]-self.data.amp.values[:-1]).min() < 0:
warnings.warn('The data represent a non injective function! This will not work. plot the calibration to see what I meen')
fitOrder = 1
sf = UnivariateSpline(self.data.d.values, self.data.amp.values, s=fitOrder)
d = np.logspace(np.log10(self.data.d.values.min()), np.log10(self.data.d.values.max()), 500)
amp = sf(d)
##### second step
cal_function = UnivariateSpline(amp, d, s=fitOrder)
return cal_function
def plot_calibration(self):
"""Plots the calibration function and data
Arguments
------------
cal: calibration instance
Returns
------------
figure
axes
calibration data graph
calibration function graph
"""
cal_function = self.calibrationFunction
amp = np.logspace(np.log10(self.data.amp.min()), np.log10(self.data.amp.max()), 500)
d = cal_function(amp)
f,a = plt.subplots()
cal_data, = a.plot(self.data.d, self.data.amp, 'o',label = 'data',)
cal_func, = a.plot(d,amp, label = 'function')
a.loglog()
a.set_xlim(0.9*self.data.d.min(), 1.1*self.data.d.max())
a.set_xlabel('Diameter (nm)')#($\mu$m)')
a.set_ylim(0.9*self.data.amp.min(), 1.1*self.data.amp.max())
a.set_ylabel('Amplitude (digitizer bins)')
a.set_title('Calibration curve')
a.legend(loc = 2)
return f,a,cal_data, cal_func | [
"pandas.read_csv",
"numpy.linspace",
"warnings.warn",
"scipy.interpolate.UnivariateSpline",
"pandas.DataFrame",
"pylab.subplots",
"io.StringIO"
] | [((1366, 1401), 'numpy.linspace', 'np.linspace', (['imin', 'imax', '(n_bins + 1)'], {}), '(imin, imax, n_bins + 1)\n', (1377, 1401), True, 'import numpy as np\n'), ((3122, 3201), 'pandas.DataFrame', 'pd.DataFrame', (['bin_center_lin_cal'], {'index': 'bin_center_log', 'columns': "['Bin_centers']"}), "(bin_center_lin_cal, index=bin_center_log, columns=['Bin_centers'])\n", (3134, 3201), True, 'import pandas as pd\n'), ((3217, 3278), 'pandas.DataFrame', 'pd.DataFrame', (['bin_ed_cal'], {'index': 'bin_ed', 'columns': "['Bin_edges']"}), "(bin_ed_cal, index=bin_ed, columns=['Bin_edges'])\n", (3229, 3278), True, 'import pandas as pd\n'), ((3954, 3962), 'io.StringIO', 'io', (['data'], {}), '(data)\n', (3956, 3962), True, 'from io import StringIO as io\n'), ((5072, 5090), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (5083, 5090), True, 'import pandas as pd\n'), ((3347, 3361), 'pylab.subplots', 'plt.subplots', ([], {}), '()\n', (3359, 3361), True, 'import pylab as plt\n'), ((6826, 6896), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['self.data.d.values', 'self.data.amp.values'], {'s': 'fitOrder'}), '(self.data.d.values, self.data.amp.values, s=fitOrder)\n', (6842, 6896), False, 'from scipy.interpolate import UnivariateSpline\n'), ((7072, 7108), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['amp', 'd'], {'s': 'fitOrder'}), '(amp, d, s=fitOrder)\n', (7088, 7108), False, 'from scipy.interpolate import UnivariateSpline\n'), ((7664, 7678), 'pylab.subplots', 'plt.subplots', ([], {}), '()\n', (7676, 7678), True, 'import pylab as plt\n'), ((3979, 4023), 'pandas.read_csv', 'pd.read_csv', (['sb'], {'sep': '""" """', 'names': "('d', 'amp')"}), "(sb, sep=' ', names=('d', 'amp'))\n", (3990, 4023), True, 'import pandas as pd\n'), ((6659, 6790), 'warnings.warn', 'warnings.warn', (['"""The data represent a non injective function! This will not work. 
plot the calibration to see what I meen"""'], {}), "(\n 'The data represent a non injective function! This will not work. plot the calibration to see what I meen'\n )\n", (6672, 6790), False, 'import warnings\n')] |
from __future__ import print_function
import sys
import numpy as np
import os
import tensorflow as tf
import experiments
from data import cifar_dataset
from models import nets
import pickle
import time
from numpy import linalg as LA
################################################################################################
# Read experiment to run
################################################################################################
ID = int(sys.argv[1:][0])
opt = experiments.opt[ID]
# Skip execution if instructed in experiment
if opt.skip:
print("SKIP")
quit()
print('Experiment:', opt.name)
tf.compat.v1.set_random_seed(opt.seed)
if opt.hyper.mse:
metric_name = 'mse'
else:
metric_name = 'accuracy'
################################################################################################
MAX_SAMPLES = 5e4
def create_graph():
################################################################################################
# Define training and validation datasets thorugh Dataset API
################################################################################################
# Initialize dataset and creates TF records if they do not exist
# Initialize dataset and creates TF records if they do not exist
if opt.dataset.dataset_name == 'cifar':
from data import cifar_dataset
dataset = cifar_dataset.Cifar10(opt)
elif opt.dataset.dataset_name == 'rand10':
from data import rand10_dataset
dataset = rand10_dataset.Rand10(opt)
elif opt.dataset.dataset_name == 'rand10000':
from data import rand10000_dataset
dataset = rand10000_dataset.Rand10000(opt)
elif opt.dataset.dataset_name == 'rand10_regression':
from data import rand10_regression_dataset
dataset = rand10_regression_dataset.Rand10_regression(opt)
elif opt.dataset.dataset_name == 'rand10000_regression':
from data import rand10000_regression_dataset
dataset = rand10000_regression_dataset.Rand10000_regression(opt)
# No repeatable dataset for testing
train_dataset_full = dataset.create_dataset(augmentation=False, standarization=True,
set_name='train', repeat=False)
val_dataset_full = dataset.create_dataset(augmentation=False, standarization=True,
set_name='val', repeat=False)
test_dataset_full = dataset.create_dataset(augmentation=False, standarization=True,
set_name='test', repeat=False)
# Hadles to switch datasets
handle = tf.compat.v1.placeholder(tf.string, shape=[])
iterator = tf.compat.v1.data.Iterator.from_string_handle(
handle, val_dataset_full.output_types, val_dataset_full.output_shapes)
train_iterator_full = train_dataset_full.make_initializable_iterator()
val_iterator_full = val_dataset_full.make_initializable_iterator()
test_iterator_full = test_dataset_full.make_initializable_iterator()
################################################################################################
################################################################################################
# DNN
################################################################################################
# Get data from dataset
image, y_ = iterator.get_next()
if opt.dataset.dataset_name == 'cifar':
image = tf.image.resize_images(image, [opt.hyper.image_size, opt.hyper.image_size])
if opt.extense_summary:
tf.summary.image('input', image)
elif opt.dataset.dataset_name == 'rand10' or opt.dataset.dataset_name == 'rand10_regression':
image = tf.compat.v1.reshape(image, [-1, 10])
elif opt.dataset.dataset_name == 'rand10000' or opt.dataset.dataset_name == 'rand10000_regression':
image = tf.compat.v1.reshape(image, [-1, 10000])
# Call DNN
dropout_rate = tf.compat.v1.placeholder(tf.float32)
to_call = getattr(nets, opt.dnn.name)
y, parameters, activations = to_call(image, dropout_rate, opt, dataset.list_labels)
if opt.hyper.mse:
# MSE metric
metric = tf.reduce_mean((y_ - y) ** 2)
else:
# Accuracy metric
im_prediction = tf.equal(tf.argmax(y, 1), y_)
im_prediction = tf.cast(im_prediction, tf.float32)
metric = tf.reduce_mean(im_prediction)
num_iter_train = int(dataset.num_images_training * opt.dataset.proportion_training_set
/ opt.hyper.batch_size) - 1
num_iter_test = int(dataset.num_images_test / opt.hyper.batch_size) - 1
num_iter_val = int(dataset.num_images_val / opt.hyper.batch_size) - 1
return activations, metric, y_, handle, dropout_rate, train_iterator_full, val_iterator_full, \
test_iterator_full, num_iter_train, num_iter_val, num_iter_test
################################################################################################
def get_activations(handle, metric, gt, dropout_rate, activations, opt,
iterator_dataset, handle_dataset, num_iter, cross):
sess.run(iterator_dataset.initializer)
np.random.seed(cross)
activations_out = []
metric_tmp = 0.0
max_samples_per_iter = int(MAX_SAMPLES / num_iter)
for _ in range(num_iter):
activations_tmp, metric_batch, labels_batch = \
sess.run([activations, metric, gt], feed_dict={handle: handle_dataset, dropout_rate: opt.hyper.drop_test})
metric_tmp += metric_batch
labels_tmp = []
for layer in range(len(activations_tmp)):
labels_tmp.append(np.repeat(labels_batch, np.prod(np.shape(activations_tmp[layer])[1:-1])))
activations_tmp[layer] = np.reshape(activations_tmp[layer], (-1, np.shape(activations_tmp[layer])[-1]))
num_samples = np.shape(activations_tmp[layer])[0]
if num_samples > max_samples_per_iter:
idx = np.random.permutation(num_samples)[: max_samples_per_iter]
activations_tmp[layer] = activations_tmp[layer][idx, :]
labels_tmp[layer] = labels_tmp[layer][idx]
if not activations_out:
activations_out = activations_tmp
labels_out = labels_tmp
else:
for layer in range(len(activations_tmp)):
activations_out[layer] = np.append(activations_out[layer], activations_tmp[layer], axis=0)
labels_out[layer] = np.append(labels_out[layer], labels_tmp[layer], axis=0)
metric_out = metric_tmp / num_iter
return activations_out, metric_out, labels_out
t0 = time.time()
# if not os.path.isfile(opt.log_dir_base + opt.name + '/activations0.pkl'):
activations, metric, gt, handle, dropout_rate, train_iterator_full, val_iterator_full, test_iterator_full,\
num_iter_train, num_iter_val, num_iter_test = create_graph()
# results = [[[] for i in range(2)] for i in range(6)]
with tf.Session() as sess:
################################################################################################
# Set up checkpoints and data
################################################################################################
print("RESTORE")
print(opt.log_dir_base + opt.name)
saver = tf.compat.v1.train.Saver(max_to_keep=opt.max_to_keep_checkpoints)
if os.path.isdir(opt.log_dir_base + opt.name + '/models/'):
saver.restore(sess, tf.train.latest_checkpoint(opt.log_dir_base + opt.name + '/models/'))
elif os.path.isdir(opt.log_dir_base + opt.name + '/'):
saver.restore(sess, tf.train.latest_checkpoint(opt.log_dir_base + opt.name + '/'))
else:
print('Can\'t find model save path.')
quit()
################################################################################################
################################################################################################
# RUN TEST
################################################################################################
train_handle_full = sess.run(train_iterator_full.string_handle())
validation_handle_full = sess.run(val_iterator_full.string_handle())
test_handle_full = sess.run(test_iterator_full.string_handle())
for cross in range(3):
print('cross:', cross)
res, met, gt_labels_train = get_activations(handle, metric, gt, dropout_rate, activations, opt,
train_iterator_full, train_handle_full, num_iter_train, cross)
if cross == 0:
print("Train", metric_name+':', met)
with open(opt.log_dir_base + opt.name + '/activations' + str(cross) + '.pkl', 'wb') as f:
pickle.dump(res, f, protocol=2)
with open(opt.log_dir_base + opt.name + '/labels' + str(cross) + '.pkl', 'wb') as f:
pickle.dump(gt_labels_train, f, protocol=2)
with open(opt.log_dir_base + opt.name + '/' + metric_name + str(cross) + '.pkl', 'wb') as f:
pickle.dump(met, f, protocol=2)
res_test, met_test, gt_labels_test = get_activations(handle, metric, gt, dropout_rate, activations, opt,
test_iterator_full, test_handle_full, num_iter_test,
cross)
if cross == 0:
print("Test", metric_name+':', met_test)
with open(opt.log_dir_base + opt.name + '/activations_test' + str(cross) + '.pkl', 'wb') as f:
pickle.dump(res_test, f, protocol=2)
with open(opt.log_dir_base + opt.name + '/labels_test' + str(cross) + '.pkl', 'wb') as f:
pickle.dump(gt_labels_test, f, protocol=2)
with open(opt.log_dir_base + opt.name + '/' + metric_name + '_test' + str(cross) + '.pkl', 'wb') as f:
pickle.dump(met_test, f, protocol=2)
sys.stdout.flush()
tf.reset_default_graph()
t1 = time.time()
sys.stdout.flush()
print('Time: ', t1 - t0)
print('get_activations.py')
print(':)')
| [
"data.rand10_dataset.Rand10",
"tensorflow.image.resize_images",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.reduce_mean",
"tensorflow.cast",
"data.rand10000_regression_dataset.Rand10000_regression",
"tensorflow.summary.image",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.reshape... | [((628, 666), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['opt.seed'], {}), '(opt.seed)\n', (656, 666), True, 'import tensorflow as tf\n'), ((6678, 6689), 'time.time', 'time.time', ([], {}), '()\n', (6687, 6689), False, 'import time\n'), ((9967, 9991), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (9989, 9991), True, 'import tensorflow as tf\n'), ((9997, 10008), 'time.time', 'time.time', ([], {}), '()\n', (10006, 10008), False, 'import time\n'), ((10009, 10027), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10025, 10027), False, 'import sys\n'), ((2644, 2689), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.string'], {'shape': '[]'}), '(tf.string, shape=[])\n', (2668, 2689), True, 'import tensorflow as tf\n'), ((2705, 2826), 'tensorflow.compat.v1.data.Iterator.from_string_handle', 'tf.compat.v1.data.Iterator.from_string_handle', (['handle', 'val_dataset_full.output_types', 'val_dataset_full.output_shapes'], {}), '(handle, val_dataset_full.\n output_types, val_dataset_full.output_shapes)\n', (2750, 2826), True, 'import tensorflow as tf\n'), ((3991, 4027), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4015, 4027), True, 'import tensorflow as tf\n'), ((5211, 5232), 'numpy.random.seed', 'np.random.seed', (['cross'], {}), '(cross)\n', (5225, 5232), True, 'import numpy as np\n'), ((7001, 7013), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7011, 7013), True, 'import tensorflow as tf\n'), ((7333, 7398), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {'max_to_keep': 'opt.max_to_keep_checkpoints'}), '(max_to_keep=opt.max_to_keep_checkpoints)\n', (7357, 7398), True, 'import tensorflow as tf\n'), ((7406, 7461), 'os.path.isdir', 'os.path.isdir', (["(opt.log_dir_base + opt.name + '/models/')"], {}), "(opt.log_dir_base + opt.name + '/models/')\n", (7419, 7461), 
False, 'import os\n'), ((1392, 1418), 'data.cifar_dataset.Cifar10', 'cifar_dataset.Cifar10', (['opt'], {}), '(opt)\n', (1413, 1418), False, 'from data import cifar_dataset\n'), ((3490, 3565), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['image', '[opt.hyper.image_size, opt.hyper.image_size]'], {}), '(image, [opt.hyper.image_size, opt.hyper.image_size])\n', (3512, 3565), True, 'import tensorflow as tf\n'), ((4219, 4248), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((y_ - y) ** 2)'], {}), '((y_ - y) ** 2)\n', (4233, 4248), True, 'import tensorflow as tf\n'), ((4363, 4397), 'tensorflow.cast', 'tf.cast', (['im_prediction', 'tf.float32'], {}), '(im_prediction, tf.float32)\n', (4370, 4397), True, 'import tensorflow as tf\n'), ((4415, 4444), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['im_prediction'], {}), '(im_prediction)\n', (4429, 4444), True, 'import tensorflow as tf\n'), ((7570, 7618), 'os.path.isdir', 'os.path.isdir', (["(opt.log_dir_base + opt.name + '/')"], {}), "(opt.log_dir_base + opt.name + '/')\n", (7583, 7618), False, 'import os\n'), ((9946, 9964), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9962, 9964), False, 'import sys\n'), ((1524, 1550), 'data.rand10_dataset.Rand10', 'rand10_dataset.Rand10', (['opt'], {}), '(opt)\n', (1545, 1550), False, 'from data import rand10_dataset\n'), ((3610, 3642), 'tensorflow.summary.image', 'tf.summary.image', (['"""input"""', 'image'], {}), "('input', image)\n", (3626, 3642), True, 'import tensorflow as tf\n'), ((3757, 3794), 'tensorflow.compat.v1.reshape', 'tf.compat.v1.reshape', (['image', '[-1, 10]'], {}), '(image, [-1, 10])\n', (3777, 3794), True, 'import tensorflow as tf\n'), ((4318, 4333), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (4327, 4333), True, 'import tensorflow as tf\n'), ((7491, 7559), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["(opt.log_dir_base + opt.name + '/models/')"], {}), "(opt.log_dir_base + opt.name + 
'/models/')\n", (7517, 7559), True, 'import tensorflow as tf\n'), ((8774, 8805), 'pickle.dump', 'pickle.dump', (['res', 'f'], {'protocol': '(2)'}), '(res, f, protocol=2)\n', (8785, 8805), False, 'import pickle\n'), ((8911, 8954), 'pickle.dump', 'pickle.dump', (['gt_labels_train', 'f'], {'protocol': '(2)'}), '(gt_labels_train, f, protocol=2)\n', (8922, 8954), False, 'import pickle\n'), ((9068, 9099), 'pickle.dump', 'pickle.dump', (['met', 'f'], {'protocol': '(2)'}), '(met, f, protocol=2)\n', (9079, 9099), False, 'import pickle\n'), ((9587, 9623), 'pickle.dump', 'pickle.dump', (['res_test', 'f'], {'protocol': '(2)'}), '(res_test, f, protocol=2)\n', (9598, 9623), False, 'import pickle\n'), ((9734, 9776), 'pickle.dump', 'pickle.dump', (['gt_labels_test', 'f'], {'protocol': '(2)'}), '(gt_labels_test, f, protocol=2)\n', (9745, 9776), False, 'import pickle\n'), ((9900, 9936), 'pickle.dump', 'pickle.dump', (['met_test', 'f'], {'protocol': '(2)'}), '(met_test, f, protocol=2)\n', (9911, 9936), False, 'import pickle\n'), ((1662, 1694), 'data.rand10000_dataset.Rand10000', 'rand10000_dataset.Rand10000', (['opt'], {}), '(opt)\n', (1689, 1694), False, 'from data import rand10000_dataset\n'), ((3915, 3955), 'tensorflow.compat.v1.reshape', 'tf.compat.v1.reshape', (['image', '[-1, 10000]'], {}), '(image, [-1, 10000])\n', (3935, 3955), True, 'import tensorflow as tf\n'), ((5899, 5931), 'numpy.shape', 'np.shape', (['activations_tmp[layer]'], {}), '(activations_tmp[layer])\n', (5907, 5931), True, 'import numpy as np\n'), ((6422, 6487), 'numpy.append', 'np.append', (['activations_out[layer]', 'activations_tmp[layer]'], {'axis': '(0)'}), '(activations_out[layer], activations_tmp[layer], axis=0)\n', (6431, 6487), True, 'import numpy as np\n'), ((6524, 6579), 'numpy.append', 'np.append', (['labels_out[layer]', 'labels_tmp[layer]'], {'axis': '(0)'}), '(labels_out[layer], labels_tmp[layer], axis=0)\n', (6533, 6579), True, 'import numpy as np\n'), ((7648, 7709), 
'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["(opt.log_dir_base + opt.name + '/')"], {}), "(opt.log_dir_base + opt.name + '/')\n", (7674, 7709), True, 'import tensorflow as tf\n'), ((1822, 1870), 'data.rand10_regression_dataset.Rand10_regression', 'rand10_regression_dataset.Rand10_regression', (['opt'], {}), '(opt)\n', (1865, 1870), False, 'from data import rand10_regression_dataset\n'), ((6008, 6042), 'numpy.random.permutation', 'np.random.permutation', (['num_samples'], {}), '(num_samples)\n', (6029, 6042), True, 'import numpy as np\n'), ((2004, 2058), 'data.rand10000_regression_dataset.Rand10000_regression', 'rand10000_regression_dataset.Rand10000_regression', (['opt'], {}), '(opt)\n', (2053, 2058), False, 'from data import rand10000_regression_dataset\n'), ((5834, 5866), 'numpy.shape', 'np.shape', (['activations_tmp[layer]'], {}), '(activations_tmp[layer])\n', (5842, 5866), True, 'import numpy as np\n'), ((5715, 5747), 'numpy.shape', 'np.shape', (['activations_tmp[layer]'], {}), '(activations_tmp[layer])\n', (5723, 5747), True, 'import numpy as np\n')] |
# Copyright (c) 2020 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
#
# THIS IS NOT THE ORIGINAL VERSION OF THE FILE.
#
# Last modified 2021-12-02
import logging
import numpy as np
logger = logging.getLogger(__name__)
class Optimizer(object):
def __init__(self, theta, step_size):
#self.theta = theta
self.dim = len(theta)
self.t = 0
def update(self, theta, globalg):
logger.info(self.t)
self.t += 1
step = self._compute_step(globalg)
ratio = np.linalg.norm(step) / np.linalg.norm(theta)
return ratio, theta + step
def _compute_step(self, globalg):
raise NotImplementedError
class SimpleSGD(Optimizer):
def __init__(self, theta, stepsize):
Optimizer.__init__(self, theta, stepsize)
self.stepsize = stepsize
def _compute_step(self, globalg):
step = -self.stepsize * globalg
return step
class SGD(Optimizer):
def __init__(self, theta, stepsize, momentum=0.9):
Optimizer.__init__(self, theta, stepsize)
self.v = np.zeros(self.dim, dtype=np.float32)
self.stepsize, self.momentum = stepsize, momentum
def _compute_step(self, globalg):
self.v = self.momentum * self.v + (1. - self.momentum) * globalg
step = -self.stepsize * self.v
return step
class Adam(Optimizer):
def __init__(self, theta, stepsize, beta1=0.9, beta2=0.999, epsilon=1e-08):
Optimizer.__init__(self, theta, stepsize)
self.stepsize = stepsize
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.m = np.zeros(self.dim, dtype=np.float32)
self.v = np.zeros(self.dim, dtype=np.float32)
def reset(self):
self.m = np.zeros(self.dim, dtype=np.float32)
self.v = np.zeros(self.dim, dtype=np.float32)
self.t = 0
def _compute_step(self, globalg):
a = self.stepsize * np.sqrt(1 - self.beta2**self.t) / (
1 - self.beta1**self.t)
self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
step = -a * self.m / (np.sqrt(self.v) + self.epsilon)
return step
| [
"logging.getLogger",
"numpy.zeros",
"numpy.sqrt",
"numpy.linalg.norm"
] | [((565, 592), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (582, 592), False, 'import logging\n'), ((1440, 1476), 'numpy.zeros', 'np.zeros', (['self.dim'], {'dtype': 'np.float32'}), '(self.dim, dtype=np.float32)\n', (1448, 1476), True, 'import numpy as np\n'), ((1997, 2033), 'numpy.zeros', 'np.zeros', (['self.dim'], {'dtype': 'np.float32'}), '(self.dim, dtype=np.float32)\n', (2005, 2033), True, 'import numpy as np\n'), ((2051, 2087), 'numpy.zeros', 'np.zeros', (['self.dim'], {'dtype': 'np.float32'}), '(self.dim, dtype=np.float32)\n', (2059, 2087), True, 'import numpy as np\n'), ((2127, 2163), 'numpy.zeros', 'np.zeros', (['self.dim'], {'dtype': 'np.float32'}), '(self.dim, dtype=np.float32)\n', (2135, 2163), True, 'import numpy as np\n'), ((2181, 2217), 'numpy.zeros', 'np.zeros', (['self.dim'], {'dtype': 'np.float32'}), '(self.dim, dtype=np.float32)\n', (2189, 2217), True, 'import numpy as np\n'), ((886, 906), 'numpy.linalg.norm', 'np.linalg.norm', (['step'], {}), '(step)\n', (900, 906), True, 'import numpy as np\n'), ((909, 930), 'numpy.linalg.norm', 'np.linalg.norm', (['theta'], {}), '(theta)\n', (923, 930), True, 'import numpy as np\n'), ((2304, 2337), 'numpy.sqrt', 'np.sqrt', (['(1 - self.beta2 ** self.t)'], {}), '(1 - self.beta2 ** self.t)\n', (2311, 2337), True, 'import numpy as np\n'), ((2550, 2565), 'numpy.sqrt', 'np.sqrt', (['self.v'], {}), '(self.v)\n', (2557, 2565), True, 'import numpy as np\n')] |
import numpy as np
from catboost import CatBoostClassifier, Pool
from sklearn.calibration import calibration_curve
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import (roc_auc_score,
roc_curve,
precision_recall_curve,
average_precision_score,
f1_score,
accuracy_score)
def run_experiment(df_model, train_valid_records, test_records, features, target):
df_test = df_model[df_model.record_id.isin(test_records.record_id)]
X_test = df_test[features]
y_test = df_test[target].map({False: 0, True: 1})
summaries = []
skf = StratifiedKFold(n_splits=5)
for i, (train, valid) in enumerate(skf.split(train_valid_records.record_id, train_valid_records[target])):
train_records = train_valid_records.iloc[train].record_id
df_train = df_model[df_model.record_id.isin(train_records)]
valid_records = train_valid_records.iloc[valid].record_id
df_valid = df_model[df_model.record_id.isin(valid_records)]
assert len(set(df_train.record_id.unique()) & set(df_valid.record_id.unique())) == 0
assert len(set(df_train.record_id.unique()) & set(df_test.record_id.unique())) == 0
assert len(set(df_test.record_id.unique()) & set(df_valid.record_id.unique())) == 0
model = CatBoostClassifier(max_depth=3, learning_rate=0.01, early_stopping_rounds=100)
X_train = df_train[features]
y_train = df_train[target].map({False: 0, True: 1})
X_valid = df_valid[features]
y_valid = df_valid[target].map({False: 0, True: 1})
valid_counts = y_valid.value_counts()
model.fit(X_train, y_train, eval_set=(X_valid, y_valid))
pred = model.predict_proba(X_test)
y_score = pred[:, 1]
auc = roc_auc_score(y_test.values, y_score)
fpr, tpr, _ = roc_curve(y_test.values, y_score)
auc = roc_auc_score(y_test.values, y_score)
fpr, tpr, thresholds = roc_curve(y_test.values, y_score)
ap = average_precision_score(y_test.values, y_score)
precision, recall, _ = precision_recall_curve(y_test.values, y_score)
f1s = [(thr, f1_score(y_test.values, y_score > thr)) for thr in np.arange(0, 1, 0.01)]
accs = [(thr, accuracy_score(y_test.values, y_score > thr)) for thr in np.arange(0, 1, 0.01)]
best_thr_f1, best_f1 = max(f1s, key=lambda t: t[1])
best_thr_acc, best_acc = max(accs, key=lambda t: t[1])
fraction_of_positives, mean_predicted_value = calibration_curve(y_test.values, y_score, n_bins=7)
shap_values = model.get_feature_importance(Pool(X_test, y_test), type="ShapValues")
expected_value = shap_values[0, -1]
shap_values = shap_values[:, :-1]
summary = {
"model": model,
"roc_auc": auc,
"avg_precision": ap,
"fpr": fpr,
"tpr": tpr,
"thresholds": thresholds,
"precision": precision,
"recall": recall,
"best_thr_f1": best_thr_f1,
"best_f1": best_f1,
"best_thr_acc": best_thr_acc,
"best_acc": best_acc,
"fraction_of_positives": fraction_of_positives,
"mean_predicted_value": mean_predicted_value,
"expected_value": expected_value,
"shap_values": shap_values
}
summaries.append(summary)
print(f"""
--------------------------------------------------------------------
Finished pipeline for fold #{i + 1}
Summary:
Positive cases : {valid_counts[1]}
Negative cases : {valid_counts[0]}
ROC AUC = {auc}
Average Precision = {ap}
Best F1 = {best_f1} (Threshold = {best_thr_f1})
Best Accuracy = {best_acc} (Threshold = {best_thr_acc})
--------------------------------------------------------------------
""")
return summaries, df_test
| [
"sklearn.metrics.f1_score",
"catboost.Pool",
"sklearn.metrics.average_precision_score",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.StratifiedKFold",
"sklearn.metrics.roc_curve",
"catboost.CatBoostClassifier",
"sklearn.calibration.calibration_c... | [((715, 742), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (730, 742), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1418, 1496), 'catboost.CatBoostClassifier', 'CatBoostClassifier', ([], {'max_depth': '(3)', 'learning_rate': '(0.01)', 'early_stopping_rounds': '(100)'}), '(max_depth=3, learning_rate=0.01, early_stopping_rounds=100)\n', (1436, 1496), False, 'from catboost import CatBoostClassifier, Pool\n'), ((1893, 1930), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test.values', 'y_score'], {}), '(y_test.values, y_score)\n', (1906, 1930), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((1953, 1986), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test.values', 'y_score'], {}), '(y_test.values, y_score)\n', (1962, 1986), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((2002, 2039), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test.values', 'y_score'], {}), '(y_test.values, y_score)\n', (2015, 2039), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((2071, 2104), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test.values', 'y_score'], {}), '(y_test.values, y_score)\n', (2080, 2104), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((2119, 2166), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_test.values', 'y_score'], {}), '(y_test.values, y_score)\n', (2142, 2166), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((2198, 2244), 
'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test.values', 'y_score'], {}), '(y_test.values, y_score)\n', (2220, 2244), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((2620, 2671), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_test.values', 'y_score'], {'n_bins': '(7)'}), '(y_test.values, y_score, n_bins=7)\n', (2637, 2671), False, 'from sklearn.calibration import calibration_curve\n'), ((2724, 2744), 'catboost.Pool', 'Pool', (['X_test', 'y_test'], {}), '(X_test, y_test)\n', (2728, 2744), False, 'from catboost import CatBoostClassifier, Pool\n'), ((2267, 2305), 'sklearn.metrics.f1_score', 'f1_score', (['y_test.values', '(y_score > thr)'], {}), '(y_test.values, y_score > thr)\n', (2275, 2305), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((2318, 2339), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (2327, 2339), True, 'import numpy as np\n'), ((2363, 2407), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test.values', '(y_score > thr)'], {}), '(y_test.values, y_score > thr)\n', (2377, 2407), False, 'from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, f1_score, accuracy_score\n'), ((2420, 2441), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (2429, 2441), True, 'import numpy as np\n')] |
from ..filters import run_filters, cheap_filters, all_filters
from ..utils.misc import invert, values_map_to_same_key, one_hot
from ..utils.graph_ops import get_node_cover
from .alldiffs import count_alldiffs
import numpy as np
from functools import reduce
# TODO: count how many isomorphisms each background node participates in.
# TODO: switch from recursive to iterative implementation for readability
n_iterations = 0
def recursive_isomorphism_counter(tmplt, world, candidates, *,
unspec_cover, verbose, init_changed_cands, count_iterations=False):
global n_iterations
n_iterations += 1
# If the node cover is empty, the unspec nodes are disconnected. Thus, we
# can skip straight to counting solutions to the alldiff constraint problem
if len(unspec_cover) == 0:
# Elimination filter is not needed here and would be a waste of time
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates, filters=cheap_filters,
verbose=False, init_changed_cands=init_changed_cands)
node_to_cands = {node: world.nodes[candidates[idx]]
for idx, node in enumerate(tmplt.nodes)}
return count_alldiffs(node_to_cands)
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates, filters=all_filters,
verbose=False, init_changed_cands=init_changed_cands)
# Since the node cover is not empty, we first choose some valid
# assignment of the unspecified nodes one at a time until the remaining
# unspecified nodes are disconnected.
n_isomorphisms = 0
node_idx = unspec_cover[0]
cand_idxs = np.argwhere(candidates[node_idx]).flat
for i, cand_idx in enumerate(cand_idxs):
candidates_copy = candidates.copy()
candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes)
# recurse to make assignment for the next node in the unspecified cover
n_isomorphisms += recursive_isomorphism_counter(
tmplt, world, candidates_copy, unspec_cover=unspec_cover[1:],
verbose=verbose, init_changed_cands=one_hot(node_idx, tmplt.n_nodes), count_iterations=count_iterations)
# TODO: more useful progress summary
if verbose:
print("depth {}: {} of {}".format(len(unspec_cover), i, len(cand_idxs)), n_isomorphisms)
return n_isomorphisms
def count_isomorphisms(tmplt, world, *, candidates=None, verbose=True, count_iterations=False):
"""
counts the number of ways to assign template nodes to world nodes such that
edges between template nodes also appear between the corresponding world
nodes. Does not factor in the number of ways to assign the edges. Only
counts the number of assignments between nodes.
if the set of unspecified template nodes is too large or too densely
connected, this code may never finish.
"""
global n_iterations
n_iterations = 0
if candidates is None:
tmplt, world, candidates = uclasm.run_filters(
tmplt, world, filters=uclasm.all_filters, verbose=verbose)
unspec_nodes = np.where(candidates.sum(axis=1) > 1)[0]
tmplt_subgraph = tmplt.subgraph(unspec_nodes)
unspec_cover = get_node_cover(tmplt_subgraph)
unspec_cover_nodes = [tmplt_subgraph.nodes[node_idx] for node_idx in unspec_cover]
unspec_cover_idxes = [tmplt.node_idxs[node] for node in unspec_cover_nodes]
# Send zeros to init_changed_cands since we already just ran the filters
count = recursive_isomorphism_counter(
tmplt, world, candidates, verbose=verbose, unspec_cover=unspec_cover_idxes,
init_changed_cands=np.zeros(tmplt.nodes.shape, dtype=np.bool), count_iterations=count_iterations)
if count_iterations:
return count, n_iterations
else:
return count
def recursive_isomorphism_finder(tmplt, world, candidates, *,
unspec_node_idxs, verbose, init_changed_cands,
found_isomorphisms):
if len(unspec_node_idxs) == 0:
# All nodes have been assigned, add the isomorphism to the list
new_isomorphism = {}
for tmplt_idx, tmplt_node in enumerate(tmplt.nodes):
if verbose:
print(str(tmplt_node)+":", world.nodes[candidates[tmplt_idx]])
new_isomorphism[tmplt_node] = world.nodes[candidates[tmplt_idx]][0]
found_isomorphisms.append(new_isomorphism)
return found_isomorphisms
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates,
filters=all_filters, verbose=False,
init_changed_cands=init_changed_cands)
node_idx = unspec_node_idxs[0]
cand_idxs = np.argwhere(candidates[node_idx]).flat
for i, cand_idx in enumerate(cand_idxs):
candidates_copy = candidates.copy()
candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes)
# recurse to make assignment for the next node in the unspecified cover
recursive_isomorphism_finder(
tmplt, world, candidates_copy,
unspec_node_idxs=unspec_node_idxs[1:],
verbose=verbose,
init_changed_cands=one_hot(node_idx, tmplt.n_nodes),
found_isomorphisms=found_isomorphisms)
return found_isomorphisms
def find_isomorphisms(tmplt, world, *, candidates=None, verbose=True):
    """ Returns a list of isomorphisms as dictionaries mapping template nodes to
    world nodes. Note: this is much slower than counting, and should only be
    done for small numbers of isomorphisms and fully filtered candidate matrices
    """
    if candidates is None:
        tmplt, world, candidates = uclasm.run_filters(
            tmplt, world, filters=uclasm.all_filters, verbose=verbose)
    # Template nodes that still have more than one candidate world node.
    unspec_node_idxs = np.where(candidates.sum(axis=1) > 1)[0]
    found_isomorphisms = []
    # dtype=bool: the np.bool alias was deprecated and removed in NumPy 1.24.
    return recursive_isomorphism_finder(
        tmplt, world, candidates, verbose=verbose,
        unspec_node_idxs=unspec_node_idxs,
        init_changed_cands=np.zeros(tmplt.nodes.shape, dtype=bool),
        found_isomorphisms=found_isomorphisms)
def print_isomorphisms(tmplt, world, *, candidates=None, verbose=True):
    """ Prints the list of isomorphisms """
    isomorphisms = find_isomorphisms(tmplt, world, candidates=candidates,
                                     verbose=verbose)
    print(isomorphisms)
| [
"numpy.zeros",
"numpy.argwhere"
] | [((1682, 1715), 'numpy.argwhere', 'np.argwhere', (['candidates[node_idx]'], {}), '(candidates[node_idx])\n', (1693, 1715), True, 'import numpy as np\n'), ((4748, 4781), 'numpy.argwhere', 'np.argwhere', (['candidates[node_idx]'], {}), '(candidates[node_idx])\n', (4759, 4781), True, 'import numpy as np\n'), ((3676, 3718), 'numpy.zeros', 'np.zeros', (['tmplt.nodes.shape'], {'dtype': 'np.bool'}), '(tmplt.nodes.shape, dtype=np.bool)\n', (3684, 3718), True, 'import numpy as np\n'), ((6060, 6102), 'numpy.zeros', 'np.zeros', (['tmplt.nodes.shape'], {'dtype': 'np.bool'}), '(tmplt.nodes.shape, dtype=np.bool)\n', (6068, 6102), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
CALFEM Vedo
Utils for 3D visualization in CALFEM using Vedo (https://vedo.embl.es/)
@author: <NAME>
"""
import numpy as np
import vedo as v
import pyvtk
import vtk
import sys
#import webbrowser
from scipy.io import loadmat
import calfem.core as cfc
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Tools, used in this file but can be accessed by a user as well (see exv4.py)
def get_coord_from_edof(edof_row, dof, element_type):
    """
    Routine to get element coordinates based on element type and degrees of freedom.

    :param array edof_row: Element topology row [1 x degrees of freedom per element]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param int element_type: Element type [1-6]
    :return: Two node indices ``(coord1, coord2)`` for 2-node elements
        (types 1, 2 & 5), otherwise an array of node indices [n_nodes]
    """

    def node_index(node_dofs):
        # Index of the first row of `dof` that shares a dof with this node.
        return int(np.where((node_dofs == dof).any(axis=1))[0][0])

    if element_type == 1 or element_type == 2 or element_type == 5:
        # 2-node bar/beam elements: keep the historical two-value return.
        node1, node2 = np.split(edof_row, 2)
        return node_index(node1), node_index(node2)
    elif element_type == 3 or element_type == 4:
        # 8-node solid (hexahedron) elements.
        return np.array([node_index(part) for part in np.split(edof_row, 8)])
    elif element_type == 6:
        # 4-node plate elements.
        return np.array([node_index(part) for part in np.split(edof_row, 4)])
def get_a_from_coord(coord_row_num, num_of_deformations, a, scale=1):
    """
    Routine to get node displacements based on coordinates.

    :param int coord_row_num: Node coordinate row number
    :param int num_of_deformations: Number of degrees of freedom per node
    :param array a: Global displacement vector [1 x total degrees of freedom]
    :param float scale: Scale factor applied to every displacement component
    :return float dx: Nodal displacement in x-direction
    :return float dy: Nodal displacement in y-direction
    :return float dz: Nodal displacement in z-direction
    """
    base = coord_row_num * num_of_deformations
    # Consecutive entries of `a` hold the x, y and z components of this node.
    dx, dy, dz = (a[base + offset] * scale for offset in range(3))
    return dx, dy, dz
def get_node_elements(coord, scale, alpha, dof, bcPrescr=None, bc=None,
                      bc_color='red', fPrescr=None, f=None, f_color='blue6',
                      dofs_per_node=None):
    """
    Routine to get node actors.

    :param array coord: Nodal coordinates [number of nodes x 3]
    :param int scale: Node actor radius
    :param float alpha: Node actor transparency [0-1]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param array bcPrescr: Degrees of freedom with prescribed boundary conditions [number of prescribed boundary conditions x 1]
    :param array bc: Values for prescribed boundary conditions [number of prescribed boundary conditions x 1]
    :param string bc_color: Color for nodes with prescribed boundary conditions
    :param array fPrescr: Degrees of freedom with applied forces [number of applied forces x 1]
    :param array f: Values for forces [number of applied forces x 1]
    :param string f_color: Color for nodes with applied forces
    :param int dofs_per_node: Degrees of freedom per node [1-6]
    :return list nodes: Node actors
    """
    nnode = np.size(coord, axis=0)

    def _prescribed_values(dofs_prescr, values):
        # Map each prescribed dof to its value; empty when nothing prescribed.
        if isinstance(dofs_prescr, np.ndarray):
            return {d: val for d, val in zip(dofs_prescr, values)}
        return {}

    bc_dict = _prescribed_values(bcPrescr, bc)
    f_dict = _prescribed_values(fPrescr, f)

    def _node_label(node_num, node_dofs, label, value_dict):
        # Build "Node nr. N, <label>: [dof(: value), ...]" for the tooltip name.
        parts = []
        for d in node_dofs:
            part = str(d)
            if d in value_dict:
                part += ': ' + str(value_dict[d])
            parts.append(part)
        return f"Node nr. {node_num}, {label}: [" + ', '.join(parts) + ']'

    nodes = []
    for i in range(nnode):
        dofs = dof[i]
        # Compute each membership test once; the original evaluated them twice.
        has_bc = bool(np.any(np.isin(bcPrescr, dofs, assume_unique=True)))
        has_f = bool(np.any(np.isin(fPrescr, dofs, assume_unique=True)))
        if has_bc:
            color = bc_color
        elif has_f:
            color = f_color
        else:
            color = 'black'
        node = v.Sphere(c=color).scale(1.5*scale).pos(
            [coord[i, 0], coord[i, 1], coord[i, 2]]).alpha(alpha)
        # dofs[:dofs_per_node] also tolerates the dofs_per_node=None default
        # (the original crashed on range(None)).
        if has_bc:
            node.name = _node_label(i + 1, dofs[:dofs_per_node], "DoFs & BCs", bc_dict)
        elif has_f:
            node.name = _node_label(i + 1, dofs[:dofs_per_node], "DoFs & Forces", f_dict)
        else:
            node.name = _node_label(i + 1, dofs[:dofs_per_node], "DoFs", {})
        nodes.append(node)
    return nodes
def vectors(
        points,
        vectors,
        c="k",
        alpha=1,
        shaftWidth=0.05,
        text=None, vmax=None, vmin=None, cmap='jet', values=None):
    """
    Routine for creating vectors.

    :param list points: Mid point for vector [number of vectors x 3]
    :param list vectors: Vector components [number of vectors x 3]
    :param string c: Vector color
    :param float alpha: Vector transparency [0-1]
    :param float shaftWidth: Vector width
    :param list text: Vector labels [number of vectors x 1], optional
    :param float vmax: Maximum vector value for colormapping
    :param float vmin: Minimum vector value for colormapping
    :param string cmap: Vector colormap
    :param list values: Scalar values for colormapping [number of vectors x 1], optional
    :return list cylinders: Vector actors
    """
    if isinstance(points, v.Points):
        points = points.points()
    else:
        points = np.array(points)
    # Offset by half the vector so each cylinder is centred on its point.
    half = np.array(vectors) / 2
    spts = points - half
    epts = points + half
    npts = np.size(points, 0)
    cylinders = []
    for i in range(npts):
        cyl = v.Cylinder([spts[i], epts[i]], r=shaftWidth*0.01, res=4, c=c)
        # Guard the optional arguments: text/values default to None and the
        # original raised TypeError when they were omitted.
        if text is not None:
            cyl.name = text[i]
        if values is not None:
            cyl.cmap(cmap, input_array=values[i], vmin=vmin, vmax=vmax, on='cells')
        cylinders.append(cyl)
    return cylinders
def check_input(edof, coord, dof, element_type, a=None, values=None, nseg=None):
    """
    Routine for checking input to draw_mesh, draw_displaced_mesh & animation.

    :param array edof: Element topology [number of elements x degrees of freedom per element]
    :param array coord: Nodal coordinates [number of nodes x 3]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param int element_type: Element type [1-6]
    :param array a: Global displacement vector [degrees of freedom x 1]
    :param array values: Scalar values [number of elements x 1 | number of elements x nodes per element | number of nodes x 1]
    :param int nseg: Number of beam segments + 1 (only used for element type 5)
    :return int number_of_elements: Number of elements
    :return int number_of_degrees_of_freedom_per_element: Degrees of freedom per element
    :return int number_of_coordinates: Number of coordinates
    :return int number_of_dimensions: Number of dimensions for model [1-3]
    :return int number_of_degrees_of_freedom: Number of degrees of freedom
    :return int degrees_of_freedom_per_node: Degrees of freedom per node
    :return int number_of_displacements: Number of displacements (only when `a` is given)
    :return string val: Type of scalar input ['el_values' / 'nodal_values_by_el' / 'nodal_values'] (only when `values` is given)
    """
    # Nodes per element by element type (1/2/5: 2-node, 3/4: 8-node, 6: 4-node).
    if element_type == 1 or element_type == 2 or element_type == 5:
        number_of_nodes_per_element = 2
    elif element_type == 3 or element_type == 4:
        number_of_nodes_per_element = 8
    elif element_type == 6:
        number_of_nodes_per_element = 4

    number_of_elements = np.size(edof, axis=0)
    number_of_degrees_of_freedom_per_element = np.size(edof, axis=1)
    number_of_coordinates = np.size(coord, axis=0)
    number_of_dimensions = np.size(coord, axis=1)
    number_of_degrees_of_freedom = np.size(dof, axis=0)*np.size(dof, axis=1)
    degrees_of_freedom_per_node = np.size(dof, axis=1)

    if a is not None:
        number_of_displacements = np.size(a, axis=0)

    if values is not None:
        if element_type == 1 or element_type == 2 or element_type == 5:
            # Bars/beams carry one value per segment; bars have one segment.
            # NOTE(review): element type 5 requires the caller to pass nseg.
            if element_type == 1 or element_type == 2:
                nseg = 1
            number_of_values = np.size(values, axis=0)
            if number_of_values == number_of_elements*nseg:
                val = 'el_values'
            else:
                print("Invalid number of element-/nodal values, please make sure values correspond to total number of elements or nodes")
                sys.exit()
        else:
            number_of_values = np.size(values, axis=0)*np.size(values, axis=1)
            if number_of_values == number_of_elements:
                val = 'el_values'
            elif number_of_values == number_of_elements*number_of_nodes_per_element:
                val = 'nodal_values_by_el'
            elif number_of_values == number_of_coordinates:
                val = 'nodal_values'
            else:
                print("Invalid number of element-/nodal values, please make sure values correspond to total number of elements or nodes")
                sys.exit()

    # The shape of the returned tuple depends on which optional inputs were
    # supplied, mirroring what the drawing routines expect to unpack.
    if a is None and values is None:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node
    elif a is None:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node, \
            val
    elif values is None:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node, \
            number_of_displacements
    else:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node, \
            number_of_displacements, \
            val
""" Från kopia 2
def ugrid_from_edof_ec(edof, ex, ey, ez, ed=None, dofs_per_node=3, ignore_first=True):
coords, topo, node_dofs, node_displ = convert_to_node_topo(edof, ex, ey, ez, ed, dofs_per_node, ignore_first)
npoint = coords.shape[0]
nel = topo.shape[0]
nnd = topo.shape[1]
if nnd == 4:
ct = vtk.VTK_TETRA
elif nnd == 8:
ct = vtk.VTK_HEXAHEDRON
else:
print("Topology not supported.")
celltypes = [ct] * nel
return UGrid([coords, topo, celltypes])
def convert_to_node_topo(edof, ex, ey, ez, ed=None, dofs_per_node=3, ignore_first=True):
Routine to convert dof based topology and element coordinates to node based
topology required for visualisation with VTK and other visualisation frameworks
:param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
:param array ex: element x coordinates [nel x n_nodes]
:param array ey: element y coordinates [nel x n_nodes]
:param array ez: element z coordinates [nel x n_nodes]
:param array n_dofs_per_node: number of dofs per node. (default = 3)
:param boolean ignore_first: ignore first column of edof. (default = True)
:return array coords: Array of node coordinates. [n_nodes x 3]
:return array topo: Node topology. [nel x n_nodes]
:return array node_dofs: Dofs for each node. [n_nodes x n_dofs_per_node]
node_hash_coords = {}
node_hash_numbers = {}
node_hash_dofs = {}
node_hash_displ = {}
el_hash_dofs = []
nel, cols = edof.shape
if ignore_first:
tot_dofs = cols-1
else:
tot_dofs = cols
n_nodes = int(tot_dofs / dofs_per_node)
# print("n_dofs_per_node =", dofs_per_node)
# print("cols =", tot_dofs)
# print("nel =", nel)
# print("n_nodes =", n_nodes)
if ed is None:
ed = np.zeros((nel,n_nodes*dofs_per_node))
for elx, ely, elz, eed, dofs in zip(ex, ey, ez, ed, edof):
if ignore_first:
el_dofs = dofs[1:]
else:
el_dofs = dofs
# 0 1 2 3 4 5 6 7 8 9 12 11
el_dof = np.zeros((n_nodes, dofs_per_node), dtype=int)
el_hash_topo = []
for i in range(n_nodes):
el_dof[i] = el_dofs[ (i*dofs_per_node):((i+1)*dofs_per_node) ]
node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i], ely[i], elz[i]]
node_hash_numbers[hash(tuple(el_dof[i]))] = -1
node_hash_dofs[hash(tuple(el_dof[i]))] = el_dof[i]
displ_dofs = []
for j in range(dofs_per_node):
displ_dofs.append(eed[i*dofs_per_node+j])
node_hash_displ[hash(tuple(el_dof[i]))] = displ_dofs
#node_hash_displ[hash(tuple(el_dof[i]))] = [eed[i*dofs_per_node], eed[i*dofs_per_node+1], eed[i*dofs_per_node+2]]
el_hash_topo.append(hash(tuple(el_dof[i])))
el_hash_dofs.append(el_hash_topo)
coord_count = 0
coords = []
node_dofs = []
node_displ = []
for node_hash in node_hash_numbers.keys():
node_hash_numbers[node_hash] = coord_count
node_dofs.append(node_hash_dofs[node_hash])
node_displ.append(node_hash_displ[node_hash])
coord_count +=1
coords.append(node_hash_coords[node_hash])
topo = []
for el_hashes in el_hash_dofs:
el_hash_topo = []
for el_hash in el_hashes:
el_hash_topo.append(node_hash_numbers[el_hash])
topo.append(el_hash_topo)
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ]
# )
if ed is None:
return np.asarray(coords), np.asarray(topo), np.asarray(node_dofs)
else:
#print('test')
return np.asarray(coords), np.asarray(topo), np.asarray(node_dofs), np.asarray(node_displ)
"""
### Aktuella
def ugrid_from_edof_ec(edof, ex, ey, ez, ed=None, dofs_per_node=3, ignore_first=True):
    """
    Routine for creating an unstructured grid based on element topology.

    :param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
    :param array ex: element x coordinates [nel x n_nodes]
    :param array ey: element y coordinates [nel x n_nodes]
    :param array ez: element z coordinates [nel x n_nodes]
    :param array ed: element displacements [nel x n_dofs_per_node*n_nodes]
    :param array n_dofs_per_node: number of dofs per node. (default = 3)
    :param boolean ignore_first: ignore first column of edof. (default = True)
    :return object UGrid: Unstructured grid
    """
    # Use keyword arguments: the previous positional call bound dofs_per_node
    # to convert_to_node_topo's `es` parameter, and the callee returns five
    # values (the old 4-tuple unpack raised ValueError).
    coords, topo, node_dofs, node_displ, node_scalars = convert_to_node_topo(
        edof, ex, ey, ez, ed=ed, dofs_per_node=dofs_per_node,
        ignore_first=ignore_first)
    nel = topo.shape[0]
    nnd = topo.shape[1]
    if nnd == 4:
        celltype = vtk.VTK_TETRA
    elif nnd == 8:
        celltype = vtk.VTK_HEXAHEDRON
    else:
        # Fail fast with a clear error instead of crashing later on an
        # unbound variable, as the original did.
        raise ValueError("Topology not supported.")
    celltypes = [celltype] * nel
    # vedo is imported as `v`; the original's bare `UGrid` was a NameError.
    return v.UGrid([coords, topo, celltypes])
def convert_to_node_topo(edof, ex, ey, ez, ed=None, es=None, dofs_per_node=3, ignore_first=True):
    """
    Routine to convert dof based topology and element coordinates to node based
    topology required for visualisation with VTK and other visualisation frameworks.

    :param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
    :param array ex: element x coordinates [nel x n_nodes]
    :param array ey: element y coordinates [nel x n_nodes]
    :param array ez: element z coordinates [nel x n_nodes]
    :param array ed: element displacements [nel x n_dofs_per_node*n_nodes], zeros when omitted
    :param array es: element nodal scalars [nel x n_nodes], zeros when omitted
    :param int dofs_per_node: number of dofs per node. (default = 3)
    :param boolean ignore_first: ignore first column of edof. (default = True)
    :return array coords: Array of node coordinates. [n_nodes x 3]
    :return array topo: Node topology. [nel x n_nodes]
    :return array node_dofs: Dofs for each node. [n_nodes x n_dofs_per_node]
    :return array node_displ: Displacements for each node. [n_nodes x n_dofs_per_node]
    :return array node_scalars: Element-averaged scalar per node. [n_nodes]
    """
    # Each unique node is identified by the hash of its dof tuple; insertion
    # order of these dicts defines the global node numbering.
    coords_by_node = {}
    number_by_node = {}
    dofs_by_node = {}
    displ_by_node = {}
    scalar_sum_by_node = {}
    scalar_count_by_node = {}
    el_node_keys = []

    nel, cols = edof.shape
    tot_dofs = cols - 1 if ignore_first else cols
    n_nodes = int(tot_dofs / dofs_per_node)

    if ed is None:
        ed = np.zeros((nel, n_nodes*dofs_per_node))
    if es is None:
        es = np.zeros((nel, n_nodes))

    for elx, ely, elz, el_displ, el_scalars, dofs in zip(ex, ey, ez, ed, es, edof):
        el_dofs = dofs[1:] if ignore_first else dofs
        keys_for_el = []
        for i in range(n_nodes):
            node_dofs = np.array(el_dofs[i*dofs_per_node:(i+1)*dofs_per_node],
                                 dtype=int)
            key = hash(tuple(node_dofs))
            coords_by_node[key] = [elx[i], ely[i], elz[i]]
            number_by_node[key] = -1
            dofs_by_node[key] = node_dofs
            # Accumulate scalar contributions so shared nodes can be averaged.
            if key in scalar_sum_by_node:
                scalar_sum_by_node[key] += el_scalars[i]
                scalar_count_by_node[key] += 1
            else:
                scalar_sum_by_node[key] = el_scalars[i]
                scalar_count_by_node[key] = 1
            displ_by_node[key] = [el_displ[i*dofs_per_node + j]
                                  for j in range(dofs_per_node)]
            keys_for_el.append(key)
        el_node_keys.append(keys_for_el)

    coords = []
    node_dofs = []
    node_displ = []
    node_scalars = []
    for number, key in enumerate(number_by_node):
        number_by_node[key] = number
        node_dofs.append(dofs_by_node[key])
        node_displ.append(displ_by_node[key])
        node_scalars.append(scalar_sum_by_node[key] / scalar_count_by_node[key])
        coords.append(coords_by_node[key])

    topo = [[number_by_node[key] for key in keys] for keys in el_node_keys]

    return (np.asarray(coords), np.asarray(topo), np.asarray(node_dofs),
            np.asarray(node_displ), np.asarray(node_scalars))
def convert_nodal_values(edof, topo, dof, values):
    """
    Routine for converting nodal values from element-local to global nodes,
    averaging the contributions of all elements that share a node.

    :param array edof: element topology by degrees of freedom [nel x ...] (only the element count is used)
    :param array topo: element topology [nel x nodes per element]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param array values: Element scalar values [nel x nodes per element]
    :return array nodal_value_array: Averaged scalar value per global node [nnode x 1]
    """
    nel = np.size(edof, axis=0)
    nnode = np.size(dof, axis=0)

    # Collect every element contribution per global node, then average.
    # (Replaces the original tuple-keyed dict indirection, which also
    # capped elements at 8 local nodes and printed debug output.)
    contributions = [[] for _ in range(nnode)]
    for el in range(nel):
        for local_idx, node in enumerate(topo[el]):
            contributions[node].append(values[el, local_idx])

    nodal_value_array = np.zeros((nnode, 1))
    for node in range(nnode):
        # NOTE: a node absent from `topo` yields np.mean([]) == nan, as before.
        nodal_value_array[node] = np.mean(contributions[node])
    return nodal_value_array
"""
def convert_a(coord_old,coord_new,a,ndofs):
ncoord = np.size(coord_old, axis=0)
a_new = np.zeros((ncoord,ndofs))
coord_hash_old = {}
coord_hash_new = {}
for i in range(ncoord):
coord_hash_old[hash(tuple(coord_old[i]))] = i
coord_hash_new[hash(tuple(coord_new[i]))] = i
indexes = []
for node_hash in coord_hash_old.keys():
index = coord_hash_new[node_hash]
indexes.append(index)
node = 0
for index in zip(indexes):
if ndofs == 1:
a_new[index] = a[node]
elif ndofs == 3:
a_new[index,0] = a[node*3]
a_new[index,1] = a[node*3+1]
a_new[index,2] = a[node*3+2]
node += 1
# Returns disp. by node, i.e. a_new = [number of nodes x degrees of freedom per node]
return a_new
"""
def convert_el_values(edof, values):
    """
    Routine for converting element values from element to global: each element
    value is repeated six times, one per rendered hexahedron face.

    :param array edof: element topology by degrees of freedom [nel x ...] (only the element count is used)
    :param array values: Element scalar values [nel x 1]
    :return array el_values: Scalar value per rendered face [nel*6 x 1]
    """
    nel = np.size(edof, axis=0)
    el_values = np.zeros((nel*6, 1))
    # Broadcast each element's value into its six face rows (the original
    # wrote the same assignment out six times).
    for i in range(nel):
        el_values[i*6:(i+1)*6, :] = values[i]
    return el_values
# def convert_to_node_topo(edof, ex, ey, ez, n_dofs_per_node=3, ignore_first=False):
# """
# Written by: <NAME>
# Modified by: <NAME>
# Routine to convert dof based topology and element coordinates to node based
# topology required for visualisation with VTK and other visualisation frameworks
# :param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
# :param array ex: element x coordinates [nel x n_nodes]
# :param array ey: element y coordinates [nel x n_nodes]
# :param array ez: element z coordinates [nel x n_nodes]
# :param array a: global deformation [ndof]
# :param array n_dofs_per_node: number of dofs per node. (default = 3)
# :param boolean ignore_first: ignore first column of edof. (default = False)
# :return array coords: Array of node coordinates. [n_nodes x 3]
# :return array topo: Node topology. [nel x n_nodes]
# :return array node_dofs: Dofs for each node. [n_nodes x n_dofs_per_node]
# :return array a: global deformation [ndof] (reorderd according to )
# """
# node_hash_coords = {}
# node_hash_numbers = {}
# #a_hash_numbers = {}
# #node_hash_a = {}
# node_hash_dofs = {}
# el_hash_dofs = []
# nel, cols = edof.shape
# if ignore_first:
# tot_dofs = cols-1
# else:
# tot_dofs = cols
# n_nodes = int(tot_dofs / n_dofs_per_node)
# print("cols =", tot_dofs)
# print("nel =", nel)
# print("n_nodes =", n_nodes)
# #node_hash_a[hash(tuple(a))] = a
# #print(node_hash_a)
# #tot_nnodes = int(np.size(a, axis = 0)/3)
# #a_node = np.zeros((tot_nnodes, n_dofs_per_node))
# #print(np.size(a_node, axis = 0),np.size(a_node, axis = 1))
# #for i in range(tot_nnodes):
# # a_node[i,:] = [a[i*3], a[i*3+1], a[i*3+2]]
# #node_hash_a[hash(tuple(a_node[i]))] = a_node[i,:]
# #print(a_node)
# # Loopar igenom element
# for elx, ely, elz, dofs in zip(ex, ey, ez, edof):
# if ignore_first:
# el_dofs = dofs[1:]
# else:
# el_dofs = dofs
# # 0 1 2 3 4 5 6 7 8 9 12 11
# el_dof = np.zeros((n_nodes, n_dofs_per_node), dtype=int)
# #a_upd = np.zeros((n_nodes, n_dofs_per_node), dtype=int)
# el_hash_topo = []
# # Loopar igenom elementets noder
# for i in range(n_nodes):
# el_dof[i] = el_dofs[ (i*n_dofs_per_node):((i+1)*n_dofs_per_node) ]
# node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i], ely[i], elz[i]]
# #node_hash_a[hash(tuple(a_node[i]))] = a
# #node_hash_a[hash(tuple(el_dof[i]))] = a
# #node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i]+a[i*3], ely[i]+a[i*3+1], elz[i]+a[i*3+2]]
# #node_hash_a[hash(tuple(a_upd[i]))] = [ a[i*3], a[i*3+1], a[i*3+2] ]
# node_hash_numbers[hash(tuple(el_dof[i]))] = -1
# #a_hash_numbers[hash(tuple(el_dof[i]))] = -1
# node_hash_dofs[hash(tuple(el_dof[i]))] = el_dof[i]
# el_hash_topo.append(hash(tuple(el_dof[i])))
# el_hash_dofs.append(el_hash_topo)
# coord_count = 0
# """
# #for i in range(tot_nnodes):
# for node_hash in node_hash_numbers.keys():
# node_hash_numbers[node_hash] = coord_count
# #node_hash_numbers[node_hash] = coord_count
# #node[i] = el_dofs[ (i*n_dofs_per_node):((i+1)*n_dofs_per_node) ]
# a_node[i] = node_hash_numbers[node_hash]
# coord_count +=1
# node_hash_a[hash(tuple(node[i]))] = a[i]
# """
# #for i in range
# coord_count = 0
# coords = []
# node_dofs = []
# #a_new = []
# #print(node_hash_numbers.keys())
# #print(len(node_hash_a))
# #print(node_hash_a)
# #print(node_hash_coords)
# #a_node_new = []
# # Skapar global koordinatmartis baserat på hashes
# for node_hash in node_hash_numbers.keys():
# node_hash_numbers[node_hash] = coord_count
# #print(node_hash_numbers[node_hash])
# #node_hash_a[hash(tuple(a))] = a_upd
# node_dofs.append(node_hash_dofs[node_hash])
# coord_count +=1
# coords.append(node_hash_coords[node_hash])
# #a_node_new.append(node_hash_a[node_hash])
# #a_node_new.append(node_hash_coords[node_hash])
# #print(node_hash_numbers.keys())
# #print(node_hash_coords)
# #a_new.append(node_hash_a[node_hash])
# #a_new.append(hash(node_hash_a[node_hash]))
# #a_new.append(hash(tuple(node_hash_a[node_hash])))
# """
# a_count = 0
# for a_hash in a_hash_numbers.keys():
# a_hash_numbers[node_hash] = coord_count
# a_count +=1
# a_node_new.append(node_hash_a[a_hash])
# #a_node_new.append(node_hash_coords[node_hash])
# #print(node_hash_numbers.keys())
# #print(node_hash_coords)
# #a_new.append(node_hash_a[node_hash])
# #a_new.append(hash(node_hash_a[node_hash]))
# #a_new.append(hash(tuple(node_hash_a[node_hash])))
# """
# #for i in range(coord_count)
# # node_hash_a[hash(tuple(el_dof[i]))] = -1
# #for node_hash in node_hash_numbers.keys():
# # a_node.append()
# topo = []
# #a_el = []
# #print(el_hash_dofs)
# #print(node_hash_numbers)
# # Skapar global topologimartis baserat på hashes
# for el_hashes in el_hash_dofs:
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[4]],
# node_hash_numbers[el_hashes[5]],
# node_hash_numbers[el_hashes[6]],
# node_hash_numbers[el_hashes[7]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[3]],
# node_hash_numbers[el_hashes[7]],
# node_hash_numbers[el_hashes[4]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[6]],
# node_hash_numbers[el_hashes[5]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[5]],
# node_hash_numbers[el_hashes[4]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]],
# node_hash_numbers[el_hashes[7]],
# node_hash_numbers[el_hashes[6]]
# ])
# #a_el.append(a[node_hash_numbers[el_hashes[0]]])
# #a_el.append(a[node_hash_numbers[el_hashes[1]]])
# #a_el.append(a[node_hash_numbers[el_hashes[2]]])
# #a_el.append(a[node_hash_numbers[el_hashes[3]]])
# #a_el.append(a[node_hash_numbers[el_hashes[4]]])
# #a_el.append(a[node_hash_numbers[el_hashes[5]]])
# #a_el.append(a[node_hash_numbers[el_hashes[6]]])
# #a_el.append(a[node_hash_numbers[el_hashes[7]]])
# """
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ]
# )
# """
# #print(coords)
# """
# a = a.tolist()
# print(a)
# for i in range(len(coords)):
# coords[i][0] = coords[i][0] + a[i*3]
# coords[i][1] = coords[i][1] + a[i*3+1]
# coords[i][2] = coords[i][2] + a[i*3+2]
# """
# #mesh = v.Mesh([def_coord[coords,:],[[0,1,2,3],[4,5,6,7],[0,3,7,4],[1,2,6,5],[0,1,5,4],[2,3,7,6]]],alpha=alpha).lw(1)
# return coords, topo, node_dofs
"""
def ugrid_from_edof_ec(edof, ex, ey, ez, a, dofs_per_node=3, ignore_first=False):
coords, topo, node_dofs = convert_to_node_topo_upd(edof, ex, ey, ez, dofs_per_node, ignore_first)
npoint = coords.shape[0]
nel = topo.shape[0]
nnd = topo.shape[1]
for i in range(npoint):
#print([a[i*3],a[i*3+1],a[i*3+2]])
coords[i][0] = coords[i][0] + a[i*3]
coords[i][1] = coords[i][1] + a[i*3+1]
coords[i][2] = coords[i][2] + a[i*3+2]
if nnd == 4:
ct = vtk.VTK_TETRA
elif nnd == 8:
ct = vtk.VTK_HEXAHEDRON
else:
print("Topology not supported.")
celltypes = [ct] * nel
return v.UGrid([coords, topo, celltypes])
def convert_to_node_topo_upd(edof, ex, ey, ez, dofs_per_node=3, ignore_first=False):
Routine to convert dof based topology and element coordinates to node based
topology required for visualisation with VTK and other visualisation frameworks
:param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
:param array ex: element x coordinates [nel x n_nodes]
:param array ey: element y coordinates [nel x n_nodes]
:param array ez: element z coordinates [nel x n_nodes]
:param array n_dofs_per_node: number of dofs per node. (default = 3)
:param boolean ignore_first: ignore first column of edof. (default = True)
:return array coords: Array of node coordinates. [n_nodes x 3]
:return array topo: Node topology. [nel x n_nodes]
:return array node_dofs: Dofs for each node. [n_nodes x n_dofs_per_node]
node_hash_coords = {}
node_hash_numbers = {}
node_hash_dofs = {}
el_hash_dofs = []
nel, cols = edof.shape
if ignore_first:
tot_dofs = cols-1
else:
tot_dofs = cols
n_nodes = int(tot_dofs / dofs_per_node)
print("n_dofs_per_node =", dofs_per_node)
print("cols =", tot_dofs)
print("nel =", nel)
print("n_nodes =", n_nodes)
for elx, ely, elz, dofs in zip(ex, ey, ez, edof):
if ignore_first:
el_dofs = dofs[1:]
else:
el_dofs = dofs
# 0 1 2 3 4 5 6 7 8 9 12 11
el_dof = np.zeros((n_nodes, dofs_per_node), dtype=int)
el_hash_topo = []
for i in range(n_nodes):
el_dof[i] = el_dofs[ (i*dofs_per_node):((i+1)*dofs_per_node) ]
node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i], ely[i], elz[i]]
node_hash_numbers[hash(tuple(el_dof[i]))] = -1
node_hash_dofs[hash(tuple(el_dof[i]))] = el_dof[i]
el_hash_topo.append(hash(tuple(el_dof[i])))
el_hash_dofs.append(el_hash_topo)
coord_count = 0
coords = []
node_dofs = []
for node_hash in node_hash_numbers.keys():
node_hash_numbers[node_hash] = coord_count
node_dofs.append(node_hash_dofs[node_hash])
coord_count +=1
coords.append(node_hash_coords[node_hash])
topo = []
for el_hashes in el_hash_dofs:
el_hash_topo = []
for el_hash in el_hashes:
el_hash_topo.append(node_hash_numbers[el_hash])
topo.append(el_hash_topo)
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ]
# )
return np.asarray(coords), np.asarray(topo), np.asarray(node_dofs)
"""
| [
"numpy.mean",
"vedo.Sphere",
"numpy.size",
"numpy.asarray",
"numpy.isin",
"numpy.any",
"numpy.array",
"numpy.split",
"numpy.zeros",
"vedo.Cylinder",
"sys.exit"
] | [((5300, 5322), 'numpy.size', 'np.size', (['coord'], {'axis': '(0)'}), '(coord, axis=0)\n', (5307, 5322), True, 'import numpy as np\n'), ((5338, 5360), 'numpy.size', 'np.size', (['coord'], {'axis': '(1)'}), '(coord, axis=1)\n', (5345, 5360), True, 'import numpy as np\n'), ((8386, 8404), 'numpy.size', 'np.size', (['points', '(0)'], {}), '(points, 0)\n', (8393, 8404), True, 'import numpy as np\n'), ((10611, 10632), 'numpy.size', 'np.size', (['edof'], {'axis': '(0)'}), '(edof, axis=0)\n', (10618, 10632), True, 'import numpy as np\n'), ((10680, 10701), 'numpy.size', 'np.size', (['edof'], {'axis': '(1)'}), '(edof, axis=1)\n', (10687, 10701), True, 'import numpy as np\n'), ((10731, 10753), 'numpy.size', 'np.size', (['coord'], {'axis': '(0)'}), '(coord, axis=0)\n', (10738, 10753), True, 'import numpy as np\n'), ((10781, 10803), 'numpy.size', 'np.size', (['coord'], {'axis': '(1)'}), '(coord, axis=1)\n', (10788, 10803), True, 'import numpy as np\n'), ((10916, 10936), 'numpy.size', 'np.size', (['dof'], {'axis': '(1)'}), '(dof, axis=1)\n', (10923, 10936), True, 'import numpy as np\n'), ((23175, 23196), 'numpy.size', 'np.size', (['edof'], {'axis': '(0)'}), '(edof, axis=0)\n', (23182, 23196), True, 'import numpy as np\n'), ((23209, 23229), 'numpy.size', 'np.size', (['dof'], {'axis': '(0)'}), '(dof, axis=0)\n', (23216, 23229), True, 'import numpy as np\n'), ((23254, 23274), 'numpy.zeros', 'np.zeros', (['(nnode, 1)'], {}), '((nnode, 1))\n', (23262, 23274), True, 'import numpy as np\n'), ((25970, 25991), 'numpy.size', 'np.size', (['edof'], {'axis': '(0)'}), '(edof, axis=0)\n', (25977, 25991), True, 'import numpy as np\n'), ((26008, 26030), 'numpy.zeros', 'np.zeros', (['(nel * 6, 1)'], {}), '((nel * 6, 1))\n', (26016, 26030), True, 'import numpy as np\n'), ((1018, 1039), 'numpy.split', 'np.split', (['edof_row', '(2)'], {}), '(edof_row, 2)\n', (1026, 1039), True, 'import numpy as np\n'), ((8264, 8280), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (8272, 8280), 
True, 'import numpy as np\n'), ((8295, 8312), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (8303, 8312), True, 'import numpy as np\n'), ((8464, 8527), 'vedo.Cylinder', 'v.Cylinder', (['[spts[i], epts[i]]'], {'r': '(shaftWidth * 0.01)', 'res': '(4)', 'c': 'c'}), '([spts[i], epts[i]], r=shaftWidth * 0.01, res=4, c=c)\n', (8474, 8527), True, 'import vedo as v\n'), ((10840, 10860), 'numpy.size', 'np.size', (['dof'], {'axis': '(0)'}), '(dof, axis=0)\n', (10847, 10860), True, 'import numpy as np\n'), ((10861, 10881), 'numpy.size', 'np.size', (['dof'], {'axis': '(1)'}), '(dof, axis=1)\n', (10868, 10881), True, 'import numpy as np\n'), ((10994, 11012), 'numpy.size', 'np.size', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (11001, 11012), True, 'import numpy as np\n'), ((20383, 20423), 'numpy.zeros', 'np.zeros', (['(nel, n_nodes * dofs_per_node)'], {}), '((nel, n_nodes * dofs_per_node))\n', (20391, 20423), True, 'import numpy as np\n'), ((20454, 20478), 'numpy.zeros', 'np.zeros', (['(nel, n_nodes)'], {}), '((nel, n_nodes))\n', (20462, 20478), True, 'import numpy as np\n'), ((20708, 20753), 'numpy.zeros', 'np.zeros', (['(n_nodes, dofs_per_node)'], {'dtype': 'int'}), '((n_nodes, dofs_per_node), dtype=int)\n', (20716, 20753), True, 'import numpy as np\n'), ((22478, 22496), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (22488, 22496), True, 'import numpy as np\n'), ((22498, 22514), 'numpy.asarray', 'np.asarray', (['topo'], {}), '(topo)\n', (22508, 22514), True, 'import numpy as np\n'), ((22516, 22537), 'numpy.asarray', 'np.asarray', (['node_dofs'], {}), '(node_dofs)\n', (22526, 22537), True, 'import numpy as np\n'), ((22539, 22561), 'numpy.asarray', 'np.asarray', (['node_displ'], {}), '(node_displ)\n', (22549, 22561), True, 'import numpy as np\n'), ((22563, 22587), 'numpy.asarray', 'np.asarray', (['node_scalars'], {}), '(node_scalars)\n', (22573, 22587), True, 'import numpy as np\n'), ((23314, 23337), 'numpy.size', 'np.size', (['values'], {'axis': 
'(0)'}), '(values, axis=0)\n', (23321, 23337), True, 'import numpy as np\n'), ((23383, 23406), 'numpy.size', 'np.size', (['values'], {'axis': '(1)'}), '(values, axis=1)\n', (23390, 23406), True, 'import numpy as np\n'), ((24704, 24721), 'numpy.mean', 'np.mean', (['test2[i]'], {}), '(test2[i])\n', (24711, 24721), True, 'import numpy as np\n'), ((1600, 1621), 'numpy.split', 'np.split', (['edof_row', '(8)'], {}), '(edof_row, 8)\n', (1608, 1621), True, 'import numpy as np\n'), ((2686, 2760), 'numpy.array', 'np.array', (['[coord1, coord2, coord3, coord4, coord5, coord6, coord7, coord8]'], {}), '([coord1, coord2, coord3, coord4, coord5, coord6, coord7, coord8])\n', (2694, 2760), True, 'import numpy as np\n'), ((11247, 11270), 'numpy.size', 'np.size', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (11254, 11270), True, 'import numpy as np\n'), ((2861, 2882), 'numpy.split', 'np.split', (['edof_row', '(4)'], {}), '(edof_row, 4)\n', (2869, 2882), True, 'import numpy as np\n'), ((3423, 3465), 'numpy.array', 'np.array', (['[coord1, coord2, coord3, coord4]'], {}), '([coord1, coord2, coord3, coord4])\n', (3431, 3465), True, 'import numpy as np\n'), ((5774, 5817), 'numpy.isin', 'np.isin', (['bcPrescr', 'dofs'], {'assume_unique': '(True)'}), '(bcPrescr, dofs, assume_unique=True)\n', (5781, 5817), True, 'import numpy as np\n'), ((6123, 6166), 'numpy.isin', 'np.isin', (['bcPrescr', 'dofs'], {'assume_unique': '(True)'}), '(bcPrescr, dofs, assume_unique=True)\n', (6130, 6166), True, 'import numpy as np\n'), ((11620, 11630), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11628, 11630), False, 'import sys\n'), ((11690, 11713), 'numpy.size', 'np.size', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (11697, 11713), True, 'import numpy as np\n'), ((11714, 11737), 'numpy.size', 'np.size', (['values'], {'axis': '(1)'}), '(values, axis=1)\n', (11721, 11737), True, 'import numpy as np\n'), ((5877, 5919), 'numpy.isin', 'np.isin', (['fPrescr', 'dofs'], {'assume_unique': '(True)'}), 
'(fPrescr, dofs, assume_unique=True)\n', (5884, 5919), True, 'import numpy as np\n'), ((6614, 6656), 'numpy.isin', 'np.isin', (['fPrescr', 'dofs'], {'assume_unique': '(True)'}), '(fPrescr, dofs, assume_unique=True)\n', (6621, 6656), True, 'import numpy as np\n'), ((2179, 2211), 'numpy.any', 'np.any', (['(edof_row1 == dof)'], {'axis': '(1)'}), '(edof_row1 == dof, axis=1)\n', (2185, 2211), True, 'import numpy as np\n'), ((2244, 2276), 'numpy.any', 'np.any', (['(edof_row2 == dof)'], {'axis': '(1)'}), '(edof_row2 == dof, axis=1)\n', (2250, 2276), True, 'import numpy as np\n'), ((2309, 2341), 'numpy.any', 'np.any', (['(edof_row3 == dof)'], {'axis': '(1)'}), '(edof_row3 == dof, axis=1)\n', (2315, 2341), True, 'import numpy as np\n'), ((2374, 2406), 'numpy.any', 'np.any', (['(edof_row4 == dof)'], {'axis': '(1)'}), '(edof_row4 == dof, axis=1)\n', (2380, 2406), True, 'import numpy as np\n'), ((2439, 2471), 'numpy.any', 'np.any', (['(edof_row5 == dof)'], {'axis': '(1)'}), '(edof_row5 == dof, axis=1)\n', (2445, 2471), True, 'import numpy as np\n'), ((2504, 2536), 'numpy.any', 'np.any', (['(edof_row6 == dof)'], {'axis': '(1)'}), '(edof_row6 == dof, axis=1)\n', (2510, 2536), True, 'import numpy as np\n'), ((2569, 2601), 'numpy.any', 'np.any', (['(edof_row7 == dof)'], {'axis': '(1)'}), '(edof_row7 == dof, axis=1)\n', (2575, 2601), True, 'import numpy as np\n'), ((2634, 2666), 'numpy.any', 'np.any', (['(edof_row8 == dof)'], {'axis': '(1)'}), '(edof_row8 == dof, axis=1)\n', (2640, 2666), True, 'import numpy as np\n'), ((12225, 12235), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12233, 12235), False, 'import sys\n'), ((3176, 3208), 'numpy.any', 'np.any', (['(edof_row1 == dof)'], {'axis': '(1)'}), '(edof_row1 == dof, axis=1)\n', (3182, 3208), True, 'import numpy as np\n'), ((3241, 3273), 'numpy.any', 'np.any', (['(edof_row2 == dof)'], {'axis': '(1)'}), '(edof_row2 == dof, axis=1)\n', (3247, 3273), True, 'import numpy as np\n'), ((3306, 3338), 'numpy.any', 'np.any', (['(edof_row3 == 
dof)'], {'axis': '(1)'}), '(edof_row3 == dof, axis=1)\n', (3312, 3338), True, 'import numpy as np\n'), ((3371, 3403), 'numpy.any', 'np.any', (['(edof_row4 == dof)'], {'axis': '(1)'}), '(edof_row4 == dof, axis=1)\n', (3377, 3403), True, 'import numpy as np\n'), ((6016, 6033), 'vedo.Sphere', 'v.Sphere', ([], {'c': 'color'}), '(c=color)\n', (6024, 6033), True, 'import vedo as v\n')] |
"""Train the full model.
python im_caption_full.py --multi_gpu --batch_size 512 --save_checkpoint_steps\
1000 --gen_lr 0.001 --dis_lr 0.001
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sys
import numpy as np
import tensorflow as tf
import tensorflow.contrib.gan as tfgan
import tensorflow.contrib.slim as slim
from tensorflow.contrib.framework import nest
from tensorflow.contrib.gan.python.losses.python.losses_impl import modified_discriminator_loss
from tensorflow.contrib.gan.python.train import get_sequential_train_hooks
from config import TF_MODELS_PATH
from input_pipeline import input_fn
from misc_fn import crop_sentence
from misc_fn import get_len
from misc_fn import obj_rewards
from misc_fn import transform_grads_fn
from misc_fn import validate_batch_size_for_multi_gpu
from misc_fn import variable_summaries
sys.path.append(TF_MODELS_PATH + '/research/slim')
from nets import inception_v4
tf.logging.set_verbosity(tf.logging.INFO)
# --- Session threading (0 lets TensorFlow choose a default). ---
tf.flags.DEFINE_integer('intra_op_parallelism_threads', 0, 'Number of threads')
tf.flags.DEFINE_integer('inter_op_parallelism_threads', 0, 'Number of threads')
tf.flags.DEFINE_bool('multi_gpu', False, 'use multi gpus')
# --- Model sizes and regularization. ---
tf.flags.DEFINE_integer('emb_dim', 512, 'emb dim')
tf.flags.DEFINE_integer('mem_dim', 512, 'mem dim')
tf.flags.DEFINE_float('keep_prob', 0.8, 'keep prob')
# --- Training schedule and checkpointing. ---
tf.flags.DEFINE_string('job_dir', 'saving', 'job dir')
tf.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.flags.DEFINE_integer('max_steps', 1000000, 'maximum training steps')
tf.flags.DEFINE_float('gen_lr', 0.0001, 'learning rate')
tf.flags.DEFINE_float('dis_lr', 0.0001, 'learning rate')
tf.flags.DEFINE_integer('save_summary_steps', 100, 'save summary steps')
tf.flags.DEFINE_integer('save_checkpoint_steps', 5000, 'save ckpt')
tf.flags.DEFINE_integer('max_caption_length', 20, 'max len')
# --- GAN options (Wasserstein penalty, sample pooling). ---
tf.flags.DEFINE_bool('wass', False, 'use wass')
tf.flags.DEFINE_bool('use_pool', False, 'use pool')
tf.flags.DEFINE_integer('pool_size', 512, 'pool size')
# --- Initialization checkpoints for the three sub-networks. ---
tf.flags.DEFINE_string('inc_ckpt', None, 'path to InceptionV4 checkpoint')
tf.flags.DEFINE_string('imcap_ckpt', None, 'initialization checkpoint')
tf.flags.DEFINE_string('sae_ckpt', None, 'initialization checkpoint')
# --- Loss weights for the object reward and the feature-MSE term. ---
tf.flags.DEFINE_float('w_obj', 10, 'object weight')
tf.flags.DEFINE_float('w_mse', 100, 'object weight')
# NOTE(review): vocab_size / start_id / end_id flags are referenced below but
# defined elsewhere (presumably in input_pipeline) — confirm before running
# this file standalone.
FLAGS = tf.flags.FLAGS
def generator(inputs, is_training=True):
  """The sentence generator.

  Unrolls an LSTM over a projected image feature and samples one word id per
  step; a second, greedy (argmax) rollout from the same initial state serves
  as the self-critical baseline.

  Args:
    inputs: tuple whose first element is the image feature batch; only
      element 0 is used here.
    is_training: if True, wraps the LSTM cell with dropout.

  Returns:
    sequence: sampled word ids [batch, max_caption_length].
    logits: per-step vocabulary logits.
    log_probs: log-probabilities of the sampled words.
    baseline: greedy-rollout word ids (self-critic baseline).
    feat: the L2-normalized projected image feature.
  """
  # Word embedding; its transpose is reused as the softmax weight
  # (tied input/output embeddings).
  embedding = tf.get_variable(
      name='embedding',
      shape=[FLAGS.vocab_size, FLAGS.emb_dim],
      initializer=tf.random_uniform_initializer(-0.08, 0.08))
  softmax_w = tf.matrix_transpose(embedding)
  softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
  inputs = inputs[0]
  # Project the image feature into the LSTM dimension and L2-normalize.
  feat = slim.fully_connected(inputs, FLAGS.mem_dim, activation_fn=None)
  feat = tf.nn.l2_normalize(feat, axis=1)
  batch_size = tf.shape(feat)[0]
  cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
  if is_training:
    cell = tf.nn.rnn_cell.DropoutWrapper(cell, FLAGS.keep_prob, FLAGS.keep_prob)
  zero_state = cell.zero_state(batch_size, tf.float32)
  sequence, logits, log_probs, rnn_outs = [], [], [], []
  # Step 0 feeds the image feature; the resulting state is saved so the
  # baseline rollout below starts from the identical state.
  _, state = cell(feat, zero_state)
  state_bl = state
  # From here on, every cell call reuses the variables created above.
  tf.get_variable_scope().reuse_variables()
  for t in range(FLAGS.max_caption_length):
    if t == 0:
      # First input is the start token; later inputs are the sampled word.
      rnn_inp = tf.zeros([batch_size], tf.int32) + FLAGS.start_id
    rnn_inp = tf.nn.embedding_lookup(embedding, rnn_inp)
    rnn_out, state = cell(rnn_inp, state)
    rnn_outs.append(rnn_out)
    logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b)
    # Stochastic sampling for the REINFORCE rollout.
    categorical = tf.contrib.distributions.Categorical(logits=logit)
    fake = categorical.sample()
    log_prob = categorical.log_prob(fake)
    sequence.append(fake)
    log_probs.append(log_prob)
    logits.append(logit)
    rnn_inp = fake
  sequence = tf.stack(sequence, axis=1)
  log_probs = tf.stack(log_probs, axis=1)
  logits = tf.stack(logits, axis=1)
  # Computes the baseline for self-critic.
  baseline = []
  state = state_bl
  for t in range(FLAGS.max_caption_length):
    if t == 0:
      rnn_inp = tf.zeros([batch_size], tf.int32) + FLAGS.start_id
    rnn_inp = tf.nn.embedding_lookup(embedding, rnn_inp)
    rnn_out, state = cell(rnn_inp, state)
    logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b)
    # Greedy decoding: take the most likely word at each step.
    fake = tf.argmax(logit, axis=1, output_type=tf.int32)
    baseline.append(fake)
    rnn_inp = fake
  baseline = tf.stack(baseline, axis=1)
  return sequence, logits, log_probs, baseline, feat
def discriminator(generated_data, generator_inputs, is_training=True):
  """Scores sentences with an LSTM discriminator.

  Args:
    generated_data: a tuple whose first element is a generated word-id
      sequence, or a real word-id sequence tensor from the input pipeline.
    generator_inputs: generator inputs; the last element carries the
      sentence lengths for real data (unused for generated data).
    is_training: if True, wraps the LSTM cell with dropout.

  Returns:
    A triple of (per-step logits, valid-step mask, reconstructed feature).
  """
  # Generated sentences arrive as a tuple and their length has to be derived
  # from the end token; real sentences come with known lengths.
  if type(generated_data) is tuple:
    sequence = generated_data[0]
    length = get_len(sequence, FLAGS.end_id)
  else:
    sequence = generated_data
    length = generator_inputs[-1]
  embedding = tf.get_variable(
      name='embedding',
      shape=[FLAGS.vocab_size, FLAGS.emb_dim],
      initializer=tf.random_uniform_initializer(-0.08, 0.08))
  lstm = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
  if is_training:
    lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, FLAGS.keep_prob, FLAGS.keep_prob)
  token_vectors = tf.nn.embedding_lookup(embedding, sequence)
  outputs, _ = tf.nn.dynamic_rnn(lstm, token_vectors, length, dtype=tf.float32)
  # One real/fake logit per time step.
  logits = slim.fully_connected(outputs, 1, activation_fn=None, scope='fc')
  logits = tf.squeeze(logits, 2)
  mask = tf.sequence_mask(length, tf.shape(sequence)[1])
  # Pick out the RNN output at the last valid step of every sequence.
  last_step = tf.transpose(
      tf.stack([tf.range(tf.shape(length)[0]), length - 1]))
  final_out = tf.gather_nd(outputs, last_step)
  # Project the final state back to the feature space and normalize, so it
  # can be compared with the image feature (reconstruction term).
  recon = slim.fully_connected(final_out, FLAGS.mem_dim, activation_fn=None,
                               scope='recon')
  recon = tf.nn.l2_normalize(recon, axis=1)
  return logits, mask, recon
def rl_loss(gan_model, gan_loss, classes, scores, num, add_summaries):
  """Reinforcement learning loss.

  Builds the policy-gradient generator objective: per-step rewards come from
  the discriminator's log-sigmoid score plus an object reward (obj_rewards,
  defined in misc_fn) and a sentence-level feature-reconstruction penalty;
  a greedy self-critic rollout provides the baseline.

  Args:
    gan_model: tfgan GANModel with generated_data =
      (sequence, logits, log_probs, greedy baseline, image feature).
    gan_loss: tfgan GANLoss to be updated.
    classes, scores, num: detected object info fed to obj_rewards.
    add_summaries: whether to emit TensorBoard summaries.

  Returns:
    The gan_loss with generator_loss replaced by the RL objective and the
    feature-MSE added to the discriminator loss.
  """
  eps = 1e-7
  # Discount factor for the reward-to-go accumulation below.
  gamma = 0.9
  sequence, _, log_probs, seq_bl, pca = gan_model.generated_data
  # Score the greedy baseline rollout with the (shared) discriminator; the
  # baseline must not receive gradients.
  with tf.variable_scope(gan_model.discriminator_scope, reuse=True):
    baselines, _, feat_bl = discriminator((seq_bl, None, None, None, pca), None)
    baselines, feat_bl = nest.map_structure(
        tf.stop_gradient, (baselines, feat_bl))
  logits, mask, feat = gan_model.discriminator_gen_outputs
  # Sentence-level reconstruction error between the image feature and the
  # discriminator's reconstructed feature, weighted by w_mse.
  dist = tf.reduce_mean(tf.squared_difference(pca, feat), axis=1,
                        keepdims=True) * FLAGS.w_mse
  loss_mse = tf.reduce_mean(dist)
  # Broadcast the (negative) reconstruction error to every valid time step.
  l_rewards = -dist
  l_rewards = tf.tile(l_rewards, [1, sequence.shape[1]])
  l_rewards = tf.where(mask, l_rewards, tf.zeros_like(l_rewards))
  l_rewards_mat = l_rewards
  l_rewards = tf.unstack(l_rewards, axis=1)
  # Adversarial reward: log of the discriminator's per-step probability.
  dis_predictions = tf.nn.sigmoid(logits)
  d_rewards = tf.log(dis_predictions + eps)
  # Object reward for words matching detected visual concepts.
  o_rewards = obj_rewards(sequence, mask, classes, scores, num) * FLAGS.w_obj
  rewards = d_rewards + o_rewards
  rewards = tf.where(mask, rewards, tf.zeros_like(rewards))
  # Same reward terms computed for the greedy baseline rollout.
  l_bl = -tf.reduce_mean(tf.squared_difference(pca, feat_bl), axis=1,
                        keepdims=True) * FLAGS.w_mse
  l_bl = tf.tile(l_bl, [1, seq_bl.shape[1]])
  l_bl = tf.where(mask, l_bl, tf.zeros_like(l_bl))
  l_bl = tf.unstack(l_bl, axis=1)
  baselines = tf.nn.sigmoid(baselines)
  baselines = tf.log(baselines + eps)
  baselines += obj_rewards(seq_bl, mask, classes, scores, num) * FLAGS.w_obj
  baselines = tf.where(mask, baselines, tf.zeros_like(baselines))
  log_prob_list = tf.unstack(log_probs, axis=1)
  rewards_list = tf.unstack(rewards, axis=1)
  cumulative_rewards = []
  baseline_list = tf.unstack(baselines, axis=1)
  cumulative_baseline = []
  # Discounted reward-to-go at each step, for the sample and the baseline.
  for t in range(FLAGS.max_caption_length):
    cum_value = l_rewards[t]
    for s in range(t, FLAGS.max_caption_length):
      cum_value += np.power(gamma, s - t) * rewards_list[s]
    cumulative_rewards.append(cum_value)
    cum_value = l_bl[t]
    for s in range(t, FLAGS.max_caption_length):
      cum_value += np.power(gamma, s - t) * baseline_list[s]
    cumulative_baseline.append(cum_value)
  c_rewards = tf.stack(cumulative_rewards, axis=1)
  c_baseline = tf.stack(cumulative_baseline, axis=1)
  advantages = []
  final_gen_objective = []
  # REINFORCE with a self-critic baseline: advantage = reward-to-go minus
  # the baseline's reward-to-go, clipped for stability; the gradient flows
  # only through the log-probabilities.
  for t in range(FLAGS.max_caption_length):
    log_probability = log_prob_list[t]
    cum_advantage = cumulative_rewards[t] - cumulative_baseline[t]
    cum_advantage = tf.clip_by_value(cum_advantage, -5.0, 5.0)
    advantages.append(cum_advantage)
    final_gen_objective.append(
        log_probability * tf.stop_gradient(cum_advantage))
  final_gen_objective = tf.stack(final_gen_objective, axis=1)
  final_gen_objective = tf.losses.compute_weighted_loss(final_gen_objective,
                                                  tf.to_float(mask))
  # Negated because the objective above is maximized.
  final_gen_objective = -final_gen_objective
  advantages = tf.stack(advantages, axis=1)
  if add_summaries:
    tf.summary.scalar('losses/mse', loss_mse)
    tf.summary.scalar('losses/gen_obj', final_gen_objective)
    with tf.name_scope('rewards'):
      variable_summaries(c_rewards, mask, 'rewards')
    with tf.name_scope('advantages'):
      variable_summaries(advantages, mask, 'advantages')
    with tf.name_scope('baselines'):
      variable_summaries(c_baseline, mask, 'baselines')
    with tf.name_scope('log_probs'):
      variable_summaries(log_probs, mask, 'log_probs')
    with tf.name_scope('d_rewards'):
      variable_summaries(d_rewards, mask, 'd_rewards')
    with tf.name_scope('l_rewards'):
      variable_summaries(l_rewards_mat, mask, 'l_rewards')
    with tf.name_scope('o_rewards'):
      variable_summaries(o_rewards, mask, 'o_rewards')
    # Average number of valid steps whose object reward exceeds the
    # per-sentence floor, as a rough "objects found" metric.
    o_rewards = tf.where(mask, o_rewards, tf.zeros_like(o_rewards))
    minimum = tf.minimum(tf.reduce_min(o_rewards, axis=1, keepdims=True), 0.0)
    o_rewards = tf.reduce_sum(
        tf.to_float(tf.logical_and(o_rewards > minimum, mask)), axis=1)
    o_rewards = tf.reduce_mean(o_rewards)
    tf.summary.scalar('mean_found_obj', o_rewards)
  return gan_loss._replace(generator_loss=final_gen_objective,
                           discriminator_loss=gan_loss.discriminator_loss + loss_mse)
def sentence_ae(gan_model, features, labels, add_summaries=True):
  """Sentence auto-encoder.

  Encodes a sentence with the (reused) discriminator RNN into a feature and
  decodes it back with the (reused) generator LSTM, so both networks share
  weights with the adversarial path.

  Args:
    gan_model: tfgan GANModel providing the generator/discriminator scopes.
    features: dict with 'key' (encoder sentence ids) and 'lk' (its lengths).
    labels: dict with 'sentence' (decoder target ids) and 'len' (lengths).
    add_summaries: whether to emit a scalar summary of the loss.

  Returns:
    The mean cross-entropy reconstruction loss (scalar tensor).
  """
  # Encode the sentence with the discriminator; index [2] is the
  # reconstructed feature output of discriminator().
  with tf.variable_scope(gan_model.discriminator_scope, reuse=True):
    feat = discriminator(features['key'], [None, features['lk']])[2]
  # Decode with the generator's LSTM and tied softmax, reusing its weights.
  with tf.variable_scope(gan_model.generator_scope, reuse=True):
    embedding = tf.get_variable(
        name='embedding',
        shape=[FLAGS.vocab_size, FLAGS.emb_dim],
        initializer=tf.random_uniform_initializer(-0.08, 0.08))
    softmax_w = tf.matrix_transpose(embedding)
    softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
    sentence, ls = labels['sentence'], labels['len']
    # Teacher forcing: inputs are the sentence without its last token,
    # targets are the sentence shifted left by one.
    targets = sentence[:, 1:]
    sentence = sentence[:, :-1]
    ls -= 1
    sentence = tf.nn.embedding_lookup(embedding, sentence)
    batch_size = tf.shape(feat)[0]
    cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
    cell = tf.nn.rnn_cell.DropoutWrapper(cell, FLAGS.keep_prob, FLAGS.keep_prob)
    zero_state = cell.zero_state(batch_size, tf.float32)
    # The encoded feature is fed as the first step of the decoder.
    _, state = cell(feat, zero_state)
    tf.get_variable_scope().reuse_variables()
    out, state = tf.nn.dynamic_rnn(cell, sentence, ls, state)
    out = tf.reshape(out, [-1, FLAGS.mem_dim])
    logits = tf.nn.bias_add(tf.matmul(out, softmax_w), softmax_b)
    logits = tf.reshape(logits, [batch_size, -1, FLAGS.vocab_size])
    # Drop padded positions before computing the loss.
    mask = tf.sequence_mask(ls, tf.shape(sentence)[1])
    targets = tf.boolean_mask(targets, mask)
    logits = tf.boolean_mask(logits, mask)
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                        logits=logits)
  loss = tf.reduce_mean(loss)
  if add_summaries:
    tf.summary.scalar('losses/sentence_ae', loss)
  return loss
def model_fn(features, labels, mode, params):
  """The full unsupervised captioning model.

  Estimator model_fn that wires the InceptionV4 feature extractor, the
  tfgan generator/discriminator pair, the RL loss, and the sentence
  auto-encoder into a single training graph.

  Args:
    features: dict with image 'im', detected 'classes'/'scores'/'num', and
      the sentence-AE inputs 'key'/'lk'.
    labels: dict with 'sentence' and 'len' from the input pipeline.
    mode: tf.estimator mode key (only training is built here).
    params: hyperparameters (FLAGS); provides gen_lr, dis_lr, multi_gpu.

  Returns:
    A tf.estimator.EstimatorSpec for training.
  """
  # Under replicate_model_fn only the first tower has reuse=False; use that
  # to emit summaries and savers exactly once.
  is_chief = not tf.get_variable_scope().reuse
  # Frozen InceptionV4 image encoder.
  with slim.arg_scope(inception_v4.inception_v4_arg_scope()):
    net, _ = inception_v4.inception_v4(features['im'], None, is_training=False)
  net = tf.squeeze(net, [1, 2])
  inc_saver = tf.train.Saver(tf.global_variables('InceptionV4'))
  gan_model = tfgan.gan_model(
      generator_fn=generator,
      discriminator_fn=discriminator,
      real_data=labels['sentence'][:, 1:],
      generator_inputs=(net, labels['len'] - 1),
      check_shapes=False)
  if is_chief:
    for variable in tf.trainable_variables():
      tf.summary.histogram(variable.op.name, variable)
    tf.summary.histogram('logits/gen_logits',
                         gan_model.discriminator_gen_outputs[0])
    tf.summary.histogram('logits/real_logits',
                         gan_model.discriminator_real_outputs[0])

  # The generator loss comes from rl_loss below, so the tfgan loss is zero.
  def gen_loss_fn(gan_model, add_summaries):
    return 0

  # Standard (non-saturating) GAN loss on the per-step logits, restricted to
  # valid (unpadded) positions via the mask.
  def dis_loss_fn(gan_model, add_summaries):
    discriminator_real_outputs = gan_model.discriminator_real_outputs
    discriminator_gen_outputs = gan_model.discriminator_gen_outputs
    real_logits = tf.boolean_mask(discriminator_real_outputs[0],
                                    discriminator_real_outputs[1])
    gen_logits = tf.boolean_mask(discriminator_gen_outputs[0],
                                  discriminator_gen_outputs[1])
    return modified_discriminator_loss(real_logits, gen_logits,
                                       add_summaries=add_summaries)

  with tf.name_scope('losses'):
    pool_fn = functools.partial(tfgan.features.tensor_pool,
                                pool_size=FLAGS.pool_size)
    gan_loss = tfgan.gan_loss(
        gan_model,
        generator_loss_fn=gen_loss_fn,
        discriminator_loss_fn=dis_loss_fn,
        gradient_penalty_weight=10 if FLAGS.wass else 0,
        tensor_pool_fn=pool_fn if FLAGS.use_pool else None,
        add_summaries=is_chief)
    if is_chief:
      tfgan.eval.add_regularization_loss_summaries(gan_model)
    # Replace the generator loss with the policy-gradient objective and add
    # the sentence auto-encoder reconstruction loss.
    gan_loss = rl_loss(gan_model, gan_loss, features['classes'],
                       features['scores'], features['num'],
                       add_summaries=is_chief)
    sen_ae_loss = sentence_ae(gan_model, features, labels, is_chief)
    loss = gan_loss.generator_loss + gan_loss.discriminator_loss + sen_ae_loss
    gan_loss = gan_loss._replace(
        generator_loss=gan_loss.generator_loss + sen_ae_loss)
  with tf.name_scope('train'):
    gen_opt = tf.train.AdamOptimizer(params.gen_lr, 0.5)
    dis_opt = tf.train.AdamOptimizer(params.dis_lr, 0.5)
    if params.multi_gpu:
      gen_opt = tf.contrib.estimator.TowerOptimizer(gen_opt)
      dis_opt = tf.contrib.estimator.TowerOptimizer(dis_opt)
    train_ops = tfgan.gan_train_ops(
        gan_model,
        gan_loss,
        generator_optimizer=gen_opt,
        discriminator_optimizer=dis_opt,
        transform_grads_fn=transform_grads_fn,
        summarize_gradients=is_chief,
        check_for_unused_update_ops=not FLAGS.use_pool,
        aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    train_op = train_ops.global_step_inc_op
    train_hooks = get_sequential_train_hooks()(train_ops)
  # Summary the generated caption on the fly.
  if is_chief:
    # Vocabulary: one word per line of word_counts.txt, plus an <unk> entry.
    with open('data/word_counts.txt', 'r') as f:
      dic = list(f)
      dic = [i.split()[0] for i in dic]
      dic.append('<unk>')
      dic = tf.convert_to_tensor(dic)
    sentence = crop_sentence(gan_model.generated_data[0][0], FLAGS.end_id)
    sentence = tf.gather(dic, sentence)
    real = crop_sentence(gan_model.real_data[0], FLAGS.end_id)
    real = tf.gather(dic, real)
    train_hooks.append(
        tf.train.LoggingTensorHook({'fake': sentence, 'real': real},
                                   every_n_iter=100))
    tf.summary.text('fake', sentence)
    tf.summary.image('im', features['im'][None, 0])
  # Savers used to warm-start the generator and the discriminator pieces
  # from the pretraining checkpoints.
  gen_saver = tf.train.Saver(tf.trainable_variables('Generator'))
  dis_var = []
  dis_var.extend(tf.trainable_variables('Discriminator/rnn'))
  dis_var.extend(tf.trainable_variables('Discriminator/embedding'))
  dis_var.extend(tf.trainable_variables('Discriminator/fc'))
  dis_saver = tf.train.Saver(dis_var)

  # Restores InceptionV4 always, and the pretrained generator/discriminator
  # checkpoints when the corresponding flags are set.
  def init_fn(scaffold, session):
    inc_saver.restore(session, FLAGS.inc_ckpt)
    if FLAGS.imcap_ckpt:
      gen_saver.restore(session, FLAGS.imcap_ckpt)
    if FLAGS.sae_ckpt:
      dis_saver.restore(session, FLAGS.sae_ckpt)

  scaffold = tf.train.Scaffold(init_fn=init_fn)
  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      scaffold=scaffold,
      training_hooks=train_hooks)
def main(_):
  """Configures an Estimator from the flags and runs training."""
  os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
  # Optionally replicate the model function across all visible GPUs.
  if FLAGS.multi_gpu:
    validate_batch_size_for_multi_gpu(FLAGS.batch_size)
    chosen_model_fn = tf.contrib.estimator.replicate_model_fn(
        model_fn,
        loss_reduction=tf.losses.Reduction.MEAN)
  else:
    chosen_model_fn = model_fn
  session_conf = tf.ConfigProto(
      allow_soft_placement=True,
      intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
      inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
      gpu_options=tf.GPUOptions(allow_growth=True))
  estimator_conf = tf.estimator.RunConfig(
      session_config=session_conf,
      save_checkpoints_steps=FLAGS.save_checkpoint_steps,
      save_summary_steps=FLAGS.save_summary_steps,
      keep_checkpoint_max=100)
  trainer = tf.estimator.Estimator(
      model_fn=chosen_model_fn,
      model_dir=FLAGS.job_dir,
      config=estimator_conf,
      params=FLAGS)
  trainer.train(functools.partial(input_fn, batch_size=FLAGS.batch_size),
                max_steps=FLAGS.max_steps)
# Script entry point: tf.app.run() parses the flags and then calls main().
if __name__ == '__main__':
  tf.app.run()
| [
"tensorflow.unstack",
"tensorflow.tile",
"nets.inception_v4.inception_v4",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.boolean_mask",
"tensorflow.contrib.gan.gan_train_ops",
"tensorflow.contrib.gan.python.train.get_sequential_train_hooks",
"tensorflow.logging.set_verbosity",
"tensor... | [((933, 983), 'sys.path.append', 'sys.path.append', (["(TF_MODELS_PATH + '/research/slim')"], {}), "(TF_MODELS_PATH + '/research/slim')\n", (948, 983), False, 'import sys\n'), ((1015, 1056), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (1039, 1056), True, 'import tensorflow as tf\n'), ((1058, 1137), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""intra_op_parallelism_threads"""', '(0)', '"""Number of threads"""'], {}), "('intra_op_parallelism_threads', 0, 'Number of threads')\n", (1081, 1137), True, 'import tensorflow as tf\n'), ((1139, 1218), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""inter_op_parallelism_threads"""', '(0)', '"""Number of threads"""'], {}), "('inter_op_parallelism_threads', 0, 'Number of threads')\n", (1162, 1218), True, 'import tensorflow as tf\n'), ((1220, 1278), 'tensorflow.flags.DEFINE_bool', 'tf.flags.DEFINE_bool', (['"""multi_gpu"""', '(False)', '"""use multi gpus"""'], {}), "('multi_gpu', False, 'use multi gpus')\n", (1240, 1278), True, 'import tensorflow as tf\n'), ((1280, 1330), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""emb_dim"""', '(512)', '"""emb dim"""'], {}), "('emb_dim', 512, 'emb dim')\n", (1303, 1330), True, 'import tensorflow as tf\n'), ((1332, 1382), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""mem_dim"""', '(512)', '"""mem dim"""'], {}), "('mem_dim', 512, 'mem dim')\n", (1355, 1382), True, 'import tensorflow as tf\n'), ((1384, 1436), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""keep_prob"""', '(0.8)', '"""keep prob"""'], {}), "('keep_prob', 0.8, 'keep prob')\n", (1405, 1436), True, 'import tensorflow as tf\n'), ((1438, 1492), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""job_dir"""', '"""saving"""', '"""job dir"""'], {}), "('job_dir', 'saving', 'job dir')\n", (1460, 1492), True, 'import tensorflow as tf\n'), ((1494, 
1549), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""batch size"""'], {}), "('batch_size', 64, 'batch size')\n", (1517, 1549), True, 'import tensorflow as tf\n'), ((1551, 1622), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""max_steps"""', '(1000000)', '"""maximum training steps"""'], {}), "('max_steps', 1000000, 'maximum training steps')\n", (1574, 1622), True, 'import tensorflow as tf\n'), ((1624, 1680), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""gen_lr"""', '(0.0001)', '"""learning rate"""'], {}), "('gen_lr', 0.0001, 'learning rate')\n", (1645, 1680), True, 'import tensorflow as tf\n'), ((1682, 1738), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""dis_lr"""', '(0.0001)', '"""learning rate"""'], {}), "('dis_lr', 0.0001, 'learning rate')\n", (1703, 1738), True, 'import tensorflow as tf\n'), ((1740, 1812), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""save_summary_steps"""', '(100)', '"""save summary steps"""'], {}), "('save_summary_steps', 100, 'save summary steps')\n", (1763, 1812), True, 'import tensorflow as tf\n'), ((1814, 1881), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""save_checkpoint_steps"""', '(5000)', '"""save ckpt"""'], {}), "('save_checkpoint_steps', 5000, 'save ckpt')\n", (1837, 1881), True, 'import tensorflow as tf\n'), ((1883, 1943), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""max_caption_length"""', '(20)', '"""max len"""'], {}), "('max_caption_length', 20, 'max len')\n", (1906, 1943), True, 'import tensorflow as tf\n'), ((1945, 1992), 'tensorflow.flags.DEFINE_bool', 'tf.flags.DEFINE_bool', (['"""wass"""', '(False)', '"""use wass"""'], {}), "('wass', False, 'use wass')\n", (1965, 1992), True, 'import tensorflow as tf\n'), ((1994, 2045), 'tensorflow.flags.DEFINE_bool', 'tf.flags.DEFINE_bool', (['"""use_pool"""', '(False)', '"""use pool"""'], {}), "('use_pool', False, 'use 
pool')\n", (2014, 2045), True, 'import tensorflow as tf\n'), ((2047, 2101), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""pool_size"""', '(512)', '"""pool size"""'], {}), "('pool_size', 512, 'pool size')\n", (2070, 2101), True, 'import tensorflow as tf\n'), ((2103, 2177), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""inc_ckpt"""', 'None', '"""path to InceptionV4 checkpoint"""'], {}), "('inc_ckpt', None, 'path to InceptionV4 checkpoint')\n", (2125, 2177), True, 'import tensorflow as tf\n'), ((2179, 2250), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""imcap_ckpt"""', 'None', '"""initialization checkpoint"""'], {}), "('imcap_ckpt', None, 'initialization checkpoint')\n", (2201, 2250), True, 'import tensorflow as tf\n'), ((2252, 2321), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""sae_ckpt"""', 'None', '"""initialization checkpoint"""'], {}), "('sae_ckpt', None, 'initialization checkpoint')\n", (2274, 2321), True, 'import tensorflow as tf\n'), ((2323, 2374), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""w_obj"""', '(10)', '"""object weight"""'], {}), "('w_obj', 10, 'object weight')\n", (2344, 2374), True, 'import tensorflow as tf\n'), ((2376, 2428), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""w_mse"""', '(100)', '"""object weight"""'], {}), "('w_mse', 100, 'object weight')\n", (2397, 2428), True, 'import tensorflow as tf\n'), ((2700, 2730), 'tensorflow.matrix_transpose', 'tf.matrix_transpose', (['embedding'], {}), '(embedding)\n', (2719, 2730), True, 'import tensorflow as tf\n'), ((2745, 2793), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_b"""', '[FLAGS.vocab_size]'], {}), "('softmax_b', [FLAGS.vocab_size])\n", (2760, 2793), True, 'import tensorflow as tf\n'), ((2825, 2888), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['inputs', 'FLAGS.mem_dim'], {'activation_fn': 'None'}), '(inputs, FLAGS.mem_dim, 
activation_fn=None)\n', (2845, 2888), True, 'import tensorflow.contrib.slim as slim\n'), ((2898, 2930), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['feat'], {'axis': '(1)'}), '(feat, axis=1)\n', (2916, 2930), True, 'import tensorflow as tf\n'), ((2974, 3017), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['FLAGS.mem_dim'], {}), '(FLAGS.mem_dim)\n', (3002, 3017), True, 'import tensorflow as tf\n'), ((3909, 3935), 'tensorflow.stack', 'tf.stack', (['sequence'], {'axis': '(1)'}), '(sequence, axis=1)\n', (3917, 3935), True, 'import tensorflow as tf\n'), ((3950, 3977), 'tensorflow.stack', 'tf.stack', (['log_probs'], {'axis': '(1)'}), '(log_probs, axis=1)\n', (3958, 3977), True, 'import tensorflow as tf\n'), ((3989, 4013), 'tensorflow.stack', 'tf.stack', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (3997, 4013), True, 'import tensorflow as tf\n'), ((4502, 4528), 'tensorflow.stack', 'tf.stack', (['baseline'], {'axis': '(1)'}), '(baseline, axis=1)\n', (4510, 4528), True, 'import tensorflow as tf\n'), ((5184, 5227), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['FLAGS.mem_dim'], {}), '(FLAGS.mem_dim)\n', (5212, 5227), True, 'import tensorflow as tf\n'), ((5343, 5386), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'sequence'], {}), '(embedding, sequence)\n', (5365, 5386), True, 'import tensorflow as tf\n'), ((5406, 5467), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'rnn_inputs', 'length'], {'dtype': 'tf.float32'}), '(cell, rnn_inputs, length, dtype=tf.float32)\n', (5423, 5467), True, 'import tensorflow as tf\n'), ((5477, 5541), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['rnn_out', '(1)'], {'activation_fn': 'None', 'scope': '"""fc"""'}), "(rnn_out, 1, activation_fn=None, scope='fc')\n", (5497, 5541), True, 'import tensorflow.contrib.slim as slim\n'), ((5551, 5570), 'tensorflow.squeeze', 'tf.squeeze', (['pred', '(2)'], {}), '(pred, 
2)\n', (5561, 5570), True, 'import tensorflow as tf\n'), ((5717, 5743), 'tensorflow.gather_nd', 'tf.gather_nd', (['rnn_out', 'idx'], {}), '(rnn_out, idx)\n', (5729, 5743), True, 'import tensorflow as tf\n'), ((5753, 5832), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['state_h', 'FLAGS.mem_dim'], {'activation_fn': 'None', 'scope': '"""recon"""'}), "(state_h, FLAGS.mem_dim, activation_fn=None, scope='recon')\n", (5773, 5832), True, 'import tensorflow.contrib.slim as slim\n'), ((5872, 5904), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['feat'], {'axis': '(1)'}), '(feat, axis=1)\n', (5890, 5904), True, 'import tensorflow as tf\n'), ((6568, 6588), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dist'], {}), '(dist)\n', (6582, 6588), True, 'import tensorflow as tf\n'), ((6623, 6665), 'tensorflow.tile', 'tf.tile', (['l_rewards', '[1, sequence.shape[1]]'], {}), '(l_rewards, [1, sequence.shape[1]])\n', (6630, 6665), True, 'import tensorflow as tf\n'), ((6774, 6803), 'tensorflow.unstack', 'tf.unstack', (['l_rewards'], {'axis': '(1)'}), '(l_rewards, axis=1)\n', (6784, 6803), True, 'import tensorflow as tf\n'), ((6825, 6846), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (6838, 6846), True, 'import tensorflow as tf\n'), ((6861, 6890), 'tensorflow.log', 'tf.log', (['(dis_predictions + eps)'], {}), '(dis_predictions + eps)\n', (6867, 6890), True, 'import tensorflow as tf\n'), ((7197, 7232), 'tensorflow.tile', 'tf.tile', (['l_bl', '[1, seq_bl.shape[1]]'], {}), '(l_bl, [1, seq_bl.shape[1]])\n', (7204, 7232), True, 'import tensorflow as tf\n'), ((7293, 7317), 'tensorflow.unstack', 'tf.unstack', (['l_bl'], {'axis': '(1)'}), '(l_bl, axis=1)\n', (7303, 7317), True, 'import tensorflow as tf\n'), ((7332, 7356), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['baselines'], {}), '(baselines)\n', (7345, 7356), True, 'import tensorflow as tf\n'), ((7371, 7394), 'tensorflow.log', 'tf.log', (['(baselines + eps)'], {}), '(baselines + 
eps)\n', (7377, 7394), True, 'import tensorflow as tf\n'), ((7557, 7586), 'tensorflow.unstack', 'tf.unstack', (['log_probs'], {'axis': '(1)'}), '(log_probs, axis=1)\n', (7567, 7586), True, 'import tensorflow as tf\n'), ((7604, 7631), 'tensorflow.unstack', 'tf.unstack', (['rewards'], {'axis': '(1)'}), '(rewards, axis=1)\n', (7614, 7631), True, 'import tensorflow as tf\n'), ((7676, 7705), 'tensorflow.unstack', 'tf.unstack', (['baselines'], {'axis': '(1)'}), '(baselines, axis=1)\n', (7686, 7705), True, 'import tensorflow as tf\n'), ((8147, 8183), 'tensorflow.stack', 'tf.stack', (['cumulative_rewards'], {'axis': '(1)'}), '(cumulative_rewards, axis=1)\n', (8155, 8183), True, 'import tensorflow as tf\n'), ((8199, 8236), 'tensorflow.stack', 'tf.stack', (['cumulative_baseline'], {'axis': '(1)'}), '(cumulative_baseline, axis=1)\n', (8207, 8236), True, 'import tensorflow as tf\n'), ((8646, 8683), 'tensorflow.stack', 'tf.stack', (['final_gen_objective'], {'axis': '(1)'}), '(final_gen_objective, axis=1)\n', (8654, 8683), True, 'import tensorflow as tf\n'), ((8896, 8924), 'tensorflow.stack', 'tf.stack', (['advantages'], {'axis': '(1)'}), '(advantages, axis=1)\n', (8904, 8924), True, 'import tensorflow as tf\n'), ((11599, 11629), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['targets', 'mask'], {}), '(targets, mask)\n', (11614, 11629), True, 'import tensorflow as tf\n'), ((11641, 11670), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['logits', 'mask'], {}), '(logits, mask)\n', (11656, 11670), True, 'import tensorflow as tf\n'), ((11680, 11757), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'targets', 'logits': 'logits'}), '(labels=targets, logits=logits)\n', (11726, 11757), True, 'import tensorflow as tf\n'), ((11823, 11843), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (11837, 11843), True, 'import tensorflow as tf\n'), ((12222, 12245), 'tensorflow.squeeze', 'tf.squeeze', 
(['net', '[1, 2]'], {}), '(net, [1, 2])\n', (12232, 12245), True, 'import tensorflow as tf\n'), ((12326, 12506), 'tensorflow.contrib.gan.gan_model', 'tfgan.gan_model', ([], {'generator_fn': 'generator', 'discriminator_fn': 'discriminator', 'real_data': "labels['sentence'][:, 1:]", 'generator_inputs': "(net, labels['len'] - 1)", 'check_shapes': '(False)'}), "(generator_fn=generator, discriminator_fn=discriminator,\n real_data=labels['sentence'][:, 1:], generator_inputs=(net, labels[\n 'len'] - 1), check_shapes=False)\n", (12341, 12506), True, 'import tensorflow.contrib.gan as tfgan\n'), ((16103, 16126), 'tensorflow.train.Saver', 'tf.train.Saver', (['dis_var'], {}), '(dis_var)\n', (16117, 16126), True, 'import tensorflow as tf\n'), ((16371, 16405), 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {'init_fn': 'init_fn'}), '(init_fn=init_fn)\n', (16388, 16405), True, 'import tensorflow as tf\n'), ((16416, 16534), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op', 'scaffold': 'scaffold', 'training_hooks': 'train_hooks'}), '(mode=mode, loss=loss, train_op=train_op,\n scaffold=scaffold, training_hooks=train_hooks)\n', (16442, 16534), True, 'import tensorflow as tf\n'), ((17127, 17309), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'session_config': 'sess_config', 'save_checkpoints_steps': 'FLAGS.save_checkpoint_steps', 'save_summary_steps': 'FLAGS.save_summary_steps', 'keep_checkpoint_max': '(100)'}), '(session_config=sess_config, save_checkpoints_steps=\n FLAGS.save_checkpoint_steps, save_summary_steps=FLAGS.\n save_summary_steps, keep_checkpoint_max=100)\n', (17149, 17309), True, 'import tensorflow as tf\n'), ((17337, 17393), 'functools.partial', 'functools.partial', (['input_fn'], {'batch_size': 'FLAGS.batch_size'}), '(input_fn, batch_size=FLAGS.batch_size)\n', (17354, 17393), False, 'import functools\n'), ((17409, 17518), 'tensorflow.estimator.Estimator', 
'tf.estimator.Estimator', ([], {'model_fn': 'model_function', 'model_dir': 'FLAGS.job_dir', 'config': 'run_config', 'params': 'FLAGS'}), '(model_fn=model_function, model_dir=FLAGS.job_dir,\n config=run_config, params=FLAGS)\n', (17431, 17518), True, 'import tensorflow as tf\n'), ((17625, 17637), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (17635, 17637), True, 'import tensorflow as tf\n'), ((2947, 2961), 'tensorflow.shape', 'tf.shape', (['feat'], {}), '(feat)\n', (2955, 2961), True, 'import tensorflow as tf\n'), ((3047, 3116), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['cell', 'FLAGS.keep_prob', 'FLAGS.keep_prob'], {}), '(cell, FLAGS.keep_prob, FLAGS.keep_prob)\n', (3076, 3116), True, 'import tensorflow as tf\n'), ((3469, 3511), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'rnn_inp'], {}), '(embedding, rnn_inp)\n', (3491, 3511), True, 'import tensorflow as tf\n'), ((3670, 3720), 'tensorflow.contrib.distributions.Categorical', 'tf.contrib.distributions.Categorical', ([], {'logits': 'logit'}), '(logits=logit)\n', (3706, 3720), True, 'import tensorflow as tf\n'), ((4232, 4274), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'rnn_inp'], {}), '(embedding, rnn_inp)\n', (4254, 4274), True, 'import tensorflow as tf\n'), ((4397, 4443), 'tensorflow.argmax', 'tf.argmax', (['logit'], {'axis': '(1)', 'output_type': 'tf.int32'}), '(logit, axis=1, output_type=tf.int32)\n', (4406, 4443), True, 'import tensorflow as tf\n'), ((4838, 4869), 'misc_fn.get_len', 'get_len', (['sequence', 'FLAGS.end_id'], {}), '(sequence, FLAGS.end_id)\n', (4845, 4869), False, 'from misc_fn import get_len\n'), ((5257, 5326), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['cell', 'FLAGS.keep_prob', 'FLAGS.keep_prob'], {}), '(cell, FLAGS.keep_prob, FLAGS.keep_prob)\n', (5286, 5326), True, 'import tensorflow as tf\n'), ((6141, 6201), 'tensorflow.variable_scope', 
'tf.variable_scope', (['gan_model.discriminator_scope'], {'reuse': '(True)'}), '(gan_model.discriminator_scope, reuse=True)\n', (6158, 6201), True, 'import tensorflow as tf\n'), ((6309, 6367), 'tensorflow.contrib.framework.nest.map_structure', 'nest.map_structure', (['tf.stop_gradient', '(baselines, feat_bl)'], {}), '(tf.stop_gradient, (baselines, feat_bl))\n', (6327, 6367), False, 'from tensorflow.contrib.framework import nest\n'), ((6706, 6730), 'tensorflow.zeros_like', 'tf.zeros_like', (['l_rewards'], {}), '(l_rewards)\n', (6719, 6730), True, 'import tensorflow as tf\n'), ((6905, 6954), 'misc_fn.obj_rewards', 'obj_rewards', (['sequence', 'mask', 'classes', 'scores', 'num'], {}), '(sequence, mask, classes, scores, num)\n', (6916, 6954), False, 'from misc_fn import obj_rewards\n'), ((7039, 7061), 'tensorflow.zeros_like', 'tf.zeros_like', (['rewards'], {}), '(rewards)\n', (7052, 7061), True, 'import tensorflow as tf\n'), ((7263, 7282), 'tensorflow.zeros_like', 'tf.zeros_like', (['l_bl'], {}), '(l_bl)\n', (7276, 7282), True, 'import tensorflow as tf\n'), ((7410, 7457), 'misc_fn.obj_rewards', 'obj_rewards', (['seq_bl', 'mask', 'classes', 'scores', 'num'], {}), '(seq_bl, mask, classes, scores, num)\n', (7421, 7457), False, 'from misc_fn import obj_rewards\n'), ((7512, 7536), 'tensorflow.zeros_like', 'tf.zeros_like', (['baselines'], {}), '(baselines)\n', (7525, 7536), True, 'import tensorflow as tf\n'), ((8453, 8495), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['cum_advantage', '(-5.0)', '(5.0)'], {}), '(cum_advantage, -5.0, 5.0)\n', (8469, 8495), True, 'import tensorflow as tf\n'), ((8817, 8834), 'tensorflow.to_float', 'tf.to_float', (['mask'], {}), '(mask)\n', (8828, 8834), True, 'import tensorflow as tf\n'), ((8950, 8991), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""losses/mse"""', 'loss_mse'], {}), "('losses/mse', loss_mse)\n", (8967, 8991), True, 'import tensorflow as tf\n'), ((8996, 9052), 'tensorflow.summary.scalar', 'tf.summary.scalar', 
(['"""losses/gen_obj"""', 'final_gen_objective'], {}), "('losses/gen_obj', final_gen_objective)\n", (9013, 9052), True, 'import tensorflow as tf\n'), ((10316, 10376), 'tensorflow.variable_scope', 'tf.variable_scope', (['gan_model.discriminator_scope'], {'reuse': '(True)'}), '(gan_model.discriminator_scope, reuse=True)\n', (10333, 10376), True, 'import tensorflow as tf\n'), ((10454, 10510), 'tensorflow.variable_scope', 'tf.variable_scope', (['gan_model.generator_scope'], {'reuse': '(True)'}), '(gan_model.generator_scope, reuse=True)\n', (10471, 10510), True, 'import tensorflow as tf\n'), ((10694, 10724), 'tensorflow.matrix_transpose', 'tf.matrix_transpose', (['embedding'], {}), '(embedding)\n', (10713, 10724), True, 'import tensorflow as tf\n'), ((10741, 10789), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_b"""', '[FLAGS.vocab_size]'], {}), "('softmax_b', [FLAGS.vocab_size])\n", (10756, 10789), True, 'import tensorflow as tf\n'), ((10933, 10976), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'sentence'], {}), '(embedding, sentence)\n', (10955, 10976), True, 'import tensorflow as tf\n'), ((11024, 11067), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['FLAGS.mem_dim'], {}), '(FLAGS.mem_dim)\n', (11052, 11067), True, 'import tensorflow as tf\n'), ((11079, 11148), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['cell', 'FLAGS.keep_prob', 'FLAGS.keep_prob'], {}), '(cell, FLAGS.keep_prob, FLAGS.keep_prob)\n', (11108, 11148), True, 'import tensorflow as tf\n'), ((11307, 11351), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'sentence', 'ls', 'state'], {}), '(cell, sentence, ls, state)\n', (11324, 11351), True, 'import tensorflow as tf\n'), ((11362, 11398), 'tensorflow.reshape', 'tf.reshape', (['out', '[-1, FLAGS.mem_dim]'], {}), '(out, [-1, FLAGS.mem_dim])\n', (11372, 11398), True, 'import tensorflow as tf\n'), ((11478, 11532), 'tensorflow.reshape', 'tf.reshape', 
(['logits', '[batch_size, -1, FLAGS.vocab_size]'], {}), '(logits, [batch_size, -1, FLAGS.vocab_size])\n', (11488, 11532), True, 'import tensorflow as tf\n'), ((11868, 11913), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""losses/sentence_ae"""', 'loss'], {}), "('losses/sentence_ae', loss)\n", (11885, 11913), True, 'import tensorflow as tf\n'), ((12147, 12213), 'nets.inception_v4.inception_v4', 'inception_v4.inception_v4', (["features['im']", 'None'], {'is_training': '(False)'}), "(features['im'], None, is_training=False)\n", (12172, 12213), False, 'from nets import inception_v4\n'), ((12275, 12309), 'tensorflow.global_variables', 'tf.global_variables', (['"""InceptionV4"""'], {}), "('InceptionV4')\n", (12294, 12309), True, 'import tensorflow as tf\n'), ((12555, 12579), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (12577, 12579), True, 'import tensorflow as tf\n'), ((12640, 12726), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""logits/gen_logits"""', 'gan_model.discriminator_gen_outputs[0]'], {}), "('logits/gen_logits', gan_model.\n discriminator_gen_outputs[0])\n", (12660, 12726), True, 'import tensorflow as tf\n'), ((12751, 12839), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""logits/real_logits"""', 'gan_model.discriminator_real_outputs[0]'], {}), "('logits/real_logits', gan_model.\n discriminator_real_outputs[0])\n", (12771, 12839), True, 'import tensorflow as tf\n'), ((13121, 13198), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['discriminator_real_outputs[0]', 'discriminator_real_outputs[1]'], {}), '(discriminator_real_outputs[0], discriminator_real_outputs[1])\n', (13136, 13198), True, 'import tensorflow as tf\n'), ((13250, 13325), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['discriminator_gen_outputs[0]', 'discriminator_gen_outputs[1]'], {}), '(discriminator_gen_outputs[0], discriminator_gen_outputs[1])\n', (13265, 13325), True, 'import tensorflow as tf\n'), ((13370, 13456), 
'tensorflow.contrib.gan.python.losses.python.losses_impl.modified_discriminator_loss', 'modified_discriminator_loss', (['real_logits', 'gen_logits'], {'add_summaries': 'add_summaries'}), '(real_logits, gen_logits, add_summaries=\n add_summaries)\n', (13397, 13456), False, 'from tensorflow.contrib.gan.python.losses.python.losses_impl import modified_discriminator_loss\n'), ((13499, 13522), 'tensorflow.name_scope', 'tf.name_scope', (['"""losses"""'], {}), "('losses')\n", (13512, 13522), True, 'import tensorflow as tf\n'), ((13538, 13610), 'functools.partial', 'functools.partial', (['tfgan.features.tensor_pool'], {'pool_size': 'FLAGS.pool_size'}), '(tfgan.features.tensor_pool, pool_size=FLAGS.pool_size)\n', (13555, 13610), False, 'import functools\n'), ((13658, 13887), 'tensorflow.contrib.gan.gan_loss', 'tfgan.gan_loss', (['gan_model'], {'generator_loss_fn': 'gen_loss_fn', 'discriminator_loss_fn': 'dis_loss_fn', 'gradient_penalty_weight': '(10 if FLAGS.wass else 0)', 'tensor_pool_fn': '(pool_fn if FLAGS.use_pool else None)', 'add_summaries': 'is_chief'}), '(gan_model, generator_loss_fn=gen_loss_fn,\n discriminator_loss_fn=dis_loss_fn, gradient_penalty_weight=10 if FLAGS.\n wass else 0, tensor_pool_fn=pool_fn if FLAGS.use_pool else None,\n add_summaries=is_chief)\n', (13672, 13887), True, 'import tensorflow.contrib.gan as tfgan\n'), ((14399, 14421), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (14412, 14421), True, 'import tensorflow as tf\n'), ((14437, 14479), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['params.gen_lr', '(0.5)'], {}), '(params.gen_lr, 0.5)\n', (14459, 14479), True, 'import tensorflow as tf\n'), ((14494, 14536), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['params.dis_lr', '(0.5)'], {}), '(params.dis_lr, 0.5)\n', (14516, 14536), True, 'import tensorflow as tf\n'), ((14700, 15004), 'tensorflow.contrib.gan.gan_train_ops', 'tfgan.gan_train_ops', (['gan_model', 'gan_loss'], 
{'generator_optimizer': 'gen_opt', 'discriminator_optimizer': 'dis_opt', 'transform_grads_fn': 'transform_grads_fn', 'summarize_gradients': 'is_chief', 'check_for_unused_update_ops': '(not FLAGS.use_pool)', 'aggregation_method': 'tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N'}), '(gan_model, gan_loss, generator_optimizer=gen_opt,\n discriminator_optimizer=dis_opt, transform_grads_fn=transform_grads_fn,\n summarize_gradients=is_chief, check_for_unused_update_ops=not FLAGS.\n use_pool, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n )\n', (14719, 15004), True, 'import tensorflow.contrib.gan as tfgan\n'), ((15388, 15447), 'misc_fn.crop_sentence', 'crop_sentence', (['gan_model.generated_data[0][0]', 'FLAGS.end_id'], {}), '(gan_model.generated_data[0][0], FLAGS.end_id)\n', (15401, 15447), False, 'from misc_fn import crop_sentence\n'), ((15463, 15487), 'tensorflow.gather', 'tf.gather', (['dic', 'sentence'], {}), '(dic, sentence)\n', (15472, 15487), True, 'import tensorflow as tf\n'), ((15499, 15550), 'misc_fn.crop_sentence', 'crop_sentence', (['gan_model.real_data[0]', 'FLAGS.end_id'], {}), '(gan_model.real_data[0], FLAGS.end_id)\n', (15512, 15550), False, 'from misc_fn import crop_sentence\n'), ((15562, 15582), 'tensorflow.gather', 'tf.gather', (['dic', 'real'], {}), '(dic, real)\n', (15571, 15582), True, 'import tensorflow as tf\n'), ((15730, 15763), 'tensorflow.summary.text', 'tf.summary.text', (['"""fake"""', 'sentence'], {}), "('fake', sentence)\n", (15745, 15763), True, 'import tensorflow as tf\n'), ((15768, 15815), 'tensorflow.summary.image', 'tf.summary.image', (['"""im"""', "features['im'][None, 0]"], {}), "('im', features['im'][None, 0])\n", (15784, 15815), True, 'import tensorflow as tf\n'), ((15846, 15881), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""Generator"""'], {}), "('Generator')\n", (15868, 15881), True, 'import tensorflow as tf\n'), ((15915, 15958), 'tensorflow.trainable_variables', 'tf.trainable_variables', 
(['"""Discriminator/rnn"""'], {}), "('Discriminator/rnn')\n", (15937, 15958), True, 'import tensorflow as tf\n'), ((15977, 16026), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""Discriminator/embedding"""'], {}), "('Discriminator/embedding')\n", (15999, 16026), True, 'import tensorflow as tf\n'), ((16045, 16087), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""Discriminator/fc"""'], {}), "('Discriminator/fc')\n", (16067, 16087), True, 'import tensorflow as tf\n'), ((16644, 16695), 'misc_fn.validate_batch_size_for_multi_gpu', 'validate_batch_size_for_multi_gpu', (['FLAGS.batch_size'], {}), '(FLAGS.batch_size)\n', (16677, 16695), False, 'from misc_fn import validate_batch_size_for_multi_gpu\n'), ((16717, 16812), 'tensorflow.contrib.estimator.replicate_model_fn', 'tf.contrib.estimator.replicate_model_fn', (['model_fn'], {'loss_reduction': 'tf.losses.Reduction.MEAN'}), '(model_fn, loss_reduction=tf.losses.\n Reduction.MEAN)\n', (16756, 16812), True, 'import tensorflow as tf\n'), ((2642, 2684), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.08)', '(0.08)'], {}), '(-0.08, 0.08)\n', (2671, 2684), True, 'import tensorflow as tf\n'), ((3288, 3311), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3309, 3311), True, 'import tensorflow as tf\n'), ((3610, 3639), 'tensorflow.matmul', 'tf.matmul', (['rnn_out', 'softmax_w'], {}), '(rnn_out, softmax_w)\n', (3619, 3639), True, 'import tensorflow as tf\n'), ((4344, 4373), 'tensorflow.matmul', 'tf.matmul', (['rnn_out', 'softmax_w'], {}), '(rnn_out, softmax_w)\n', (4353, 4373), True, 'import tensorflow as tf\n'), ((5131, 5173), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.08)', '(0.08)'], {}), '(-0.08, 0.08)\n', (5160, 5173), True, 'import tensorflow as tf\n'), ((5605, 5623), 'tensorflow.shape', 'tf.shape', (['sequence'], {}), '(sequence)\n', (5613, 5623), True, 'import tensorflow as tf\n'), ((6460, 
6492), 'tensorflow.squared_difference', 'tf.squared_difference', (['pca', 'feat'], {}), '(pca, feat)\n', (6481, 6492), True, 'import tensorflow as tf\n'), ((9062, 9086), 'tensorflow.name_scope', 'tf.name_scope', (['"""rewards"""'], {}), "('rewards')\n", (9075, 9086), True, 'import tensorflow as tf\n'), ((9094, 9140), 'misc_fn.variable_summaries', 'variable_summaries', (['c_rewards', 'mask', '"""rewards"""'], {}), "(c_rewards, mask, 'rewards')\n", (9112, 9140), False, 'from misc_fn import variable_summaries\n'), ((9151, 9178), 'tensorflow.name_scope', 'tf.name_scope', (['"""advantages"""'], {}), "('advantages')\n", (9164, 9178), True, 'import tensorflow as tf\n'), ((9186, 9236), 'misc_fn.variable_summaries', 'variable_summaries', (['advantages', 'mask', '"""advantages"""'], {}), "(advantages, mask, 'advantages')\n", (9204, 9236), False, 'from misc_fn import variable_summaries\n'), ((9247, 9273), 'tensorflow.name_scope', 'tf.name_scope', (['"""baselines"""'], {}), "('baselines')\n", (9260, 9273), True, 'import tensorflow as tf\n'), ((9281, 9330), 'misc_fn.variable_summaries', 'variable_summaries', (['c_baseline', 'mask', '"""baselines"""'], {}), "(c_baseline, mask, 'baselines')\n", (9299, 9330), False, 'from misc_fn import variable_summaries\n'), ((9341, 9367), 'tensorflow.name_scope', 'tf.name_scope', (['"""log_probs"""'], {}), "('log_probs')\n", (9354, 9367), True, 'import tensorflow as tf\n'), ((9375, 9423), 'misc_fn.variable_summaries', 'variable_summaries', (['log_probs', 'mask', '"""log_probs"""'], {}), "(log_probs, mask, 'log_probs')\n", (9393, 9423), False, 'from misc_fn import variable_summaries\n'), ((9434, 9460), 'tensorflow.name_scope', 'tf.name_scope', (['"""d_rewards"""'], {}), "('d_rewards')\n", (9447, 9460), True, 'import tensorflow as tf\n'), ((9468, 9516), 'misc_fn.variable_summaries', 'variable_summaries', (['d_rewards', 'mask', '"""d_rewards"""'], {}), "(d_rewards, mask, 'd_rewards')\n", (9486, 9516), False, 'from misc_fn import 
variable_summaries\n'), ((9527, 9553), 'tensorflow.name_scope', 'tf.name_scope', (['"""l_rewards"""'], {}), "('l_rewards')\n", (9540, 9553), True, 'import tensorflow as tf\n'), ((9561, 9613), 'misc_fn.variable_summaries', 'variable_summaries', (['l_rewards_mat', 'mask', '"""l_rewards"""'], {}), "(l_rewards_mat, mask, 'l_rewards')\n", (9579, 9613), False, 'from misc_fn import variable_summaries\n'), ((9624, 9650), 'tensorflow.name_scope', 'tf.name_scope', (['"""o_rewards"""'], {}), "('o_rewards')\n", (9637, 9650), True, 'import tensorflow as tf\n'), ((9658, 9706), 'misc_fn.variable_summaries', 'variable_summaries', (['o_rewards', 'mask', '"""o_rewards"""'], {}), "(o_rewards, mask, 'o_rewards')\n", (9676, 9706), False, 'from misc_fn import variable_summaries\n'), ((9981, 10006), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['o_rewards'], {}), '(o_rewards)\n', (9995, 10006), True, 'import tensorflow as tf\n'), ((10013, 10059), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean_found_obj"""', 'o_rewards'], {}), "('mean_found_obj', o_rewards)\n", (10030, 10059), True, 'import tensorflow as tf\n'), ((10995, 11009), 'tensorflow.shape', 'tf.shape', (['feat'], {}), '(feat)\n', (11003, 11009), True, 'import tensorflow as tf\n'), ((11427, 11452), 'tensorflow.matmul', 'tf.matmul', (['out', 'softmax_w'], {}), '(out, softmax_w)\n', (11436, 11452), True, 'import tensorflow as tf\n'), ((11564, 11582), 'tensorflow.shape', 'tf.shape', (['sentence'], {}), '(sentence)\n', (11572, 11582), True, 'import tensorflow as tf\n'), ((12041, 12064), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (12062, 12064), True, 'import tensorflow as tf\n'), ((12094, 12131), 'nets.inception_v4.inception_v4_arg_scope', 'inception_v4.inception_v4_arg_scope', ([], {}), '()\n', (12129, 12131), False, 'from nets import inception_v4\n'), ((12587, 12635), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['variable.op.name', 'variable'], {}), '(variable.op.name, 
variable)\n', (12607, 12635), True, 'import tensorflow as tf\n'), ((13935, 13990), 'tensorflow.contrib.gan.eval.add_regularization_loss_summaries', 'tfgan.eval.add_regularization_loss_summaries', (['gan_model'], {}), '(gan_model)\n', (13979, 13990), True, 'import tensorflow.contrib.gan as tfgan\n'), ((14578, 14622), 'tensorflow.contrib.estimator.TowerOptimizer', 'tf.contrib.estimator.TowerOptimizer', (['gen_opt'], {}), '(gen_opt)\n', (14613, 14622), True, 'import tensorflow as tf\n'), ((14639, 14683), 'tensorflow.contrib.estimator.TowerOptimizer', 'tf.contrib.estimator.TowerOptimizer', (['dis_opt'], {}), '(dis_opt)\n', (14674, 14683), True, 'import tensorflow as tf\n'), ((15098, 15126), 'tensorflow.contrib.gan.python.train.get_sequential_train_hooks', 'get_sequential_train_hooks', ([], {}), '()\n', (15124, 15126), False, 'from tensorflow.contrib.gan.python.train import get_sequential_train_hooks\n'), ((15347, 15372), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['dic'], {}), '(dic)\n', (15367, 15372), True, 'import tensorflow as tf\n'), ((15613, 15691), 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', (["{'fake': sentence, 'real': real}"], {'every_n_iter': '(100)'}), "({'fake': sentence, 'real': real}, every_n_iter=100)\n", (15639, 15691), True, 'import tensorflow as tf\n'), ((17077, 17109), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (17090, 17109), True, 'import tensorflow as tf\n'), ((3405, 3437), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]', 'tf.int32'], {}), '([batch_size], tf.int32)\n', (3413, 3437), True, 'import tensorflow as tf\n'), ((4168, 4200), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]', 'tf.int32'], {}), '([batch_size], tf.int32)\n', (4176, 4200), True, 'import tensorflow as tf\n'), ((7089, 7124), 'tensorflow.squared_difference', 'tf.squared_difference', (['pca', 'feat_bl'], {}), '(pca, feat_bl)\n', (7110, 7124), True, 'import tensorflow as tf\n'), ((7874, 
7896), 'numpy.power', 'np.power', (['gamma', '(s - t)'], {}), '(gamma, s - t)\n', (7882, 7896), True, 'import numpy as np\n'), ((8049, 8071), 'numpy.power', 'np.power', (['gamma', '(s - t)'], {}), '(gamma, s - t)\n', (8057, 8071), True, 'import numpy as np\n'), ((8589, 8620), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['cum_advantage'], {}), '(cum_advantage)\n', (8605, 8620), True, 'import tensorflow as tf\n'), ((9751, 9775), 'tensorflow.zeros_like', 'tf.zeros_like', (['o_rewards'], {}), '(o_rewards)\n', (9764, 9775), True, 'import tensorflow as tf\n'), ((9804, 9851), 'tensorflow.reduce_min', 'tf.reduce_min', (['o_rewards'], {'axis': '(1)', 'keepdims': '(True)'}), '(o_rewards, axis=1, keepdims=True)\n', (9817, 9851), True, 'import tensorflow as tf\n'), ((10634, 10676), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.08)', '(0.08)'], {}), '(-0.08, 0.08)\n', (10663, 10676), True, 'import tensorflow as tf\n'), ((11248, 11271), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (11269, 11271), True, 'import tensorflow as tf\n'), ((9911, 9952), 'tensorflow.logical_and', 'tf.logical_and', (['(o_rewards > minimum)', 'mask'], {}), '(o_rewards > minimum, mask)\n', (9925, 9952), True, 'import tensorflow as tf\n'), ((5669, 5685), 'tensorflow.shape', 'tf.shape', (['length'], {}), '(length)\n', (5677, 5685), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019-11-28 13:57
# @Author : yingyuankai
# @Email : <EMAIL>
# @File : tf_model_adapters.py
import re
import numpy as np
from collections import OrderedDict
import tensorflow as tf
# Public API of this module: one adapter per pretrained-checkpoint flavour.
# Each adapter maps huggingface-style variable names onto the names used in
# the corresponding original TF checkpoint and copies the weights over.
__all__ = [
    "tf_huggingface_bert_adapter",
    "tf_huggingface_ernie_adapter",
    "tf_huggingface_xlnet_adapter",
    "tf_huggingface_albert_chinese_adapter",
    "tf_huggingface_albert_chinese_google_adapter",
    "tf_huggingface_electra_adapter",
    "tf_huggingface_gpt2_adapter"
]
def tf_huggingface_bert_adapter(hf_model_variables: list, init_checkpoint: str):
    """Copy weights from a bert_wwm TF checkpoint into huggingface-style variables.

    For every variable whose name contains a ``bert/...`` scope, translate the
    huggingface naming convention into the checkpoint's convention, load the
    matching tensor from *init_checkpoint*, and assign all values in a single
    batched update.

    :param hf_model_variables: variables of the huggingface TF model
    :param init_checkpoint: path to the pretrained BERT checkpoint
    :return: None
    """
    # Embedding tables are renamed wholesale rather than via substring edits.
    embedding_renames = {
        "bert/embeddings/weight": "bert/embeddings/word_embeddings",
        "bert/embeddings/position_embeddings/embeddings": "bert/embeddings/position_embeddings",
        "bert/embeddings/token_type_embeddings/embeddings": "bert/embeddings/token_type_embeddings",
        "bert/embeddings/task_type_embeddings/embeddings": "bert/embeddings/task_type_embeddings",
    }
    assignments = list()
    for variable in hf_model_variables:
        matched = re.match("^.*/(bert/.*):\\d+$", variable.name)
        if matched is None:
            continue
        ckpt_name = matched.group(1)
        # Encoder layers are "layer_._<i>" in huggingface but "layer_<i>" in the checkpoint.
        if re.match("^bert/encoder/layer_._\\d+.*$", ckpt_name) is not None:
            ckpt_name = ckpt_name.replace("_._", "_")
        ckpt_name = embedding_renames.get(ckpt_name, ckpt_name)
        assignments.append((variable, tf.train.load_variable(init_checkpoint, ckpt_name)))
    tf.keras.backend.batch_set_value(assignments)
def tf_huggingface_ernie_adapter(hf_model_variables: list, init_checkpoint: str):
    """Copy weights from an ERNIE TF checkpoint into huggingface-style variables.

    For every variable whose name contains an ``ernie/...`` scope, translate the
    huggingface naming convention into the checkpoint's convention (checkpoint
    variables keep the original ``bert/`` prefix), load the matching tensor from
    *init_checkpoint*, and assign all values in a single batched update.

    :param hf_model_variables: variables of the huggingface TF model
    :param init_checkpoint: path to the pretrained ERNIE checkpoint
    :return: None
    """
    # Embedding tables are renamed wholesale rather than via substring edits.
    embedding_renames = {
        "ernie/embeddings/weight": "bert/embeddings/word_embeddings",
        "ernie/embeddings/position_embeddings/embeddings": "bert/embeddings/position_embeddings",
        "ernie/embeddings/token_type_embeddings/embeddings": "bert/embeddings/token_type_embeddings",
        "ernie/embeddings/task_type_embeddings/embeddings": "bert/embeddings/task_type_embeddings",
    }
    assignments = list()
    for variable in hf_model_variables:
        matched = re.match("^.*/(ernie/.*):\\d+$", variable.name)
        if matched is None:
            continue
        ckpt_name = matched.group(1)
        # Encoder layers are "layer_._<i>" in huggingface but "layer_<i>" in the checkpoint.
        if re.match("^ernie/encoder/layer_._\\d+.*$", ckpt_name) is not None:
            ckpt_name = ckpt_name.replace("_._", "_")
        # Remap embeddings, then switch the scope prefix to the checkpoint's "bert".
        ckpt_name = embedding_renames.get(ckpt_name, ckpt_name).replace("ernie", "bert")
        assignments.append((variable, tf.train.load_variable(init_checkpoint, ckpt_name)))
    tf.keras.backend.batch_set_value(assignments)
def tf_huggingface_xlnet_adapter(hf_model_variables: list, init_checkpoint: str):
    """Build name to variable map from huggingface xlnet names to xlnet_chinese variables,
    and then set values for current model.
    :param hf_model_variables: trainable variables of the huggingface XLNet model
    :param init_checkpoint: path to the source xlnet_chinese TF checkpoint
    :return: None (values are assigned in place via batch_set_value)
    """
    name_to_values = list()
    # The relative-attention biases and segment embeddings are stored once in the
    # checkpoint with a leading per-layer axis; load them up front and slice by
    # layer index below.
    r_r_bias_values = tf.train.load_variable(init_checkpoint, "model/transformer/r_r_bias")
    r_s_bias_values = tf.train.load_variable(init_checkpoint, "model/transformer/r_s_bias")
    r_w_bias_values = tf.train.load_variable(init_checkpoint, "model/transformer/r_w_bias")
    seg_embed_values = tf.train.load_variable(init_checkpoint, "model/transformer/seg_embed")
    for item in hf_model_variables:
        var_name = item.name
        # Keep only variables scoped under "xl_net/", stripping the outer scope
        # and the trailing ":0"-style device suffix.
        matched_name = re.match("^.*/(xl_net/.*):\\d+$", var_name)
        if matched_name is None:
            continue
        matched_name = matched_name.group(1)
        # for transformer layer variables
        encoder_matched = re.match("^xl_net/layer_._\\d+.*$", matched_name)
        if encoder_matched is not None:
            matched_name = matched_name.replace("_._", "_").\
                replace("xl_net", "model/transformer").\
                replace("layer_norm", "LayerNorm")
            # Layer index used to slice the stacked bias/seg_embed tensors above.
            i = int(re.match("^.*/layer_(\\d+).*$", matched_name).group(1))
            # for r_r_bias
            r_r_bias_matched = re.match("^.*/r_r_bias$", matched_name)
            if r_r_bias_matched is not None:
                value = np.squeeze(r_r_bias_values[i])
                name_to_values.append((item, value))
                continue
            # for r_s_bias
            r_s_bias_matched = re.match("^.*/r_s_bias$", matched_name)
            if r_s_bias_matched is not None:
                value = np.squeeze(r_s_bias_values[i])
                name_to_values.append((item, value))
                continue
            # for r_w_bias
            r_w_bias_matched = re.match("^.*/r_w_bias$", matched_name)
            if r_w_bias_matched is not None:
                value = np.squeeze(r_w_bias_values[i])
                name_to_values.append((item, value))
                continue
            # for seg_embed
            seg_embed_matched = re.match("^.*/seg_embed$", matched_name)
            if seg_embed_matched is not None:
                value = np.squeeze(seg_embed_values[i])
                name_to_values.append((item, value))
                continue
            # Attention projection weights named q/k/v/o/r live under
            # "<name>/kernel" in the checkpoint.
            kqvor_matched = re.match("^.*/[kqvor]$", matched_name)
            if kqvor_matched is not None:
                matched_name += "/kernel"
        # for the word-embedding table
        if matched_name == 'xl_net/word_embedding/weight':
            matched_name = "model/transformer/word_embedding/lookup_table"
        # for the mask embedding
        if matched_name.endswith("mask_emb"):
            matched_name = "model/transformer/mask_emb/mask_emb"
        value = tf.train.load_variable(init_checkpoint, matched_name)
        name_to_values.append((item, value))
    tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_albert_chinese_adapter(hf_model_variables: list, init_checkpoint: str):
    """Build name to variable map from huggingface albert names to albert_chinese variables,
    and then set values for current model.
    brightmart version
    ref: https://github.com/brightmart/albert_zh
    :param hf_model_variables: trainable variables of the huggingface ALBERT model
    :param init_checkpoint: path to the brightmart albert_zh TF checkpoint
    :return: None (values are assigned in place via batch_set_value)
    """
    name_to_values = list()
    # brightmart's albert shares a single transformer layer, hence the fixed
    # "layer_shared" prefix for every transformer-layer variable.
    default_prefix = "bert/encoder/layer_shared/"
    default_var_name = "albert_brightmart"
    for item in hf_model_variables:
        var_name = item.name
        # Keep only variables under the expected scope, dropping the ":0" suffix.
        matched_name = re.match(f"^.*/({default_var_name}/.*):\\d+$", var_name)
        if matched_name is None:
            continue
        matched_name = matched_name.group(1)
        # for pooler
        if matched_name == f"{default_var_name}/pooler/bias":
            matched_name = "bert/pooler/dense/bias"
        elif matched_name == f"{default_var_name}/pooler/kernel":
            matched_name = "bert/pooler/dense/kernel"
        # for embeddings
        elif matched_name == f"{default_var_name}/embeddings/word_embeddings/weight":
            matched_name = "bert/embeddings/word_embeddings"
        elif matched_name == f"{default_var_name}/embeddings/position_embeddings/embeddings":
            matched_name = "bert/embeddings/position_embeddings"
        elif matched_name == f"{default_var_name}/embeddings/token_type_embeddings/embeddings":
            matched_name = "bert/embeddings/token_type_embeddings"
        elif matched_name == f"{default_var_name}/embeddings/LayerNorm/gamma":
            matched_name = "bert/embeddings/LayerNorm/gamma"
        elif matched_name == f"{default_var_name}/embeddings/LayerNorm/beta":
            matched_name = "bert/embeddings/LayerNorm/beta"
        # for the factorized embedding projection
        elif matched_name == f"{default_var_name}/embeddings/embedding_hidden_mapping_in":
            matched_name = "bert/embeddings/word_embeddings_2"
        # for transformer layers (suffix-based matches; order of checks matters,
        # e.g. "ffn/kernel" must be tested before the broader "attention" cases)
        elif matched_name.endswith("ffn/kernel"):
            matched_name = f"{default_prefix}intermediate/dense/kernel"
        elif matched_name.endswith("ffn/bias"):
            matched_name = f"{default_prefix}intermediate/dense/bias"
        elif matched_name.endswith("ffn_output/kernel"):
            matched_name = f"{default_prefix}output/dense/kernel"
        elif matched_name.endswith("ffn_output/bias"):
            matched_name = f"{default_prefix}output/dense/bias"
        elif matched_name.endswith("full_layer_layer_norm/gamma"):
            matched_name = f"{default_prefix}output/LayerNorm/gamma"
        elif matched_name.endswith("full_layer_layer_norm/beta"):
            matched_name = f"{default_prefix}output/LayerNorm/beta"
        elif matched_name.endswith("attention/LayerNorm/gamma"):
            matched_name = f"{default_prefix}attention/output/LayerNorm/gamma"
        elif matched_name.endswith("attention/LayerNorm/beta"):
            matched_name = f"{default_prefix}attention/output/LayerNorm/beta"
        elif matched_name.find("attention/dense") != -1:
            # Attention output projection: keep the part after "attention/".
            matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
            matched_name = f"{default_prefix}attention/output/{matched_name}"
        elif matched_name.find("attention") != -1:
            # Remaining attention vars (query/key/value) map under attention/self/.
            matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
            matched_name = f"{default_prefix}attention/self/{matched_name}"
        # Unmatched names fall through unchanged and are looked up verbatim.
        # else:
        #     continue
        value = tf.train.load_variable(init_checkpoint, matched_name)
        name_to_values.append((item, value))
    tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_albert_chinese_google_adapter(hf_model_variables: list, init_checkpoint: str):
    """Build name to variable map from huggingface albert names to albert_chinese_google variables,
    and then set values for current model.
    :param hf_model_variables: trainable variables of the huggingface ALBERT model
    :param init_checkpoint: path to the google albert_chinese TF checkpoint
    :return: None (values are assigned in place via batch_set_value)
    """
    name_to_values = list()
    # Google's albert keeps the shared transformer layer under a single
    # group_0/inner_group_0 scope; every layer variable maps to this prefix.
    default_prefix = "bert/encoder/transformer/group_0/inner_group_0/"
    for item in hf_model_variables:
        var_name = item.name
        # Keep only variables under "albert/", dropping the ":0" suffix.
        matched_name = re.match("^.*/(albert/.*):\\d+$", var_name)
        if matched_name is None:
            continue
        matched_name = matched_name.group(1)
        # for pooler
        if matched_name == "albert/pooler/bias":
            matched_name = "bert/pooler/dense/bias"
        elif matched_name == "albert/pooler/kernel":
            matched_name = "bert/pooler/dense/kernel"
        # for embeddings
        elif matched_name == "albert/embeddings/word_embeddings/weight":
            matched_name = "bert/embeddings/word_embeddings"
        elif matched_name == "albert/embeddings/position_embeddings/embeddings":
            matched_name = "bert/embeddings/position_embeddings"
        elif matched_name == "albert/embeddings/token_type_embeddings/embeddings":
            matched_name = "bert/embeddings/token_type_embeddings"
        elif matched_name == "albert/embeddings/LayerNorm/gamma":
            matched_name = "bert/embeddings/LayerNorm/gamma"
        elif matched_name == "albert/embeddings/LayerNorm/beta":
            matched_name = "bert/embeddings/LayerNorm/beta"
        # for the factorized embedding projection
        elif matched_name == "albert/encoder/embedding_hidden_mapping_in/kernel":
            matched_name = "bert/encoder/embedding_hidden_mapping_in/kernel"
        elif matched_name == "albert/encoder/embedding_hidden_mapping_in/bias":
            matched_name = "bert/encoder/embedding_hidden_mapping_in/bias"
        # for transformer layers (suffix-based matches; check order matters)
        elif matched_name.endswith("ffn/kernel"):
            matched_name = f"{default_prefix}ffn_1/intermediate/dense/kernel"
        elif matched_name.endswith("ffn/bias"):
            matched_name = f"{default_prefix}ffn_1/intermediate/dense/bias"
        elif matched_name.endswith("ffn_output/kernel"):
            matched_name = f"{default_prefix}ffn_1/intermediate/output/dense/kernel"
        elif matched_name.endswith("ffn_output/bias"):
            matched_name = f"{default_prefix}ffn_1/intermediate/output/dense/bias"
        elif matched_name.endswith("full_layer_layer_norm/gamma"):
            matched_name = f"{default_prefix}LayerNorm_1/gamma"
        elif matched_name.endswith("full_layer_layer_norm/beta"):
            matched_name = f"{default_prefix}LayerNorm_1/beta"
        elif matched_name.endswith("attention/LayerNorm/gamma"):
            matched_name = f"{default_prefix}LayerNorm/gamma"
        elif matched_name.endswith("attention/LayerNorm/beta"):
            matched_name = f"{default_prefix}LayerNorm/beta"
        elif matched_name.find("attention/dense") != -1:
            # Attention output projection: keep the part after "attention/".
            matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
            matched_name = f"{default_prefix}attention_1/output/{matched_name}"
        elif matched_name.find("attention") != -1:
            # Remaining attention vars (query/key/value) map under attention_1/self/.
            matched_name = re.match("^.*attention/(.*)$", matched_name).group(1)
            matched_name = f"{default_prefix}attention_1/self/{matched_name}"
        value = tf.train.load_variable(init_checkpoint, matched_name)
        name_to_values.append((item, value))
    tf.keras.backend.batch_set_value(name_to_values)
def tf_huggingface_electra_adapter(hf_model_variables: list, init_checkpoint: str):
    """Map huggingface ELECTRA variable names onto the checkpoint's names and
    load the matching checkpoint values into the current model.
    :param hf_model_variables: trainable variables of the huggingface model
    :param init_checkpoint: path to the source TF checkpoint
    :return:
    """
    # Exact-name remappings for the embedding tables (the "electra" prefix is kept).
    embedding_name_map = {
        "electra/embeddings/weight": "electra/embeddings/word_embeddings",
        "electra/embeddings/position_embeddings/embeddings": "electra/embeddings/position_embeddings",
        "electra/embeddings/token_type_embeddings/embeddings": "electra/embeddings/token_type_embeddings",
        "electra/embeddings/task_type_embeddings/embeddings": "electra/embeddings/task_type_embeddings",
    }
    assignments = []
    for variable in hf_model_variables:
        scoped = re.match("^.*/(electra/.*):\\d+$", variable.name)
        if scoped is None:
            continue
        name = scoped.group(1)
        # Encoder layers: drop keras' "_._" separators.
        if re.match("^electra/encoder/layer_._\\d+.*$", name) is not None:
            name = name.replace("_._", "_")
        name = embedding_name_map.get(name, name)
        assignments.append((variable, tf.train.load_variable(init_checkpoint, name)))
    tf.keras.backend.batch_set_value(assignments)
def tf_huggingface_gpt2_adapter(hf_model_variables: list, init_checkpoint: str):
    """Build name to variable map from huggingface gpt2 names to gpt2 variables,
    and then set values for current model.
    :param hf_model_variables: trainable variables of the huggingface GPT-2 model
    :param init_checkpoint: path to a saved keras model (loaded via load_model,
        not a raw TF checkpoint like the other adapters)
    :return: None (values are assigned one by one via set_value)
    """
    model_gold = tf.keras.models.load_model(init_checkpoint)
    vars_gold = model_gold.trainable_variables
    # Source-model values keyed by (possibly merged) variable name.
    vars_gold_refinded = {}
    # NOTE(review): name_to_values is no longer used; values are set one at a
    # time at the bottom of this function (see commented-out batch path).
    name_to_values = list()
    for var in vars_gold:
        name, value = var.name, var.numpy()
        name = name.replace("kernel", "weight")
        name_pieces = name.split("/")
        # Common key for the q/k/v projections of one attention block:
        # first three scope pieces plus the trailing weight/bias piece.
        prefix = "/".join(name_pieces[:3] + [name_pieces[-1]])
        if name.endswith("bias:0"):
            # Biases are stored 1-D; reshape to a row so they concatenate below.
            value = np.reshape(value, [1, value.shape[0]])
        # Merge the separate query/key/value projections into the single fused
        # c_attn-style matrix that the huggingface layout expects.
        if name.find("query_layer") != -1 or name.find("key_layer") != -1 or name.find("value_layer") != -1:
            if prefix not in vars_gold_refinded:
                vars_gold_refinded[prefix] = value
            else:
                # assumes q, k, v are visited in that order — TODO confirm
                vars_gold_refinded[prefix] = np.concatenate((vars_gold_refinded[prefix], value), axis=1)
        else:
            vars_gold_refinded[name] = value
    for item in hf_model_variables:
        var_name = item.name
        matched_name = re.match("^.*/(gpt2/.*)$", var_name)
        if matched_name is None:
            continue
        matched_name = matched_name.group(1)
        matched_name = matched_name.replace("gpt2", "gpt")
        name_pieces = matched_name.split("/")
        # wte/wpe: token and position embedding tables.
        if name_pieces[1] == "wte":
            matched_name = "gpt/embedding/embeddings:0"
        elif name_pieces[1] == "wpe":
            matched_name = "position_embeddings:0"
        elif name_pieces[1] == "ln_f":
            matched_name = matched_name.replace(name_pieces[1], "LayerNorm_final_norm")
        # Transformer blocks are named "h_._<idx>" on the huggingface side.
        elif name_pieces[1].startswith("h_._"):
            layer_name = name_pieces[1]
            layer_idx = int(layer_name.split("_._")[-1])
            # Zero-padded layer name, e.g. "layer03".
            new_layer_name = f"layer{layer_idx:02}"
            matched_name = matched_name.replace(layer_name, new_layer_name)
            if len(name_pieces) >= 4:
                if name_pieces[2] == "attn":
                    if name_pieces[3] == "c_attn":
                        matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "attention")
                    elif name_pieces[3] == "c_proj":
                        matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "attention/context_projection_layer")
                elif name_pieces[2] == "ln_1":
                    matched_name = matched_name.replace(name_pieces[2], "LayerNorm_mlp_ln0")
                elif name_pieces[2] == "ln_2":
                    matched_name = matched_name.replace(name_pieces[2], "LayerNorm_mlp_ln1")
                elif name_pieces[2] == "mlp":
                    if name_pieces[3] == "c_fc":
                        matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "intermediate")
                    elif name_pieces[3] == "c_proj":
                        matched_name = matched_name.replace("/".join(name_pieces[2: 4]), "output")
        else:
            continue
        # Skip variables the source model does not provide.
        value = vars_gold_refinded.get(matched_name)
        if value is None:
            continue
        assert value.shape == item.shape
        tf.keras.backend.set_value(item, value)
        # name_to_values.append((item, value))
    # tf.keras.backend.batch_set_value(name_to_values)
"tensorflow.train.load_variable",
"numpy.reshape",
"re.match",
"numpy.squeeze",
"tensorflow.keras.backend.set_value",
"tensorflow.keras.models.load_model",
"numpy.concatenate",
"tensorflow.keras.backend.batch_set_value"
] | [((1949, 1997), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['name_to_values'], {}), '(name_to_values)\n', (1981, 1997), True, 'import tensorflow as tf\n'), ((3524, 3572), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['name_to_values'], {}), '(name_to_values)\n', (3556, 3572), True, 'import tensorflow as tf\n'), ((3895, 3964), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', '"""model/transformer/r_r_bias"""'], {}), "(init_checkpoint, 'model/transformer/r_r_bias')\n", (3917, 3964), True, 'import tensorflow as tf\n'), ((3987, 4056), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', '"""model/transformer/r_s_bias"""'], {}), "(init_checkpoint, 'model/transformer/r_s_bias')\n", (4009, 4056), True, 'import tensorflow as tf\n'), ((4079, 4148), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', '"""model/transformer/r_w_bias"""'], {}), "(init_checkpoint, 'model/transformer/r_w_bias')\n", (4101, 4148), True, 'import tensorflow as tf\n'), ((4172, 4242), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', '"""model/transformer/seg_embed"""'], {}), "(init_checkpoint, 'model/transformer/seg_embed')\n", (4194, 4242), True, 'import tensorflow as tf\n'), ((6541, 6589), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['name_to_values'], {}), '(name_to_values)\n', (6573, 6589), True, 'import tensorflow as tf\n'), ((10174, 10222), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['name_to_values'], {}), '(name_to_values)\n', (10206, 10222), True, 'import tensorflow as tf\n'), ((13750, 13798), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['name_to_values'], {}), '(name_to_values)\n', (13782, 13798), True, 'import tensorflow as tf\n'), ((15267, 15315), 
'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['name_to_values'], {}), '(name_to_values)\n', (15299, 15315), True, 'import tensorflow as tf\n'), ((15593, 15636), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['init_checkpoint'], {}), '(init_checkpoint)\n', (15619, 15636), True, 'import tensorflow as tf\n'), ((897, 938), 're.match', 're.match', (['"""^.*/(bert/.*):\\\\d+$"""', 'var_name'], {}), "('^.*/(bert/.*):\\\\d+$', var_name)\n", (905, 938), False, 'import re\n'), ((1091, 1146), 're.match', 're.match', (['"""^bert/encoder/layer_._\\\\d+.*$"""', 'matched_name'], {}), "('^bert/encoder/layer_._\\\\d+.*$', matched_name)\n", (1099, 1146), False, 'import re\n'), ((1845, 1898), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', 'matched_name'], {}), '(init_checkpoint, matched_name)\n', (1867, 1898), True, 'import tensorflow as tf\n'), ((2380, 2422), 're.match', 're.match', (['"""^.*/(ernie/.*):\\\\d+$"""', 'var_name'], {}), "('^.*/(ernie/.*):\\\\d+$', var_name)\n", (2388, 2422), False, 'import re\n'), ((2575, 2631), 're.match', 're.match', (['"""^ernie/encoder/layer_._\\\\d+.*$"""', 'matched_name'], {}), "('^ernie/encoder/layer_._\\\\d+.*$', matched_name)\n", (2583, 2631), False, 'import re\n'), ((3420, 3473), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', 'matched_name'], {}), '(init_checkpoint, matched_name)\n', (3442, 3473), True, 'import tensorflow as tf\n'), ((4332, 4375), 're.match', 're.match', (['"""^.*/(xl_net/.*):\\\\d+$"""', 'var_name'], {}), "('^.*/(xl_net/.*):\\\\d+$', var_name)\n", (4340, 4375), False, 'import re\n'), ((4528, 4577), 're.match', 're.match', (['"""^xl_net/layer_._\\\\d+.*$"""', 'matched_name'], {}), "('^xl_net/layer_._\\\\d+.*$', matched_name)\n", (4536, 4577), False, 'import re\n'), ((6030, 6068), 're.match', 're.match', (['"""^.*/[kqvor]$"""', 'matched_name'], {}), "('^.*/[kqvor]$', matched_name)\n", (6038, 6068), 
False, 'import re\n'), ((6437, 6490), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', 'matched_name'], {}), '(init_checkpoint, matched_name)\n', (6459, 6490), True, 'import tensorflow as tf\n'), ((7153, 7209), 're.match', 're.match', (['f"""^.*/({default_var_name}/.*):\\\\d+$"""', 'var_name'], {}), "(f'^.*/({default_var_name}/.*):\\\\d+$', var_name)\n", (7161, 7209), False, 'import re\n'), ((10070, 10123), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', 'matched_name'], {}), '(init_checkpoint, matched_name)\n', (10092, 10123), True, 'import tensorflow as tf\n'), ((10706, 10749), 're.match', 're.match', (['"""^.*/(albert/.*):\\\\d+$"""', 'var_name'], {}), "('^.*/(albert/.*):\\\\d+$', var_name)\n", (10714, 10749), False, 'import re\n'), ((13646, 13699), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', 'matched_name'], {}), '(init_checkpoint, matched_name)\n', (13668, 13699), True, 'import tensorflow as tf\n'), ((14185, 14229), 're.match', 're.match', (['"""^.*/(electra/.*):\\\\d+$"""', 'var_name'], {}), "('^.*/(electra/.*):\\\\d+$', var_name)\n", (14193, 14229), False, 'import re\n'), ((14382, 14440), 're.match', 're.match', (['"""^electra/encoder/layer_._\\\\d+.*$"""', 'matched_name'], {}), "('^electra/encoder/layer_._\\\\d+.*$', matched_name)\n", (14390, 14440), False, 'import re\n'), ((15163, 15216), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['init_checkpoint', 'matched_name'], {}), '(init_checkpoint, matched_name)\n', (15185, 15216), True, 'import tensorflow as tf\n'), ((16557, 16593), 're.match', 're.match', (['"""^.*/(gpt2/.*)$"""', 'var_name'], {}), "('^.*/(gpt2/.*)$', var_name)\n", (16565, 16593), False, 'import re\n'), ((18612, 18651), 'tensorflow.keras.backend.set_value', 'tf.keras.backend.set_value', (['item', 'value'], {}), '(item, value)\n', (18638, 18651), True, 'import tensorflow as tf\n'), ((4922, 4961), 're.match', 're.match', 
(['"""^.*/r_r_bias$"""', 'matched_name'], {}), "('^.*/r_r_bias$', matched_name)\n", (4930, 4961), False, 'import re\n'), ((5198, 5237), 're.match', 're.match', (['"""^.*/r_s_bias$"""', 'matched_name'], {}), "('^.*/r_s_bias$', matched_name)\n", (5206, 5237), False, 'import re\n'), ((5474, 5513), 're.match', 're.match', (['"""^.*/r_w_bias$"""', 'matched_name'], {}), "('^.*/r_w_bias$', matched_name)\n", (5482, 5513), False, 'import re\n'), ((5752, 5792), 're.match', 're.match', (['"""^.*/seg_embed$"""', 'matched_name'], {}), "('^.*/seg_embed$', matched_name)\n", (5760, 5792), False, 'import re\n'), ((16017, 16055), 'numpy.reshape', 'np.reshape', (['value', '[1, value.shape[0]]'], {}), '(value, [1, value.shape[0]])\n', (16027, 16055), True, 'import numpy as np\n'), ((5031, 5061), 'numpy.squeeze', 'np.squeeze', (['r_r_bias_values[i]'], {}), '(r_r_bias_values[i])\n', (5041, 5061), True, 'import numpy as np\n'), ((5307, 5337), 'numpy.squeeze', 'np.squeeze', (['r_s_bias_values[i]'], {}), '(r_s_bias_values[i])\n', (5317, 5337), True, 'import numpy as np\n'), ((5583, 5613), 'numpy.squeeze', 'np.squeeze', (['r_w_bias_values[i]'], {}), '(r_w_bias_values[i])\n', (5593, 5613), True, 'import numpy as np\n'), ((5863, 5894), 'numpy.squeeze', 'np.squeeze', (['seg_embed_values[i]'], {}), '(seg_embed_values[i])\n', (5873, 5894), True, 'import numpy as np\n'), ((16349, 16408), 'numpy.concatenate', 'np.concatenate', (['(vars_gold_refinded[prefix], value)'], {'axis': '(1)'}), '((vars_gold_refinded[prefix], value), axis=1)\n', (16363, 16408), True, 'import numpy as np\n'), ((4808, 4853), 're.match', 're.match', (['"""^.*/layer_(\\\\d+).*$"""', 'matched_name'], {}), "('^.*/layer_(\\\\d+).*$', matched_name)\n", (4816, 4853), False, 'import re\n'), ((9674, 9718), 're.match', 're.match', (['"""^.*attention/(.*)$"""', 'matched_name'], {}), "('^.*attention/(.*)$', matched_name)\n", (9682, 9718), False, 'import re\n'), ((9884, 9928), 're.match', 're.match', (['"""^.*attention/(.*)$"""', 
'matched_name'], {}), "('^.*attention/(.*)$', matched_name)\n", (9892, 9928), False, 'import re\n'), ((13285, 13329), 're.match', 're.match', (['"""^.*attention/(.*)$"""', 'matched_name'], {}), "('^.*attention/(.*)$', matched_name)\n", (13293, 13329), False, 'import re\n'), ((13497, 13541), 're.match', 're.match', (['"""^.*attention/(.*)$"""', 'matched_name'], {}), "('^.*attention/(.*)$', matched_name)\n", (13505, 13541), False, 'import re\n')] |
#Import necessary libraries
# return a secure version of the user input file name
from werkzeug.utils import secure_filename
# Flask - Flask is an API of Python that allows us to build up web-applications.
# flash - used to generate informative messages in the flask
# request - used to gain access
# redirect - used to returns a response object and redirects the user to another target location
# url_for - used for creating a URL to prevent the overhead of having to change URLs throughout an application
# render_template - used to generate output from a template file based on the Jinja2 engine
# Response - container for the response data returned by application route functions, plus some additional information needed to create an HTTP response
from flask import Flask, flash, request, redirect, url_for, render_template, Response
# open cv for image processing
import cv2
# used to manipulate different parts
import sys
# used for manipulating array/matrix
import numpy as np
# used for accessing the file and folder in the machine
import os
# used for landmark's facial detector with pre-trained models, the dlib is used to estimate the location of 68 coordinates
import dlib
# VideoStream - used for video stream using webcam
from imutils.video import VideoStream
# imutils - used to make basic image processing functions such as translation, rotation, resizing, skeletonization, and displaying Matplotlib images
import imutils
import tensorflow as tf
# used to load the json model file
from tensorflow.keras.models import model_from_json
# used to load the model
from tensorflow.keras.models import load_model
# act as a primary database where the user uploaded data is store and accessed for future works
# Primary location where user-uploaded images are stored and read back later.
UPLOAD_FOLDER = './static/uploaded_image'
# Only these file extensions may be uploaded (set literal instead of set([...])).
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
# Initializing the Flask app
app = Flask(__name__)
# setting up the upload folder to the app
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Session-signing key (NOTE(review): should come from the environment in production).
app.secret_key = "secret-key"
# Reject request bodies larger than 16 MiB.
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
# Disable static-file caching so freshly processed images are always served.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# dlib's frontal face detector, used to locate faces in images and frames.
detector = dlib.get_frontal_face_detector()
# Embedding model loaded from the faceNet folder (replaces the older
# model.json/model.h5 loading path, which was dead commented-out code).
model = load_model('./faceNet')
# function which is used to encode the image if the image is at a specific location/imagepath
def img_path_to_encoding(image_path, model):
    """Read the image stored at *image_path* and return its face embedding."""
    # cv2.imread returns the image as a BGR integer array.
    loaded_image = cv2.imread(image_path)
    # Delegate the resizing/normalisation/embedding to img_to_encoding.
    return img_to_encoding(loaded_image, model)
# function which is used to encode the image if the image is already loaded and read the image data
def img_to_encoding(image, model):
    """Return the L2-normalised embedding of an already-loaded image array.

    The model consumes 160x160 inputs; the result is a (1, 128) embedding
    scaled to unit Euclidean norm so embeddings can be compared by distance.
    """
    # Bring the image to the model's expected spatial size.
    resized = cv2.resize(image, (160, 160))
    # Scale pixel values into [0, 1], rounded to 12 decimals.
    pixels = np.around(np.array(resized) / 255.0, decimals=12)
    # Add the batch axis: (160, 160, 3) -> (1, 160, 160, 3).
    batch = np.expand_dims(pixels, axis=0)
    raw_embedding = model.predict(batch)
    # Divide by the L2 norm so equal faces land close together in space.
    return raw_embedding / np.linalg.norm(raw_embedding, ord=2)
# function used to load the face database which load the user name and encoded details of the user face
def load_database():
    """Build and return {user_name: face_embedding} from static/database/.

    Each sub-folder holds one user's images; the embedding of every image is
    keyed by the image's file name without its extension.
    """
    database_root = 'static/database'
    encodings = {}
    for person_folder in os.listdir(database_root):
        for file_name in os.listdir(os.path.join(database_root, person_folder)):
            # The key is the image file name stripped of its extension.
            key = os.path.splitext(os.path.basename(file_name))[0]
            image_path = os.path.join(database_root, person_folder, file_name)
            encodings[key] = img_path_to_encoding(image_path, model)
    return encodings
def recognize_image(imagePath):
    """Detect every face in the image at *imagePath*, label each with its
    closest database match (or 'Unknown'), and write the annotated image to
    static/uploaded_image/recognized_faces.jpg."""
    face_database = load_database()
    image = cv2.imread(imagePath)
    # dlib's detector runs on grayscale; the 1 upsamples once for small faces.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    for rect in detector(gray, 1):
        left, top = rect.left(), rect.top()
        right, bottom = rect.right(), rect.bottom()
        # Crop the detected face region and embed it.
        face_crop = image[top:bottom, left:right]
        embedding = img_to_encoding(face_crop, model)
        # Nearest-neighbour search over the database embeddings.
        best_dist = 10
        for candidate_name, candidate_encoding in face_database.items():
            distance = np.linalg.norm(embedding - candidate_encoding)
            if distance < best_dist:
                best_dist = distance
                best_name = candidate_name
        if best_dist > 0.8:
            # Nothing in the database is close enough: mark as unknown in red.
            cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.putText(image, 'Unknown', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
        else:
            # Known face: draw in green; the last 3 characters are trimmed
            # (presumably a per-image suffix in the naming scheme — verify).
            cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
            cv2.putText(image, best_name[:-3], (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2)
    # Persist the annotated image for the web page to serve.
    cv2.imwrite('static/uploaded_image/recognized_faces.jpg', image)
# function to recgonize the user face in the real time stream
def recognize_video():
    """Generator streaming webcam frames as multipart JPEG chunks, drawing a
    labelled box around every detected face (database match or 'Unknown').

    Fixes: cv2.destoryAllWindows typo (AttributeError on exit) and an unbound
    user_name when the face database is empty (NameError at the print).
    """
    face_database = load_database()
    # Start the webcam stream (device 0).
    vs = VideoStream(src=0).start()
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        # dlib detector on the BGR frame; the 1 upsamples once for small faces.
        faces = detector(frame, 1)
        for d in faces:
            left, top = d.left(), d.top()
            right, bottom = d.right(), d.bottom()
            # Crop the detected face region and embed it.
            roi_frame = frame[top:bottom, left:right]
            encoding = img_to_encoding(roi_frame, model)
            # Nearest-neighbour search over the database embeddings.
            min_dist = 100
            user_name = 'Unknown'  # safe default if the database is empty
            for username, encoded_image_name in face_database.items():
                dist = np.linalg.norm(encoding - encoded_image_name)
                if dist < min_dist:
                    min_dist = dist
                    user_name = username
            print('\nMin dist: ', min_dist, "\nUser_Name :", user_name)
            if min_dist > 0.8:
                # No close database entry: mark as unknown in red.
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                cv2.putText(frame, 'Unknown', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
            else:
                # Known face: draw in green, trimming the 3-char name suffix.
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
                cv2.putText(frame, user_name[:-3], (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2)
        # Encode the annotated frame as JPEG and yield one multipart chunk.
        res, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        # Stop streaming when 'q' is pressed.
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            break
    # Release the camera and close any OpenCV windows.
    vs.stream.release()
    cv2.destroyAllWindows()  # was cv2.destoryAllWindows() (typo)
# function for checking the upload image extension
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
# function for face detecting and save the Face ROI(embedding) for static images
# takes 2 parameter, imagepath = Uploaded image location, username = user name
def image_data_generator(imagePath, username):
    """Detect the face in an uploaded image and save the cropped face ROI.

    Parameters
    ----------
    imagePath : str
        Location of the uploaded image on disk.
    username : str
        User name; used as the per-user folder name and output file name.
    """
    # Per-user folder under the on-disk face database (static/database/<username>).
    directory = os.path.join('static/database', username)
    if not os.path.exists(directory):
        # exist_ok=True: don't raise if the folder already exists.
        # (Fixes the original exist_ok='True', which only worked because a
        # non-empty string is truthy.)
        os.makedirs(directory, exist_ok=True)
    # Read the uploaded image as a BGR array.
    image = cv2.imread(imagePath)
    # dlib's frontal-face detector works on grayscale input.
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Second argument 1: upsample the image once so smaller faces are detected.
    faces = detector(gray_image, 1)
    for d in faces:
        # d.top()/bottom()/left()/right() bound the detected face;
        # top + (bottom - top) == bottom, so slice directly on the bounds.
        roi_image = image[d.top():d.bottom(), d.left():d.right()]
        # Save the cropped face ROI as <directory>/<username>.jpg
        cv2.imwrite(os.path.join(directory, username + ".jpg"), roi_image)
# function for face detecting and save the Face ROI(embedding) for real time images
# takes 1 parameter username = user name
def video_data_generator(username):
    """Capture webcam frames, detect faces, and save cropped face ROIs.

    Collects up to 50 face crops for *username* under
    static/database/<username>/ while yielding each annotated frame as a
    JPEG part of a multipart MJPEG HTTP stream.

    Parameters
    ----------
    username : str
        User name; used as the per-user folder name and file-name prefix.
    """
    # Per-user folder under the on-disk face database.
    directory = os.path.join('static/database', username)
    if not os.path.exists(directory):
        # exist_ok=True (boolean): fixed from the original string 'True'.
        os.makedirs(directory, exist_ok=True)
    # Collect at most `max_number_of_images` face crops.
    number_of_images = 1
    max_number_of_images = 50
    # Threaded webcam stream (imutils) keeps frame capture off the main thread.
    vs = VideoStream(src=0).start()
    while number_of_images <= max_number_of_images:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        # Second argument 1: upsample once so smaller faces are detected.
        faces = detector(frame, 1)
        for d in faces:
            # top + (bottom - top) == bottom: slice directly on the bounds.
            roi_image = frame[d.top():d.bottom(), d.left():d.right()]
            # Save the crop as <directory>/<username><counter>.jpg
            cv2.imwrite(os.path.join(directory, str(username + str(number_of_images) + '.jpg')), roi_image)
            # Draw the detection box on the streamed frame.
            cv2.rectangle(frame, (d.left(), d.top()), (d.right(), d.bottom()), (0, 255, 0), 2)
            number_of_images += 1
        # Encode the frame as JPEG bytes for the multipart stream.
        _, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        # Fix: '==' binds tighter than '&' in Python, so the original
        # `cv2.waitKey(1) & 0xFF == ord('q')` evaluated `0xFF == ord('q')`
        # first and the break could never trigger.
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            break
    # Release the capture device.
    vs.stream.release()
    # Fix: the original called cv2.destoryAllWindows() (typo -> AttributeError).
    cv2.destroyAllWindows()
# function for displaying the index page
@app.route('/')
def index():
    """Render the application's landing page."""
    page = render_template('index.html')
    return page
# function for displaying the registration page
@app.route('/register')
def register():
    """Render the user-registration page."""
    page = render_template('register.html')
    return page
# function for displaying the recognition page
@app.route('/recognition')
def recognition():
    """Render the face-recognition landing page."""
    page = render_template('recognition.html')
    return page
# function for displaying the image page for uploading the user image into the database
@app.route('/uploadImageData')
def uploadImageData():
    """Render the page for uploading a user image into the database."""
    page = render_template('uploadImageData.html')
    return page
# once the upload & display button has been click it invoke the following function
# for storing the uploaded image to the database
@app.route('/uploadImageData', methods=['POST'])
def upload_image_data():
    """Handle the database-upload form: validate, store, and re-render the page."""
    # Reject requests that carry no file field at all.
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    uploaded = request.files['file']
    # An empty filename means the form was submitted without choosing a file.
    if uploaded.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    # Only accept files whose extension passes allowed_file().
    if not (uploaded and allowed_file(uploaded.filename)):
        flash('Allowed image types are png, jpg, jpeg, gif')
        return redirect(request.url)
    # Sanitize the client-supplied name before writing to disk.
    safe_name = secure_filename(uploaded.filename)
    username = request.form.get("userName")
    uploaded.save(os.path.join(app.config['UPLOAD_FOLDER'], safe_name))
    flash('Image successfully uploaded and displayed below')
    return render_template('uploadImageData.html', filename=safe_name, username=username)
# displaying the uploaded user image which is store
@app.route('/uploadImageData/display/<filename>')
def display_uploaded_image(filename):
    """Redirect to the statically-served copy of an uploaded image."""
    target = url_for('static', filename="uploaded_image/" + filename)
    return redirect(target)
# displaying the user face which is detected and stored in the database
@app.route('/uploadImageData/detect/<username>/<filename>')
def display_database_image(filename, username):
    """Detect the face in an uploaded image, then redirect to the saved ROI."""
    # Locate the uploaded image that should be processed.
    source_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    # Detect the face and store the cropped ROI under static/database/<username>/.
    image_data_generator(source_path, username)
    target = url_for('static', filename='database/' + username + "/" + username + ".jpg")
    return redirect(target)
# function for displaying the live page for detecting the user real time face using the webcam
@app.route('/webcamVideoData')
def webcamVideoData():
    """Render the page for capturing a user's face from the webcam."""
    page = render_template('webcamVideoData.html')
    return page
# function for getting the user name from the form
@app.route('/webcamVideoData', methods=['POST'])
def webcam_detect():
    """Read the submitted user name and re-render the webcam page with it."""
    return render_template('webcamVideoData.html',
                           username=request.form.get("userName"))
# function for generating the user face data for real time stream
@app.route('/webcamVideoData/<username>')
def webcam_face_detect(username):
    """Stream the face-capture generator for *username* as an MJPEG response."""
    stream = video_data_generator(username)
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
# function redirect to the upload page where recognition using static image can be done
@app.route('/upload')
def upload():
    """Render the static-image upload page used for recognition."""
    page = render_template('upload.html')
    return page
# once the upload & predict button has been click it invoke the following function
@app.route('/upload', methods=['POST'])
def upload_image():
    """Handle the recognition upload form: validate, store, and re-render."""
    # Reject requests that carry no file field at all.
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    uploaded = request.files['file']
    # An empty filename means the form was submitted without choosing a file.
    if uploaded.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    # Only accept files whose extension passes allowed_file().
    if not (uploaded and allowed_file(uploaded.filename)):
        flash('Allowed image types are png, jpg, jpeg, gif')
        return redirect(request.url)
    # Sanitize the client-supplied name before writing to disk.
    safe_name = secure_filename(uploaded.filename)
    uploaded.save(os.path.join(app.config['UPLOAD_FOLDER'], safe_name))
    flash('Image successfully uploaded and displayed below')
    return render_template('upload.html', filename=safe_name)
# displaying the uploaded user file
@app.route('/upload/display/<filename>')
def display_image(filename):
    """Redirect to the statically-served uploaded image."""
    target = url_for('static', filename='uploaded_image/' + filename)
    return redirect(target)
# displaying the predicted image
@app.route('/upload/recognized/<filename>')
def recognized_image_display(filename):
    """Recognize faces in an uploaded image and redirect to the annotated result."""
    source_path = os.path.join('static', 'uploaded_image', filename)
    # recognize_image writes its annotated output to
    # static/uploaded_image/recognized_faces.jpg, which we then serve.
    recognize_image(source_path)
    return redirect(url_for('static', filename='uploaded_image/recognized_faces.jpg'))
# redirect to the live page
@app.route('/live')
def live():
    """Render the live-recognition page."""
    page = render_template('live.html')
    return page
@app.route('/webcam')
def webcam():
    """Stream live webcam recognition as an MJPEG response."""
    stream = recognize_video()
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
# running the flask app
if __name__ == "__main__":
app.run(debug=True) | [
"flask.render_template",
"flask.Flask",
"numpy.array",
"tensorflow.keras.models.load_model",
"werkzeug.utils.secure_filename",
"numpy.linalg.norm",
"os.path.exists",
"os.listdir",
"cv2.destoryAllWindows",
"flask.flash",
"imutils.video.VideoStream",
"flask.request.form.get",
"dlib.get_frontal... | [((1969, 1984), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1974, 1984), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((2356, 2388), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (2386, 2388), False, 'import dlib\n'), ((2772, 2795), 'tensorflow.keras.models.load_model', 'load_model', (['"""./faceNet"""'], {}), "('./faceNet')\n", (2782, 2795), False, 'from tensorflow.keras.models import load_model\n'), ((2976, 2998), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2986, 2998), False, 'import cv2\n'), ((3449, 3478), 'cv2.resize', 'cv2.resize', (['image', '(160, 160)'], {}), '(image, (160, 160))\n', (3459, 3478), False, 'import cv2\n'), ((4016, 4043), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (4030, 4043), True, 'import numpy as np\n'), ((5214, 5243), 'os.listdir', 'os.listdir', (['"""static/database"""'], {}), "('static/database')\n", (5224, 5243), False, 'import os\n'), ((6285, 6306), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (6295, 6306), False, 'import cv2\n'), ((6481, 6520), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (6493, 6520), False, 'import cv2\n'), ((12089, 12112), 'cv2.destoryAllWindows', 'cv2.destoryAllWindows', ([], {}), '()\n', (12110, 12112), False, 'import cv2\n'), ((12663, 12691), 'os.path.join', 'os.path.join', (['path', 'username'], {}), '(path, username)\n', (12675, 12691), False, 'import os\n'), ((13202, 13223), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (13212, 13223), False, 'import cv2\n'), ((13402, 13441), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (13414, 13441), False, 'import cv2\n'), ((14780, 14808), 'os.path.join', 'os.path.join', (['path', 'username'], {}), '(path, 
username)\n', (14792, 14808), False, 'import os\n'), ((17194, 17217), 'cv2.destoryAllWindows', 'cv2.destoryAllWindows', ([], {}), '()\n', (17215, 17217), False, 'import cv2\n'), ((17305, 17334), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (17320, 17334), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((17440, 17472), 'flask.render_template', 'render_template', (['"""register.html"""'], {}), "('register.html')\n", (17455, 17472), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((17583, 17618), 'flask.render_template', 'render_template', (['"""recognition.html"""'], {}), "('recognition.html')\n", (17598, 17618), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((17778, 17817), 'flask.render_template', 'render_template', (['"""uploadImageData.html"""'], {}), "('uploadImageData.html')\n", (17793, 17817), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((19421, 19472), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (19433, 19472), False, 'import os\n'), ((19854, 19893), 'flask.render_template', 'render_template', (['"""webcamVideoData.html"""'], {}), "('webcamVideoData.html')\n", (19869, 19893), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((20033, 20061), 'flask.request.form.get', 'request.form.get', (['"""userName"""'], {}), "('userName')\n", (20049, 20061), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((20071, 20129), 'flask.render_template', 'render_template', (['"""webcamVideoData.html"""'], {'username': 'username'}), "('webcamVideoData.html', username=username)\n", (20086, 20129), False, 'from flask import Flask, flash, request, 
redirect, url_for, render_template, Response\n'), ((20520, 20550), 'flask.render_template', 'render_template', (['"""upload.html"""'], {}), "('upload.html')\n", (20535, 20550), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((21748, 21798), 'os.path.join', 'os.path.join', (['"""static"""', '"""uploaded_image"""', 'filename'], {}), "('static', 'uploaded_image', filename)\n", (21760, 21798), False, 'import os\n'), ((21993, 22021), 'flask.render_template', 'render_template', (['"""live.html"""'], {}), "('live.html')\n", (22008, 22021), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((4679, 4711), 'numpy.linalg.norm', 'np.linalg.norm', (['embedding'], {'ord': '(2)'}), '(embedding, ord=2)\n', (4693, 4711), True, 'import numpy as np\n'), ((8714, 8778), 'cv2.imwrite', 'cv2.imwrite', (['"""static/uploaded_image/recognized_faces.jpg"""', 'image'], {}), "('static/uploaded_image/recognized_faces.jpg', image)\n", (8725, 8778), False, 'import cv2\n'), ((9252, 9284), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(400)'}), '(frame, width=400)\n', (9266, 9284), False, 'import imutils\n'), ((11696, 11723), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (11708, 11723), False, 'import cv2\n'), ((12809, 12834), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (12823, 12834), False, 'import os\n'), ((13036, 13075), 'os.makedirs', 'os.makedirs', (['directory'], {'exist_ok': '"""True"""'}), "(directory, exist_ok='True')\n", (13047, 13075), False, 'import os\n'), ((14389, 14448), 'cv2.imwrite', 'cv2.imwrite', (["(directory + '/' + username + '.jpg')", 'roi_image'], {}), "(directory + '/' + username + '.jpg', roi_image)\n", (14400, 14448), False, 'import cv2\n'), ((14924, 14949), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (14938, 14949), False, 'import os\n'), ((15155, 15194), 
'os.makedirs', 'os.makedirs', (['directory'], {'exist_ok': '"""True"""'}), "(directory, exist_ok='True')\n", (15166, 15194), False, 'import os\n'), ((15664, 15696), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(400)'}), '(frame, width=400)\n', (15678, 15696), False, 'import imutils\n'), ((16801, 16828), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (16813, 16828), False, 'import cv2\n'), ((18103, 18124), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (18108, 18124), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((18135, 18156), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (18143, 18156), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((18217, 18257), 'flask.flash', 'flash', (['"""No image selected for uploading"""'], {}), "('No image selected for uploading')\n", (18222, 18257), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((18268, 18289), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (18276, 18289), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((18456, 18486), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (18471, 18486), False, 'from werkzeug.utils import secure_filename\n'), ((18501, 18529), 'flask.request.form.get', 'request.form.get', (['"""userName"""'], {}), "('userName')\n", (18517, 18529), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((18631, 18687), 'flask.flash', 'flash', (['"""Image successfully uploaded and displayed below"""'], {}), "('Image successfully uploaded and displayed below')\n", (18636, 18687), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), 
((18750, 18827), 'flask.render_template', 'render_template', (['"""uploadImageData.html"""'], {'filename': 'filename', 'username': 'username'}), "('uploadImageData.html', filename=filename, username=username)\n", (18765, 18827), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((18839, 18891), 'flask.flash', 'flash', (['"""Allowed image types are png, jpg, jpeg, gif"""'], {}), "('Allowed image types are png, jpg, jpeg, gif')\n", (18844, 18891), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((18902, 18923), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (18910, 18923), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((19087, 19143), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': "('uploaded_image/' + filename)"}), "('static', filename='uploaded_image/' + filename)\n", (19094, 19143), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((19610, 19686), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': "('database/' + username + '/' + username + '.jpg')"}), "('static', filename='database/' + username + '/' + username + '.jpg')\n", (19617, 19686), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((20778, 20799), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (20783, 20799), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((20810, 20831), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (20818, 20831), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((20892, 20932), 'flask.flash', 'flash', (['"""No image selected for uploading"""'], {}), "('No image selected for uploading')\n", (20897, 20932), False, 'from flask import Flask, 
flash, request, redirect, url_for, render_template, Response\n'), ((20943, 20964), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (20951, 20964), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((21084, 21114), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (21099, 21114), False, 'from werkzeug.utils import secure_filename\n'), ((21207, 21263), 'flask.flash', 'flash', (['"""Image successfully uploaded and displayed below"""'], {}), "('Image successfully uploaded and displayed below')\n", (21212, 21263), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((21274, 21323), 'flask.render_template', 'render_template', (['"""upload.html"""'], {'filename': 'filename'}), "('upload.html', filename=filename)\n", (21289, 21323), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((21339, 21391), 'flask.flash', 'flash', (['"""Allowed image types are png, jpg, jpeg, gif"""'], {}), "('Allowed image types are png, jpg, jpeg, gif')\n", (21344, 21391), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((21402, 21423), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (21410, 21423), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((21553, 21609), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': "('uploaded_image/' + filename)"}), "('static', filename='uploaded_image/' + filename)\n", (21560, 21609), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, Response\n'), ((21846, 21911), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': '"""uploaded_image/recognized_faces.jpg"""'}), "('static', filename='uploaded_image/recognized_faces.jpg')\n", (21853, 21911), False, 'from flask import Flask, 
flash, request, redirect, url_for, render_template, Response\n'), ((3770, 3783), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3778, 3783), True, 'import numpy as np\n'), ((5450, 5494), 'os.path.join', 'os.path.join', (['"""static/database"""', 'folder_name'], {}), "('static/database', folder_name)\n", (5462, 5494), False, 'import os\n'), ((7713, 7758), 'numpy.linalg.norm', 'np.linalg.norm', (['(encoding - encoded_image_name)'], {}), '(encoding - encoded_image_name)\n', (7727, 7758), True, 'import numpy as np\n'), ((9024, 9042), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (9035, 9042), False, 'from imutils.video import VideoStream\n'), ((15465, 15483), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (15476, 15483), False, 'from imutils.video import VideoStream\n'), ((18575, 18626), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (18587, 18626), False, 'import os\n'), ((21151, 21202), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (21163, 21202), False, 'import os\n'), ((5992, 6048), 'os.path.join', 'os.path.join', (['"""static/database"""', 'folder_name', 'image_name'], {}), "('static/database', folder_name, image_name)\n", (6004, 6048), False, 'import os\n'), ((10610, 10655), 'numpy.linalg.norm', 'np.linalg.norm', (['(encoding - encoded_image_name)'], {}), '(encoding - encoded_image_name)\n', (10624, 10655), True, 'import numpy as np\n'), ((11956, 11970), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (11967, 11970), False, 'import cv2\n'), ((17061, 17075), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (17072, 17075), False, 'import cv2\n'), ((5761, 5789), 'os.path.basename', 'os.path.basename', (['image_name'], {}), '(image_name)\n', (5777, 5789), False, 'import os\n')] |
#!/usr/bin/env python
"""
---------- Import libraries ----------
"""
import os
import sys
sys.path.append(os.path.join(".."))
import argparse
# Import teaching utils
import numpy as np
import utils.classifier_utils as clf_util
from utils.neuralnetwork import NeuralNetwork
# Import sklearn metrics
from sklearn import metrics
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from sklearn import datasets
"""
---------- Main function ----------
"""
def main():
    """Train and evaluate a simple feed-forward neural network on MNIST.

    Parses command-line options for the train/test split sizes and the
    number of epochs, fetches MNIST from OpenML, trains the network, and
    prints a classification report.
    """
    """
    ---------- Parameters ----------
    """
    # Create an argument parser from argparse
    ap = argparse.ArgumentParser()
    # add argument about size of training data with 80% as default
    ap.add_argument("-trs", "--train_size",
                    required=False, default = 0.8,
                    type = float,
                    help="The size of the train data as percent, the default is 0.8")
    # add argument about size of test data with 20 % as default
    ap.add_argument("-tes", "--test_size",
                    required=False,
                    default = 0.2,
                    type = float,
                    help="The size of the test data as percent, the default is 0.2")
    # add argument about number of epochs with 20 epochs as default
    ap.add_argument("-epo", "--epochs_number",
                    required=False,
                    default = 20,
                    type = int,
                    help="The number of epochs, the default is 20")
    args = vars(ap.parse_args())
    trs_size = args["train_size"]
    tes_size = args["test_size"]
    epochs_number = args["epochs_number"]
    """
    ---------- Neural network model ----------
    """
    # Fix: the original printed the typo "[nfo]" here.
    print("[INFO] Neural network model...")
    # Fetch data. When fetching the data like this, the X and y is already
    # defined as the data and the labels.
    X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
    # Convert to numpy arrays
    X = np.array(X)
    y = np.array(y)
    # MinMax regularization: rescale pixel values into [0, 1].
    X = ( X - X.min())/(X.max() - X.min())
    # Fix: the original evaluated this string without print(), so the
    # message was silently discarded.
    print("[INFO] Splitting into train and test...")
    # Split data. X contains the data and will be split into training and test
    # data. y contains the labels and will split into train and test as well.
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size = trs_size,
                                                        test_size=tes_size)
    # Convert labels from integers to one-hot vectors
    y_train = LabelBinarizer().fit_transform(y_train)
    y_test = LabelBinarizer().fit_transform(y_test)
    # Train the network
    print("[INFO] training network...")
    # Input layer sized to the data, hidden layers of 32 and 16, 10 outputs
    nn = NeuralNetwork([X_train.shape[1], 32, 16, 10])
    print("[INFO] {}".format(nn))
    nn.fit(X_train, y_train, epochs=epochs_number)
    # Evaluate network
    # Fix: the original printed a one-element list here.
    print("[INFO] Evaluating network...")
    predictions = nn.predict(X_test)
    predictions = predictions.argmax(axis=1)
    print(classification_report(y_test.argmax(axis=1), predictions))
# Run the training pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"sklearn.preprocessing.LabelBinarizer",
"sklearn.datasets.fetch_openml",
"utils.neuralnetwork.NeuralNetwork",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.array"
] | [((107, 125), 'os.path.join', 'os.path.join', (['""".."""'], {}), "('..')\n", (119, 125), False, 'import os\n'), ((815, 840), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (838, 840), False, 'import argparse\n'), ((2113, 2166), 'sklearn.datasets.fetch_openml', 'fetch_openml', (['"""mnist_784"""'], {'version': '(1)', 'return_X_y': '(True)'}), "('mnist_784', version=1, return_X_y=True)\n", (2125, 2166), False, 'from sklearn.datasets import fetch_openml\n'), ((2205, 2216), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2213, 2216), True, 'import numpy as np\n'), ((2225, 2236), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2233, 2236), True, 'import numpy as np\n'), ((2551, 2614), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': 'trs_size', 'test_size': 'tes_size'}), '(X, y, train_size=trs_size, test_size=tes_size)\n', (2567, 2614), False, 'from sklearn.model_selection import train_test_split\n'), ((3069, 3114), 'utils.neuralnetwork.NeuralNetwork', 'NeuralNetwork', (['[X_train.shape[1], 32, 16, 10]'], {}), '([X_train.shape[1], 32, 16, 10])\n', (3082, 3114), False, 'from utils.neuralnetwork import NeuralNetwork\n'), ((2847, 2863), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2861, 2863), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((2900, 2916), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2914, 2916), False, 'from sklearn.preprocessing import LabelBinarizer\n')] |
# Author: <NAME>
# License: BSD
import numpy as np
from seglearn import feature_functions
def test_mv_feature_functions():
    """Each feature function returns one value per segment for multivariate data."""
    # Sliding-window data has shape [n_segments, width, n_variables].
    n_segments, width = 20, 30
    data = np.random.rand(n_segments, width, 3)
    funcs = {}
    for factory in (feature_functions.all_features,
                    feature_functions.base_features,
                    feature_functions.hudgins_features,
                    feature_functions.emg_features):
        funcs.update(factory())
    for ftr in funcs.values():
        assert len(ftr(data)) == n_segments
def test_uv_feature_functions():
    """Each feature function returns one value per segment for univariate data."""
    # Sliding-window data has shape [n_segments, width].
    n_segments, width = 20, 30
    data = np.random.rand(n_segments, width)
    funcs = {}
    for factory in (feature_functions.all_features,
                    feature_functions.base_features,
                    feature_functions.hudgins_features,
                    feature_functions.emg_features):
        funcs.update(factory())
    for ftr in funcs.values():
        assert len(ftr(data)) == n_segments
| [
"numpy.random.rand",
"seglearn.feature_functions.base_features",
"seglearn.feature_functions.all_features",
"seglearn.feature_functions.emg_features",
"seglearn.feature_functions.hudgins_features"
] | [((288, 311), 'numpy.random.rand', 'np.random.rand', (['N', 'W', '(3)'], {}), '(N, W, 3)\n', (302, 311), True, 'import numpy as np\n'), ((774, 794), 'numpy.random.rand', 'np.random.rand', (['N', 'W'], {}), '(N, W)\n', (788, 794), True, 'import numpy as np\n'), ((353, 385), 'seglearn.feature_functions.all_features', 'feature_functions.all_features', ([], {}), '()\n', (383, 385), False, 'from seglearn import feature_functions\n'), ((408, 441), 'seglearn.feature_functions.base_features', 'feature_functions.base_features', ([], {}), '()\n', (439, 441), False, 'from seglearn import feature_functions\n'), ((464, 500), 'seglearn.feature_functions.hudgins_features', 'feature_functions.hudgins_features', ([], {}), '()\n', (498, 500), False, 'from seglearn import feature_functions\n'), ((523, 555), 'seglearn.feature_functions.emg_features', 'feature_functions.emg_features', ([], {}), '()\n', (553, 555), False, 'from seglearn import feature_functions\n'), ((836, 868), 'seglearn.feature_functions.all_features', 'feature_functions.all_features', ([], {}), '()\n', (866, 868), False, 'from seglearn import feature_functions\n'), ((891, 924), 'seglearn.feature_functions.base_features', 'feature_functions.base_features', ([], {}), '()\n', (922, 924), False, 'from seglearn import feature_functions\n'), ((947, 983), 'seglearn.feature_functions.hudgins_features', 'feature_functions.hudgins_features', ([], {}), '()\n', (981, 983), False, 'from seglearn import feature_functions\n'), ((1006, 1038), 'seglearn.feature_functions.emg_features', 'feature_functions.emg_features', ([], {}), '()\n', (1036, 1038), False, 'from seglearn import feature_functions\n')] |
import numpy as np
import utilities as util
import astropy.units as u
# -------------------------------------------------------------------------
# Derived Units
# -------------------------------------------------------------------------
# Shorthand composite units built from astropy base units.
cm3 = u.cm**3  # cubic centimetre
dm3 = u.dm**3  # cubic decimetre (litre)
m3 = u.m**3  # cubic metre
g_cm3 = (u.g / cm3)  # mass-density unit [g cm^-3]
molar = u.mol / dm3  # molarity [mol L^-1]
# -------------------------------------------------------------------------
# Constants
# -------------------------------------------------------------------------
avogadro_constant = 6.0221415E+23 / u.mol # atoms [mol^-1]
hydrogen_mass_amu = 1.008 # a.m.u., standard atomic weight
hydrogen_mass_cgs = 1.6733E-24 * u.g # [g]
# -------------------------------------------------------------------------
# Conversion functions for physical quantities
# -------------------------------------------------------------------------
def gas_density_to_hydrogen_number_density(gas_density, percentage_hydrogen=1, log=False):
    """Hydrogen number density from a gas mass density (assumed [g cm^-3]).

    ``percentage_hydrogen`` scales for the hydrogen mass fraction (default 1,
    i.e. a pure-hydrogen gas).  With ``log=True`` the base-10 log of the
    numeric value is returned instead of the astropy Quantity.
    """
    n_h = (gas_density / hydrogen_mass_cgs) * percentage_hydrogen
    if not log:
        return n_h
    return np.log10(n_h.value)
def log_abundance_to_number_density(log_abundance, log_hydrogen_number_density, log_hydrogen_abundance=12):
    """Convert a log photospheric abundance to a number density.

    The abundance scale is anchored at log(hydrogen abundance) =
    ``log_hydrogen_abundance`` (12 by solar-physics convention).
    """
    exponent = (log_abundance - log_hydrogen_abundance
                + log_hydrogen_number_density)
    return 10 ** exponent
def number_density_to_concentration(number_density):
    """Concentration [mol cm^-n] from a number density [cm^-n].

    Divides by the module-level 'avogadro_constant' [mol^-1].
    """
    return number_density / avogadro_constant
def concentration_to_number_density(concentration):
    """Number density [cm^-3] from a concentration (~ [mol cm^-3]).

    Multiplies by the module-level 'avogadro_constant' [mol^-1].
    """
    return concentration * avogadro_constant
# -------------------------------------------------------------------------
# Stoichiometry functions
# -------------------------------------------------------------------------
def calculate_stoichiometry(reactants_list, products_list, return_dicts=False):
    """Count species occurrences on each side of a reaction.

    Parameters
    ----------
    reactants_list, products_list : list
        Species instances appearing on either side of the reaction.
    return_dicts : bool
        If True, return the full {species: count} dicts; otherwise return
        only the stoichiometric coefficients.

    Returns
    -------
    tuple
        Either the two dicts or two lists of coefficients.
    """
    reactant_stoichiometry = util.list_instances_to_dict(reactants_list)
    product_stoichiometry = util.list_instances_to_dict(products_list)
    if return_dicts:
        return reactant_stoichiometry, product_stoichiometry
    # Bug fix: the original returned the bound ``dict.values`` methods
    # (missing call parentheses), not the coefficient values themselves.
    return (list(reactant_stoichiometry.values()),
            list(product_stoichiometry.values()))
# -------------------------------------------------------------------------
# Reaction rate and timescale functions
# -------------------------------------------------------------------------
def modified_to_arrhenius_prefactor(alpha, beta, temperature):
    """Modified-Arrhenius prefactor -> plain Arrhenius prefactor: alpha * (T/300)^beta."""
    scaled_temperature = temperature / 300
    return alpha * scaled_temperature ** beta
def modified_arrhenius_rate(alpha=1, beta=0, gamma=0, temperature=300):
    """Modified Arrhenius rate: alpha * (T/300[K])^beta * exp(-gamma / T).

    With beta=0 this reduces to the standard Arrhenius form.
    """
    prefactor = alpha * (temperature / 300) ** beta
    return prefactor * np.exp(-gamma / temperature)
def arrhenius_rate(prefactor, activation_energy, temperature):
    """Arrhenius rate k = A * exp(-Ea / T), with Ea expressed in temperature units."""
    boltzmann_factor = np.exp(-activation_energy / temperature)
    return prefactor * boltzmann_factor
def calculate_unitless_timescale(forward_rate, reverse_rate=None, reactant_stoichiometry=(1, 1)):
    """Reaction timescale for dimensionless rates.

    Example case: A + B -> C.  Only valid when the rates are dimensionless;
    otherwise the rates must be related through the number densities of the
    reactants and products.

    Parameters
    ----------
    forward_rate : object
        Forward rate exposing a ``.value`` attribute (e.g. astropy Quantity).
    reverse_rate : object, optional
        Reverse rate exposing ``.value``; added to the total rate when given.
    reactant_stoichiometry : sequence of int
        Stoichiometric coefficients of the primary reactants.

    Returns
    -------
    float
        Timescale = 1 / (total rate).
    """
    # Bug fixes vs. original: ``is not None`` instead of ``!= None`` (the
    # latter misbehaves for objects overriding __ne__, e.g. Quantities and
    # numpy arrays), and an immutable tuple default for the stoichiometry.
    total_rate = forward_rate.value * np.sum(reactant_stoichiometry)
    if reverse_rate is not None:
        total_rate += reverse_rate.value
    return 1 / total_rate
# -------------------------------------------------------------------------
# Functions for determining equilibrium quantities
# -------------------------------------------------------------------------
def equilibrium_constant_dimensional(forward_rate_coefficient,
                                     reverse_rate_coefficient):
    """Dimensional equilibrium constant K_eq = k_f / k_r."""
    return forward_rate_coefficient / reverse_rate_coefficient
# -------------------------------------------------------------------------
# Utility functions for rates and timescales
# -------------------------------------------------------------------------
def scale_timescale_with_number_density(timescale, reactant_number_density):
    """Scale a timescale by the primary-reactant number density; Wedemeyer-Boehm (2005)."""
    return timescale / reactant_number_density
def convert_rate_to_molar(rate, order):
    """Convert a rate in [cm^-3n s^-1] (n = reaction order) to molar units.

    cm^3 -> dm^3 combined with the Avogadro number takes volume/atom to
    volume/mol, yielding units of [mol^n dm^-3n s^-1].
    """
    per_order_factor = cm3.to(dm3) * avogadro_constant
    return rate * per_order_factor ** order
# -------------------------------------------------------------------------
# Unit conversion functions
# -------------------------------------------------------------------------
def get_rate_unit_from_order(order, unit_system='cgs'):
    """Return the astropy unit of a rate of reaction order ``order``.

    'cgs'   -> cm^(3*order) s^-1
    'molar' -> M^(1 - order) s^-1
    An unrecognised ``unit_system`` leaves ``unit`` as None, so the final
    division by seconds raises (same behaviour as the original).
    """
    if unit_system == 'cgs':
        unit = cm3 ** order
    elif unit_system == 'molar':
        unit = u.M ** (1 - order)
    else:
        unit = None
    unit /= u.s
    return unit
"numpy.exp",
"utilities.list_instances_to_dict",
"numpy.log10",
"numpy.sum"
] | [((2442, 2485), 'utilities.list_instances_to_dict', 'util.list_instances_to_dict', (['reactants_list'], {}), '(reactants_list)\n', (2469, 2485), True, 'import utilities as util\n'), ((2512, 2554), 'utilities.list_instances_to_dict', 'util.list_instances_to_dict', (['products_list'], {}), '(products_list)\n', (2539, 2554), True, 'import utilities as util\n'), ((1185, 1224), 'numpy.log10', 'np.log10', (['hydrogen_number_density.value'], {}), '(hydrogen_number_density.value)\n', (1193, 1224), True, 'import numpy as np\n'), ((3290, 3318), 'numpy.exp', 'np.exp', (['(-gamma / temperature)'], {}), '(-gamma / temperature)\n', (3296, 3318), True, 'import numpy as np\n'), ((3404, 3444), 'numpy.exp', 'np.exp', (['(-activation_energy / temperature)'], {}), '(-activation_energy / temperature)\n', (3410, 3444), True, 'import numpy as np\n'), ((3796, 3826), 'numpy.sum', 'np.sum', (['reactant_stoichiometry'], {}), '(reactant_stoichiometry)\n', (3802, 3826), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""evaluation"""
import os
from os.path import join
import argparse
import glob
import numpy as np
from scipy.io import wavfile
from hparams import hparams, hparams_debug_string
import audio
from tqdm import tqdm
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset.engine as de
from nnmnkwii import preprocessing as P
from nnmnkwii.datasets import FileSourceDataset
from wavenet_vocoder import WaveNet
from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input
from src.dataset import RawAudioDataSource, MelSpecDataSource, DualDataset
# Command-line interface: data/checkpoint paths, hparams preset json,
# numpy-vs-graph inference mode, output directory and target device.
parser = argparse.ArgumentParser(description='TTS training')
parser.add_argument('--data_path', type=str, required=True, default='',
                    help='Directory contains preprocessed features.')
parser.add_argument('--preset', type=str, required=True, default='', help='Path of preset parameters (json).')
parser.add_argument('--pretrain_ckpt', type=str, default='', help='Pretrained checkpoint path')
parser.add_argument('--is_numpy', action="store_true", default=False, help='Using numpy for inference or not')
parser.add_argument('--output_path', type=str, default='./out_wave/', help='Path to save generated audios')
parser.add_argument('--speaker_id', type=str, default='',
                    help=' Use specific speaker of data in case for multi-speaker datasets.')
parser.add_argument('--platform', type=str, default='GPU', choices=('GPU', 'CPU'),
                    help='run platform, support GPU and CPU. Default: GPU')
args = parser.parse_args()
def get_data_loader(hparam, data_dir):
    """Build the evaluation data loader.

    Pairs optional raw-audio arrays (*-wave.npy) with mel-spectrogram
    features from ``data_dir`` into a DualDataset, wrapped in a MindSpore
    GeneratorDataset.

    Parameters
    ----------
    hparam : HParams
        Hyper-parameters (uses cin_pad, cin_channels, batch_size).
    data_dir : str
        Directory containing the preprocessed feature files.

    Returns
    -------
    tuple
        (GeneratorDataset, DualDataset)
    """
    wav_paths = glob.glob(os.path.join(data_dir, "*-wave.npy"))
    if wav_paths:
        X = FileSourceDataset(RawAudioDataSource(data_dir,
                                              hop_size=audio.get_hop_size(),
                                              max_steps=None, cin_pad=hparam.cin_pad))
    else:
        # No raw audio present: conditional-features-only evaluation.
        X = None
    C = FileSourceDataset(MelSpecDataSource(data_dir,
                                            hop_size=audio.get_hop_size(),
                                            max_steps=None, cin_pad=hparam.cin_pad))
    length_x = np.array(C.file_data_source.lengths)
    if C[0].shape[-1] != hparam.cin_channels:
        # Bug fix: the original message had the format args swapped (it
        # labelled the configured value as "invalid") and a typo
        # ("cin_channnels"). Report the actual data channels first.
        raise RuntimeError("Invalid cin_channels {}. Expected to be {}."
                           .format(C[0].shape[-1], hparam.cin_channels))
    dataset = DualDataset(X, C, length_x, batch_size=hparam.batch_size, hparams=hparam)
    data_loader = de.GeneratorDataset(dataset, ["x_batch", "y_batch", "c_batch", "g_batch", "input_lengths", "mask"])
    return data_loader, dataset
def batch_wavegen(hparam, net, c_input=None, g_input=None, tqdm_=None, is_numpy=True):
    """Generate a batch of waveforms from local conditioning features.

    Runs the WaveNet incremental (autoregressive) forward pass, then inverts
    the mu-law encoding and applies optional postprocessing / gain scaling.

    Parameters
    ----------
    hparam : HParams
        Hyper-parameters (input_type, cin_pad, quantize_channels, etc.).
    net : WaveNet
        Trained vocoder network.
    c_input : Tensor
        Local conditioning features, shape (B, cin_channels, T_frames).
    g_input : Tensor | None
        Global conditioning (e.g. speaker id), if any.
    tqdm_ : callable | None
        Progress-bar wrapper passed through to incremental_forward.
    is_numpy : bool
        Run the incremental decode with numpy instead of graph ops.

    Returns
    -------
    np.ndarray
        Generated audio, shape (B, T_samples), float values.
    """
    assert c_input is not None
    B = c_input.shape[0]
    net.set_train(False)
    if hparam.upsample_conditional_features:
        # Output length in samples: frames (minus conditioning padding) * hop.
        length = (c_input.shape[-1] - hparam.cin_pad * 2) * audio.get_hop_size()
    else:
        # already duplicated to sample rate upstream
        length = c_input.shape[-1]
    y_hat = net.incremental_forward(c=c_input, g=g_input, T=length, tqdm=tqdm_, softmax=True, quantize=True,
                                    log_scale_min=hparam.log_scale_min, is_numpy=is_numpy)
    if is_mulaw_quantize(hparam.input_type):
        # needs to be float since mulaw_inv returns in range of [-1, 1]
        y_hat = np.reshape(np.argmax(y_hat, 1), (B, -1))
        y_hat = y_hat.astype(np.float32)
        for k in range(B):
            y_hat[k] = P.inv_mulaw_quantize(y_hat[k], hparam.quantize_channels - 1)
    elif is_mulaw(hparam.input_type):
        y_hat = np.reshape(y_hat, (B, -1))
        for k in range(B):
            y_hat[k] = P.inv_mulaw(y_hat[k], hparam.quantize_channels - 1)
    else:
        # scalar input: samples are already in waveform space
        y_hat = np.reshape(y_hat, (B, -1))
    if hparam.postprocess is not None and hparam.postprocess not in ["", "none"]:
        # Optional named postprocess function looked up on the audio module.
        for k in range(B):
            y_hat[k] = getattr(audio, hparam.postprocess)(y_hat[k])
    if hparam.global_gain_scale > 0:
        # Undo the global gain applied during preprocessing.
        for k in range(B):
            y_hat[k] /= hparam.global_gain_scale
    return y_hat
def to_int16(x_):
    """Return ``x_`` as int16 PCM samples.

    int16 input passes through unchanged; float32 input must lie in
    [-1, 1] and is scaled to the full int16 range.
    """
    if x_.dtype == np.int16:
        return x_
    assert x_.dtype == np.float32
    assert -1.0 <= x_.min() and x_.max() <= 1.0
    scaled = x_ * 32767
    return scaled.astype(np.int16)
def get_reference_file(hparam, dataset_source, idx):
    """Collect reference audio/feature file paths for one batch.

    Walks ``hparam.batch_size`` consecutive entries starting at ``idx``.
    Audio paths are taken from ``dataset_source.X`` when present; feature
    paths come from ``dataset_source.Mel`` when present, otherwise from the
    dataset's own collected files.

    Returns
    -------
    tuple
        (reference audio paths, reference feature paths, next index)
    """
    reference_files = []
    reference_feats = []
    has_audio = hasattr(dataset_source, "X")
    has_mel = hasattr(dataset_source, "Mel")
    for _ in range(hparam.batch_size):
        if has_audio:
            reference_files.append(dataset_source.X.collected_files[idx][0])
        if has_mel:
            reference_feats.append(dataset_source.Mel.collected_files[idx][0])
        else:
            reference_feats.append(dataset_source.collected_files[idx][0])
        idx += 1
    return reference_files, reference_feats, idx
def get_saved_audio_name(has_ref_file_, ref_file, ref_feat, g_fp):
    """Build the output paths for a generated/reference audio pair.

    The base name is derived from the reference audio path (suffix "-wave"
    stripped) when available, otherwise from the feature path (suffix
    "-feats" stripped). When a global conditioning value ``g_fp`` is given,
    it is embedded in the file names as a speaker id.

    Returns
    -------
    tuple
        (generated wav path, reference wav path)
    """
    if has_ref_file_:
        target_audio_path = ref_file
        name = os.path.splitext(os.path.basename(target_audio_path))[0].replace("-wave", "")
    else:
        target_feat_path = ref_feat
        name = os.path.splitext(os.path.basename(target_feat_path))[0].replace("-feats", "")
    # Paths
    if g_fp is None:
        dst_wav_path_ = join(args.output_path, "{}_gen.wav".format(name))
        target_wav_path_ = join(args.output_path, "{}_ref.wav".format(name))
    else:
        # Bug fix: the original formatted these names with the global ``g``
        # (defined only inside the __main__ loop) instead of the ``g_fp``
        # parameter, making the function depend on module state.
        dst_wav_path_ = join(args.output_path, "speaker{}_{}_gen.wav".format(g_fp, name))
        target_wav_path_ = join(args.output_path, "speaker{}_{}_ref.wav".format(g_fp, name))
    return dst_wav_path_, target_wav_path_
def save_ref_audio(hparam, ref, length, target_wav_path_):
    """Decode a reference signal and write it to disk as 16-bit PCM.

    Mirrors the decode path of batch_wavegen for a single utterance:
    undo one-hot/mu-law encoding, apply optional postprocess and gain,
    clip to [-1, 1] and save with scipy.io.wavfile.

    Parameters
    ----------
    hparam : HParams
        Hyper-parameters (input_type, quantize_channels, postprocess, ...).
    ref : np.ndarray
        Encoded reference signal (one-hot over quantize channels when
        mu-law-quantized input, otherwise raw samples).
    length : int
        Number of valid samples (trims padding).
    target_wav_path_ : str
        Destination .wav path.
    """
    if is_mulaw_quantize(hparam.input_type):
        # one-hot -> class indices, then trim to the valid length
        ref = np.reshape(np.argmax(ref, 0), (-1))[:length]
        ref = ref.astype(np.float32)
    else:
        ref = np.reshape(ref, (-1))[:length]
    if is_mulaw_quantize(hparam.input_type):
        ref = P.inv_mulaw_quantize(ref, hparam.quantize_channels - 1)
    elif is_mulaw(hparam.input_type):
        ref = P.inv_mulaw(ref, hparam.quantize_channels - 1)
    if hparam.postprocess is not None and hparam.postprocess not in ["", "none"]:
        ref = getattr(audio, hparam.postprocess)(ref)
    if hparam.global_gain_scale > 0:
        # undo the global gain applied during preprocessing
        ref /= hparam.global_gain_scale
    ref = np.clip(ref, -1.0, 1.0)
    wavfile.write(target_wav_path_, hparam.sample_rate, to_int16(ref))
if __name__ == '__main__':
    # ---- Runtime / hparams setup -------------------------------------
    context.set_context(mode=context.GRAPH_MODE, device_target=args.platform, save_graphs=False)
    speaker_id = int(args.speaker_id) if args.speaker_id != '' else None
    if args.preset is not None:
        with open(args.preset) as f:
            hparams.parse_json(f.read())
    assert hparams.name == "wavenet_vocoder"
    print(hparams_debug_string())
    fs = hparams.sample_rate
    # Evaluation-specific overrides: fixed batch size, no clip limits.
    hparams.batch_size = 10
    hparams.max_time_sec = None
    hparams.max_time_steps = None
    data_loaders, source_dataset = get_data_loader(hparam=hparams, data_dir=args.data_path)
    # ---- Model construction and checkpoint restore -------------------
    upsample_params = hparams.upsample_params
    upsample_params["cin_channels"] = hparams.cin_channels
    upsample_params["cin_pad"] = hparams.cin_pad
    model = WaveNet(
        out_channels=hparams.out_channels,
        layers=hparams.layers,
        stacks=hparams.stacks,
        residual_channels=hparams.residual_channels,
        gate_channels=hparams.gate_channels,
        skip_out_channels=hparams.skip_out_channels,
        cin_channels=hparams.cin_channels,
        gin_channels=hparams.gin_channels,
        n_speakers=hparams.n_speakers,
        dropout=hparams.dropout,
        kernel_size=hparams.kernel_size,
        cin_pad=hparams.cin_pad,
        upsample_conditional_features=hparams.upsample_conditional_features,
        upsample_params=upsample_params,
        scalar_input=is_scalar_input(hparams.input_type),
        output_distribution=hparams.output_distribution,
    )
    param_dict = load_checkpoint(args.pretrain_ckpt)
    load_param_into_net(model, param_dict)
    print('Successfully loading the pre-trained model')
    os.makedirs(args.output_path, exist_ok=True)
    # ---- Batch-wise generation loop ----------------------------------
    cin_pad = hparams.cin_pad
    file_idx = 0
    for data in data_loaders.create_dict_iterator():
        x, y, c, g, input_lengths = data['x_batch'], data['y_batch'], data['c_batch'], data['g_batch'], data[
            'input_lengths']
        if cin_pad > 0:
            # edge-pad conditioning features along time to match training
            c = c.asnumpy()
            c = np.pad(c, pad_width=(cin_pad, cin_pad), mode="edge")
            c = Tensor(c)
        ref_files, ref_feats, file_idx = get_reference_file(hparams, source_dataset, file_idx)
        # Generate
        y_hats = batch_wavegen(hparams, model, data['c_batch'], tqdm_=tqdm, is_numpy=args.is_numpy)
        x = x.asnumpy()
        input_lengths = input_lengths.asnumpy()
        # Save each utt.
        has_ref_file = bool(ref_files)
        for i, (ref_, gen_, length_) in enumerate(zip(x, y_hats, input_lengths)):
            dst_wav_path, target_wav_path = get_saved_audio_name(has_ref_file_=has_ref_file, ref_file=ref_files[i],
                                                                 ref_feat=ref_feats[i], g_fp=g)
            save_ref_audio(hparams, ref_, length_, target_wav_path)
            gen = gen_[:length_]
            gen = np.clip(gen, -1.0, 1.0)
            wavfile.write(dst_wav_path, hparams.sample_rate, to_int16(gen))
| [
"numpy.clip",
"audio.get_hop_size",
"wavenet_vocoder.util.is_mulaw_quantize",
"nnmnkwii.preprocessing.inv_mulaw_quantize",
"numpy.array",
"src.dataset.DualDataset",
"numpy.reshape",
"argparse.ArgumentParser",
"hparams.hparams_debug_string",
"mindspore.Tensor",
"wavenet_vocoder.util.is_scalar_inp... | [((1355, 1406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TTS training"""'}), "(description='TTS training')\n", (1378, 1406), False, 'import argparse\n'), ((2988, 3024), 'numpy.array', 'np.array', (['C.file_data_source.lengths'], {}), '(C.file_data_source.lengths)\n', (2996, 3024), True, 'import numpy as np\n'), ((3209, 3282), 'src.dataset.DualDataset', 'DualDataset', (['X', 'C', 'length_x'], {'batch_size': 'hparam.batch_size', 'hparams': 'hparam'}), '(X, C, length_x, batch_size=hparam.batch_size, hparams=hparam)\n', (3220, 3282), False, 'from src.dataset import RawAudioDataSource, MelSpecDataSource, DualDataset\n'), ((3304, 3407), 'mindspore.dataset.engine.GeneratorDataset', 'de.GeneratorDataset', (['dataset', "['x_batch', 'y_batch', 'c_batch', 'g_batch', 'input_lengths', 'mask']"], {}), "(dataset, ['x_batch', 'y_batch', 'c_batch', 'g_batch',\n 'input_lengths', 'mask'])\n", (3323, 3407), True, 'import mindspore.dataset.engine as de\n'), ((4075, 4111), 'wavenet_vocoder.util.is_mulaw_quantize', 'is_mulaw_quantize', (['hparam.input_type'], {}), '(hparam.input_type)\n', (4092, 4111), False, 'from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input\n'), ((6786, 6822), 'wavenet_vocoder.util.is_mulaw_quantize', 'is_mulaw_quantize', (['hparam.input_type'], {}), '(hparam.input_type)\n', (6803, 6822), False, 'from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input\n'), ((6989, 7025), 'wavenet_vocoder.util.is_mulaw_quantize', 'is_mulaw_quantize', (['hparam.input_type'], {}), '(hparam.input_type)\n', (7006, 7025), False, 'from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input\n'), ((7429, 7452), 'numpy.clip', 'np.clip', (['ref', '(-1.0)', '(1.0)'], {}), '(ref, -1.0, 1.0)\n', (7436, 7452), True, 'import numpy as np\n'), ((7564, 7660), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 
'device_target': 'args.platform', 'save_graphs': '(False)'}), '(mode=context.GRAPH_MODE, device_target=args.platform,\n save_graphs=False)\n', (7583, 7660), False, 'from mindspore import context, Tensor\n'), ((9094, 9129), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['args.pretrain_ckpt'], {}), '(args.pretrain_ckpt)\n', (9109, 9129), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((9135, 9173), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['model', 'param_dict'], {}), '(model, param_dict)\n', (9154, 9173), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((9238, 9282), 'os.makedirs', 'os.makedirs', (['args.output_path'], {'exist_ok': '(True)'}), '(args.output_path, exist_ok=True)\n', (9249, 9282), False, 'import os\n'), ((2435, 2471), 'os.path.join', 'os.path.join', (['data_dir', '"""*-wave.npy"""'], {}), "(data_dir, '*-wave.npy')\n", (2447, 2471), False, 'import os\n'), ((4409, 4436), 'wavenet_vocoder.util.is_mulaw', 'is_mulaw', (['hparam.input_type'], {}), '(hparam.input_type)\n', (4417, 4436), False, 'from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input\n'), ((7042, 7097), 'nnmnkwii.preprocessing.inv_mulaw_quantize', 'P.inv_mulaw_quantize', (['ref', '(hparam.quantize_channels - 1)'], {}), '(ref, hparam.quantize_channels - 1)\n', (7062, 7097), True, 'from nnmnkwii import preprocessing as P\n'), ((7108, 7135), 'wavenet_vocoder.util.is_mulaw', 'is_mulaw', (['hparam.input_type'], {}), '(hparam.input_type)\n', (7116, 7135), False, 'from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input\n'), ((7903, 7925), 'hparams.hparams_debug_string', 'hparams_debug_string', ([], {}), '()\n', (7923, 7925), False, 'from hparams import hparams, hparams_debug_string\n'), ((3762, 3782), 'audio.get_hop_size', 'audio.get_hop_size', ([], {}), '()\n', (3780, 3782), False, 'import audio\n'), ((4214, 
4233), 'numpy.argmax', 'np.argmax', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (4223, 4233), True, 'import numpy as np\n'), ((4338, 4398), 'nnmnkwii.preprocessing.inv_mulaw_quantize', 'P.inv_mulaw_quantize', (['y_hat[k]', '(hparam.quantize_channels - 1)'], {}), '(y_hat[k], hparam.quantize_channels - 1)\n', (4358, 4398), True, 'from nnmnkwii import preprocessing as P\n'), ((4455, 4481), 'numpy.reshape', 'np.reshape', (['y_hat', '(B, -1)'], {}), '(y_hat, (B, -1))\n', (4465, 4481), True, 'import numpy as np\n'), ((4614, 4640), 'numpy.reshape', 'np.reshape', (['y_hat', '(B, -1)'], {}), '(y_hat, (B, -1))\n', (4624, 4640), True, 'import numpy as np\n'), ((6948, 6967), 'numpy.reshape', 'np.reshape', (['ref', '(-1)'], {}), '(ref, -1)\n', (6958, 6967), True, 'import numpy as np\n'), ((7152, 7198), 'nnmnkwii.preprocessing.inv_mulaw', 'P.inv_mulaw', (['ref', '(hparam.quantize_channels - 1)'], {}), '(ref, hparam.quantize_channels - 1)\n', (7163, 7198), True, 'from nnmnkwii import preprocessing as P\n'), ((8972, 9007), 'wavenet_vocoder.util.is_scalar_input', 'is_scalar_input', (['hparams.input_type'], {}), '(hparams.input_type)\n', (8987, 9007), False, 'from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input\n'), ((9600, 9652), 'numpy.pad', 'np.pad', (['c'], {'pad_width': '(cin_pad, cin_pad)', 'mode': '"""edge"""'}), "(c, pad_width=(cin_pad, cin_pad), mode='edge')\n", (9606, 9652), True, 'import numpy as np\n'), ((9670, 9679), 'mindspore.Tensor', 'Tensor', (['c'], {}), '(c)\n', (9676, 9679), False, 'from mindspore import context, Tensor\n'), ((10460, 10483), 'numpy.clip', 'np.clip', (['gen', '(-1.0)', '(1.0)'], {}), '(gen, -1.0, 1.0)\n', (10467, 10483), True, 'import numpy as np\n'), ((2862, 2882), 'audio.get_hop_size', 'audio.get_hop_size', ([], {}), '()\n', (2880, 2882), False, 'import audio\n'), ((4534, 4585), 'nnmnkwii.preprocessing.inv_mulaw', 'P.inv_mulaw', (['y_hat[k]', '(hparam.quantize_channels - 1)'], {}), '(y_hat[k], hparam.quantize_channels - 
1)\n', (4545, 4585), True, 'from nnmnkwii import preprocessing as P\n'), ((6850, 6867), 'numpy.argmax', 'np.argmax', (['ref', '(0)'], {}), '(ref, 0)\n', (6859, 6867), True, 'import numpy as np\n'), ((2611, 2631), 'audio.get_hop_size', 'audio.get_hop_size', ([], {}), '()\n', (2629, 2631), False, 'import audio\n'), ((6045, 6080), 'os.path.basename', 'os.path.basename', (['target_audio_path'], {}), '(target_audio_path)\n', (6061, 6080), False, 'import os\n'), ((6187, 6221), 'os.path.basename', 'os.path.basename', (['target_feat_path'], {}), '(target_feat_path)\n', (6203, 6221), False, 'import os\n')] |
from sklearn import svm
from random import shuffle
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import numpy as np
def make_meshgrid(x, y, h=.02):
    """Build a 2-D evaluation grid covering x/y ranges padded by 1, step h."""
    x_range = np.arange(x.min() - 1, x.max() + 1, h)
    y_range = np.arange(y.min() - 1, y.max() + 1, h)
    return np.meshgrid(x_range, y_range)
def plot_contours(ax, clf, xx, yy, **params):
    """Draw the classifier's decision regions over the mesh (xx, yy) on ax."""
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
# ---- Load and shuffle the train/test files ----------------------------
# Each tab-separated line: index, x1, x2, colour label; first line is a header.
f = open("data/svmdata_e_test.txt", "r")
a_test_mass = f.readlines()
f.close()
a_test_mass = a_test_mass[1:]
shuffle(a_test_mass)
v = open("data/svmdata_e.txt", "r")
a_mass = v.readlines()
v.close()
a_mass = a_mass[1:]
shuffle(a_mass)
# colour label -> integer class
translator = {
    "red" : 0,
    "green" : 1
}
test_val = len(a_mass)
# Parse training rows into feature pairs (globalX) and class ids (globalY).
globalX = []
globalY = []
for i in range(0, test_val):
    line = a_mass[i]
    line = line.rstrip("\n")
    arr = line.split("\t")
    currX = arr[1:3]
    currY = translator[arr[3]]
    globalX.append(currX)
    globalY.append(currY)
# Same parsing for the held-out test rows.
testX = []
testY = []
for i in range(0, len(a_test_mass)):
    line = a_test_mass[i]
    line = line.rstrip("\n")
    arr = line.split("\t")
    currX = arr[1:3]
    currY = translator[arr[3]]
    testX.append(currX)
    testY.append(currY)
# Convert parsed string data to numeric arrays.
# Bug fix: np.float / np.int were deprecated in NumPy 1.20 and removed in
# 1.24; the builtin float / int are the documented replacements.
globalX = np.array(globalX)
globalY = np.array(globalY)
globalX = globalX.astype(float)
globalY = globalY.astype(int)
testX = np.array(testX)
testY = np.array(testY)
testX = testX.astype(float)
testY = testY.astype(int)
# ---- Fit a family of SVMs and plot their decision regions -------------
X = globalX
y = globalY
C = 0.2 # SVM regularization parameter
gamma = 0.1
# One model per kernel: RBF, polynomial degrees 1-5, sigmoid.
models = (
    svm.SVC(kernel='rbf', gamma=gamma, C=C),
    svm.SVC(kernel='poly', degree=1, gamma=gamma, C=C),
    svm.SVC(kernel='poly', degree=2, gamma=gamma, C=C),
    svm.SVC(kernel='poly', degree=3, gamma=gamma, C=C),
    svm.SVC(kernel='poly', degree=4, gamma=gamma, C=C),
    svm.SVC(kernel='poly', degree=5, gamma=gamma, C=C),
    svm.SVC(kernel="sigmoid", gamma=gamma))
# Lazily fit each model as the plotting loop consumes this generator.
models = (clf.fit(X, y) for clf in models)
# predictions = (clf.predict(testX) for clf in models)
# for i in predictions:
#     print(accuracy_score(testY, i))
#     c_matrix = confusion_matrix(testY, i)
#     print(c_matrix)
#     print("***")
titles = (
    'RBF (Gauss)',
    'Poly 1',
    'Poly 2',
    'Poly 3',
    'Poly 4',
    'Poly 5',
    'sigmoid')
# Set-up 4x2 grid for plotting.
fig, sub = plt.subplots(4, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
# One subplot per kernel: decision regions plus the training points.
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.title('Current Gamma = ' + str(gamma) )
plt.show()
plt.show() | [
"sklearn.svm.SVC",
"random.shuffle",
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((729, 749), 'random.shuffle', 'shuffle', (['a_test_mass'], {}), '(a_test_mass)\n', (736, 749), False, 'from random import shuffle\n'), ((840, 855), 'random.shuffle', 'shuffle', (['a_mass'], {}), '(a_mass)\n', (847, 855), False, 'from random import shuffle\n'), ((1419, 1436), 'numpy.array', 'np.array', (['globalX'], {}), '(globalX)\n', (1427, 1436), True, 'import numpy as np\n'), ((1447, 1464), 'numpy.array', 'np.array', (['globalY'], {}), '(globalY)\n', (1455, 1464), True, 'import numpy as np\n'), ((1542, 1557), 'numpy.array', 'np.array', (['testX'], {}), '(testX)\n', (1550, 1557), True, 'import numpy as np\n'), ((1566, 1581), 'numpy.array', 'np.array', (['testY'], {}), '(testY)\n', (1574, 1581), True, 'import numpy as np\n'), ((2590, 2608), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(2)'], {}), '(4, 2)\n', (2602, 2608), True, 'import matplotlib.pyplot as plt\n'), ((2609, 2652), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.4)', 'hspace': '(0.4)'}), '(wspace=0.4, hspace=0.4)\n', (2628, 2652), True, 'import matplotlib.pyplot as plt\n'), ((3180, 3190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3188, 3190), True, 'import matplotlib.pyplot as plt\n'), ((1741, 1780), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'C': 'C'}), "(kernel='rbf', gamma=gamma, C=C)\n", (1748, 1780), False, 'from sklearn import svm\n'), ((1792, 1842), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""poly"""', 'degree': '(1)', 'gamma': 'gamma', 'C': 'C'}), "(kernel='poly', degree=1, gamma=gamma, C=C)\n", (1799, 1842), False, 'from sklearn import svm\n'), ((1854, 1904), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""poly"""', 'degree': '(2)', 'gamma': 'gamma', 'C': 'C'}), "(kernel='poly', degree=2, gamma=gamma, C=C)\n", (1861, 1904), False, 'from sklearn import svm\n'), ((1916, 1966), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""poly"""', 'degree': '(3)', 'gamma': 'gamma', 'C': 'C'}), 
"(kernel='poly', degree=3, gamma=gamma, C=C)\n", (1923, 1966), False, 'from sklearn import svm\n'), ((1978, 2028), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""poly"""', 'degree': '(4)', 'gamma': 'gamma', 'C': 'C'}), "(kernel='poly', degree=4, gamma=gamma, C=C)\n", (1985, 2028), False, 'from sklearn import svm\n'), ((2040, 2090), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""poly"""', 'degree': '(5)', 'gamma': 'gamma', 'C': 'C'}), "(kernel='poly', degree=5, gamma=gamma, C=C)\n", (2047, 2090), False, 'from sklearn import svm\n'), ((2102, 2140), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""sigmoid"""', 'gamma': 'gamma'}), "(kernel='sigmoid', gamma=gamma)\n", (2109, 2140), False, 'from sklearn import svm\n'), ((336, 362), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (345, 362), True, 'import numpy as np\n'), ((389, 415), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (398, 415), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Solar resource downscaling utility methods.
Created on April 8 2019
@author: gbuster
"""
import numpy as np
import pandas as pd
import logging
from rex.utilities.solar_position import SolarPosition
from rex.utilities.utilities import get_lat_lon_cols
from nsrdb.all_sky import CLEAR_TYPES
from nsrdb.all_sky.all_sky import all_sky
from nsrdb.utilities.interpolation import temporal_lin, temporal_step
logger = logging.getLogger(__name__)
def make_time_index(year, frequency, set_timezone=True):
    """Make the NSRDB target time index.

    Parameters
    ----------
    year : int
        Year for time index.
    frequency : str
        String in the Pandas frequency format, e.g. '5min'.
    set_timezone : bool
        Flag to set a timezone-aware time index. will be set to UTC with
        zero offset.

    Returns
    -------
    ti : pd.DatetimeIndex
        Pandas datetime index for a full year at the requested resolution.
    """
    start = '1-1-{}'.format(year)
    end = '1-1-{}'.format(year + 1)
    # drop the final stamp so the index covers exactly one year
    ti = pd.date_range(start, end, freq=frequency)[:-1]
    if set_timezone:
        ti = ti.tz_localize('UTC')
    return ti
def interp_cld_props(data, ti_native, ti_new,
                     var_list=('cld_reff_dcomp', 'cld_opd_dcomp')):
    """Interpolate missing cloud properties (NOT CLOUD TYPE).

    Parameters
    ----------
    data : dict
        Namespace of variables for input to all_sky. Must include the cloud
        variables in var_list and "cloud_type".
    ti_native : pd.DateTimeIndex
        Native time index of the original NSRDB data.
    ti_new : pd.DateTimeIndex
        Intended downscaled time index.
    var_list : list | tuple
        Cloud variables to downscale.

    Returns
    -------
    data : dict
        Namespace of variables with the cloud variables in var_list downscaled
        to the requested ti_new.
    """
    for var in var_list:
        # make sparse dataframe with new time_index (gaps become NaN)
        data[var] = pd.DataFrame(data[var], index=ti_native).reindex(ti_new)
        # find location of bad data (negative values or missing)
        cld_fill_flag = ((data[var] < 0) | data[var].isnull())
        # replace to-fill values with nan so interpolate() fills them below
        data[var].values[cld_fill_flag] = np.nan
        # set clear timesteps cloud props to zero for better transitions
        # NOTE(review): assumes data['cloud_type'] is already on ti_new so the
        # boolean mask aligns with the reindexed frame — confirm with caller
        data[var].values[np.isin(data['cloud_type'], CLEAR_TYPES)] = 0.0
        # interpolate empty values linearly in time; keep the raw ndarray
        data[var] = data[var].interpolate(method='linear', axis=0).values
        logger.debug('Downscaled array for "{}" has shape {} and {} NaN values'
                     .format(var, data[var].shape, np.isnan(data[var]).sum()))
    return data
def downscale_nsrdb(SAM_res, res, frequency='5min',
                    sam_vars=('dhi', 'dni', 'wind_speed', 'air_temperature'),
                    variability_kwargs=None):
    """Downscale the NSRDB resource and return the preloaded SAM_res.

    Parameters
    ----------
    SAM_res : SAMResource
        SAM resource object.
    res : NSRDB
        NSRDB resource handler.
    frequency : str
        String in the Pandas frequency format, e.g. '5min'.
    sam_vars : tuple | list
        Variables to save to SAM resource handler before returning.
    variability_kwargs : Nonetype | dict
        Downscaling kwargs to the NSRDB all sky method call. Should include
        maximum GHI synthetic variability fraction ("var_frac") which
        will be set to 0.05 (5%) if variability_kwargs is None.

    Returns
    -------
    SAM_res : SAMResource
        SAM resource object with downscaled solar resource data loaded.
        Time index and shape are also updated.
    """
    logger.debug('Downscaling NSRDB resource data to "{}".'.format(frequency))
    # variables required for all-sky not including clouds, ti, sza
    var_list = ('aod',
                'surface_pressure',
                'surface_albedo',
                'ssa',
                'asymmetry',
                'alpha',
                'ozone',
                'total_precipitable_water',
                )
    # Indexing variable
    sites_slice = SAM_res.sites_slice
    # get downscaled time_index
    time_index = make_time_index(res.time_index.year[0], frequency)
    SAM_res._time_index = time_index
    SAM_res._shape = (len(time_index), len(SAM_res.sites))
    logger.debug('Native resource time index has length {}: \n{}'
                 .format(len(res.time_index), res.time_index))
    logger.debug('Target resource time index has length {}: \n{}'
                 .format(len(time_index), time_index))
    # downscale variables into an all-sky input variable namespace
    all_sky_ins = {'time_index': time_index}
    for var in var_list:
        # Bug fix: the source array was previously read from ``res`` twice
        # per variable (the first read was immediately discarded); read once.
        arr = temporal_lin(res[var, :, sites_slice], res.time_index,
                           time_index)
        all_sky_ins[var] = arr
        logger.debug('Downscaled array for "{}" has shape {} and {} NaN values'
                     .format(var, arr.shape, np.isnan(arr).sum()))
    # calculate downscaled solar zenith angle
    lat_lon_cols = get_lat_lon_cols(res.meta)
    lat_lon = res.meta.loc[SAM_res.sites, lat_lon_cols]\
        .values.astype(np.float32)
    sza = SolarPosition(time_index, lat_lon).zenith
    all_sky_ins['solar_zenith_angle'] = sza
    logger.debug('Downscaled array for "solar_zenith_angle" '
                 'has shape {} and {} NaN values'
                 .format(sza.shape, np.isnan(sza).sum()))
    # get downscaled cloud properties (categorical type is step-interpolated,
    # continuous properties are linearly interpolated by interp_cld_props)
    all_sky_ins['cloud_type'] = temporal_step(
        res['cloud_type', :, sites_slice], res.time_index, time_index)
    all_sky_ins['cld_opd_dcomp'] = res['cld_opd_dcomp', :, sites_slice]
    all_sky_ins['cld_reff_dcomp'] = res['cld_reff_dcomp', :, sites_slice]
    all_sky_ins = interp_cld_props(all_sky_ins, res.time_index, time_index)
    # add all sky kwargs such as variability
    if variability_kwargs is None:
        variability_kwargs = {'var_frac': 0.05}
    all_sky_ins['variability_kwargs'] = variability_kwargs
    # run all-sky
    logger.debug('Running all-sky for "{}".'.format(SAM_res))
    all_sky_outs = all_sky(**all_sky_ins)
    # set downscaled data to sam resource handler
    for k, v in all_sky_outs.items():
        if k in sam_vars:
            SAM_res[k] = v
    # downscale extra vars needed for SAM but not for all-sky
    for var in sam_vars:
        if var not in SAM_res._res_arrays:
            SAM_res[var] = temporal_lin(res[var, :, sites_slice],
                                        res.time_index, time_index)
    return SAM_res
| [
"logging.getLogger",
"rex.utilities.solar_position.SolarPosition",
"nsrdb.utilities.interpolation.temporal_step",
"nsrdb.all_sky.all_sky.all_sky",
"numpy.isin",
"numpy.isnan",
"rex.utilities.utilities.get_lat_lon_cols",
"pandas.DataFrame",
"nsrdb.utilities.interpolation.temporal_lin"
] | [((443, 470), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (460, 470), False, 'import logging\n'), ((5120, 5146), 'rex.utilities.utilities.get_lat_lon_cols', 'get_lat_lon_cols', (['res.meta'], {}), '(res.meta)\n', (5136, 5146), False, 'from rex.utilities.utilities import get_lat_lon_cols\n'), ((5576, 5652), 'nsrdb.utilities.interpolation.temporal_step', 'temporal_step', (["res['cloud_type', :, sites_slice]", 'res.time_index', 'time_index'], {}), "(res['cloud_type', :, sites_slice], res.time_index, time_index)\n", (5589, 5652), False, 'from nsrdb.utilities.interpolation import temporal_lin, temporal_step\n'), ((6173, 6195), 'nsrdb.all_sky.all_sky.all_sky', 'all_sky', ([], {}), '(**all_sky_ins)\n', (6180, 6195), False, 'from nsrdb.all_sky.all_sky import all_sky\n'), ((4782, 4848), 'nsrdb.utilities.interpolation.temporal_lin', 'temporal_lin', (['res[var, :, sites_slice]', 'res.time_index', 'time_index'], {}), '(res[var, :, sites_slice], res.time_index, time_index)\n', (4794, 4848), False, 'from nsrdb.utilities.interpolation import temporal_lin, temporal_step\n'), ((5249, 5283), 'rex.utilities.solar_position.SolarPosition', 'SolarPosition', (['time_index', 'lat_lon'], {}), '(time_index, lat_lon)\n', (5262, 5283), False, 'from rex.utilities.solar_position import SolarPosition\n'), ((2353, 2393), 'numpy.isin', 'np.isin', (["data['cloud_type']", 'CLEAR_TYPES'], {}), "(data['cloud_type'], CLEAR_TYPES)\n", (2360, 2393), True, 'import numpy as np\n'), ((6496, 6562), 'nsrdb.utilities.interpolation.temporal_lin', 'temporal_lin', (['res[var, :, sites_slice]', 'res.time_index', 'time_index'], {}), '(res[var, :, sites_slice], res.time_index, time_index)\n', (6508, 6562), False, 'from nsrdb.utilities.interpolation import temporal_lin, temporal_step\n'), ((2005, 2045), 'pandas.DataFrame', 'pd.DataFrame', (['data[var]'], {'index': 'ti_native'}), '(data[var], index=ti_native)\n', (2017, 2045), True, 'import pandas as pd\n'), ((5483, 5496), 
'numpy.isnan', 'np.isnan', (['sza'], {}), '(sza)\n', (5491, 5496), True, 'import numpy as np\n'), ((2643, 2662), 'numpy.isnan', 'np.isnan', (['data[var]'], {}), '(data[var])\n', (2651, 2662), True, 'import numpy as np\n'), ((5032, 5045), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (5040, 5045), True, 'import numpy as np\n')] |
import streamlit as st
import pandas as pd
import numpy as np
import pickle
import base64
@st.cache(suppress_st_warning=True)
def get_fvalue(val):
feature_dict = {"No":1,"Yes":2}
for key,value in feature_dict.items():
if val == key:
return value
def get_value(val,my_dict):
for key,value in my_dict.items():
if val == key:
return value
app_mode = st.sidebar.selectbox('Select Page',['Home','Prediction'])
if app_mode=='Home':
st.title('LOAN PREDICTION :')
st.image('hipster_loan-1.jpg')
st.write('App realised by : <NAME>')
elif app_mode =='Prediction':
st.image('slider-short-3.jpg')
st.subheader('Sir/Mme , YOU need to fill all neccesary informations in order to get a reply to your loan request !')
st.sidebar.header("Informations about the client :")
gender_dict = {"Male":1,"Female":2}
feature_dict = {"No":1,"Yes":2}
edu={'Graduate':1,'Not Graduate':2}
prop={'Rural':1,'Urban':2,'Semiurban':3}
Gender=st.sidebar.radio('Gender',tuple(gender_dict.keys()))
Married=st.sidebar.radio('Married',tuple(feature_dict.keys()))
Self_Employed=st.sidebar.radio('Self Employed',tuple(feature_dict.keys()))
Dependents=st.sidebar.radio('Dependents',options=['0','1' , '2' , '3+'])
Education=st.sidebar.radio('Education',tuple(edu.keys()))
ApplicantIncome=st.sidebar.slider('ApplicantIncome',0,10000,0,)
CoapplicantIncome=st.sidebar.slider('CoapplicantIncome',0,10000,0,)
LoanAmount=st.sidebar.slider('LoanAmount in K$',9.0,700.0,200.0)
Loan_Amount_Term=st.sidebar.selectbox('Loan_Amount_Term',(12.0,36.0,60.0,84.0,120.0,180.0,240.0,300.0,360.0))
Credit_History=st.sidebar.radio('Credit_History',(0.0,1.0))
Property_Area=st.sidebar.radio('Property_Area',tuple(prop.keys()))
class_0 , class_3 , class_1,class_2 = 0,0,0,0
if Dependents == '0':
class_0 = 1
elif Dependents == '1':
class_1 = 1
elif Dependents == '2' :
class_2 = 1
else:
class_3= 1
Rural,Urban,Semiurban=0,0,0
if Property_Area == 'Urban' :
Urban = 1
elif Property_Area == 'Semiurban' :
Semiurban = 1
else :
Rural=1
data1={
'Gender':Gender,
'Married':Married,
'Dependents':[class_0,class_1,class_2,class_3],
'Education':Education,
'ApplicantIncome':ApplicantIncome,
'CoapplicantIncome':CoapplicantIncome,
'Self Employed':Self_Employed,
'LoanAmount':LoanAmount,
'Loan_Amount_Term':Loan_Amount_Term,
'Credit_History':Credit_History,
'Property_Area':[Rural,Urban,Semiurban],
}
feature_list=[ApplicantIncome,CoapplicantIncome,LoanAmount,Loan_Amount_Term,Credit_History,get_value(Gender,gender_dict),get_fvalue(Married),data1['Dependents'][0],data1['Dependents'][1],data1['Dependents'][2],data1['Dependents'][3],get_value(Education,edu),get_fvalue(Self_Employed),data1['Property_Area'][0],data1['Property_Area'][1],data1['Property_Area'][2]]
single_sample = np.array(feature_list).reshape(1,-1)
if st.button("Predict"):
file_ = open("6m-rain.gif", "rb")
contents = file_.read()
data_url = base64.b64encode(contents).decode("utf-8")
file_.close()
file = open("green-cola-no.gif", "rb")
contents = file.read()
data_url_no = base64.b64encode(contents).decode("utf-8")
file.close()
loaded_model = pickle.load(open('Random_Forest.sav', 'rb'))
prediction = loaded_model.predict(single_sample)
if prediction[0] == 0 :
st.error(
'According to our Calculations, you will not get the loan from Bank'
)
st.markdown(
f'<img src="data:image/gif;base64,{data_url_no}" alt="cat gif">',
unsafe_allow_html=True,)
elif prediction[0] == 1 :
st.success(
'Congratulations!! you will get the loan from Bank'
)
st.markdown(
f'<img src="data:image/gif;base64,{data_url}" alt="cat gif">',
unsafe_allow_html=True,
)
| [
"streamlit.image",
"streamlit.cache",
"streamlit.markdown",
"base64.b64encode",
"streamlit.button",
"streamlit.write",
"streamlit.sidebar.header",
"numpy.array",
"streamlit.sidebar.slider",
"streamlit.error",
"streamlit.success",
"streamlit.subheader",
"streamlit.sidebar.selectbox",
"strea... | [((94, 128), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (102, 128), True, 'import streamlit as st\n'), ((411, 470), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select Page"""', "['Home', 'Prediction']"], {}), "('Select Page', ['Home', 'Prediction'])\n", (431, 470), True, 'import streamlit as st\n'), ((494, 523), 'streamlit.title', 'st.title', (['"""LOAN PREDICTION :"""'], {}), "('LOAN PREDICTION :')\n", (502, 523), True, 'import streamlit as st\n'), ((528, 558), 'streamlit.image', 'st.image', (['"""hipster_loan-1.jpg"""'], {}), "('hipster_loan-1.jpg')\n", (536, 558), True, 'import streamlit as st\n'), ((564, 600), 'streamlit.write', 'st.write', (['"""App realised by : <NAME>"""'], {}), "('App realised by : <NAME>')\n", (572, 600), True, 'import streamlit as st\n'), ((658, 688), 'streamlit.image', 'st.image', (['"""slider-short-3.jpg"""'], {}), "('slider-short-3.jpg')\n", (666, 688), True, 'import streamlit as st\n'), ((694, 820), 'streamlit.subheader', 'st.subheader', (['"""Sir/Mme , YOU need to fill all neccesary informations in order to get a reply to your loan request !"""'], {}), "(\n 'Sir/Mme , YOU need to fill all neccesary informations in order to get a reply to your loan request !'\n )\n", (706, 820), True, 'import streamlit as st\n'), ((815, 867), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Informations about the client :"""'], {}), "('Informations about the client :')\n", (832, 867), True, 'import streamlit as st\n'), ((1254, 1315), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Dependents"""'], {'options': "['0', '1', '2', '3+']"}), "('Dependents', options=['0', '1', '2', '3+'])\n", (1270, 1315), True, 'import streamlit as st\n'), ((1398, 1447), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""ApplicantIncome"""', '(0)', '(10000)', '(0)'], {}), "('ApplicantIncome', 0, 10000, 0)\n", (1415, 1447), True, 'import streamlit as st\n'), ((1468, 
1519), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""CoapplicantIncome"""', '(0)', '(10000)', '(0)'], {}), "('CoapplicantIncome', 0, 10000, 0)\n", (1485, 1519), True, 'import streamlit as st\n'), ((1533, 1589), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""LoanAmount in K$"""', '(9.0)', '(700.0)', '(200.0)'], {}), "('LoanAmount in K$', 9.0, 700.0, 200.0)\n", (1550, 1589), True, 'import streamlit as st\n'), ((1608, 1714), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Loan_Amount_Term"""', '(12.0, 36.0, 60.0, 84.0, 120.0, 180.0, 240.0, 300.0, 360.0)'], {}), "('Loan_Amount_Term', (12.0, 36.0, 60.0, 84.0, 120.0, \n 180.0, 240.0, 300.0, 360.0))\n", (1628, 1714), True, 'import streamlit as st\n'), ((1720, 1766), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Credit_History"""', '(0.0, 1.0)'], {}), "('Credit_History', (0.0, 1.0))\n", (1736, 1766), True, 'import streamlit as st\n'), ((3082, 3102), 'streamlit.button', 'st.button', (['"""Predict"""'], {}), "('Predict')\n", (3091, 3102), True, 'import streamlit as st\n'), ((3037, 3059), 'numpy.array', 'np.array', (['feature_list'], {}), '(feature_list)\n', (3045, 3059), True, 'import numpy as np\n'), ((3607, 3685), 'streamlit.error', 'st.error', (['"""According to our Calculations, you will not get the loan from Bank"""'], {}), "('According to our Calculations, you will not get the loan from Bank')\n", (3615, 3685), True, 'import streamlit as st\n'), ((3708, 3813), 'streamlit.markdown', 'st.markdown', (['f"""<img src="data:image/gif;base64,{data_url_no}" alt="cat gif">"""'], {'unsafe_allow_html': '(True)'}), '(f\'<img src="data:image/gif;base64,{data_url_no}" alt="cat gif">\',\n unsafe_allow_html=True)\n', (3719, 3813), True, 'import streamlit as st\n'), ((3197, 3223), 'base64.b64encode', 'base64.b64encode', (['contents'], {}), '(contents)\n', (3213, 3223), False, 'import base64\n'), ((3366, 3392), 'base64.b64encode', 'base64.b64encode', (['contents'], {}), '(contents)\n', (3382, 3392), 
False, 'import base64\n'), ((3866, 3929), 'streamlit.success', 'st.success', (['"""Congratulations!! you will get the loan from Bank"""'], {}), "('Congratulations!! you will get the loan from Bank')\n", (3876, 3929), True, 'import streamlit as st\n'), ((3952, 4054), 'streamlit.markdown', 'st.markdown', (['f"""<img src="data:image/gif;base64,{data_url}" alt="cat gif">"""'], {'unsafe_allow_html': '(True)'}), '(f\'<img src="data:image/gif;base64,{data_url}" alt="cat gif">\',\n unsafe_allow_html=True)\n', (3963, 4054), True, 'import streamlit as st\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
sine
~~~~
Run a TSM procedure on a signal generated with numpy.
"""
# pylint: disable=invalid-name
import numpy as np
import sounddevice as sd
from audiotsm import wsola
from audiotsm.io.array import ArrayReader, ArrayWriter
# The parameters of the input signal
length = 1 # in seconds
samplerate = 44100 # in Hz
frequency = 440 # an A4
# Generate the input signal
time = np.linspace(0, length, int(length * samplerate))
input_signal = np.sin(np.pi * frequency * time).reshape((1, -1))
# Run the TSM procedure
reader = ArrayReader(input_signal)
writer = ArrayWriter(channels=1)
tsm = wsola(channels=1, speed=0.5)
tsm.run(reader, writer)
# Play the output
# This example was written to show how to use an ArrayWriter. If you want to
# play the output of a TSM procedure you should use an
# audiotsm.io.stream.StreamWriter.
sd.play(np.ascontiguousarray(writer.data.T), samplerate, blocking=True)
| [
"audiotsm.io.array.ArrayWriter",
"numpy.sin",
"numpy.ascontiguousarray",
"audiotsm.io.array.ArrayReader",
"audiotsm.wsola"
] | [((580, 605), 'audiotsm.io.array.ArrayReader', 'ArrayReader', (['input_signal'], {}), '(input_signal)\n', (591, 605), False, 'from audiotsm.io.array import ArrayReader, ArrayWriter\n'), ((615, 638), 'audiotsm.io.array.ArrayWriter', 'ArrayWriter', ([], {'channels': '(1)'}), '(channels=1)\n', (626, 638), False, 'from audiotsm.io.array import ArrayReader, ArrayWriter\n'), ((646, 674), 'audiotsm.wsola', 'wsola', ([], {'channels': '(1)', 'speed': '(0.5)'}), '(channels=1, speed=0.5)\n', (651, 674), False, 'from audiotsm import wsola\n'), ((893, 928), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['writer.data.T'], {}), '(writer.data.T)\n', (913, 928), True, 'import numpy as np\n'), ((496, 528), 'numpy.sin', 'np.sin', (['(np.pi * frequency * time)'], {}), '(np.pi * frequency * time)\n', (502, 528), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2 as cv
import random
import paddle
class DataLogger(object):
def __init__(self):
self.clear()
def clear(self):
self.value = 0
self.sum = 0
self.cnt = 0
self.avg = 0
def update(self, value, n=1):
self.value = value
self.sum += value * n
self.cnt += n
self._cal_avg()
def _cal_avg(self):
self.avg = self.sum / self.cnt
class data_read(paddle.io.Dataset):
def __init__(self, train=True, sigma=1,
scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
self.is_train = train # training set or test set
self.inputResH =512
self.inputResW = 512
self.outputResH = 512
self.outputResW = 512
self.scale_factor = scale_factor
open_flie = open('train.txt','r')
X_data_flie=open_flie.read().split('\n')
open_flie.close()
batch_img = []
batch_lab = []
couns_num =len(X_data_flie)-1 # 150000
count = 0
for a_data in X_data_flie:
if count>couns_num:
break
batch_img.append(a_data.split(' ')[1])
batch_lab.append(a_data.split(' ')[0])
count += 1
batch_img = np.array(batch_img)
batch_lab = np.array(batch_lab)
# train
self.imgname_coco_train = batch_img
self.part_coco_train = batch_lab
self.size_train = self.imgname_coco_train.shape[0]
def __getitem__(self, index): # 如果在类中定义了__getitem__()方法,那么他的实例对象(假设为P)就可以这样P[key]取值。当实例对象做P[key]运算时,就会调用类中的__getitem__()方法。
# sf = self.scale_factor
# if self.is_train:
imgname = self.imgname_coco_train[index]
part = self.part_coco_train[index]
inp_img = cv.imread(imgname)
out_img = cv.imread(part, 0)
# out_img = cv.resize(out_img, (self.outputResW, self.outputResH), interpolation=cv.INTER_CUBIC)
# inp_img = cv.resize(inp_img, (self.inputResW, self.inputResH), interpolation=cv.INTER_CUBIC)
left = random.randint(0, inp_img.shape[1]-1-self.outputResH)
top = random.randint(0, inp_img.shape[0]-1-self.outputResH)
cinp = inp_img[top:top+self.outputResH, left:left+self.outputResH]
cout = out_img[top:top+self.outputResH, left:left+self.outputResH]
inp_img = cinp
out_img = cout
# #
# cv.imshow('inp_imgy', inp_img)
# cv.imshow('out_img', out_img * 255)
# cv.waitKey()
if (random.randint(0, 1)): # 水平翻转
# fp = random.randint(0, 1)
inp_img = cv.flip(inp_img, 1)
# out_img = cv.flip(out_img, fp)
if (random.randint(0, 10) > 6): # 随机遮盖
randx = random.randint(10, 200)
randy = random.randint(10, 200)
ix = random.randint(0, self.inputResH - randx - 1)
iy = random.randint(0, self.inputResH - randy - 1)
# if (random.randint(0, 1)):
# inp_img[ix:ix+randx,iy:iy+randy]=[0,0,0]
# else:
inp_img[ix:ix + randx, iy:iy + randy] = [255, 255, 255]
out_img[ix:ix + randx, iy:iy + randy] = 0
if (random.randint(0, 10) > 6): # 随机旋转
angle = random.randint(-10, 10)
ix = random.randint(200, 300)
iy = random.randint(200, 300)
bei = random.randint(8, 10) / 10
M = cv.getRotationMatrix2D((ix, iy), angle, bei)
inp_img = cv.warpAffine(inp_img, M, (self.inputResH, self.inputResW),borderValue=(255,255,255)) # ,flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE
out_img = cv.warpAffine(out_img, M, (
self.inputResH, self.inputResW)) # ,flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE
if (random.randint(0, 10) > 6): # 随机填充
left = random.randint(1, 80)
top = random.randint(1, 80)
right = random.randint(1, 80)
bottom = random.randint(1, 80)
new_img_inp = np.ones((top + bottom + self.inputResH, left + right + self.inputResW, 3), np.uint8) * 255
new_img_out = np.zeros((top + bottom + self.inputResH, left + right + self.inputResW), np.uint8)
new_img_inp[top:top + self.inputResH, left:left + self.inputResW, :] = inp_img.copy()
new_img_out[top:top + self.inputResH, left: left + self.inputResW] = out_img
inp_img = cv.resize(new_img_inp, (self.inputResW, self.inputResH), interpolation=cv.INTER_CUBIC)
out_img = cv.resize(new_img_out, (self.outputResW, self.outputResH), interpolation=cv.INTER_CUBIC)
img_ = inp_img[:, :, ::-1].transpose((2, 0, 1)).copy()
inp = paddle.to_tensor(np.array(img_, np.float32) / 255.0)
out = paddle.to_tensor(np.array([out_img], np.float32))
return inp, out
def __len__(self):
return self.size_train
# if self.is_train:
# return self.size_train
# else:
# return self.size_val
from tqdm import tqdm
if __name__ == '__main__':
train_dataset = data_read(train = True)
train_loader = paddle.io.DataLoader(train_dataset, batch_size=2, shuffle=True, num_workers=0, drop_last=True)
lossLogger = DataLogger()
train_loader_desc = tqdm(train_dataset)
for i, (inps, labels) in enumerate(train_loader_desc):
lossLogger.update(i,2)
train_loader_desc.set_description('loss: {loss:.8f}'.format(loss=lossLogger.avg, )) | [
"cv2.warpAffine",
"cv2.flip",
"numpy.ones",
"tqdm.tqdm",
"numpy.array",
"numpy.zeros",
"paddle.io.DataLoader",
"cv2.getRotationMatrix2D",
"cv2.resize",
"cv2.imread",
"random.randint"
] | [((5323, 5422), 'paddle.io.DataLoader', 'paddle.io.DataLoader', (['train_dataset'], {'batch_size': '(2)', 'shuffle': '(True)', 'num_workers': '(0)', 'drop_last': '(True)'}), '(train_dataset, batch_size=2, shuffle=True, num_workers\n =0, drop_last=True)\n', (5343, 5422), False, 'import paddle\n'), ((5476, 5495), 'tqdm.tqdm', 'tqdm', (['train_dataset'], {}), '(train_dataset)\n', (5480, 5495), False, 'from tqdm import tqdm\n'), ((1359, 1378), 'numpy.array', 'np.array', (['batch_img'], {}), '(batch_img)\n', (1367, 1378), True, 'import numpy as np\n'), ((1400, 1419), 'numpy.array', 'np.array', (['batch_lab'], {}), '(batch_lab)\n', (1408, 1419), True, 'import numpy as np\n'), ((1898, 1916), 'cv2.imread', 'cv.imread', (['imgname'], {}), '(imgname)\n', (1907, 1916), True, 'import cv2 as cv\n'), ((1938, 1956), 'cv2.imread', 'cv.imread', (['part', '(0)'], {}), '(part, 0)\n', (1947, 1956), True, 'import cv2 as cv\n'), ((2185, 2242), 'random.randint', 'random.randint', (['(0)', '(inp_img.shape[1] - 1 - self.outputResH)'], {}), '(0, inp_img.shape[1] - 1 - self.outputResH)\n', (2199, 2242), False, 'import random\n'), ((2254, 2311), 'random.randint', 'random.randint', (['(0)', '(inp_img.shape[0] - 1 - self.outputResH)'], {}), '(0, inp_img.shape[0] - 1 - self.outputResH)\n', (2268, 2311), False, 'import random\n'), ((2652, 2672), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2666, 2672), False, 'import random\n'), ((2747, 2766), 'cv2.flip', 'cv.flip', (['inp_img', '(1)'], {}), '(inp_img, 1)\n', (2754, 2766), True, 'import cv2 as cv\n'), ((2828, 2849), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2842, 2849), False, 'import random\n'), ((2885, 2908), 'random.randint', 'random.randint', (['(10)', '(200)'], {}), '(10, 200)\n', (2899, 2908), False, 'import random\n'), ((2930, 2953), 'random.randint', 'random.randint', (['(10)', '(200)'], {}), '(10, 200)\n', (2944, 2953), False, 'import random\n'), ((2972, 3017), 
'random.randint', 'random.randint', (['(0)', '(self.inputResH - randx - 1)'], {}), '(0, self.inputResH - randx - 1)\n', (2986, 3017), False, 'import random\n'), ((3036, 3081), 'random.randint', 'random.randint', (['(0)', '(self.inputResH - randy - 1)'], {}), '(0, self.inputResH - randy - 1)\n', (3050, 3081), False, 'import random\n'), ((3344, 3365), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (3358, 3365), False, 'import random\n'), ((3401, 3424), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (3415, 3424), False, 'import random\n'), ((3443, 3467), 'random.randint', 'random.randint', (['(200)', '(300)'], {}), '(200, 300)\n', (3457, 3467), False, 'import random\n'), ((3486, 3510), 'random.randint', 'random.randint', (['(200)', '(300)'], {}), '(200, 300)\n', (3500, 3510), False, 'import random\n'), ((3574, 3618), 'cv2.getRotationMatrix2D', 'cv.getRotationMatrix2D', (['(ix, iy)', 'angle', 'bei'], {}), '((ix, iy), angle, bei)\n', (3596, 3618), True, 'import cv2 as cv\n'), ((3644, 3737), 'cv2.warpAffine', 'cv.warpAffine', (['inp_img', 'M', '(self.inputResH, self.inputResW)'], {'borderValue': '(255, 255, 255)'}), '(inp_img, M, (self.inputResH, self.inputResW), borderValue=(\n 255, 255, 255))\n', (3657, 3737), True, 'import cv2 as cv\n'), ((3810, 3869), 'cv2.warpAffine', 'cv.warpAffine', (['out_img', 'M', '(self.inputResH, self.inputResW)'], {}), '(out_img, M, (self.inputResH, self.inputResW))\n', (3823, 3869), True, 'import cv2 as cv\n'), ((3956, 3977), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (3970, 3977), False, 'import random\n'), ((4014, 4035), 'random.randint', 'random.randint', (['(1)', '(80)'], {}), '(1, 80)\n', (4028, 4035), False, 'import random\n'), ((4055, 4076), 'random.randint', 'random.randint', (['(1)', '(80)'], {}), '(1, 80)\n', (4069, 4076), False, 'import random\n'), ((4098, 4119), 'random.randint', 'random.randint', (['(1)', '(80)'], {}), '(1, 80)\n', (4112, 4119), 
False, 'import random\n'), ((4142, 4163), 'random.randint', 'random.randint', (['(1)', '(80)'], {}), '(1, 80)\n', (4156, 4163), False, 'import random\n'), ((4309, 4396), 'numpy.zeros', 'np.zeros', (['(top + bottom + self.inputResH, left + right + self.inputResW)', 'np.uint8'], {}), '((top + bottom + self.inputResH, left + right + self.inputResW), np\n .uint8)\n', (4317, 4396), True, 'import numpy as np\n'), ((4604, 4695), 'cv2.resize', 'cv.resize', (['new_img_inp', '(self.inputResW, self.inputResH)'], {'interpolation': 'cv.INTER_CUBIC'}), '(new_img_inp, (self.inputResW, self.inputResH), interpolation=cv.\n INTER_CUBIC)\n', (4613, 4695), True, 'import cv2 as cv\n'), ((4714, 4807), 'cv2.resize', 'cv.resize', (['new_img_out', '(self.outputResW, self.outputResH)'], {'interpolation': 'cv.INTER_CUBIC'}), '(new_img_out, (self.outputResW, self.outputResH), interpolation=cv\n .INTER_CUBIC)\n', (4723, 4807), True, 'import cv2 as cv\n'), ((4971, 5002), 'numpy.array', 'np.array', (['[out_img]', 'np.float32'], {}), '([out_img], np.float32)\n', (4979, 5002), True, 'import numpy as np\n'), ((3530, 3551), 'random.randint', 'random.randint', (['(8)', '(10)'], {}), '(8, 10)\n', (3544, 3551), False, 'import random\n'), ((4191, 4279), 'numpy.ones', 'np.ones', (['(top + bottom + self.inputResH, left + right + self.inputResW, 3)', 'np.uint8'], {}), '((top + bottom + self.inputResH, left + right + self.inputResW, 3),\n np.uint8)\n', (4198, 4279), True, 'import numpy as np\n'), ((4903, 4929), 'numpy.array', 'np.array', (['img_', 'np.float32'], {}), '(img_, np.float32)\n', (4911, 4929), True, 'import numpy as np\n')] |
try:
raise ModuleNotFoundError
from numba import jit
using_numba = True
except ModuleNotFoundError:
print('Module numba not found. Proceeding without, probably slower.')
using_numba = False
def jit(stuff):
"""Blank placeholder decorator for numba JIT, used in the absence of numba."""
return stuff
import numpy as np
from PIL import Image
# from pprint import pprint
def pil_analysis(pilimg):
"""Take image as a PIL Image object, return its palette in a dictionary of the form
tuple(color):list(color) for further editing of the palette."""
return palette_analysis(np.array(pilimg))
def img_dimensions(img):
"""Return dimensions of an image in numpy array form. Most of the times, equivalent to np.shape(img)."""
try:
width, height, channels = np.shape(img)
except ValueError:
width, height = np.shape(img)
channels = 1
return (width, height, channels)
def flat_img(img, dims=None):
"""Return the image flattened, i.e. a 2-dimensional array, where the second dimension maps only colors."""
if dims is None:
dims = img_dimensions(img)
return np.reshape(img, (dims[0]*dims[1], dims[2]))
@jit
def make_palette(flatimg):
output = np.unique(flatimg, axis=0)
"""Return all the colors in a flattened image."""
return np.unique(flatimg, axis=0)
def dict_palette(palette):
"""Take the palette as in the output of make_palette, return them in a dictionary of the form
tuple(color):list(color) for further editing of the palette."""
return {tuple(col) : list(col) for col in palette}
def palette_analysis(img):
"""Take image, return its palette in a dictionary of the form
tuple(color):list(color) for further editing of the palette."""
return dict_palette(make_palette(flat_img(img)))
def crude_remappers(flatimg, dictpalette):
"""Not to be used alone, responsible for internal transformation. Dict comprehension just extracted to allow JITting
of the rest with numba."""
return {tuple(dictpalette[col]): flatimg == np.array(col) for col in dictpalette.keys()}
@jit
def cleared_remappers(crudemap):
"""Return a Boolean array where each color now belongs."""
for col in crudemap.keys():
crudemap[col] = np.all(crudemap[col], axis=1)
return crudemap
@jit
def remap(flatimg, clearmap):
"""Return a flattened form of an image after palette swap according to the mapping generated by cleared_remappers."""
for col in clearmap.keys():
flatimg[clearmap[col]] = np.array(col)
return flatimg
def paletteswap_flat(flatimg, dictpalette):
"""Return a flattened image according to the dictionary of old and new palette as made by palette_analysis."""
return remap(flatimg, cleared_remappers(crude_remappers(flatimg, dictpalette)))
def paletteswap(img, dictpalette):
"""Return an array representing an image with palette-swapped colors. Takes the original image in an array
representation and a dictionary like one generated from palette_analysis, just with changed colors."""
return np.reshape(paletteswap_flat(flat_img(img), dictpalette), img_dimensions(img))
def fullswap(pilimg, dictpalette):
"""Return a PIL Image object from taken PIL Image object and the
dictionary of a palette (palette like the one generated from pil_analysis]."""
return Image.fromarray(paletteswap(np.array(pilimg), dictpalette))
| [
"numpy.all",
"numpy.reshape",
"numpy.unique",
"numpy.array",
"numpy.shape"
] | [((1194, 1239), 'numpy.reshape', 'np.reshape', (['img', '(dims[0] * dims[1], dims[2])'], {}), '(img, (dims[0] * dims[1], dims[2]))\n', (1204, 1239), True, 'import numpy as np\n'), ((1288, 1314), 'numpy.unique', 'np.unique', (['flatimg'], {'axis': '(0)'}), '(flatimg, axis=0)\n', (1297, 1314), True, 'import numpy as np\n'), ((1382, 1408), 'numpy.unique', 'np.unique', (['flatimg'], {'axis': '(0)'}), '(flatimg, axis=0)\n', (1391, 1408), True, 'import numpy as np\n'), ((637, 653), 'numpy.array', 'np.array', (['pilimg'], {}), '(pilimg)\n', (645, 653), True, 'import numpy as np\n'), ((840, 853), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (848, 853), True, 'import numpy as np\n'), ((2341, 2370), 'numpy.all', 'np.all', (['crudemap[col]'], {'axis': '(1)'}), '(crudemap[col], axis=1)\n', (2347, 2370), True, 'import numpy as np\n'), ((2621, 2634), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (2629, 2634), True, 'import numpy as np\n'), ((903, 916), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (911, 916), True, 'import numpy as np\n'), ((2132, 2145), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (2140, 2145), True, 'import numpy as np\n'), ((3484, 3500), 'numpy.array', 'np.array', (['pilimg'], {}), '(pilimg)\n', (3492, 3500), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import multiprocessing
if __name__ == '__main__':
multiprocessing.set_start_method('forkserver')
import re
from itertools import product, chain
import warnings
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib as mpl
import matplotlib.pyplot as plt
import nltk
from nltk.corpus import stopwords
from sklearn_pandas import DataFrameMapper, gen_features
import sklearn
from sklearn.model_selection import train_test_split, cross_validate, cross_val_predict, LeavePOut
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import f1_score, confusion_matrix, make_scorer
from sklearn.ensemble import VotingClassifier
import xgboost
from joblib import dump, load
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
np.random.seed(0)
stop = stopwords.words('english')
def init_nltk():
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
def build_pipeline():
vectorizer = TfidfVectorizer(ngram_range=(1, 4))
X = vectorizer.fit_transform(course_df['Title'])
def clean_str_col(df, col_names):
"""Removes stop words and other extra characters
df: DataFrame
col_names: list of strs
"""
df = df[col_names].copy()
df = df.applymap(lambda x: re.sub('[^a-zA-Z ]+', '', x.replace('\"', '').lower()))
return df.applymap(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
def clean_csc_courses():
course_df = pd.DataFrame(pd.read_csv('ncsu_course_info.csv', header=None).values.reshape((-1, 2)), columns = ['Title', 'Description'])
course_df[['Title', 'Description']] = clean_str_col(course_df, ['Title', 'Description'])
course_df.columns = ['Name', 'Desc']
course_df.to_csv('clean_courses.csv', index=0)
def clean_software_desc():
soft_df = pd.read_csv('soft_descs.csv')
soft_df[['Desc']] = clean_str_col(soft_df, ['Desc'])
soft_df.to_csv('clean_soft_descs.csv', index=0)
def generate_raw_train():
soft_df = pd.read_csv('clean_soft_descs.csv')
course_df = pd.read_csv('three_categories_labeled_data.csv')
soft_cols = [s+'_soft' for s in soft_df.columns]
course_cols = [c+'_course' for c in course_df.columns]
soft_list = soft_df.values.tolist()
course_list = course_df.values.tolist()
out_df = pd.DataFrame(
[list(chain(*r)) for r in list(product(soft_list, course_list))],
columns=soft_cols+course_cols
)
out_df['Label'] = (out_df['Name_soft'] == out_df['Label_course']).astype(int)
out_df.drop(columns=['Name_soft', 'Label_course'], inplace=True)
out_df.columns = ['soft_desc', 'course_name', 'course_desc', 'label']
out_df.to_csv('raw_ml_construction.csv', index=0)
def build_model():
# data_df = pd.read_csv('raw_ml_construction.csv')
data_df = pd.read_csv('three_categories_labeled_data.csv')
data_df.columns = ['course_name', 'course_desc', 'label']
# quit()
x_data_df = data_df.drop(columns='label')
y_data_df = data_df[['label']]
print(y_data_df['label'].value_counts())
# X_train, X_test, y_train, y_test = train_test_split(x_data_df,
# y_data_df, test_size=0.2)
tf_idf_args = {
'sublinear_tf': True,
'max_df': 0.3,
'norm': 'l2',
'min_df': 0.05
}
data_mapper = DataFrameMapper(
[
# ("soft_desc", sklearn.feature_extraction.text.TfidfVectorizer(**tf_idf_args, ngram_range=(1, 4), max_features=300)),
("course_name", sklearn.feature_extraction.text.TfidfVectorizer(**tf_idf_args, ngram_range=(1, 3), max_features=20)),
("course_desc", sklearn.feature_extraction.text.TfidfVectorizer(**tf_idf_args, ngram_range=(2, 5), max_features=80)),
],
df_out=True,
)
# X_train = data_mapper.fit_transform(X_train)
# selected_cols = X_train.columns
# X_train = X_train.values
# X_test = data_mapper.transform(X_test).values
X_data = data_mapper.fit_transform(x_data_df)
y_data = y_data_df
model = xgboost.XGBClassifier(
max_depth = 12,
subsample = 0.8,
scale_pos_weight = 9
)
# cv = LeavePOut(88)
f1 = make_scorer(f1_score, average='weighted')
cv_scores = cross_validate(
model,
X_data,
y_data,
scoring=f1,
cv=5,
return_train_score=False,
# return_estimator=True,
# fit_params=fit_params
)
test_cv_scores = cv_scores["test_score"]
test_cv_summary = (cv_scores["test_score"].mean(), cv_scores["test_score"].std())
print('Cross val scores: ', ['{:.3f}'.format(x) for x in test_cv_scores])
print('Mean and Std: ', ['{:.3f}'.format(x) for x in test_cv_summary])
model.fit(X_data, y_data)
dump(model, 'model.joblib')
y_pred = cross_val_predict(model, X_data, y_data, cv=3)
print('Confusion Matrix')
cm = confusion_matrix(y_data, y_pred)
print(cm)
fig, ax = plt.subplots(figsize=(3, 10))
xgboost.plot_importance(model, max_num_features=10, ax=ax)
plt.gcf().subplots_adjust(left=0.30)
plt.title("XGBOOST Feature Importance")
# sn.heatmap(cm)
plt.show()
if __name__ == '__main__':
# clean_csc_courses()
# clean_software_desc()
# generate_raw_train()
build_model()
| [
"itertools.chain",
"nltk.download",
"pandas.read_csv",
"multiprocessing.set_start_method",
"nltk.corpus.stopwords.words",
"xgboost.plot_importance",
"itertools.product",
"numpy.random.seed",
"joblib.dump",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.gcf",
"sklearn.model_selection.cr... | [((782, 854), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'DataConversionWarning'}), "(action='ignore', category=DataConversionWarning)\n", (805, 854), False, 'import warnings\n'), ((856, 873), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (870, 873), True, 'import numpy as np\n'), ((881, 907), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (896, 907), False, 'from nltk.corpus import stopwords\n'), ((76, 122), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""forkserver"""'], {}), "('forkserver')\n", (108, 122), False, 'import multiprocessing\n'), ((930, 952), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (943, 952), False, 'import nltk\n'), ((957, 983), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (970, 983), False, 'import nltk\n'), ((988, 1031), 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {}), "('averaged_perceptron_tagger')\n", (1001, 1031), False, 'import nltk\n'), ((1919, 1948), 'pandas.read_csv', 'pd.read_csv', (['"""soft_descs.csv"""'], {}), "('soft_descs.csv')\n", (1930, 1948), True, 'import pandas as pd\n'), ((2099, 2134), 'pandas.read_csv', 'pd.read_csv', (['"""clean_soft_descs.csv"""'], {}), "('clean_soft_descs.csv')\n", (2110, 2134), True, 'import pandas as pd\n'), ((2151, 2199), 'pandas.read_csv', 'pd.read_csv', (['"""three_categories_labeled_data.csv"""'], {}), "('three_categories_labeled_data.csv')\n", (2162, 2199), True, 'import pandas as pd\n'), ((2914, 2962), 'pandas.read_csv', 'pd.read_csv', (['"""three_categories_labeled_data.csv"""'], {}), "('three_categories_labeled_data.csv')\n", (2925, 2962), True, 'import pandas as pd\n'), ((4132, 4202), 'xgboost.XGBClassifier', 'xgboost.XGBClassifier', ([], {'max_depth': '(12)', 'subsample': '(0.8)', 'scale_pos_weight': '(9)'}), 
'(max_depth=12, subsample=0.8, scale_pos_weight=9)\n', (4153, 4202), False, 'import xgboost\n'), ((4275, 4316), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_score'], {'average': '"""weighted"""'}), "(f1_score, average='weighted')\n", (4286, 4316), False, 'from sklearn.metrics import f1_score, confusion_matrix, make_scorer\n'), ((4333, 4419), 'sklearn.model_selection.cross_validate', 'cross_validate', (['model', 'X_data', 'y_data'], {'scoring': 'f1', 'cv': '(5)', 'return_train_score': '(False)'}), '(model, X_data, y_data, scoring=f1, cv=5, return_train_score=\n False)\n', (4347, 4419), False, 'from sklearn.model_selection import train_test_split, cross_validate, cross_val_predict, LeavePOut\n'), ((4856, 4883), 'joblib.dump', 'dump', (['model', '"""model.joblib"""'], {}), "(model, 'model.joblib')\n", (4860, 4883), False, 'from joblib import dump, load\n'), ((4897, 4943), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['model', 'X_data', 'y_data'], {'cv': '(3)'}), '(model, X_data, y_data, cv=3)\n', (4914, 4943), False, 'from sklearn.model_selection import train_test_split, cross_validate, cross_val_predict, LeavePOut\n'), ((4983, 5015), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_data', 'y_pred'], {}), '(y_data, y_pred)\n', (4999, 5015), False, 'from sklearn.metrics import f1_score, confusion_matrix, make_scorer\n'), ((5044, 5073), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 10)'}), '(figsize=(3, 10))\n', (5056, 5073), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5136), 'xgboost.plot_importance', 'xgboost.plot_importance', (['model'], {'max_num_features': '(10)', 'ax': 'ax'}), '(model, max_num_features=10, ax=ax)\n', (5101, 5136), False, 'import xgboost\n'), ((5182, 5221), 'matplotlib.pyplot.title', 'plt.title', (['"""XGBOOST Feature Importance"""'], {}), "('XGBOOST Feature Importance')\n", (5191, 5221), True, 'import matplotlib.pyplot as plt\n'), ((5247, 5257), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (5255, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5141, 5150), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5148, 5150), True, 'import matplotlib.pyplot as plt\n'), ((2440, 2449), 'itertools.chain', 'chain', (['*r'], {}), '(*r)\n', (2445, 2449), False, 'from itertools import product, chain\n'), ((3602, 3706), 'sklearn.feature_extraction.text.TfidfVectorizer', 'sklearn.feature_extraction.text.TfidfVectorizer', ([], {'ngram_range': '(1, 3)', 'max_features': '(20)'}), '(**tf_idf_args, ngram_range=\n (1, 3), max_features=20)\n', (3649, 3706), False, 'import sklearn\n'), ((3732, 3836), 'sklearn.feature_extraction.text.TfidfVectorizer', 'sklearn.feature_extraction.text.TfidfVectorizer', ([], {'ngram_range': '(2, 5)', 'max_features': '(80)'}), '(**tf_idf_args, ngram_range=\n (2, 5), max_features=80)\n', (3779, 3836), False, 'import sklearn\n'), ((1581, 1629), 'pandas.read_csv', 'pd.read_csv', (['"""ncsu_course_info.csv"""'], {'header': 'None'}), "('ncsu_course_info.csv', header=None)\n", (1592, 1629), True, 'import pandas as pd\n'), ((2465, 2496), 'itertools.product', 'product', (['soft_list', 'course_list'], {}), '(soft_list, course_list)\n', (2472, 2496), False, 'from itertools import product, chain\n')] |
# -*- coding: utf-8 -*-
"""shapeDetection_usingCoordinates.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1vSZNBh1UQxlfz13aA1JPf0_SrPlA0L8J
#Import
"""
import math
import random
import numpy as np
from numpy import asarray
import cv2
from PIL import Image, ImageDraw
import sys
from skimage.feature import hog
from sklearn import svm
from sklearn.metrics import classification_report,accuracy_score
import pickle
import sys
import os
import pathlib
"""#Obtain the path using OS argv"""
folder_path = str(pathlib.Path().absolute()) + "\\models"
#folder_path = "C:\\Users\\khale\\source\\repos\\Kinect-Drawing\\Python_part\\GP_2"
#Obtain the path of the text file using OS argv
data_path = sys.argv[1]
"""#Preprocessing and obtain the data coord of many shapes from the txt file
"""
x_list = []
y_list = []
file_1 = open(data_path)
Data= file_1.readlines()
Coordinates=list()
count=0
Shapes=dict()
data_list_coordinates = []
for i in range(len(Data)):
if "End" in Data[i]:
Shapes[i]=Data[i-1]
for i in range(len(Data)):
if "End" in Data[i]:
Coordinates.append(Data[i-1])
for j in range(len(Coordinates)):
data_list = Coordinates[j].split(" ")
data_list_coordinates.append("Shape")
for item in data_list:
(x, y) = item.split(",")
(x, y) = math.ceil(float(x)), math.ceil(float(y))
data_list_coordinates.append((x, y))
x_list.append(x)
y_list.append(y)
len(Coordinates)
"""# Construct binary image using coords"""
my_screen_width = 1920
my_screen_height = 1080
length=len(data_list_coordinates)
K="Shape"
res1=[]
res2=[]
number_of_shapes=[data_list_coordinates.index(i,1) for i in data_list_coordinates[1:] if i == K]
number_of_shapes.insert(0,1)
res1=[i for i,x in enumerate(data_list_coordinates) if x==K]
res2=[i for i in range(len(data_list_coordinates)) if data_list_coordinates[i]==K]
#number_of_shapes.insert(len(number_of_shapes),length)
# let's create a 6 x 6 matrix with all pixels in black color
img = np.ones((my_screen_width, my_screen_height), np.uint8) * 255
data_list_coordinates1=[i for i in data_list_coordinates if i != K]
for data in data_list_coordinates1:
img[data[0], data[1]] = 0
cv2.imwrite("t1.png", img)
res1
"""# Mirroring"""
# load the image, create the mirrored image, and the result placeholder
img = Image.open("t1.png")
mirror = img.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_90)
mirror.save("t1.png")
"""# Connect points using a thick line"""
# from google.colab.patches import cv2_imshow
img = cv2.imread("t1.png")
for z in range(len(res1)):
(pre_x, pre_y) = data_list_coordinates[res1[z]+1]
if z != (len(res1)-1):
for (x, y) in data_list_coordinates[res1[z]+1:res1[z+1]]:
img = cv2.line(img, (pre_x, pre_y), (x, y), (0, 0, 0), 4)
(pre_x, pre_y) = (x, y)
else:
for (x, y) in data_list_coordinates[res1[z]+1:]:
img = cv2.line(img, (pre_x, pre_y), (x, y), (0, 0, 0), 4)
(pre_x, pre_y) = (x, y)
# save our image as a "png" image
# cv2_imshow(img)
cv2.imwrite("t2.png", img)
# Cropping
x_min = min(x_list)
x_max = max(x_list)
y_min = min(y_list)
y_max = max(y_list)
w = x_max - x_min
h = y_max - y_min
img_orig = cv2.imread('t2.png', 0)
img_crop = img_orig[y_min:y_min + h, x_min:x_min + w]
cv2.imwrite('t2_cropped.png', img_crop)
# -------------------------------------------------------
#print("666")
# Padding
# read image
img = cv2.imread('t2_cropped.png')
ht, wd, cc = img.shape
ww = hh = (math.ceil(max(wd, ht) / 28) + 1) * 28
# create new image of desired size and color (blue) for padding
color = (255, 255, 255)
result = np.full((hh, ww, cc), color, dtype=np.uint8)
# compute center offset
xx = (ww - wd) // 2
yy = (hh - ht) // 2
# copy img image into center of result image
result[yy:yy + ht, xx:xx + wd] = img
# view result
# cv2_imshow(result)
# save result
cv2.imwrite("padded_cropped_img.png", result)
# -------------------------------------------------------
#print("777")
# resizing PIL ->perfect
# image = Image.open('padded_cropped_img.png')
# new_image = image.resize((28, 28))
# new_image.save('padded_cropped_shrinked_img.png')
# final_img_path = 'padded_cropped_shrinked_img.png'
# # -------------------------------------------------------
final_img_path = "padded_cropped_img.png"
# # Preprocessing
# final_img_path = 'padded_cropped_shrinked_img.png'
# from keras.preprocessing import image
# img = image.load_img(final_img_path, color_mode="grayscale")
# x = image.img_to_array(img)
# x = np.expand_dims(x, axis=0)
# x = x.reshape(1, 784)
# x = x.astype('float32')
# # normalizing the data to help with the training
# x /= 255
# -------------------------------------------------------
# Prediction
def preprocess(img_path): #input an image
# load the image
image = Image.open(img_path)
# convert image to numpy array
img_data = asarray(image)
our_test = []
our_test.append(img_data)
our_test_resized = [cv2.resize(our_test[0], (320, 320))]
ppc = 16
hog_images = []
hog_features = []
for image in our_test_resized:
fd,hog_image = hog(image, orientations=8, pixels_per_cell=(ppc,ppc),cells_per_block=(4, 4),block_norm= 'L2',visualize=True)
hog_images.append(hog_image)
hog_features.append(fd)
hog_features = np.array(hog_features)
return hog_features
#Test1
#folder_path = "C:\\Users\\khale\\Source\\Repos\\Kinect-Drawing\\Python_part\\GP_2"
filename = os.path.join(folder_path, 'Complex_v4_finalized_model.pkl')
loaded_model = pickle.load(open(filename, 'rb'))
#print("888")
predicted_class = loaded_model.predict(preprocess(final_img_path))
prob_of_classes = loaded_model.predict_proba(preprocess(final_img_path))
#print("999")
predicted_class = str(predicted_class)
prob_of_classes = prob_of_classes[0]
prob_of_classes_rounded = []
for class_i in prob_of_classes:
prob_of_classes_rounded.append(int(round(class_i,2) * 100))
#mapping part
if predicted_class[1] == '0' and prob_of_classes_rounded[0] >= 0:
predicted_class = "Candle"
elif predicted_class[1] == '1' and prob_of_classes_rounded[1] >= 0:
predicted_class = "Beer-Mug"
elif predicted_class[1] == '2' and prob_of_classes_rounded[2] >= 0:
predicted_class = "TV"
elif predicted_class[1] == '3' and prob_of_classes_rounded[3] >= 0:
predicted_class = "axe"
elif predicted_class[1] == '4' and prob_of_classes_rounded[4] >= 0:
predicted_class = "House"
elif predicted_class[1] == '5' and prob_of_classes_rounded[5] >= 0:
predicted_class = "bowl"
else:
predicted_class = "I Don't know!"
print(predicted_class + " " + str(prob_of_classes_rounded))
#print(predicted_class + " " + str(max(prob_of_classes_rounded))) | [
"cv2.imwrite",
"PIL.Image.open",
"numpy.ones",
"pathlib.Path",
"cv2.line",
"os.path.join",
"numpy.asarray",
"numpy.array",
"skimage.feature.hog",
"numpy.full",
"cv2.resize",
"cv2.imread"
] | [((2237, 2263), 'cv2.imwrite', 'cv2.imwrite', (['"""t1.png"""', 'img'], {}), "('t1.png', img)\n", (2248, 2263), False, 'import cv2\n'), ((2368, 2388), 'PIL.Image.open', 'Image.open', (['"""t1.png"""'], {}), "('t1.png')\n", (2378, 2388), False, 'from PIL import Image, ImageDraw\n'), ((2580, 2600), 'cv2.imread', 'cv2.imread', (['"""t1.png"""'], {}), "('t1.png')\n", (2590, 2600), False, 'import cv2\n'), ((3068, 3094), 'cv2.imwrite', 'cv2.imwrite', (['"""t2.png"""', 'img'], {}), "('t2.png', img)\n", (3079, 3094), False, 'import cv2\n'), ((3237, 3260), 'cv2.imread', 'cv2.imread', (['"""t2.png"""', '(0)'], {}), "('t2.png', 0)\n", (3247, 3260), False, 'import cv2\n'), ((3316, 3355), 'cv2.imwrite', 'cv2.imwrite', (['"""t2_cropped.png"""', 'img_crop'], {}), "('t2_cropped.png', img_crop)\n", (3327, 3355), False, 'import cv2\n'), ((3458, 3486), 'cv2.imread', 'cv2.imread', (['"""t2_cropped.png"""'], {}), "('t2_cropped.png')\n", (3468, 3486), False, 'import cv2\n'), ((3657, 3701), 'numpy.full', 'np.full', (['(hh, ww, cc)', 'color'], {'dtype': 'np.uint8'}), '((hh, ww, cc), color, dtype=np.uint8)\n', (3664, 3701), True, 'import numpy as np\n'), ((3900, 3945), 'cv2.imwrite', 'cv2.imwrite', (['"""padded_cropped_img.png"""', 'result'], {}), "('padded_cropped_img.png', result)\n", (3911, 3945), False, 'import cv2\n'), ((5502, 5561), 'os.path.join', 'os.path.join', (['folder_path', '"""Complex_v4_finalized_model.pkl"""'], {}), "(folder_path, 'Complex_v4_finalized_model.pkl')\n", (5514, 5561), False, 'import os\n'), ((2041, 2095), 'numpy.ones', 'np.ones', (['(my_screen_width, my_screen_height)', 'np.uint8'], {}), '((my_screen_width, my_screen_height), np.uint8)\n', (2048, 2095), True, 'import numpy as np\n'), ((4840, 4860), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (4850, 4860), False, 'from PIL import Image, ImageDraw\n'), ((4911, 4925), 'numpy.asarray', 'asarray', (['image'], {}), '(image)\n', (4918, 4925), False, 'from numpy import asarray\n'), ((5350, 
5372), 'numpy.array', 'np.array', (['hog_features'], {}), '(hog_features)\n', (5358, 5372), True, 'import numpy as np\n'), ((5001, 5036), 'cv2.resize', 'cv2.resize', (['our_test[0]', '(320, 320)'], {}), '(our_test[0], (320, 320))\n', (5011, 5036), False, 'import cv2\n'), ((5152, 5268), 'skimage.feature.hog', 'hog', (['image'], {'orientations': '(8)', 'pixels_per_cell': '(ppc, ppc)', 'cells_per_block': '(4, 4)', 'block_norm': '"""L2"""', 'visualize': '(True)'}), "(image, orientations=8, pixels_per_cell=(ppc, ppc), cells_per_block=(4, \n 4), block_norm='L2', visualize=True)\n", (5155, 5268), False, 'from skimage.feature import hog\n'), ((2779, 2830), 'cv2.line', 'cv2.line', (['img', '(pre_x, pre_y)', '(x, y)', '(0, 0, 0)', '(4)'], {}), '(img, (pre_x, pre_y), (x, y), (0, 0, 0), 4)\n', (2787, 2830), False, 'import cv2\n'), ((2934, 2985), 'cv2.line', 'cv2.line', (['img', '(pre_x, pre_y)', '(x, y)', '(0, 0, 0)', '(4)'], {}), '(img, (pre_x, pre_y), (x, y), (0, 0, 0), 4)\n', (2942, 2985), False, 'import cv2\n'), ((582, 596), 'pathlib.Path', 'pathlib.Path', ([], {}), '()\n', (594, 596), False, 'import pathlib\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.