# more-main/log_utils.py
from collections import defaultdict, deque
import datetime
import time
import logging
from termcolor import colored
import sys
import os
import torch
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} (global_avg: {global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
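# A minimal usage sketch (illustrative, not part of the original reference
# code): SmoothedValue keeps a windowed median/average plus a global average
# over every value ever recorded.
def _smoothed_value_demo():
    sv = SmoothedValue(window_size=3, fmt="{median:.2f} (global_avg: {global_avg:.2f})")
    for v in [1.0, 2.0, 10.0, 4.0]:
        sv.update(v)
    # median is over the last 3 values only; global_avg covers all 4
    print(sv)  # "4.00 (global_avg: 4.25)"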
class MetricLogger(object):
"""https://github.com/pytorch/vision/blob/master/references/segmentation/utils.py"""
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, logger, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == (len(iterable) - 1):
eta_seconds = iter_time.avg * (len(iterable) - 1 - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
logger.info(
log_msg.format(
i,
len(iterable) - 1,
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
logger.info(
log_msg.format(
i,
len(iterable) - 1,
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info("{} Total time: {}".format(header, total_time_str))
class _ColorfulFormatter(logging.Formatter):
"""https://github.com/facebookresearch/detectron2/blob/299c4b0dbab6fe5fb81d3870636cfd86fc334447/detectron2/utils/logger.py"""
def __init__(self, *args, **kwargs):
self._root_name = kwargs.pop("root_name") + "."
super().__init__(*args, **kwargs)
def formatMessage(self, record):
log = super().formatMessage(record)
if record.levelno == logging.WARNING:
prefix = colored("WARNING", "red", attrs=["blink"])
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
elif record.levelno == logging.DEBUG:
prefix = colored("DEBUG", "grey")
else:
return log
return prefix + " " + log
def setup_logger(output_dir=None, name="Training"):
"""https://github.com/facebookresearch/detectron2/blob/299c4b0dbab6fe5fb81d3870636cfd86fc334447/detectron2/utils/logger.py"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
plain_formatter = logging.Formatter("[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S")
color_formatter = _ColorfulFormatter(
colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
datefmt="%m/%d %H:%M:%S",
root_name=name,
)
# stdout logging
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(color_formatter)
logger.addHandler(ch)
# file logging
if output_dir is not None:
filename = os.path.join(output_dir, "log.txt")
fh = logging.FileHandler(filename)
fh.setFormatter(plain_formatter)
logger.addHandler(fh)
return logger
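# End-to-end sketch (assumed usage; the real training scripts are expected to
# wire things together like this): build a logger, then drive log_every() as
# a generator around the data loader.
def _logging_demo():
    logger = setup_logger(output_dir=None, name="Demo")
    metric_logger = MetricLogger(delimiter="  ")
    fake_loader = range(10)  # stand-in for a torch DataLoader
    header = "Epoch: [0]"
    for batch in metric_logger.log_every(fake_loader, 5, logger, header=header):
        metric_logger.update(loss=float(batch))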
# more-main/action_utils_mask.py
import cv2
import imutils
import math
import random
from constants import (
GRIPPER_PUSH_ADD_PIXEL,
colors_lower,
colors_upper,
IMAGE_PAD_SIZE,
IMAGE_SIZE,
IMAGE_PAD_WIDTH,
PUSH_DISTANCE,
GRIPPER_PUSH_RADIUS_PIXEL,
PIXEL_SIZE,
DEPTH_MIN,
IMAGE_SIZE,
CONSECUTIVE_DISTANCE_THRESHOLD,
IMAGE_PAD_WIDTH,
PUSH_BUFFER,
IMAGE_PAD_DIFF,
GRIPPER_GRASP_WIDTH_PIXEL,
)
import numpy as np
import torch
from dataset import PushPredictionMultiDatasetEvaluation
from push_net import PushPredictionNet
from train_maskrcnn import get_model_instance_segmentation
from torchvision.transforms import functional as TF
import copy
import utils
class Predictor:
"""
Predict and generate images after push actions.
Assume the color image and depth image are well matched.
We use the masks to generate new images, so the quality of mask is important.
The input to this forward function should be returned from the sample_actions.
"""
def __init__(self, snapshot):
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
push_model = PushPredictionNet()
state = torch.load(snapshot)["model"]
push_model.load_state_dict(state)
self.push_model = push_model.to(self.device)
self.push_model.eval()
    # only rotated_color_image and rotated_depth_image are padded to 320x320
@torch.no_grad()
def forward(
self,
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
rotated_mask_objs,
plot=False,
):
# get data
dataset = PushPredictionMultiDatasetEvaluation(
rotated_depth_image, rotated_action, rotated_center, rotated_binary_objs
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=len(rotated_depth_image), shuffle=False, num_workers=0
)
(
prev_poses,
action,
action_start_ori,
action_end_ori,
used_binary_img,
binary_objs_total,
num_obj,
) = next(iter(data_loader))
prev_poses = prev_poses.to(self.device, non_blocking=True)
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
# get output
output = self.push_model(prev_poses, action, used_binary_img, binary_objs_total, num_obj[0])
output = output.cpu().numpy()
# generate new images
prev_poses_input = prev_poses.cpu().numpy().astype(int)
prev_poses = copy.deepcopy(prev_poses_input)
action_start_ori = action_start_ori.numpy().astype(int)
action_end_ori = action_end_ori.numpy().astype(int)
action_start_ori_tile = np.tile(action_start_ori, num_obj[0])
action_start = action[:, :2].cpu().numpy().astype(int)
action_start_tile = np.tile(action_start, num_obj[0])
generated_color_images = []
generated_depth_images = []
validations = []
for i in range(len(rotated_depth_image)):
i_output = output[i]
i_prev_poses = prev_poses[i]
i_action_start_ori_tile = action_start_ori_tile[i]
i_action_start_tile = action_start_tile[i]
i_prev_poses += i_action_start_ori_tile
i_prev_poses -= i_action_start_tile
i_rotated_angle = rotated_angle[i]
i_rotated_mask_objs = rotated_mask_objs[i]
color_image = rotated_color_image[i]
depth_image = rotated_depth_image[i]
# transform points and fill them into a black image
generated_color_image = np.zeros_like(color_image)
generated_depth_image = np.zeros_like(depth_image)
post_points_pad = []
post_new_points_pad = []
# for each object
valid = True
for pi in range(num_obj[i]):
                # if the object is out of the boundary, we can skip this action
center = [
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
]
center = np.array([[center]])
M = cv2.getRotationMatrix2D(
(
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
),
-i_output[pi * 3 + 2],
1,
)
ori_M = M.copy()
M[0, 2] += i_output[pi * 3]
M[1, 2] += i_output[pi * 3 + 1]
new_center = cv2.transform(center, M)
new_center = np.transpose(new_center[0])
ori_center = cv2.transform(center, ori_M)
ori_center = np.transpose(ori_center[0])
M = cv2.getRotationMatrix2D(
(IMAGE_PAD_SIZE // 2, IMAGE_PAD_SIZE // 2), i_rotated_angle, 1,
)
new_center = [new_center[0][0], new_center[1][0]]
new_center = np.array([[new_center]])
new_center = cv2.transform(new_center, M)[0][0]
ori_center = [ori_center[0][0], ori_center[1][0]]
ori_center = np.array([[ori_center]])
ori_center = cv2.transform(ori_center, M)[0][0]
if (
new_center[1] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
if not (
ori_center[1] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
valid = False
break
if valid:
for pi in range(num_obj[i]):
# # if the object is out of the boundary, then, we can skip this action
# if (
# i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1] < PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3] < PUSH_BUFFER / PIXEL_SIZE
# ):
# valid = False
# break
# find out transformation
mask = i_rotated_mask_objs[pi]
points = np.argwhere(mask == 255)
points = np.expand_dims(points, axis=0)
M = cv2.getRotationMatrix2D(
(
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
),
-i_output[pi * 3 + 2],
1,
)
M[0, 2] += i_output[pi * 3]
M[1, 2] += i_output[pi * 3 + 1]
new_points = cv2.transform(points, M)
post_points_pad.append(list(np.transpose(points[0])))
post_new_points_pad.append(list(np.transpose(new_points[0])))
validations.append(valid)
if valid:
for pi in range(num_obj[i]):
post_new_points_pad[pi] = (
np.clip(post_new_points_pad[pi][0], 0, IMAGE_PAD_SIZE - 1),
np.clip(post_new_points_pad[pi][1], 0, IMAGE_PAD_SIZE - 1),
)
post_points_pad[pi] = (
np.clip(post_points_pad[pi][0], 0, IMAGE_PAD_SIZE - 1),
np.clip(post_points_pad[pi][1], 0, IMAGE_PAD_SIZE - 1),
)
generated_color_image[post_new_points_pad[pi]] = color_image[
post_points_pad[pi]
]
generated_depth_image[post_new_points_pad[pi]] = depth_image[
post_points_pad[pi]
]
if plot:
cv2.circle(
generated_color_image,
(i_prev_poses[pi * 2 + 1] + 48, i_prev_poses[pi * 2] + 48),
3,
(255, 255, 255),
-1,
)
if plot:
cv2.arrowedLine(
generated_color_image,
(action_start_ori[i][1] + 48, action_start_ori[i][0] + 48),
(action_end_ori[i][1] + 48, action_end_ori[i][0] + 48),
(255, 0, 255),
2,
tipLength=0.4,
)
generated_color_image = utils.rotate(generated_color_image, angle=-i_rotated_angle)
generated_depth_image = utils.rotate(generated_depth_image, angle=-i_rotated_angle)
generated_color_image = generated_color_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, :
]
generated_depth_image = generated_depth_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
]
generated_color_image = cv2.medianBlur(generated_color_image, 5)
generated_depth_image = generated_depth_image.astype(np.float32)
generated_depth_image = cv2.medianBlur(generated_depth_image, 5)
generated_color_images.append(generated_color_image)
generated_depth_images.append(generated_depth_image)
return generated_color_images, generated_depth_images, validations
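# Pipeline sketch (assumptions: a trained push-prediction snapshot and a Mask
# R-CNN checkpoint are available; the __main__ block at the bottom of this
# file runs the full version). Masks come from from_maskrcnn(), the rotated
# inputs come from sample_actions(), and Predictor.forward() consumes those
# outputs directly:
#
#   masks = from_maskrcnn(mask_model, color_image, device)
#   (rot_color, rot_depth, rot_action, rot_center, rot_angle,
#    rot_binary, before_action, rot_masks) = sample_actions(
#       color_image, depth_image, masks)
#   predictor = Predictor("logs_push/push_prediction_model-75.pth")
#   colors, depths, valid = predictor.forward(
#       rot_color, rot_depth, rot_action, rot_center, rot_angle,
#       rot_binary, rot_masks)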
def get_sign_line(pose0, pose1, pose2):
"""
Line is from pose1 to pose2.
if value > 0, pose0 is on the left side of the line.
if value = 0, pose0 is on the same line.
if value < 0, pose0 is on the right side of the line.
"""
return (pose2[0] - pose1[0]) * (pose0[1] - pose1[1]) - (pose0[0] - pose1[0]) * (
pose2[1] - pose1[1]
)
def distance_to_line(pose0, pose1, pose2):
"""
Line is from pose1 to pose2.
"""
return abs(
(pose2[0] - pose1[0]) * (pose1[1] - pose0[1])
- (pose1[0] - pose0[0]) * (pose2[1] - pose1[1])
) / math.sqrt((pose2[0] - pose1[0]) ** 2 + (pose2[1] - pose1[1]) ** 2)
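# Quick numeric check (illustrative): for the horizontal line from (0, 0) to
# (10, 0), the point (5, 3) follows the sign/distance conventions documented
# above.
def _line_helpers_demo():
    p1, p2 = (0, 0), (10, 0)
    assert get_sign_line((5, 3), p1, p2) > 0   # (5, 3) is on the left side
    assert get_sign_line((5, 0), p1, p2) == 0  # on the line itself
    assert distance_to_line((5, 3), p1, p2) == 3.0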
def adjust_push_start_point(
pose0, pose1, contour, distance=GRIPPER_PUSH_RADIUS_PIXEL, add_distance=GRIPPER_PUSH_ADD_PIXEL,
):
"""
Give two points, find the most left and right point on the contour within a given range based on pose1->pose0.
So the push will not collide with the contour
pose0: the center of contour
pose1: the point on the contour
"""
r = math.sqrt((pose1[0] - pose0[0]) ** 2 + (pose1[1] - pose0[1]) ** 2)
dx = round(distance / r * (pose0[1] - pose1[1]))
dy = round(distance / r * (pose1[0] - pose0[0]))
pose2 = (pose0[0] + dx, pose0[1] + dy)
pose3 = (pose1[0] + dx, pose1[1] + dy)
pose4 = (pose0[0] - dx, pose0[1] - dy)
pose5 = (pose1[0] - dx, pose1[1] - dy)
pose1_sign23 = get_sign_line(pose1, pose2, pose3)
pose1_sign45 = get_sign_line(pose1, pose4, pose5)
assert pose1_sign23 * pose1_sign45 < 0
center_distance = distance_to_line(pose1, pose2, pose4)
max_distance = 0
for p in range(0, len(contour)):
test_pose = contour[p][0]
test_pose_sign23 = get_sign_line(test_pose, pose2, pose3)
test_pose_sign45 = get_sign_line(test_pose, pose4, pose5)
# in the range, between two lines
if pose1_sign23 * test_pose_sign23 >= 0 and pose1_sign45 * test_pose_sign45 >= 0:
# is far enough
test_center_distance = distance_to_line(test_pose, pose2, pose4)
if test_center_distance >= center_distance:
# in the correct side
test_edge_distance = distance_to_line(test_pose, pose3, pose5)
if test_edge_distance < test_center_distance:
if test_center_distance > max_distance:
max_distance = test_center_distance
diff_distance = abs(max_distance - center_distance)
return math.ceil(diff_distance) + add_distance
def get_orientation(pts):
sz = len(pts)
data_pts = np.empty((sz, 2), dtype=np.float64)
for i in range(data_pts.shape[0]):
data_pts[i, 0] = pts[i, 0, 0]
data_pts[i, 1] = pts[i, 0, 1]
# Perform PCA analysis
mean = np.empty((0))
mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)
angle = math.atan2(eigenvectors[0, 1], eigenvectors[0, 0]) # orientation in radians
return angle
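# Contour-based sketch (illustrative): build a synthetic rectangle mask,
# extract its contour with OpenCV, then query the PCA orientation and the
# collision-aware push start offset defined above.
def _contour_helpers_demo():
    mask = np.zeros((224, 224), dtype=np.uint8)
    cv2.rectangle(mask, (80, 100), (140, 120), 255, -1)  # filled rectangle
    cnt = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = imutils.grab_contours(cnt)[0]
    angle = get_orientation(cnt)  # ~0 rad for this axis-aligned rectangle
    center = (110, 110)           # rectangle center (x, y)
    offset = adjust_push_start_point(center, (80, 110), cnt)  # pixels to back off
    print(math.degrees(angle), offset)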
def is_close(prev_pose, this_pose):
dis = math.sqrt((this_pose[0] - prev_pose[0]) ** 2 + (this_pose[1] - prev_pose[1]) ** 2)
if dis < CONSECUTIVE_DISTANCE_THRESHOLD / PIXEL_SIZE:
return True
return False
def close_distance(prev_pose, this_pose):
dis = math.sqrt((this_pose[0] - prev_pose[0]) ** 2 + (this_pose[1] - prev_pose[1]) ** 2)
return dis
def sample_actions(
color_image,
depth_image,
mask_objs,
plot=False,
start_pose=None,
from_color=False,
prev_move=None,
):
"""
Sample actions around the objects, from the boundary to the center.
Assume there is no object in "black"
Output the rotated image, such that the push action is from left to right
"""
gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
gray = gray.astype(np.uint8)
if plot:
plot_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
blurred = cv2.medianBlur(gray, 5)
thresh = cv2.threshold(blurred, 20, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
# if the mask is in color format
if from_color:
ori_mask_objs = mask_objs[0]
ori_mask_obj_centers = mask_objs[1]
new_mask_objs = []
for idx, mask in enumerate(mask_objs[0]):
center = ori_mask_obj_centers[idx]
new_mask = np.copy(mask[0])
new_mask = new_mask.astype(np.uint8)
new_mask = cv2.cvtColor(new_mask, cv2.COLOR_RGB2GRAY)
new_mask = cv2.threshold(new_mask, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
new_mask_pad = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE), dtype=np.uint8)
if (
center[0] - 30 < 0
or center[0] + 30 >= IMAGE_PAD_SIZE
or center[1] - 30 < 0
or center[1] + 30 >= IMAGE_PAD_SIZE
):
return [], [], [], [], [], [], [], []
new_mask_pad[
center[0] - 30 : center[0] + 30, center[1] - 30 : center[1] + 30
] = new_mask
new_mask = new_mask_pad[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
]
new_mask_objs.append(new_mask)
mask_objs = new_mask_objs
# find the contour of a single object
points_on_contour = []
points = []
four_idx = []
other_idx = []
priority_points_on_contour = []
priority_points = []
center = []
binary_objs = []
for oi in range(len(mask_objs)):
obj_cnt = cv2.findContours(mask_objs[oi], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
obj_cnt = imutils.grab_contours(obj_cnt)
if len(obj_cnt) == 0:
return [], [], [], [], [], [], [], []
        obj_cnt = max(obj_cnt, key=cv2.contourArea)  # Mask R-CNN could give bad masks
if cv2.contourArea(obj_cnt) < 10:
return [], [], [], [], [], [], [], []
# get center
M = cv2.moments(obj_cnt)
cX = round(M["m10"] / M["m00"])
cY = round(M["m01"] / M["m00"])
center.append([cX, cY])
# get crop of each object
temp = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE), dtype=np.uint8)
temp[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
] = mask_objs[oi]
crop = temp[
cY + IMAGE_PAD_WIDTH - 30 : cY + IMAGE_PAD_WIDTH + 30,
cX + IMAGE_PAD_WIDTH - 30 : cX + IMAGE_PAD_WIDTH + 30,
]
assert crop.shape[0] == 60 and crop.shape[1] == 60, crop.shape
binary_objs.append(crop)
if plot:
cv2.circle(plot_image, (cX, cY), 3, (255, 255, 255), -1)
# get pca angle
angle = get_orientation(obj_cnt)
# get contour points
skip_num = len(obj_cnt) // 12 # 12 possible pushes for an object
skip_count = 0
diff_angle_limit_four = 0.3
target_diff_angles = np.array([0, np.pi, np.pi / 2, 3 * np.pi / 2])
# add the consecutive move
if prev_move:
prev_angle = math.atan2(
prev_move[1][1] - prev_move[0][1], prev_move[1][0] - prev_move[0][0]
)
pose = (cX - math.cos(prev_angle) * 2, cY - math.sin(prev_angle) * 2)
x = pose[0]
y = pose[1]
diff_x = cX - x
diff_y = cY - y
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt, add_distance=0)
test_point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
if is_close(prev_move[1], test_point):
if len(priority_points) > 0:
prev_dis = close_distance(prev_move[1], priority_points[0])
this_dis = close_distance(prev_move[1], test_point)
if this_dis < prev_dis:
priority_points_on_contour[0] = point_on_contour
priority_points[0] = point
else:
priority_points_on_contour.append(point_on_contour)
priority_points.append(point)
# add four directions to center of object
four_poses = [
(cX + math.cos(angle) * 2, cY + math.sin(angle) * 2),
(cX + math.cos(angle + np.pi / 2) * 2, cY + math.sin(angle + np.pi / 2) * 2),
(cX + math.cos(angle - np.pi / 2) * 2, cY + math.sin(angle - np.pi / 2) * 2),
(cX - math.cos(angle) * 2, cY - math.sin(angle) * 2),
]
for pose in four_poses:
x = pose[0]
y = pose[1]
diff_x = cX - x
diff_y = cY - y
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
points_on_contour.append(point_on_contour)
points.append(point)
four_idx.append(len(points) - 1)
for pi, p in enumerate(obj_cnt):
x = p[0][0]
y = p[0][1]
if x == cX or y == cY:
continue
diff_x = cX - x
diff_y = cY - y
test_angle = math.atan2(diff_y, diff_x)
should_append = False
# avoid four directions to center of object
if np.min(np.abs(abs(angle - test_angle) - target_diff_angles)) < diff_angle_limit_four:
should_append = False
skip_count = 0
elif skip_count == skip_num:
should_append = True
if should_append:
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
points_on_contour.append(point_on_contour)
points.append(point)
other_idx.append(len(points) - 1)
skip_count = 0
else:
skip_count += 1
# random actions, adding priority points at the end
# temp = list(zip(points_on_contour, points))
# random.shuffle(temp)
# points_on_contour, points = zip(*temp)
# points_on_contour = list(points_on_contour)
# points = list(points)
# points.extend(priority_points)
# points_on_contour.extend(priority_points_on_contour)
random.shuffle(four_idx)
random.shuffle(other_idx)
new_points = []
new_points_on_contour = []
for idx in other_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
for idx in four_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
new_points.extend(priority_points)
new_points_on_contour.extend(priority_points_on_contour)
points = new_points
points_on_contour = new_points_on_contour
priority_qualified = False
if plot:
# loop over the contours
for c in cnts:
cv2.drawContours(plot_image, [c], -1, (133, 137, 140), 2)
valid_points = []
for pi in range(len(points)):
# out of boundary
if (
points[pi][0] < 5
or points[pi][0] > IMAGE_SIZE - 5
or points[pi][1] < 5
or points[pi][1] > IMAGE_SIZE - 5
):
qualify = False
elif pi >= len(points) - len(priority_points):
temp = list(points[pi])
temp[0] = max(temp[0], 5)
temp[0] = min(temp[0], IMAGE_SIZE - 5)
temp[1] = max(temp[1], 5)
temp[1] = min(temp[1], IMAGE_SIZE - 5)
points[pi] = temp
qualify = True
priority_qualified = True
# clearance
elif (
np.sum(
thresh[
points[pi][1]
- GRIPPER_GRASP_WIDTH_PIXEL // 2 : points[pi][1]
+ GRIPPER_GRASP_WIDTH_PIXEL // 2
+ 1,
points[pi][0]
- GRIPPER_GRASP_WIDTH_PIXEL // 2 : points[pi][0]
+ GRIPPER_GRASP_WIDTH_PIXEL // 2
+ 1,
]
> 0
)
== 0
):
qualify = True
else:
qualify = False
if qualify:
if plot:
diff_x = points_on_contour[pi][0] - points[pi][0]
diff_y = points_on_contour[pi][1] - points[pi][1]
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_to = (
int(points[pi][0] + diff_x * PUSH_DISTANCE / PIXEL_SIZE / 2),
int(points[pi][1] + diff_y * PUSH_DISTANCE / PIXEL_SIZE / 2),
)
if pi < len(other_idx):
cv2.arrowedLine(
plot_image, points[pi], point_to, (0, 0, 255), 2, tipLength=0.2,
)
elif pi >= len(points) - len(priority_points):
cv2.arrowedLine(
plot_image, tuple(points[pi]), point_to, (0, 255, 0), 2, tipLength=0.2,
)
else:
cv2.arrowedLine(
plot_image, points[pi], point_to, (255, 0, 0), 2, tipLength=0.2,
)
valid_points.append([points[pi], points_on_contour[pi]])
if start_pose is not None:
spose = (start_pose[1], start_pose[0])
epose = (start_pose[3], start_pose[2])
valid_points = [[spose, epose]]
print(valid_points)
if plot:
cv2.imwrite("test.png", plot_image)
# rotate image
rotated_color_image = []
rotated_depth_image = []
rotated_mask_objs = []
rotated_angle = []
rotated_center = []
rotated_action = []
rotated_binary_objs_image = []
before_rotated_action = []
count = 0
for aidx, action in enumerate(valid_points):
# padding from 224 to 320
# color image
color_image_pad = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE, 3), np.uint8)
color_image_pad[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
] = color_image
# depth image
depth_image_pad = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE), np.float32)
depth_image_pad[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
] = depth_image
# compute rotation angle
down = (0, 1)
current = (action[1][0] - action[0][0], action[1][1] - action[0][1])
dot = (
down[0] * current[0] + down[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = down[0] * current[1] - down[1] * current[0] # determinant
angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
angle = math.degrees(angle)
# rotate images
rotated_color = utils.rotate(color_image_pad, angle)
rotated_depth = utils.rotate(depth_image_pad, angle)
# rotate cropped object
if len(binary_objs) == 1:
# binary_objs_image = np.expand_dims(binary_objs[0], axis=-1)
binary_objs_image = binary_objs[0]
rotated_binary_objs = utils.rotate(binary_objs_image, angle, True)
rotated_binary_objs = np.expand_dims(rotated_binary_objs, axis=-1)
else:
binary_objs_image = np.stack(binary_objs, axis=-1)
rotated_binary_objs = utils.rotate(binary_objs_image, angle, True)
M = cv2.getRotationMatrix2D((IMAGE_SIZE / 2, IMAGE_SIZE / 2), angle, 1) # rotate by center
# rotate points
points = np.array(center)
points = np.concatenate((points, [action[0]]), axis=0)
points = np.expand_dims(points, axis=0)
points = cv2.transform(points, M)[0]
points_center = points[: len(center)]
# clearance check
clearance = cv2.cvtColor(rotated_color, cv2.COLOR_RGB2GRAY)
clearance = cv2.medianBlur(clearance, 5)
clearance = cv2.threshold(clearance, 20, 255, cv2.THRESH_BINARY)[1]
area = np.sum(
clearance[
max(
0, points[-1][1] + IMAGE_PAD_WIDTH - round(GRIPPER_GRASP_WIDTH_PIXEL / 2)
) : min(
IMAGE_PAD_SIZE,
points[-1][1] + IMAGE_PAD_WIDTH + round(GRIPPER_GRASP_WIDTH_PIXEL / 2) + 1,
),
max(0, points[-1][0] + IMAGE_PAD_WIDTH - GRIPPER_PUSH_RADIUS_PIXEL) : min(
IMAGE_PAD_SIZE, points[-1][0] + IMAGE_PAD_WIDTH + GRIPPER_PUSH_RADIUS_PIXEL + 1
),
]
> 0
)
if area > 0:
if not (priority_qualified and aidx == len(valid_points) - 1):
continue
rotated_color_image.append(rotated_color)
rotated_depth_image.append(rotated_depth)
rotated_angle.append(angle)
rotated_center.append(np.flip(points_center, 1))
rotated_action.append(np.flip(points[-1]))
rotated_binary_objs_image.append(rotated_binary_objs)
rotated_mask_obj = []
rotated_mask_centers = []
if from_color:
for idx, mask in enumerate(ori_mask_objs):
mask_color = mask[0]
mask_depth = mask[1]
rotated_mask_color = utils.rotate(mask_color, angle)
rotated_mask_depth = utils.rotate(mask_depth, angle)
rotated_mask = (rotated_mask_color, rotated_mask_depth)
rotated_mask_obj.append(rotated_mask)
rotated_mask_centers.append(
[
points_center[idx][1] + IMAGE_PAD_WIDTH,
points_center[idx][0] + IMAGE_PAD_WIDTH,
]
)
rotated_mask_objs.append((rotated_mask_obj, rotated_mask_centers))
else:
for mask in mask_objs:
mask = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
rotated_mask = utils.rotate(mask, angle, True)
rotated_mask_obj.append(rotated_mask)
rotated_mask_objs.append(rotated_mask_obj)
before_rotated_action.append(np.flip(action[0]))
# if plot:
# rotated_image = rotated_color.copy()
# rotated_image_gray = cv2.cvtColor(rotated_image, cv2.COLOR_RGB2GRAY)
# rotated_image_gray = rotated_image_gray.astype(np.uint8)
# rotated_image_gray = cv2.medianBlur(rotated_image_gray, 5)
# rotated_image = cv2.threshold(rotated_image_gray, 50, 255, cv2.THRESH_BINARY)[1]
# rotated_image = rotated_image[
# IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
# IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
# ]
# for ci in range(len(points_center)):
# cY, cX = rotated_center[-1][ci]
# cv2.circle(rotated_image, (cX, cY), 3, (128), -1)
# y1, x1 = rotated_action[-1]
# cv2.arrowedLine(
# rotated_image,
# (x1, y1),
# (x1, y1 + int(PUSH_DISTANCE / PIXEL_SIZE)),
# (128),
# 2,
# tipLength=0.4,
# )
# cv2.circle(rotated_image, (x1, y1), 2, (200), -1)
# cv2.imwrite(str(count) + "test_rotated.png", rotated_image)
# count += 1
return (
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs_image,
before_rotated_action,
rotated_mask_objs,
)
def from_color_segm(color_image, plot=False):
"""
Use Pre-defined color to do instance segmentation and output masks in binary format.
"""
image = cv2.cvtColor(color_image, cv2.COLOR_RGB2HSV)
mask_objs = []
if plot:
pred_mask = np.zeros((224, 224), dtype=np.uint8)
for ci in range(4):
mask = cv2.inRange(image, colors_lower[ci], colors_upper[ci])
if np.sum(mask > 0):
mask_objs.append(mask)
if plot:
pred_mask[mask > 0] = 255 - ci * 20
cv2.imwrite(str(ci) + "mask.png", mask)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % len(mask_objs))
return mask_objs
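# Illustrative call (assumes a 224x224 RGB heightmap whose object colors fall
# inside the HSV ranges in constants.colors_lower / constants.colors_upper):
#
#   masks = from_color_segm(color_image)             # list of binary uint8 masks
#   masks = from_color_segm(color_image, plot=True)  # also writes *mask.png / pred.png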
@torch.no_grad()
def from_maskrcnn(model, color_image, device, plot=False):
"""
Use Mask R-CNN to do instance segmentation and output masks in binary format.
"""
model.eval()
image = color_image.copy()
image = TF.to_tensor(image)
prediction = model([image.to(device)])[0]
mask_objs = []
if plot:
pred_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE), dtype=np.uint8)
for idx, mask in enumerate(prediction["masks"]):
# NOTE: 0.98 can be tuned
if prediction["scores"][idx] > 0.98:
img = mask[0].mul(255).byte().cpu().numpy()
img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
if np.sum(img == 255) < 100:
continue
mask_objs.append(img)
if plot:
pred_mask[img > 0] = 255 - idx * 50
cv2.imwrite(str(idx) + "mask.png", img)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % len(mask_objs), prediction["scores"].cpu())
return mask_objs
if __name__ == "__main__":
# color_image = cv2.imread(
# "logs_grasp/mcts-2021-03-21-00-31-13/data/color-heightmaps/000019.0.color.png"
# )
color_image = cv2.imread("tree_plot/root.0-73_140_74_103.1-72_138_108_130.2-99_132_136_132.png")
# color_image_after = cv2.imread("logs_push/final-test/data/color_heightmaps/0002507.color.png")
# color_image = cv2.imread("logs/action_test/data/color-heightmaps/000004.0.color.png")
# color_image = cv2.imread(
# "logs_push/2021-01-24-16-07-43/data/color-heightmaps/000000.0.color.png"
# )
# color_image = cv2.imread("logs/vpg+&pp/p104/data/color-heightmaps/000001.0.color.png")
color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
# color_image_after = cv2.cvtColor(color_image_after, cv2.COLOR_BGR2RGB)
depth_image = cv2.imread(
"tree_plot/root.0-73_140_74_103.1-72_138_108_130.2-99_132_136_132-depth.png",
cv2.IMREAD_UNCHANGED,
)
# depth_image = cv2.imread("logs/real-maskrcnn/data/depth-heightmaps/000002.0.depth.png", cv2.IMREAD_UNCHANGED)
# depth_image = cv2.imread("logs/old/object-detection-data/data/depth-heightmaps/000001.0.depth.png", cv2.IMREAD_UNCHANGED)
# depth_image = cv2.imread(
# "logs_grasp/mcts-2021-03-21-00-31-13/data/depth-heightmaps/000019.0.depth.png",
# cv2.IMREAD_UNCHANGED,
# )
# depth_image = cv2.imread("logs/vpg+&pp/p104/data/depth-heightmaps/000001.0.depth.png", cv2.IMREAD_UNCHANGED)
depth_image = depth_image.astype(np.float32) / 100000
# with open('logs_push/final-test/data/actions/0002502.action.txt', 'r') as file:
# filedata = file.read()
# x, y = filedata.split(' ')
# start_pose = [x, y]
# cv2.imwrite('predicttruth.png', color_image_after)
# check diff of color image and depth image
# gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
# blurred = cv2.medianBlur(gray, 5)
# gray = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
# depth_image[depth_image <= DEPTH_MIN] = 0
# depth_image[depth_image > DEPTH_MIN] = 255
# # depth_image = depth_image.astype(np.uint8)
# cv2.imshow('color', gray)
# cv2.imwrite('blackwhite', gray)
# diff = depth_image - gray
# diff[diff < 0] = 128
# cv2.imshow('diff', diff)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# exit()
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
predictor = Predictor("logs_push/push_prediction_model-75.pth")
# trainer = Trainer(
# "reinforcement",
# 0,
# 0,
# True,
# True,
# "logs_grasp/power1.5graspnew/models/snapshot-post-020000.reinforcement.pth",
# False,
# )
model = get_model_instance_segmentation(2)
model.load_state_dict(torch.load("logs_image/maskrcnn.pth"))
model = model.to(device)
mask_objs = from_maskrcnn(model, color_image, device, True)
(
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
before_rotated_action,
rotated_mask_objs,
) = sample_actions(color_image, depth_image, mask_objs, True)
generated_color_images, generated_depth_images, validations = predictor.forward(
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
rotated_mask_objs,
True,
)
for idx, img in enumerate(generated_color_images):
overlay = color_image
# added_image = cv2.addWeighted(generated_color_images[idx], 0.8, overlay, 0.4, 0)
added_image = generated_color_images[idx].copy()
img = cv2.cvtColor(added_image, cv2.COLOR_RGB2BGR)
cv2.imwrite(str(idx) + "predict.png", img)
img = generated_depth_images[idx]
img[img <= DEPTH_MIN] = 0
img[img > DEPTH_MIN] = 255
cv2.imwrite(str(idx) + "predictgray.png", img)
# generated_color_images.append(color_image)
# generated_depth_images.append(depth_image)
# for idx, img in enumerate(generated_color_images):
# if idx + 1 == len(generated_color_images) or validations[idx]:
# _, grasp_predictions = trainer.forward(
# generated_color_images[idx], generated_depth_images[idx], is_volatile=True
# )
# grasp_predictions = trainer.focus_on_target(
# generated_color_images[idx], grasp_predictions
# )
# best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
# predicted_value = np.max(grasp_predictions)
# grasp_pred_vis = trainer.get_prediction_vis(
# grasp_predictions, generated_color_images[idx], best_pix_ind
# )
# cv2.imwrite(str(idx) + "visualization.grasp.png", grasp_pred_vis)
# predicted_values = np.sum(np.sort(grasp_predictions.flatten())[:])
# print(idx, predicted_value, predicted_values)
# else:
# print("invalid")
# _, grasp_predictions = trainer.forward(
# color_image, depth_image, is_volatile=True
# )
# grasp_predictions = trainer.focus_on_target(
# color_image, depth_image, grasp_predictions, TARGET_LOWER, TARGET_UPPER
# )
# best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
# predicted_value = np.max(grasp_predictions)
# grasp_pred_vis = trainer.get_prediction_vis(
# grasp_predictions, color_image, best_pix_ind
# )
# cv2.imwrite("visualization.grasp.png", grasp_pred_vis)
# predicted_values = np.sum(np.sort(grasp_predictions.flatten())[:])
# print(predicted_value, predicted_values)
# more-main/models.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from vision.backbone_utils import resnet_fpn_net
from constants import NUM_ROTATION
class PushNet(nn.Module):
"""
The DQN Network.
"""
def __init__(self, pre_train=False):
super().__init__()
self.device = torch.device("cuda")
self.pre_train = pre_train
self.num_rotations = NUM_ROTATION
# self.pushnet = FCN(2, 1).to(self.device)
self.pushnet = resnet_fpn_net(
"resnet34", trainable_layers=5, grasp=False, input_channels=2).to(self.device)
print("max_memory_allocated (MB):", torch.cuda.max_memory_allocated() / 2 ** 20)
print("memory_allocated (MB):", torch.cuda.memory_allocated() / 2 ** 20)
def forward(
self, input_data, is_volatile=False, specific_rotation=-1,
):
if self.pre_train:
output_probs = self.pushnet(input_data)
return output_probs
else:
if is_volatile:
with torch.no_grad():
output_prob = []
# Apply rotations to images
for rotate_idx in range(self.num_rotations):
rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# Compute sample grid for rotation BEFORE neural network
affine_mat_before = np.asarray(
[
[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
]
)
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = (
torch.from_numpy(affine_mat_before)
.permute(2, 0, 1)
.float()
.to(self.device)
)
flow_grid_before = F.affine_grid(
affine_mat_before, input_data.size(), align_corners=True
)
# Rotate images clockwise
rotate_data = F.grid_sample(
input_data.to(self.device),
flow_grid_before,
mode="bilinear",
align_corners=True,
)
final_push_feat = self.pushnet(rotate_data)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray(
[
[np.cos(rotate_theta), np.sin(rotate_theta), 0],
[-np.sin(rotate_theta), np.cos(rotate_theta), 0],
]
)
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = (
torch.from_numpy(affine_mat_after)
.permute(2, 0, 1)
.float()
.to(self.device)
)
flow_grid_after = F.affine_grid(
affine_mat_after, final_push_feat.data.size(), align_corners=True
)
# Forward pass through branches, undo rotation on output predictions, upsample results
output_prob.append(
F.grid_sample(
final_push_feat,
flow_grid_after,
mode="bilinear",
align_corners=True,
),
)
return output_prob
else:
raise NotImplementedError
# self.output_prob = []
# # Apply rotations to images
# rotate_idx = specific_rotation
# rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# # Compute sample grid for rotation BEFORE branches
# affine_mat_before = np.asarray(
# [
# [np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
# [-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
# ]
# )
# affine_mat_before.shape = (2, 3, 1)
# affine_mat_before = (
# torch.from_numpy(affine_mat_before).permute(2, 0, 1).float().to(self.device)
# )
# affine_mat_before.requires_grad_(False)
# flow_grid_before = F.affine_grid(
# affine_mat_before, input_color_data.size(), align_corners=True
# )
# # Rotate images clockwise
# rotate_color = F.grid_sample(
# input_color_data.to(self.device),
# flow_grid_before,
# mode="bilinear",
# align_corners=True,
# )
# rotate_depth = F.grid_sample(
# input_depth_data.to(self.device),
# flow_grid_before,
# mode="bilinear",
# align_corners=True,
# )
# input_data = torch.cat((rotate_color, rotate_depth), dim=1)
# # Pass intermediate features to net
# final_push_feat = self.pushnet(input_data)
# # Compute sample grid for rotation AFTER branches
# affine_mat_after = np.asarray(
# [
# [np.cos(rotate_theta), np.sin(rotate_theta), 0],
# [-np.sin(rotate_theta), np.cos(rotate_theta), 0],
# ]
# )
# affine_mat_after.shape = (2, 3, 1)
# affine_mat_after = (
# torch.from_numpy(affine_mat_after).permute(2, 0, 1).float().to(self.device)
# )
# affine_mat_after.requires_grad_(False)
# flow_grid_after = F.affine_grid(
# affine_mat_after.to(self.device),
# final_push_feat.data.size(),
# align_corners=True,
# )
# # Forward pass through branches, undo rotation on output predictions, upsample results
# self.output_prob.append(
# F.grid_sample(
# final_push_feat, flow_grid_after, mode="bilinear", align_corners=True
# )
# )
# return self.output_prob
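# Standalone sketch of the rotate -> forward -> counter-rotate trick used by
# both networks in this file (illustrative; the input shape and the identity
# "network" are assumptions, not the real FPN backbone):
def _rotation_trick_demo():
    data = torch.rand(1, 2, 224, 224)
    rotate_theta = np.radians(90)
    affine_before = torch.tensor(
        [[[np.cos(-rotate_theta), np.sin(-rotate_theta), 0.0],
          [-np.sin(-rotate_theta), np.cos(-rotate_theta), 0.0]]],
        dtype=torch.float,
    )
    grid_before = F.affine_grid(affine_before, data.size(), align_corners=True)
    rotated = F.grid_sample(data, grid_before, mode="bilinear", align_corners=True)
    features = rotated  # a real model would run its backbone here
    affine_after = torch.tensor(
        [[[np.cos(rotate_theta), np.sin(rotate_theta), 0.0],
          [-np.sin(rotate_theta), np.cos(rotate_theta), 0.0]]],
        dtype=torch.float,
    )
    grid_after = F.affine_grid(affine_after, features.size(), align_corners=True)
    restored = F.grid_sample(features, grid_after, mode="bilinear", align_corners=True)
    # with an identity "network", restored should closely match the input
    print(torch.allclose(restored, data, atol=1e-4))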
class reinforcement_net(nn.Module):
"""
The DQN Network.
graspnet is the Grasp Network.
pushnet is the Push Network for the DQN + GN method.
"""
def __init__(self, pre_train=False): # , snapshot=None
super(reinforcement_net, self).__init__()
self.device = torch.device("cuda")
self.pre_train = pre_train
self.num_rotations = NUM_ROTATION
if pre_train:
# self.pushnet = resnet_fpn_net(
# "resnet18", trainable_layers=5, grasp=False, input_channels=4
# ).to(self.device)
# self.pushnet = FCN(4, 1).to(self.device)
self.graspnet = resnet_fpn_net("resnet18", trainable_layers=5).to(self.device)
else:
# self.pushnet = resnet_fpn_net(
# "resnet18", trainable_layers=5, grasp=False, input_channels=4
# ).to(self.device)
# self.pushnet = FCN(4, 1).to(self.device)
self.graspnet = resnet_fpn_net("resnet18", trainable_layers=5).to(self.device)
print("max_memory_allocated (MB):", torch.cuda.max_memory_allocated() / 2 ** 20)
print("memory_allocated (MB):", torch.cuda.memory_allocated() / 2 ** 20)
def forward(
self,
input_color_data,
input_depth_data,
is_volatile=False,
specific_rotation=-1,
use_push=True,
push_only=False,
):
if self.pre_train:
input_data = torch.cat((input_color_data, input_depth_data), dim=1)
if use_push:
if push_only:
output_probs = self.pushnet(input_data)
else:
final_push_feat = self.pushnet(input_data)
final_grasp_feat = self.graspnet(input_data)
output_probs = (final_push_feat, final_grasp_feat)
else:
output_probs = self.graspnet(input_data)
return output_probs
else:
if is_volatile:
with torch.no_grad():
output_prob = []
# Apply rotations to images
for rotate_idx in range(self.num_rotations):
rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# Compute sample grid for rotation BEFORE neural network
affine_mat_before = np.asarray(
[
[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
]
)
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = (
torch.from_numpy(affine_mat_before)
.permute(2, 0, 1)
.float()
.to(self.device)
)
flow_grid_before = F.affine_grid(
affine_mat_before, input_color_data.size(), align_corners=True
)
# Rotate images clockwise
rotate_color = F.grid_sample(
input_color_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
rotate_depth = F.grid_sample(
input_depth_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
input_data = torch.cat((rotate_color, rotate_depth), dim=1)
# Pass intermediate features to net
if use_push:
final_push_feat = self.pushnet(input_data)
if not push_only:
final_grasp_feat = self.graspnet(input_data)
else:
final_grasp_feat = self.graspnet(input_data)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray(
[
[np.cos(rotate_theta), np.sin(rotate_theta), 0],
[-np.sin(rotate_theta), np.cos(rotate_theta), 0],
]
)
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = (
torch.from_numpy(affine_mat_after)
.permute(2, 0, 1)
.float()
.to(self.device)
)
if use_push:
flow_grid_after = F.affine_grid(
affine_mat_after, final_push_feat.data.size(), align_corners=True
)
else:
flow_grid_after = F.affine_grid(
affine_mat_after, final_grasp_feat.data.size(), align_corners=True
)
# Forward pass through branches, undo rotation on output predictions, upsample results
if use_push:
if push_only:
output_prob.append(
F.grid_sample(
final_push_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
)
else:
output_prob.append(
[
F.grid_sample(
final_push_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
F.grid_sample(
final_grasp_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
]
)
else:
output_prob.append(
[
None,
F.grid_sample(
final_grasp_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
]
)
return output_prob
else:
self.output_prob = []
# Apply rotations to images
rotate_idx = specific_rotation
rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# Compute sample grid for rotation BEFORE branches
affine_mat_before = np.asarray(
[
[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
]
)
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = (
torch.from_numpy(affine_mat_before).permute(2, 0, 1).float().to(self.device)
)
affine_mat_before.requires_grad_(False)
flow_grid_before = F.affine_grid(
affine_mat_before, input_color_data.size(), align_corners=True
)
# Rotate images clockwise
rotate_color = F.grid_sample(
input_color_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
rotate_depth = F.grid_sample(
input_depth_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
input_data = torch.cat((rotate_color, rotate_depth), dim=1)
# Pass intermediate features to net
final_push_feat = self.pushnet(input_data)
if not push_only:
final_grasp_feat = self.graspnet(input_data)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray(
[
[np.cos(rotate_theta), np.sin(rotate_theta), 0],
[-np.sin(rotate_theta), np.cos(rotate_theta), 0],
]
)
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = (
torch.from_numpy(affine_mat_after).permute(2, 0, 1).float().to(self.device)
)
affine_mat_after.requires_grad_(False)
flow_grid_after = F.affine_grid(
affine_mat_after.to(self.device),
final_push_feat.data.size(),
align_corners=True,
)
# Forward pass through branches, undo rotation on output predictions, upsample results
if push_only:
self.output_prob.append(
F.grid_sample(
final_push_feat, flow_grid_after, mode="nearest", align_corners=True
)
)
else:
self.output_prob.append(
[
F.grid_sample(
final_push_feat, flow_grid_after, mode="nearest", align_corners=True
),
F.grid_sample(
final_grasp_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
]
)
return self.output_prob
# more-main/push_net.py
import torch
import torch.nn as nn
from vision.backbone_utils import resent_backbone
from collections import OrderedDict
class PushPredictionNet(nn.Module):
def __init__(self):
super().__init__()
# single object state encoder
self.single_state_encoder = nn.Sequential(
OrderedDict(
[
("single-state-encoder-fc1", nn.Linear(2, 8)),
("single-state-encoder-relu1", nn.ReLU(inplace=True)),
("single-state-encoder-fc2", nn.Linear(8, 16)),
("single-state-encoder-relu2", nn.ReLU(inplace=True)),
]
)
)
# single object image encoder
self.singel_image_encoder = resent_backbone(
"resnet10", pretrained=False, num_classes=64, input_channels=1
)
# Interactive transformation
self.interact = nn.Sequential(
OrderedDict(
[
("interact-fc1", nn.Linear(176, 256)),
("interact-relu1", nn.ReLU(inplace=True)),
("interact-fc2", nn.Linear(256, 256)),
("interact-relu2", nn.ReLU(inplace=True)),
("interact-fc3", nn.Linear(256, 256)),
("interact-relu3", nn.ReLU(inplace=True)),
]
)
)
# Direct transformation
self.dynamics = nn.Sequential(
OrderedDict(
[
("dynamics-fc1", nn.Linear(96, 256)),
("dynamics-relu1", nn.ReLU(inplace=True)),
("dynamics-fc2", nn.Linear(256, 256)),
("dynamics-relu2", nn.ReLU(inplace=True)),
]
)
)
# action encoder
self.action_encoder = nn.Sequential(
OrderedDict(
[
("action_encoder-fc1", nn.Linear(4, 8)),
("action_encoder-relu1", nn.ReLU(inplace=True)),
("action_encoder-fc2", nn.Linear(8, 16)),
("action_encoder-relu2", nn.ReLU(inplace=True)),
]
)
)
# global image encoder
self.image_encoder = resent_backbone(
"resnet10", pretrained=False, num_classes=512, input_channels=2
)
self.decoder = nn.Sequential(
OrderedDict(
[
("decoder-fc00", nn.Linear(768, 256)),
("decoder-relu00", nn.ReLU(inplace=True)),
("decoder-fc0", nn.Linear(256, 64)),
("decoder-relu0", nn.ReLU(inplace=True)),
("decoder-fc1", nn.Linear(64, 16)),
("decoder-relu1", nn.ReLU(inplace=True)),
("decoder-fc3", nn.Linear(16, 3)),
("decoder-relu3", nn.ReLU(inplace=True)),
("decoder-fc4", nn.Linear(3, 3)),
]
)
)
def forward(self, prev_poses, action, image, image_objs, num_objs):
# action
encoded_action = self.action_encoder(action)
# single object
encoded_info = []
for i in range(num_objs):
encoded_state = self.single_state_encoder(prev_poses[:, i * 2 : i * 2 + 2])
encoded_image = self.singel_image_encoder(
image_objs[
:,
i : i + 1,
:,
:,
]
)
encoded_cat = torch.cat((encoded_state, encoded_image), dim=1)
encoded_info.append(encoded_cat)
# the environment
y = self.image_encoder(image)
# interact
z = None
for i in range(num_objs):
dy_input = torch.cat((encoded_action, encoded_info[i]), dim=1)
all_dynamics = self.dynamics(dy_input)
for j in range(1, num_objs):
idx = i + j
if idx >= num_objs:
idx = idx - num_objs
inter_input = torch.cat((dy_input, encoded_info[idx]), dim=1)
other = self.interact(inter_input)
all_dynamics = all_dynamics + other
de_input = torch.cat((y, all_dynamics), dim=1)
output = self.decoder(de_input)
if z is None:
z = output
else:
z = torch.cat((z, output), dim=1)
        return z
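# Shape sketch (illustrative; the batch size, object count, and the 60x60
# object crops are assumptions based on how the evaluation dataset feeds
# this model):
def _push_prediction_shape_demo():
    model = PushPredictionNet()
    batch, num_objs = 2, 3
    prev_poses = torch.rand(batch, num_objs * 2)      # (x, y) per object
    action = torch.rand(batch, 4)                     # push start / end
    image = torch.rand(batch, 2, 224, 224)            # global 2-channel input
    image_objs = torch.rand(batch, num_objs, 60, 60)  # per-object crops
    out = model(prev_poses, action, image, image_objs, num_objs)
    print(out.shape)  # (batch, num_objs * 3): dx, dy, dtheta per object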
# more-main/mcts_utils.py
from dataset import LifelongEvalDataset
import math
import random
import torch
from torchvision.transforms import functional as TF
import numpy as np
import cv2
import imutils
from models import reinforcement_net
from action_utils_mask import get_orientation, adjust_push_start_point
import utils
from constants import (
GRIPPER_PUSH_RADIUS_PIXEL,
GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL,
PIXEL_SIZE,
PUSH_DISTANCE_PIXEL,
TARGET_LOWER,
TARGET_UPPER,
IMAGE_PAD_WIDTH,
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
NUM_ROTATION,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
GRIPPER_GRASP_WIDTH_PIXEL,
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
IMAGE_PAD_WIDTH,
BG_THRESHOLD,
IMAGE_SIZE,
WORKSPACE_LIMITS,
PUSH_DISTANCE,
)
class MCTSHelper:
"""
Simulate the state after push actions.
Evaluation the grasp rewards.
"""
def __init__(self, env, grasp_model_path):
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Initialize Mask R-CNN
# self.mask_model = get_model_instance_segmentation(2)
# self.mask_model.load_state_dict(torch.load(mask_model_path))
# self.mask_model = self.mask_model.to(self.device)
# self.mask_model.eval()
# Initialize Grasp Q Evaluation
self.grasp_model = reinforcement_net()
self.grasp_model.load_state_dict(torch.load(grasp_model_path)["model"], strict=False)
self.grasp_model = self.grasp_model.to(self.device)
self.grasp_model.eval()
self.env = env
self.move_recorder = {}
self.simulation_recorder = {}
def reset(self):
self.move_recorder = {}
self.simulation_recorder = {}
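    # Usage sketch (assumed wiring; the env object and the checkpoint path are
    # placeholders): the two recorder dicts appear to cache per-state moves
    # and simulation results so repeated MCTS expansions can reuse them.
    #
    #   helper = MCTSHelper(env, "path/to/grasp_snapshot.pth")  # placeholder path
    #   helper.reset()  # clear the caches between episodes
    #   ok = helper.check_valid(point, point_on_contour, thresh)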
# @torch.no_grad()
# def from_maskrcnn(self, color_image, plot=False):
# """
# Use Mask R-CNN to do instance segmentation and output masks in binary format.
# """
# image = color_image.copy()
# image = TF.to_tensor(image)
# prediction = self.mask_model([image.to(self.device)])[0]
# mask_objs = []
# if plot:
# pred_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE), dtype=np.uint8)
# for idx, mask in enumerate(prediction["masks"]):
# # NOTE: 0.98 can be tuned
# if prediction["scores"][idx] > 0.98:
# img = mask[0].mul(255).byte().cpu().numpy()
# img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# if np.sum(img == 255) < 100:
# continue
# mask_objs.append(img)
# if plot:
# pred_mask[img > 0] = 255 - idx * 50
# cv2.imwrite(str(idx) + "mask.png", img)
# if plot:
# cv2.imwrite("pred.png", pred_mask)
# print("Mask R-CNN: %d objects detected" % len(mask_objs), prediction["scores"].cpu())
# return mask_objs
# def sample_actions(
# self, object_states, color_image=None, mask_image=None, prev_move=None, plot=False
# ):
# """
# Sample actions around the objects, from the boundary to the center.
# Assume there is no object in "black"
# Output the rotated image, such that the push action is from left to right
# """
# # Retrieve information
# if color_image is None:
# self.env.restore_objects(object_states)
# color_image, _, mask_image = utils.get_true_heightmap(self.env)
# # Process mask into binary format
# masks = []
# for i in self.env.obj_ids["rigid"]:
# mask = np.where(mask_image == i, 255, 0).astype(np.uint8)
# masks.append(mask)
# if len(masks) == 0:
# return [], [], [], [], [], [], [], []
# gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
# gray = gray.astype(np.uint8)
# if plot:
# plot_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
# blurred = cv2.medianBlur(gray, 5)
# thresh = cv2.threshold(blurred, 20, 255, cv2.THRESH_BINARY)[1]
# cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# cnts = imutils.grab_contours(cnts)
# # find the contour of a single object
# points_on_contour = []
# points = []
# # four_idx = []
# other_idx = []
# # priority_points_on_contour = []
# # priority_points = []
# for oi in range(len(masks)):
# obj_cnt = cv2.findContours(masks[oi], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# obj_cnt = imutils.grab_contours(obj_cnt)
# obj_cnt = sorted(obj_cnt, key=lambda x: cv2.contourArea(x))
# if len(obj_cnt) == 0:
# continue
# else:
# obj_cnt = obj_cnt[-1]
# # if too small, then, we skip
# if cv2.contourArea(obj_cnt) < 10:
# continue
# # get center
# M = cv2.moments(obj_cnt)
# cX = round(M["m10"] / M["m00"])
# cY = round(M["m01"] / M["m00"])
# if plot:
# cv2.circle(plot_image, (cX, cY), 3, (255, 255, 255), -1)
# # get pca angle
# # angle = get_orientation(obj_cnt)
# # get contour points
# skip_num = len(obj_cnt) // 12 # 12 possible pushes for an object
# skip_count = 0
# # diff_angle_limit_four = 0.3
# # target_diff_angles = np.array([0, np.pi, np.pi / 2, 3 * np.pi / 2])
# # add the consecutive move
# # if prev_move:
# # prev_angle = math.atan2(
# # prev_move[1][1] - prev_move[0][1], prev_move[1][0] - prev_move[0][0]
# # )
# # pose = (cX - math.cos(prev_angle) * 2, cY - math.sin(prev_angle) * 2)
# # x = pose[0]
# # y = pose[1]
# # diff_x = cX - x
# # diff_y = cY - y
# # diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# # diff_x /= diff_norm
# # diff_y /= diff_norm
# # point_on_contour = (round(x), round(y))
# # diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
# # point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# # diff_mul = adjust_push_start_point(
# # (cX, cY), point_on_contour, obj_cnt, add_distance=0
# # )
# # test_point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# # if is_close(prev_move[1], test_point):
# # if len(priority_points) > 0:
# # prev_dis = close_distance(prev_move[1], priority_points[0])
# # this_dis = close_distance(prev_move[1], test_point)
# # if this_dis < prev_dis:
# # priority_points_on_contour[0] = point_on_contour
# # priority_points[0] = point
# # else:
# # priority_points_on_contour.append(point_on_contour)
# # priority_points.append(point)
# # add four directions to center of object
# # four_poses = [
# # (cX + math.cos(angle) * 2, cY + math.sin(angle) * 2),
# # (cX + math.cos(angle + np.pi / 2) * 2, cY + math.sin(angle + np.pi / 2) * 2),
# # (cX + math.cos(angle - np.pi / 2) * 2, cY + math.sin(angle - np.pi / 2) * 2),
# # (cX - math.cos(angle) * 2, cY - math.sin(angle) * 2),
# # ]
# # for pose in four_poses:
# # x = pose[0]
# # y = pose[1]
# # diff_x = cX - x
# # diff_y = cY - y
# # diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# # diff_x /= diff_norm
# # diff_y /= diff_norm
# # point_on_contour = (round(x), round(y))
# # diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
# # point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# # points_on_contour.append(point_on_contour)
# # points.append(point)
# # four_idx.append(len(points) - 1)
# tested_angles = []
# for pi, p in enumerate(obj_cnt):
# x = p[0][0]
# y = p[0][1]
# if x == cX or y == cY:
# continue
# diff_x = cX - x
# diff_y = cY - y
# test_angle = math.atan2(diff_y, diff_x)
# should_append = False
# # avoid four directions to center of object
# # if (
# # np.min(np.abs(abs(angle - test_angle) - target_diff_angles))
# # < diff_angle_limit_four
# # ):
# # should_append = False
# # skip_count = 0
# if skip_count == skip_num:
# should_append = True
# if should_append:
# diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# diff_x /= diff_norm
# diff_y /= diff_norm
# point_on_contour = (round(x), round(y))
# diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
# point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# points_on_contour.append(point_on_contour)
# points.append(point)
# other_idx.append(len(points) - 1)
# skip_count = 0
# tested_angles.append(test_angle)
# else:
# skip_count += 1
# # random actions, adding priority points at the end
# # random.shuffle(four_idx)
# random.shuffle(other_idx)
# new_points = []
# new_points_on_contour = []
# for idx in other_idx:
# new_points.append(points[idx])
# new_points_on_contour.append(points_on_contour[idx])
# # for idx in four_idx:
# # new_points.append(points[idx])
# # new_points_on_contour.append(points_on_contour[idx])
# # new_points.extend(priority_points)
# # new_points_on_contour.extend(priority_points_on_contour)
# points = new_points
# points_on_contour = new_points_on_contour
# if plot:
# # loop over the contours
# for c in cnts:
# cv2.drawContours(plot_image, [c], -1, (133, 137, 140), 2)
# actions = []
# for pi in range(len(points)):
# # out of boundary
# if (
# points[pi][0] < 5
# or points[pi][0] > IMAGE_SIZE - 5
# or points[pi][1] < 5
# or points[pi][1] > IMAGE_SIZE - 5
# ):
# qualify = False
# # consecutive action
# # elif pi >= len(points) - len(priority_points):
# # qualify = True
# # clearance large
# elif (
# np.sum(
# thresh[
# max(0, points[pi][1] - GRIPPER_PUSH_RADIUS_SAFE_PIXEL) : min(
# IMAGE_SIZE, points[pi][1] + GRIPPER_PUSH_RADIUS_SAFE_PIXEL + 1
# ),
# max(0, points[pi][0] - GRIPPER_PUSH_RADIUS_SAFE_PIXEL) : min(
# IMAGE_SIZE, points[pi][0] + GRIPPER_PUSH_RADIUS_SAFE_PIXEL + 1
# ),
# ]
# > 0
# )
# == 0
# ):
# qualify = True
# # clearance small
# else:
# # compute rotation angle
# down = (0, 1)
# current = (
# points_on_contour[pi][0] - points[pi][0],
# points_on_contour[pi][1] - points[pi][1],
# )
# dot = (
# down[0] * current[0] + down[1] * current[1]
# ) # dot product between [x1, y1] and [x2, y2]
# det = down[0] * current[1] - down[1] * current[0] # determinant
# angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
# angle = math.degrees(angle)
# crop = thresh[
# points[pi][1]
# - GRIPPER_PUSH_RADIUS_SAFE_PIXEL : points[pi][1]
# + GRIPPER_PUSH_RADIUS_SAFE_PIXEL
# + 1,
# points[pi][0]
# - GRIPPER_PUSH_RADIUS_SAFE_PIXEL : points[pi][0]
# + GRIPPER_PUSH_RADIUS_SAFE_PIXEL
# + 1,
# ]
# if crop.shape == (
# GRIPPER_PUSH_RADIUS_SAFE_PIXEL * 2 + 1,
# GRIPPER_PUSH_RADIUS_SAFE_PIXEL * 2 + 1,
# ):
# crop = utils.rotate(crop, angle)
# (h, w) = crop.shape
# crop_cy, crop_cx = (h // 2, w // 2)
# crop = crop[
# crop_cy
# - math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2) : crop_cy
# + math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)
# + 1,
# crop_cx
# - GRIPPER_PUSH_RADIUS_PIXEL : crop_cx
# + GRIPPER_PUSH_RADIUS_PIXEL
# + 1,
# ]
# qualify = np.sum(crop > 0) == 0
# else:
# qualify = False
# if qualify:
# if plot:
# diff_x = points_on_contour[pi][0] - points[pi][0]
# diff_y = points_on_contour[pi][1] - points[pi][1]
# diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# diff_x /= diff_norm
# diff_y /= diff_norm
# point_to = (
# int(points[pi][0] + diff_x * PUSH_DISTANCE / PIXEL_SIZE / 2),
# int(points[pi][1] + diff_y * PUSH_DISTANCE / PIXEL_SIZE / 2),
# )
# if pi < len(other_idx):
# cv2.arrowedLine(
# plot_image, points[pi], point_to, (0, 0, 255), 2, tipLength=0.2,
# )
# # elif pi >= len(points) - len(priority_points):
# # cv2.arrowedLine(
# # plot_image, tuple(points[pi]), point_to, (0, 255, 0), 2, tipLength=0.2,
# # )
# else:
# cv2.arrowedLine(
# plot_image, points[pi], point_to, (255, 0, 0), 2, tipLength=0.2,
# )
# push_start = (points[pi][1], points[pi][0])
# push_vector = np.array(
# [
# points_on_contour[pi][1] - points[pi][1],
# points_on_contour[pi][0] - points[pi][0],
# ]
# )
# unit_push = push_vector / np.linalg.norm(push_vector)
# push_end = (
# round(push_start[0] + unit_push[0] * PUSH_DISTANCE / PIXEL_SIZE),
# round(push_start[1] + unit_push[1] * PUSH_DISTANCE / PIXEL_SIZE),
# )
# actions.append([push_start, push_end])
# if plot:
# cv2.imwrite("test.png", plot_image)
# return actions
def check_valid(self, point, point_on_contour, thresh):
# out of boundary
if not (
GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
< point[0]
< IMAGE_SIZE - GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
) or not (
GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
< point[1]
< IMAGE_SIZE - GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
):
qualify = False
else:
# compute rotation angle
down = (0, 1)
current = (
point_on_contour[0] - point[0],
point_on_contour[1] - point[1],
)
dot = (
down[0] * current[0] + down[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = down[0] * current[1] - down[1] * current[0] # determinant
angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
angle = math.degrees(angle)
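            # e.g. if point=(100, 100) and point_on_contour=(100, 110), then
            # current=(0, 10) points straight "down" in image coordinates, so
            # dot=10, det=0 and angle=0: the crop needs no rotation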
crop = thresh[
point[1]
- GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL : point[1]
+ GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
+ 1,
point[0]
- GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL : point[0]
+ GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
+ 1,
]
# test the rotated crop part
crop = utils.rotate(crop, angle, is_mask=True)
(h, w) = crop.shape
crop_cy, crop_cx = (h // 2, w // 2)
crop = crop[
crop_cy
- math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)
- 1 : crop_cy
+ math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)
+ 2,
crop_cx - GRIPPER_PUSH_RADIUS_PIXEL - 1 : crop_cx + GRIPPER_PUSH_RADIUS_PIXEL + 2,
]
qualify = np.sum(crop > 0) == 0
return qualify
def global_adjust(self, point, point_on_contour, thresh):
for dis in [0.01, 0.02]:
dis = dis / PIXEL_SIZE
diff_x = point_on_contour[0] - point[0]
diff_y = point_on_contour[1] - point[1]
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
test_point = (round(point[0] - diff_x * dis), round(point[1] - diff_y * dis))
qualify = self.check_valid(test_point, point_on_contour, thresh)
if qualify:
return qualify, test_point
return False, None
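    # A minimal usage sketch mirroring sample_actions below: try a candidate
    # push start first, and retreat it with global_adjust on collision:
    #   ok = self.check_valid(point, point_on_contour, thresh)
    #   if not ok:
    #       ok, point = self.global_adjust(point, point_on_contour, thresh)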
def sample_actions(
self, object_states, color_image=None, mask_image=None, env=None, plot=False, masks=None
):
"""
Sample actions around the objects, from the boundary to the center.
Assume there is no object in "black"
Output the rotated image, such that the push action is from left to right
"""
if env is None:
env = self.env
# Retrieve information
if color_image is None:
env.restore_objects(object_states)
color_image, _, mask_image = utils.get_true_heightmap(env)
# Process mask into binary format
if masks is None:
masks = []
for i in env.obj_ids["rigid"]:
mask = np.where(mask_image == i, 255, 0).astype(np.uint8)
masks.append(mask)
if len(masks) == 0:
return None
gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
gray = gray.astype(np.uint8)
if plot:
plot_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
blurred = cv2.medianBlur(gray, 5)
thresh = cv2.threshold(blurred, 20, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
# find the contour of a single object
points_on_contour = []
points = []
four_idx = []
other_idx = []
for oi in range(len(masks)):
obj_cnt = cv2.findContours(masks[oi], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
obj_cnt = imutils.grab_contours(obj_cnt)
obj_cnt = sorted(obj_cnt, key=lambda x: cv2.contourArea(x))
if len(obj_cnt) == 0:
continue
else:
obj_cnt = obj_cnt[-1]
            # skip objects that are too small
if cv2.contourArea(obj_cnt) < 10:
continue
# get center
M = cv2.moments(obj_cnt)
cX = round(M["m10"] / M["m00"])
cY = round(M["m01"] / M["m00"])
if plot:
cv2.circle(plot_image, (cX, cY), 3, (255, 255, 255), -1)
# get pca angle
angle = get_orientation(obj_cnt)
# get contour points
# skip_num = len(obj_cnt) // 12 # 12 possible pushes for an object
# skip_count = 0
diff_angle_limit = 0.75 # around 45 degrees
# target_diff_angles = np.array([0, np.pi, np.pi / 2, 3 * np.pi / 2])
target_diff_angles = []
# add four directions to center of object
four_poses = [
(cX + math.cos(angle) * 2, cY + math.sin(angle) * 2),
(cX + math.cos(angle + np.pi / 2) * 2, cY + math.sin(angle + np.pi / 2) * 2),
(cX + math.cos(angle - np.pi / 2) * 2, cY + math.sin(angle - np.pi / 2) * 2),
(cX - math.cos(angle) * 2, cY - math.sin(angle) * 2),
]
for pose in four_poses:
x = pose[0]
y = pose[1]
diff_x = cX - x
diff_y = cY - y
test_angle = math.atan2(diff_y, diff_x)
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
should_append = self.check_valid(point, point_on_contour, thresh)
if not should_append:
should_append, point = self.global_adjust(point, point_on_contour, thresh)
if should_append:
points_on_contour.append(point_on_contour)
points.append(point)
four_idx.append(len(points) - 1)
target_diff_angles.append(test_angle)
for pi, p in enumerate(obj_cnt):
x = p[0][0]
y = p[0][1]
if x == cX or y == cY:
continue
diff_x = cX - x
diff_y = cY - y
test_angle = math.atan2(diff_y, diff_x)
# avoid similar directions to center of object
if len(target_diff_angles) > 0:
test_target_diff_angles = np.abs(np.array(target_diff_angles) - test_angle)
should_append = (
np.min(test_target_diff_angles) > diff_angle_limit
and np.max(test_target_diff_angles) < math.pi * 2 - diff_angle_limit
)
else:
should_append = True
if should_append:
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
should_append = self.check_valid(point, point_on_contour, thresh)
if not should_append:
should_append, point = self.global_adjust(point, point_on_contour, thresh)
if should_append:
points_on_contour.append(point_on_contour)
points.append(point)
other_idx.append(len(points) - 1)
target_diff_angles.append(test_angle)
# random actions, adding priority points at the end
random.shuffle(four_idx)
random.shuffle(other_idx)
new_points = []
new_points_on_contour = []
for idx in other_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
for idx in four_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
points = new_points
points_on_contour = new_points_on_contour
idx_list = list(range(len(points)))
random.shuffle(idx_list)
new_points = []
new_points_on_contour = []
for idx in idx_list:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
points = new_points
points_on_contour = new_points_on_contour
if plot:
# loop over the contours
for c in cnts:
cv2.drawContours(plot_image, [c], -1, (133, 137, 140), 2)
actions = []
for pi in range(len(points)):
if plot:
diff_x = points_on_contour[pi][0] - points[pi][0]
diff_y = points_on_contour[pi][1] - points[pi][1]
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_to = (
int(points[pi][0] + diff_x * PUSH_DISTANCE / PIXEL_SIZE / 2),
int(points[pi][1] + diff_y * PUSH_DISTANCE / PIXEL_SIZE / 2),
)
if pi < len(other_idx):
cv2.arrowedLine(
plot_image, points[pi], point_to, (0, 0, 255), 2, tipLength=0.2,
)
else:
cv2.arrowedLine(
plot_image, points[pi], point_to, (255, 0, 0), 2, tipLength=0.2,
)
push_start = (points[pi][1], points[pi][0])
push_vector = np.array(
[
points_on_contour[pi][1] - points[pi][1],
points_on_contour[pi][0] - points[pi][0],
]
)
unit_push = push_vector / np.linalg.norm(push_vector)
push_end = (
round(push_start[0] + unit_push[0] * PUSH_DISTANCE / PIXEL_SIZE),
round(push_start[1] + unit_push[1] * PUSH_DISTANCE / PIXEL_SIZE),
)
actions.append([push_start, push_end])
if plot:
cv2.imwrite("test.png", plot_image)
return actions
def simulate(self, push_start, push_end, restore_states=None):
if restore_states is not None:
self.env.restore_objects(restore_states)
push_start = [
push_start[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_start[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
push_end = [
push_end[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_end[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
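        # pixel indices map to world coordinates as world = px * PIXEL_SIZE +
        # WORKSPACE_LIMITS[axis][0]; e.g. with illustrative values
        # PIXEL_SIZE = 0.002 and WORKSPACE_LIMITS[0][0] = -0.5, pixel 112 maps
        # to -0.5 + 112 * 0.002 = -0.276 m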
success = self.env.push(push_start, push_end, verbose=False)
if not success:
return None
self.env.wait_static()
object_states = self.env.save_objects()
# Check if all objects are still in workspace
for obj in object_states:
pos = obj[0]
if (
pos[0] < WORKSPACE_LIMITS[0][0]
or pos[0] > WORKSPACE_LIMITS[0][1]
or pos[1] < WORKSPACE_LIMITS[1][0]
or pos[1] > WORKSPACE_LIMITS[1][1]
):
return None
color_image, depth_image, mask_image = utils.get_true_heightmap(self.env)
return color_image, depth_image, mask_image, object_states
@torch.no_grad()
def get_grasp_q(self, color_heightmap, depth_heightmap, post_checking=False, is_real=False):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
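        # e.g. a raw channel value of 255 scales to 1.0 and then normalizes to
        # (1.0 - COLOR_MEAN[c]) / COLOR_STD[c]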
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(
3, 2, 0, 1
)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(
3, 2, 0, 1
)
# Pass input data through model
output_prob = self.grasp_model(input_color_data, input_depth_data, True, -1, False)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
grasp_predictions = (
output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, :, :,]
)
else:
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, :, :,],
),
axis=0,
)
# post process, only grasp one object, focus on blue object
temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
mask_bg = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask_bg_pad = np.pad(mask_bg, IMAGE_PAD_WIDTH, "constant", constant_values=255)
# focus on blue
for rotate_idx in range(len(grasp_predictions)):
grasp_predictions[rotate_idx][mask_pad != 255] = 0
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = grasp_predictions[0].shape[0] - IMAGE_PAD_WIDTH
# only grasp one object
kernel_big = np.ones(
(GRIPPER_GRASP_SAFE_WIDTH_PIXEL, GRIPPER_GRASP_INNER_DISTANCE_PIXEL), dtype=np.uint8
)
        # due to the color/depth sensor and lighting, objects look a bit smaller in real images
        if is_real:
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 5
threshold_small = (
GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
)
else:
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
threshold_small = (
GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 20
)
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1])
for rotate_idx in range(len(grasp_predictions)):
color_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
color_mask[color_mask == 0] = 1
color_mask[color_mask == 255] = 0
no_target_mask = color_mask
bg_mask = utils.rotate(mask_bg_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
no_target_mask[bg_mask == 255] = 0
# only grasp one object
invalid_mask = cv2.filter2D(no_target_mask, -1, kernel_big)
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
grasp_predictions[rotate_idx][invalid_mask > threshold_small] = (
grasp_predictions[rotate_idx][invalid_mask > threshold_small] / 2
)
grasp_predictions[rotate_idx][invalid_mask > threshold_big] = 0
        # collision checking; only works for a single (non-stacked) level of objects
if post_checking:
mask = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask = 255 - mask
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
check_kernel = np.ones(
(GRIPPER_GRASP_WIDTH_PIXEL, GRIPPER_GRASP_OUTER_DISTANCE_PIXEL), dtype=np.uint8
)
left_bound = math.floor(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
right_bound = (
math.ceil(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL + GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
+ 1
)
check_kernel[:, left_bound:right_bound] = 0
for rotate_idx in range(len(grasp_predictions)):
object_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = cv2.filter2D(object_mask, -1, check_kernel)
invalid_mask[invalid_mask > 5] = 255
invalid_mask = utils.rotate(
invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True
)
grasp_predictions[rotate_idx][invalid_mask > 128] = 0
grasp_predictions = grasp_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
grasp_q_value = grasp_predictions[best_pix_ind]
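        # best_pix_ind is (rotation_index, row, col); the rotation index
        # corresponds to an angle of rotation_index * (360 / NUM_ROTATION) degrees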
return grasp_q_value, best_pix_ind, grasp_predictions
def get_prediction_vis(self, predictions, color_heightmap, best_pix_ind, is_push=False):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap(
(prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET
)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis,
(int(best_pix_ind[2]), int(best_pix_ind[1])),
7,
(0, 0, 255),
2,
)
prediction_vis = utils.rotate(prediction_vis, rotate_idx * (360.0 / num_rotations))
if rotate_idx == best_pix_ind[0]:
center = np.array([[[int(best_pix_ind[2]), int(best_pix_ind[1])]]])
M = cv2.getRotationMatrix2D(
(prediction_vis.shape[1] // 2, prediction_vis.shape[0] // 2,),
rotate_idx * (360.0 / num_rotations),
1,
)
center = cv2.transform(center, M)
center = np.transpose(center[0])
if is_push:
point_from = (int(center[0]), int(center[1]))
point_to = (int(center[0] + PUSH_DISTANCE_PIXEL), int(center[1]))
prediction_vis = cv2.arrowedLine(
prediction_vis, point_from, point_to, (100, 255, 0), 2, tipLength=0.2,
)
else:
prediction_vis = cv2.rectangle(
prediction_vis,
(
max(0, int(center[0]) - GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2),
max(0, int(center[1]) - GRIPPER_GRASP_WIDTH_PIXEL // 2),
),
(
min(
prediction_vis.shape[1],
int(center[0]) + GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2,
),
min(
prediction_vis.shape[0],
int(center[1]) + GRIPPER_GRASP_WIDTH_PIXEL // 2,
),
),
(100, 255, 0),
1,
)
prediction_vis = cv2.rectangle(
prediction_vis,
(
max(0, int(center[0]) - GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2),
max(0, int(center[1]) - GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2),
),
(
min(
prediction_vis.shape[1],
int(center[0]) + GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2,
),
min(
prediction_vis.shape[0],
int(center[1]) + GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2,
),
),
(100, 100, 155),
1,
)
background_image = utils.rotate(
color_heightmap, rotate_idx * (360.0 / num_rotations)
)
prediction_vis = (
0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis
).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
@torch.no_grad()
def _sampled_prediction_precise(env, model, actions, mask_image):
model.pre_train = True
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dataset = LifelongEvalDataset(env, actions, mask_image)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=len(actions), shuffle=False, num_workers=0, drop_last=False
)
rot_angle, input_data = next(iter(data_loader))
input_data = input_data.to(device)
# get output
output = model(input_data)
output = output.cpu().numpy()
rot_angle = rot_angle.numpy()
out_q = []
for idx, out in enumerate(output):
out = utils.rotate(out[0], -rot_angle[idx])
action = actions[idx]
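        # take the best Q value in a 7x7 window centred on the push start
        # pixel; IMAGE_PAD_WIDTH shifts into the padded output's coordinates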
q = np.max(
out[
action[0][0] + IMAGE_PAD_WIDTH - 3 : action[0][0] + IMAGE_PAD_WIDTH + 4,
action[0][1] + IMAGE_PAD_WIDTH - 3 : action[0][1] + IMAGE_PAD_WIDTH + 4,
]
)
out_q.append(q)
return out_q
@torch.no_grad()
def from_maskrcnn(model, color_image, device, plot=False):
"""
Use Mask R-CNN to do instance segmentation and output masks in binary format.
    Assumes real-world input images (720x1280).
"""
image = color_image.copy()
image = TF.to_tensor(image)
prediction = model([image.to(device)])[0]
final_mask = np.zeros((720, 1280), dtype=np.uint8)
labels = {}
if plot:
pred_mask = np.zeros((720, 1280), dtype=np.uint8)
for idx, mask in enumerate(prediction["masks"]):
        # TODO: the score threshold below can be tuned
        threshold = 0.7
if prediction["scores"][idx] > threshold:
# get mask
img = mask[0].mul(255).byte().cpu().numpy()
# img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# too small
if np.sum(img == 255) < 100:
continue
            # skip if over 3/4 of this mask overlaps previously accepted masks
if np.sum(np.logical_and(final_mask > 0, img == 255)) > np.sum(img == 255) * 3 / 4:
continue
fill_pixels = np.logical_and(final_mask == 0, img == 255)
final_mask[fill_pixels] = idx + 1
labels[(idx + 1)] = prediction["labels"][idx].cpu().item()
if plot:
pred_mask[img > 0] = prediction["labels"][idx].cpu().item() * 10
cv2.imwrite(str(idx) + "mask.png", img)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % (len(np.unique(final_mask)) - 1), prediction["scores"].cpu())
return final_mask, labels
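# A minimal, self-contained sketch of the mask-composition rule used in
# from_maskrcnn above; dummy square masks stand in for real Mask R-CNN output:
def _demo_compose_masks():
    final_mask = np.zeros((720, 1280), dtype=np.uint8)
    mask_a = np.zeros((720, 1280), dtype=np.uint8)
    mask_a[100:200, 100:200] = 255
    mask_b = np.zeros((720, 1280), dtype=np.uint8)
    mask_b[150:260, 150:260] = 255
    for idx, img in enumerate([mask_a, mask_b]):
        # skip detections that mostly duplicate an already accepted instance
        if np.sum(np.logical_and(final_mask > 0, img == 255)) > np.sum(img == 255) * 3 / 4:
            continue
        # claim only pixels that are still unassigned
        fill_pixels = np.logical_and(final_mask == 0, img == 255)
        final_mask[fill_pixels] = idx + 1
    return final_mask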
| 42,442 | 42.48668 | 107 | py |
more | more-main/train_foreground.py | import torch
from models import reinforcement_net
from dataset import ForegroundDataset
import argparse
import time
import datetime
import os
from constants import PUSH_Q, GRASP_Q, NUM_ROTATION
from torch.utils.tensorboard import SummaryWriter
import log_utils
import torch_utils
def parse_args():
default_params = {
"lr": 1e-6,
"batch_size": 16,
"t_0": 5, # CosineAnnealing, start 1 6 16 36 76
"t_mult": 2, # CosineAnnealing, period 5 10 20 40
"eta_min": 1e-15, # CosineAnnealing, minimum lr
"epochs": 36, # CosineAnnealing, should end before warm start
"loss_beta": 1,
"num_rotation": NUM_ROTATION,
}
parser = argparse.ArgumentParser(description="Train foreground")
parser.add_argument(
"--lr",
action="store",
type=float,
default=default_params["lr"],
help="Enter the learning rate",
)
parser.add_argument(
"--batch_size",
action="store",
default=default_params["batch_size"],
type=int,
help="Enter the batchsize for training and testing",
)
parser.add_argument(
"--t_0",
action="store",
default=default_params["t_0"],
type=int,
help="The t_0 of CosineAnnealing",
)
parser.add_argument(
"--t_mult",
action="store",
default=default_params["t_mult"],
type=int,
help="The t_mult of CosineAnnealing",
)
parser.add_argument(
"--eta_min",
action="store",
default=default_params["eta_min"],
type=float,
help="The eta_min of CosineAnnealing",
)
parser.add_argument(
"--epochs",
action="store",
default=default_params["epochs"],
type=int,
help="Enter the epoch for training",
)
parser.add_argument(
"--loss_beta",
action="store",
default=default_params["loss_beta"],
type=int,
help="The beta of SmoothL1Loss",
)
parser.add_argument(
"--num_rotation",
action="store",
default=default_params["num_rotation"],
type=int,
help="Number of rotation",
)
parser.add_argument("--dataset_root", action="store", help="Enter the path to the dataset")
parser.add_argument(
"--pretrained_model", action="store", help="The path to the pretrained model"
)
parser.add_argument(
"--test", action="store_true", default=False, help="Testing and visualizing"
)
args = parser.parse_args()
return args
class ForegroundTrainer:
def __init__(self, args):
self.params = {
"lr": args.lr,
"batch_size": args.batch_size,
"t_0": args.t_0, # CosineAnnealing, start 0 4 12 28
"t_mult": args.t_mult, # CosineAnnealing, period 4 8 16
"eta_min": args.eta_min, # CosineAnnealing, minimum lr
"epochs": args.epochs, # CosineAnnealing, should end before warm start
"loss_beta": args.loss_beta,
"num_rotation": args.num_rotation,
}
self.dataset_root = args.dataset_root
self.pretrained_model = args.pretrained_model
self.test = args.test
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if not self.test:
self.log_dir = os.path.join(self.dataset_root, "runs")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
timestamp_value = datetime.datetime.fromtimestamp(time.time())
time_name = timestamp_value.strftime("%Y-%m-%d-%H-%M")
self.log_dir = os.path.join(self.log_dir, time_name)
self.tb_logger = SummaryWriter(self.log_dir)
self.logger = log_utils.setup_logger(self.log_dir, "Foreground")
def main(self):
model = reinforcement_net(True)
model = model.to(self.device)
criterion_push = torch.nn.SmoothL1Loss(beta=self.params["loss_beta"], reduction="none")
criterion_grasp = torch.nn.SmoothL1Loss(beta=self.params["loss_beta"], reduction="none")
optimizer = torch.optim.SGD(
[p for p in model.parameters() if p.requires_grad],
            lr=self.params["lr"],
momentum=0.9,
weight_decay=2e-5,
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer,
T_0=self.params["t_0"],
T_mult=self.params["t_mult"],
eta_min=self.params["eta_min"],
last_epoch=-1,
verbose=False,
)
start_epoch = 0
if self.pretrained_model is not None:
checkpoint = torch.load(self.pretrained_model)
model.load_state_dict(checkpoint["model"], strict=False)
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
start_epoch = checkpoint["epoch"] + 1
# prev_params = checkpoint["params"]
if self.test:
data_loader = self._get_data_loader("test", 1, shuffle=False, test=True)
self._test(model, data_loader)
else:
self.logger.info(f"Hyperparameters: {self.params}")
if self.pretrained_model is not None:
self.logger.info(f"Start from the pretrained model: {self.pretrained_model}")
# self.logger.info(f"Previous Hyperparameters: {prev_params}")
data_loader_train = self._get_data_loader(
"train", self.params["batch_size"], shuffle=True
)
data_loader_test = self._get_data_loader("test", max(1, self.params["batch_size"] // 2))
for epoch in range(start_epoch, self.params["epochs"]):
# warmup start
if epoch == 0:
warmup_factor = 0.001
warmup_iters = min(1000, len(data_loader_train) - 1)
current_lr_scheduler = torch_utils.warmup_lr_scheduler(
optimizer, warmup_iters, warmup_factor
)
else:
current_lr_scheduler = lr_scheduler
train_loss = self._train_one_epoch(
model,
criterion_push,
criterion_grasp,
optimizer,
data_loader_train,
current_lr_scheduler,
epoch,
)
evaluate_loss = self._evaluate(
model, criterion_push, criterion_grasp, data_loader_test
)
save_state = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"params": self.params,
}
torch.save(save_state, os.path.join(self.log_dir, f"foreground_model-{epoch}.pth"))
self.tb_logger.add_scalars(
"Epoch_Loss", {"train": train_loss, "test": evaluate_loss}, epoch
)
self.tb_logger.flush()
self.tb_logger.add_hparams(
self.params, {"hparam/train": train_loss, "hparam/test": evaluate_loss}
)
self.logger.info("Training completed!")
def _train_one_epoch(
self,
model,
criterion_push,
criterion_grasp,
optimizer,
data_loader,
lr_scheduler,
epoch,
print_freq=50,
):
model.train()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", log_utils.SmoothedValue(window_size=1, fmt="{value:.12f}"))
metric_logger.add_meter("loss", log_utils.SmoothedValue())
metric_logger.add_meter("grasp_loss", log_utils.SmoothedValue())
metric_logger.add_meter("push_loss", log_utils.SmoothedValue())
header = "Epoch: [{}]".format(epoch)
losses = []
n_iter = 0
total_iters = len(data_loader)
for (color_images, depth_images, push_targets, grasp_targets) in metric_logger.log_every(
data_loader, print_freq, self.logger, header
):
color_images = color_images.to(self.device, non_blocking=True)
depth_images = depth_images.to(self.device, non_blocking=True)
push_targets = push_targets.to(self.device, non_blocking=True)
grasp_targets = grasp_targets.to(self.device, non_blocking=True)
output = model(color_images, depth_images, use_push=False)
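            # foreground pixels are sparse, so their loss weight is doubled
            # below to keep the background from dominating the objective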
weights_push = torch.ones(push_targets.shape)
weights_grasp = torch.ones(grasp_targets.shape)
weights_push[push_targets > 0] = 2
weights_grasp[grasp_targets > 0] = 2
            loss_push = criterion_push(output[0], push_targets) * weights_push.to(self.device)
            loss_push = loss_push.sum() / push_targets.size(0)
            loss_grasp = criterion_grasp(output[1], grasp_targets) * weights_grasp.to(self.device)
            loss_grasp = loss_grasp.sum() / grasp_targets.size(0)
optimizer.zero_grad()
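            # note: backward is skipped during the warmup epoch, so epoch 0
            # mainly advances the warmup LR schedule rather than the weights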
if epoch != 0:
loss_push.backward()
loss_grasp.backward()
loss = loss_push + loss_grasp
optimizer.step()
# log
log_loss = loss.item()
log_loss_push = loss_push.item()
log_loss_grasp = loss_grasp.item()
log_lr = optimizer.param_groups[0]["lr"]
metric_logger.update(
loss=log_loss, lr=log_lr, grasp_loss=log_loss_grasp, push_loss=log_loss_push
)
self.tb_logger.add_scalar("Step/Loss/Train", log_loss, total_iters * epoch + n_iter)
            self.tb_logger.add_scalar(
                "Step/Loss/Train/Push", log_loss_push, total_iters * epoch + n_iter
            )
            self.tb_logger.add_scalar(
                "Step/Loss/Train/Grasp", log_loss_grasp, total_iters * epoch + n_iter
            )
self.tb_logger.add_scalar("Step/LR", log_lr, total_iters * epoch + n_iter)
losses.append(log_loss)
if epoch == 0:
lr_scheduler.step()
n_iter += 1
if epoch != 0:
lr_scheduler.step(epoch)
return sum(losses) / len(losses)
@torch.no_grad()
def _evaluate(self, model, criterion_push, criterion_grasp, data_loader, print_freq=10):
model.eval()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("loss", log_utils.SmoothedValue(window_size=len(data_loader)))
metric_logger.add_meter("grasp_loss", log_utils.SmoothedValue())
metric_logger.add_meter("push_loss", log_utils.SmoothedValue())
losses = []
header = "Test:"
for (color_images, depth_images, push_targets, grasp_targets) in metric_logger.log_every(
data_loader, print_freq, self.logger, header
):
color_images = color_images.to(self.device, non_blocking=True)
depth_images = depth_images.to(self.device, non_blocking=True)
push_targets = push_targets.to(self.device, non_blocking=True)
grasp_targets = grasp_targets.to(self.device, non_blocking=True)
output = model(color_images, depth_images, use_push=False)
weights_push = torch.ones(push_targets.shape)
weights_grasp = torch.ones(grasp_targets.shape)
weights_push[push_targets > 0] = 2
weights_grasp[grasp_targets > 0] = 2
            loss_push = criterion_push(output[0], push_targets) * weights_push.to(self.device)
            loss_push = loss_push.sum() / push_targets.size(0)
            loss_grasp = criterion_grasp(output[1], grasp_targets) * weights_grasp.to(self.device)
            loss_grasp = loss_grasp.sum() / grasp_targets.size(0)
loss = loss_push + loss_grasp
log_loss = loss.item()
log_loss_push = loss_push.item()
log_loss_grasp = loss_grasp.item()
metric_logger.update(loss=log_loss, grasp_loss=log_loss_grasp, push_loss=log_loss_push)
losses.append(log_loss)
return sum(losses) / len(losses)
def _get_data_loader(self, folder, batch_size, shuffle=False, test=False):
"""Get data loader."""
path = os.path.join(self.dataset_root, folder)
dataset = ForegroundDataset(path, self.params["num_rotation"])
if not test:
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4, drop_last=False
)
else:
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0, drop_last=False
)
return data_loader
@torch.no_grad()
def _test(self, model, data_loader):
import torchvision
import matplotlib.pyplot as plt
from PIL import Image, ImageStat
torch.manual_seed(1)
model.eval()
ite = iter(data_loader)
for _ in range(5):
color_img_pil, depth_img_pil, push_target_img_pil, grasp_target_img_pil = next(ite)
color_img_pil_train = color_img_pil.to(self.device)
depth_img_pil_train = depth_img_pil.to(self.device)
outputs = model(color_img_pil_train, depth_img_pil_train)
push = outputs[0][0].cpu()
grasp = outputs[1][0].cpu()
push *= 1 / PUSH_Q
push[push > 1] = 1
push[push < 0] = 0
grasp *= 1 / GRASP_Q
grasp[grasp > 1] = 1
grasp[grasp < 0] = 0
new_push = push.clone()
new_grasp = grasp.clone()
new_push[new_push > 0.5] = 1
new_push[new_push <= 0.5] = 0
new_grasp[new_grasp > 0.5] = 1
new_grasp[new_grasp <= 0.5] = 0
to_pil = torchvision.transforms.ToPILImage()
img1 = to_pil(color_img_pil[0])
img2 = to_pil(depth_img_pil[0])
img3 = to_pil(push_target_img_pil[0])
img4 = to_pil(grasp_target_img_pil[0])
img5 = to_pil(push)
img6 = to_pil(grasp)
img7 = to_pil(new_push)
img8 = to_pil(new_grasp)
titles = [
"Color",
"Depth",
"Target_push",
"Target_grasp",
"predicted push",
"predicted grasp",
"binary predicted push",
"binary predicted grasp",
]
images = [img1, img2, img3, img4, img5, img6, img7, img8]
for i in range(len(images)):
plt.subplot(2, 4, i + 1), plt.imshow(images[i], "gray")
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
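# `torch_utils.warmup_lr_scheduler` is defined elsewhere in this repo. As a
# reference only, a minimal linear-warmup factory with the same call signature
# (assuming it mirrors the torchvision detection reference) might look like:
def _warmup_lr_scheduler_sketch(optimizer, warmup_iters, warmup_factor):
    def f(x):
        # linearly ramp the LR multiplier from warmup_factor up to 1.0
        if x >= warmup_iters:
            return 1.0
        alpha = float(x) / warmup_iters
        return warmup_factor * (1 - alpha) + alpha
    return torch.optim.lr_scheduler.LambdaLR(optimizer, f)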
if __name__ == "__main__":
args = parse_args()
trainer = ForegroundTrainer(args)
trainer.main()
# def get_data_loader(dataset_root, batch_size):
# # use our dataset and defined transformations
# dataset = ForegroundDataset(dataset_root, 16)
# # dataset_test = ForegroundDataset(dataset_root, 16)
# # split the dataset in train and test set
# indices = torch.randperm(len(dataset)).tolist()
# start_point = 5
# dataset = torch.utils.data.Subset(dataset, indices[start_point:])
# dataset_test = torch.utils.data.Subset(dataset, indices[:start_point])
# # define training and validation data loaders
# data_loader = torch.utils.data.DataLoader(
# dataset, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True
# )
# data_loader_test = torch.utils.data.DataLoader(
# dataset_test, batch_size=batch_size, shuffle=False, num_workers=1
# )
# return data_loader, data_loader_test
# def train_one_epoch(
# model,
# criterion_push,
# criterion_grasp,
# optimizer,
# data_loader,
# device,
# epoch,
# print_freq,
# resume=False,
# ):
# """
# https://github.com/pytorch/vision/blob/master/references/detection/engine.py
# """
# model.train()
# metric_logger = utils.MetricLogger(delimiter=" ")
# metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.8f}"))
# header = "Epoch: [{}]".format(epoch)
# lr_scheduler = None
# if epoch == 0 and not resume:
# warmup_factor = 1.0 / 1000
# warmup_iters = min(1000, len(data_loader) - 1)
# lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
# for color_images, depth_images, push_targets, grasp_targets in metric_logger.log_every(
# data_loader, print_freq, header
# ):
# color_images = color_images.to(device)
# depth_images = depth_images.to(device)
# push_targets = push_targets.to(device)
# grasp_targets = grasp_targets.to(device)
# optimizer.zero_grad()
# output_probs = model(color_images, depth_images)
# weights = torch.ones(grasp_targets.shape)
# # if it doesn't converge, just restart, expecting the loss to below 60. it should below 100 very soon
# weights[grasp_targets > 0] = 2
# loss1 = criterion_push(output_probs[0], push_targets)
# loss1 = loss1.sum() / push_targets.size(0)
# loss1.backward()
# loss2 = criterion_grasp(output_probs[1], grasp_targets) * weights.cuda()
# loss2 = loss2.sum() / grasp_targets.size(0)
# loss2.backward()
# losses = loss1 + loss2
# optimizer.step()
# if lr_scheduler is not None:
# lr_scheduler.step()
# metric_logger.update(loss=losses.cpu())
# metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# return metric_logger
# def main(args):
# data_loader, data_loader_test = get_data_loader(
# args.dataset_root, args.batch_size, args.fine_tuning_num
# )
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# model = reinforcement_net(True) # TODO: remove use_cuda in model, replace with device
# if args.resume:
# # model.load_state_dict(torch.load('data/pre_train/foreground_model.pth'))
# model.load_state_dict(torch.load(os.path.join(args.dataset_root, "foreground_model.pth")))
# criterion_push = torch.nn.SmoothL1Loss(reduction="none")
# criterion_grasp = torch.nn.SmoothL1Loss(reduction="none")
# # criterion_push = torch.nn.BCEWithLogitsLoss()
# # criterion_grasp = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(5))
# # construct an optimizer
# params = [p for p in model.parameters() if p.requires_grad]
# optimizer = torch.optim.SGD(params, lr=args.lr, momentum=0.9, weight_decay=2e-5)
# # optimizer = torch.optim.SGD(params, lr=1e-4, momentum=0.9, weight_decay=2e-5)
# # and a learning rate scheduler which decreases the learning rate by 10x every 1 epochs
# # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.5)
# # for large dataset
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=9, gamma=0.5)
# # for small dataset, expect ~ 50 epochs
# # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
# for epoch in range(args.epochs):
# # train for one epoch, printing every 10 iterations
# train_one_epoch(
# model,
# criterion_push,
# criterion_grasp,
# optimizer,
# data_loader,
# device,
# epoch,
# print_freq=20,
# resume=args.resume,
# )
# # update the learning rate
# lr_scheduler.step()
# # evaluate on the test dataset
# # evaluate(model, criterion, data_loader_test, device=device)
# torch.save(model.state_dict(), os.path.join(args.dataset_root, "foreground_model.pth"))
# @torch.no_grad()
# def test():
# import torchvision
# import matplotlib.pyplot as plt
# from PIL import Image, ImageStat
# torch.manual_seed(2)
# # data_loader, data_loader_test = get_data_loader('data/pre_train/', 1)
# data_loader, data_loader_test = get_data_loader("logs/real-maskrcnn/data", 1)
# # data_loader, data_loader_test = get_data_loader('logs/final-pretrain/data', 1)
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# model = reinforcement_net(True)
# # model.load_state_dict(torch.load('data/pre_train/foreground_model.pth'))
# model.load_state_dict(torch.load("logs/random-pretrain/data/foreground_model.pth"))
# # model.load_state_dict(torch.load('logs/real-maskrcnn/data/foreground_model.pth'))
# # model.load_state_dict(torch.load('logs_push/final/data/foreground_model.pth'))
# model.eval().to(device)
# sig = torch.nn.Sigmoid()
# ite = iter(data_loader)
# for _ in range(6):
# color_img_pil, depth_img_pil, push_target_img_pil, grasp_target_img_pil = next(ite)
# color_img_pil_train = color_img_pil.to(device)
# depth_img_pil_train = depth_img_pil.to(device)
# outputs = model(color_img_pil_train, depth_img_pil_train)
# # push = sig(outputs[0][0]).cpu()
# # grasp = sig(outputs[1][0]).cpu()
# push = outputs[0][0].cpu()
# grasp = outputs[1][0].cpu()
# push *= 1 / PUSH_Q
# push[push > 1] = 1
# push[push < 0] = 0
# grasp *= 1 / GRASP_Q
# grasp[grasp > 1] = 1
# grasp[grasp < 0] = 0
# new_push = push.clone()
# new_grasp = grasp.clone()
# new_push[new_push > 0.5] = 1
# new_push[new_push <= 0.5] = 0
# new_grasp[new_grasp > 0.5] = 1
# new_grasp[new_grasp <= 0.5] = 0
# to_pil = torchvision.transforms.ToPILImage()
# img1 = to_pil(color_img_pil[0])
# img2 = to_pil(depth_img_pil[0])
# img3 = to_pil(push_target_img_pil[0])
# img4 = to_pil(grasp_target_img_pil[0])
# img5 = to_pil(push)
# img6 = to_pil(grasp)
# img7 = to_pil(new_push)
# img8 = to_pil(new_grasp)
# titles = [
# "Color",
# "Depth",
# "Target_push",
# "Target_grasp",
# "predicted push",
# "predicted grasp",
# "binary predicted push",
# "binary predicted grasp",
# ]
# images = [img1, img2, img3, img4, img5, img6, img7, img8]
# for i in range(len(images)):
# plt.subplot(2, 4, i + 1), plt.imshow(images[i], "gray")
# plt.title(titles[i])
# plt.xticks([]), plt.yticks([])
# plt.show()
# # plt.savefig('test_pre.png')
# if __name__ == "__main__":
# parser = argparse.ArgumentParser(description="Train foreground")
# parser.add_argument(
# "--dataset_root", dest="dataset_root", action="store", help="Enter the path to the dataset"
# )
# parser.add_argument(
# "--epochs",
# dest="epochs",
# action="store",
# type=int,
# default=30,
# help="Enter the epoch for training",
# )
# parser.add_argument(
# "--batch_size",
# dest="batch_size",
# action="store",
# type=int,
# default=16,
# help="Enter the batchsize for training and testing",
# )
# parser.add_argument(
# "--test", dest="test", action="store_true", default=False, help="Testing and visualizing"
# )
# parser.add_argument(
# "--lr", dest="lr", action="store", type=float, default=1e-6, help="Enter the learning rate"
# )
# parser.add_argument(
# "--real_fine_tuning", dest="real_fine_tuning", action="store_true", default=False, help=""
# )
# parser.add_argument(
# "--fine_tuning_num",
# dest="fine_tuning_num",
# action="store",
# type=int,
# default=16500,
# help="1500 action, one action contains 11 images",
# )
# parser.add_argument(
# "--resume",
# dest="resume",
# action="store_true",
# default=False,
# help="Enter the path to the dataset",
# )
# args = parser.parse_args()
# if args.resume:
# args.epochs = 10
# else:
# args.fine_tuning_num = None
# if args.test:
# test()
# else:
# main(args) | 24,462 | 35.241481 | 111 | py |
more | more-main/push_predictor.py | import copy
import torch
import gc
import numpy as np
import cv2
from torchvision.transforms import functional as TF
import math
from push_net import PushPredictionNet
from models import reinforcement_net
from train_maskrcnn import get_model_instance_segmentation
from dataset import PushPredictionMultiDatasetEvaluation
from constants import (
DEPTH_MIN,
TARGET_LOWER,
TARGET_UPPER,
IS_REAL,
IMAGE_SIZE,
IMAGE_PAD_WIDTH,
PUSH_DISTANCE,
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
NUM_ROTATION,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
GRIPPER_GRASP_WIDTH_PIXEL,
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
IMAGE_PAD_WIDTH,
IMAGE_PAD_DIFF,
PIXEL_SIZE,
IMAGE_PAD_SIZE,
PUSH_BUFFER,
GRASP_Q_GRASP_THRESHOLD,
BG_THRESHOLD,
COLOR_SPACE
)
from action_utils_mask import sample_actions as sample_actions_util
import imutils
import utils
class PushPredictor:
"""
Predict and generate images after push actions.
Assume the color image and depth image are well matched.
We use the masks to generate new images, so the quality of mask is important.
The input to this forward function should be returned from the sample_actions.
"""
def __init__(self):
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Initialize Mask R-CNN
self.mask_model = get_model_instance_segmentation(2)
self.mask_model.load_state_dict(torch.load("logs_image/maskrcnn.pth"))
self.mask_model = self.mask_model.to(self.device)
self.mask_model.eval()
# Initialize Push Prediction
self.push_model = PushPredictionNet()
self.push_model.load_state_dict(
torch.load("logs_push/push_prediction_model-75.pth")["model"]
)
self.push_model = self.push_model.to(self.device)
self.push_model.eval()
# Initialize Grasp Q Evaluation
self.grasp_model = reinforcement_net()
self.grasp_model.load_state_dict(
torch.load("logs_grasp/snapshot-post-020000.reinforcement.pth")["model"]
)
self.grasp_model = self.grasp_model.to(self.device)
self.grasp_model.eval()
self.move_recorder = {}
self.prediction_recorder = {}
def reset(self):
del self.move_recorder
del self.prediction_recorder
gc.collect()
self.move_recorder = {}
self.prediction_recorder = {}
@torch.no_grad()
def from_maskrcnn(self, color_image, depth_image, plot=False):
"""
Use Mask R-CNN to do instance segmentation and output masks in binary format.
"""
image = color_image.copy()
image = TF.to_tensor(image)
prediction = self.mask_model([image.to(self.device)])[0]
mask_objs = []
centers = []
blue_idx = -1
if plot:
pred_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE), dtype=np.uint8)
for idx, mask in enumerate(prediction["masks"]):
            # TODO: the score threshold below can be tuned
if IS_REAL:
threshold = 0.97
else:
threshold = 0.98
if prediction["scores"][idx] > threshold:
# get mask
img = mask[0].mul(255).byte().cpu().numpy()
img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
if np.sum(img == 255) < 100:
continue
# get center
obj_cnt = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
obj_cnt = imutils.grab_contours(obj_cnt)
                # Mask R-CNN can output fragmented masks; keep the largest contour
                obj_cnt = sorted(obj_cnt, key=lambda x: cv2.contourArea(x))[-1]
M = cv2.moments(obj_cnt)
cX = round(M["m10"] / M["m00"])
cY = round(M["m01"] / M["m00"])
# get color and depth masks
color_mask = cv2.bitwise_and(color_image, color_image, mask=img)
temp = cv2.cvtColor(color_mask, cv2.COLOR_RGB2HSV)
temp = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
if np.sum(temp == 255) >= 100:
blue_idx = idx
depth_mask = cv2.bitwise_and(depth_image, depth_image, mask=img)
# get cropped masks
color_mask = np.pad(
color_mask,
(
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
(0, 0),
),
"constant",
constant_values=0,
)
color_mask = color_mask[
cY + IMAGE_PAD_WIDTH - 30 : cY + IMAGE_PAD_WIDTH + 30,
cX + IMAGE_PAD_WIDTH - 30 : cX + IMAGE_PAD_WIDTH + 30,
:,
]
depth_mask = np.pad(
depth_mask,
(
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
),
"constant",
constant_values=0,
)
depth_mask = depth_mask[
cY + IMAGE_PAD_WIDTH - 30 : cY + IMAGE_PAD_WIDTH + 30,
cX + IMAGE_PAD_WIDTH - 30 : cX + IMAGE_PAD_WIDTH + 30,
]
final_mask = (color_mask, depth_mask)
mask_objs.append(final_mask)
centers.append([cY + IMAGE_PAD_WIDTH, cX + IMAGE_PAD_WIDTH])
if plot:
pred_mask[img > 0] = 255 - idx * 20
cv2.imwrite(str(idx) + "mask.png", img)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % len(mask_objs), prediction["scores"].cpu())
        if blue_idx != -1 and blue_idx != 0:
            # move the blue (target) object's mask and center to the front
            mask_objs[0], mask_objs[blue_idx] = mask_objs[blue_idx], mask_objs[0]
            centers[0], centers[blue_idx] = centers[blue_idx], centers[0]
return mask_objs, centers
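    # A minimal usage sketch (hypothetical variable names): segment the scene
    # first, then sample candidate pushes with the method below:
    #   predictor = PushPredictor()
    #   mask_objs, centers = predictor.from_maskrcnn(color_image, depth_image)
    #   actions = predictor.sample_actions(color_image, depth_image, mask_objs)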
def sample_actions(
self, color_image, depth_image, mask_objs, plot=False, start_pose=None, prev_move=None
):
"""
Sample actions around the objects, from the boundary to the center.
Assume there is no object in "black"
Output the rotated image, such that the push action is from left to right
"""
return sample_actions_util(
color_image,
depth_image,
mask_objs,
plot,
start_pose,
from_color=True,
prev_move=prev_move,
)
    # only rotated_color_image and rotated_depth_image are padded to 320x320
@torch.no_grad()
def predict(
self,
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
rotated_mask_objs,
plot=False,
):
# get data
dataset = PushPredictionMultiDatasetEvaluation(
rotated_depth_image, rotated_action, rotated_center, rotated_binary_objs
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=len(rotated_depth_image), shuffle=False, num_workers=0
)
(
prev_poses,
action,
action_start_ori,
action_end_ori,
used_binary_img,
binary_objs_total,
num_obj,
) = next(iter(data_loader))
prev_poses = prev_poses.to(self.device, non_blocking=True)
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
# get output
output = self.push_model(prev_poses, action, used_binary_img, binary_objs_total, num_obj[0])
output = output.cpu().numpy()
# generate new images
prev_poses_input = prev_poses.cpu().numpy().astype(int)
prev_poses = copy.deepcopy(prev_poses_input)
action_start_ori = action_start_ori.numpy().astype(int)
action_end_ori = action_end_ori.numpy().astype(int)
action_start_ori_tile = np.tile(action_start_ori, num_obj[0])
action_start = action[:, :2].cpu().numpy().astype(int)
action_start_tile = np.tile(action_start, num_obj[0])
generated_color_images = []
generated_depth_images = []
generated_obj_masks = []
validations = []
for i in range(len(rotated_depth_image)):
i_output = output[i]
i_prev_poses = prev_poses[i]
i_action_start_ori_tile = action_start_ori_tile[i]
i_action_start_tile = action_start_tile[i]
i_prev_poses += i_action_start_ori_tile
i_prev_poses -= i_action_start_tile
i_rotated_angle = rotated_angle[i]
i_rotated_mask_objs, i_rotated_mask_obj_centers = rotated_mask_objs[i]
color_image = rotated_color_image[i]
depth_image = rotated_depth_image[i]
# transform points and fill them into a black image
generated_color_image = np.zeros_like(color_image)
generated_depth_image = np.zeros_like(depth_image)
obj_masks = []
obj_mask_centers = []
temp_obj_masks = []
temp_obj_mask_centers = []
# for each object
valid = True
for pi in range(num_obj[i]):
# if the object is out of the boundary, then, we can skip this action
center = i_rotated_mask_obj_centers[pi]
center = np.array([[center]])
M = cv2.getRotationMatrix2D(
(
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
),
-i_output[pi * 3 + 2],
1,
)
ori_M = M.copy()
M[0, 2] += i_output[pi * 3]
M[1, 2] += i_output[pi * 3 + 1]
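                # M now rotates the object mask about its pre-push center by
                # the predicted rotation and shifts it by the predicted
                # planar displacement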
new_center = cv2.transform(center, M)
new_center = np.transpose(new_center[0])
temp_obj_mask_centers.append(new_center)
ori_center = cv2.transform(center, ori_M)
ori_center = np.transpose(ori_center[0])
M = cv2.getRotationMatrix2D(
(IMAGE_PAD_SIZE // 2, IMAGE_PAD_SIZE // 2),
i_rotated_angle,
1,
)
new_center = [new_center[0][0], new_center[1][0]]
new_center = np.array([[new_center]])
new_center = cv2.transform(new_center, M)[0][0]
obj_mask_centers.append(new_center)
ori_center = [ori_center[0][0], ori_center[1][0]]
ori_center = np.array([[ori_center]])
ori_center = cv2.transform(ori_center, M)[0][0]
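                # invalidate the push if it would drive an object into the
                # border buffer zone while the object currently sits safely
                # inside the workspace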
if (
new_center[1] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
if not (
ori_center[1] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
valid = False
break
validations.append(valid)
if valid:
for pi in range(num_obj[i]):
# if the object is out of the boundary, then, we can skip this action
# if (
# i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1] < PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3] < PUSH_BUFFER / PIXEL_SIZE
# ):
# valid = False
# break
# find out transformation
# mask
mask_color = i_rotated_mask_objs[pi][0]
mask_depth = i_rotated_mask_objs[pi][1]
rotated_color = utils.rotate(mask_color, i_output[pi * 3 + 2])
rotated_depth = utils.rotate(mask_depth, i_output[pi * 3 + 2])
temp_obj_masks.append((rotated_color, rotated_depth))
# center
# center = i_rotated_mask_obj_centers[pi]
# center = np.array([[center]])
# M = cv2.getRotationMatrix2D(
# (
# i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
# i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
# ),
# -i_output[pi * 3 + 2],
# 1,
# )
# M[0, 2] += i_output[pi * 3]
# M[1, 2] += i_output[pi * 3 + 1]
# new_center = cv2.transform(center, M)
# new_center = np.transpose(new_center[0])
# temp_obj_mask_centers.append(new_center)
# validations.append(valid)
# if valid:
for pi in range(num_obj[i]):
mask = temp_obj_masks[pi]
new_center = temp_obj_mask_centers[pi]
color = mask[0]
fill_color = np.nonzero(np.sum(color, axis=2))
fill_color_shift = (
np.clip(fill_color[0] + new_center[0] - 30, 0, IMAGE_PAD_SIZE - 1),
np.clip(fill_color[1] + new_center[1] - 30, 0, IMAGE_PAD_SIZE - 1)
)
generated_color_image[fill_color_shift] = color[fill_color]
depth = mask[1]
fill_depth = np.nonzero(depth)
fill_depth_shift = (
np.clip(fill_depth[0] + new_center[0] - 30, 0, IMAGE_PAD_SIZE - 1),
np.clip(fill_depth[1] + new_center[1] - 30, 0, IMAGE_PAD_SIZE - 1)
)
generated_depth_image[fill_depth_shift] = depth[fill_depth]
generated_obj_mask_color = utils.rotate(color, -i_rotated_angle)
generated_obj_mask_depth = utils.rotate(depth, -i_rotated_angle)
obj_masks.append((generated_obj_mask_color, generated_obj_mask_depth))
# M = cv2.getRotationMatrix2D(
# (IMAGE_PAD_SIZE // 2, IMAGE_PAD_SIZE // 2),
# i_rotated_angle,
# 1,
# )
# new_center = [new_center[0][0], new_center[1][0]]
# new_center = np.array([[new_center]])
# new_center = cv2.transform(new_center, M)[0][0]
# obj_mask_centers.append(new_center)
if plot:
cv2.circle(
generated_color_image,
(
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
),
3,
(255, 255, 255),
-1,
)
cv2.circle(
generated_color_image,
(
round(i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1]) + IMAGE_PAD_WIDTH,
round(i_prev_poses[pi * 2] + i_output[pi * 3]) + IMAGE_PAD_WIDTH,
),
3,
(128, 255, 0),
-1,
)
if plot:
cv2.arrowedLine(
generated_color_image,
(
action_start_ori[i][1] + IMAGE_PAD_WIDTH,
action_start_ori[i][0] + IMAGE_PAD_WIDTH,
),
(
action_end_ori[i][1] + IMAGE_PAD_WIDTH,
action_end_ori[i][0] + IMAGE_PAD_WIDTH,
),
(255, 0, 255),
2,
tipLength=0.4,
)
generated_color_image = utils.rotate(generated_color_image, -i_rotated_angle)
generated_depth_image = utils.rotate(generated_depth_image, -i_rotated_angle)
generated_color_image = generated_color_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, :
]
generated_depth_image = generated_depth_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
]
generated_depth_image = generated_depth_image.astype(np.float32)
generated_color_images.append(generated_color_image)
generated_depth_images.append(generated_depth_image)
generated_obj_masks.append((obj_masks, obj_mask_centers))
return generated_color_images, generated_depth_images, generated_obj_masks, validations
@torch.no_grad()
def get_grasp_q(self, color_heightmap, depth_heightmap, post_checking=False):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# use light color
if IS_REAL:
temp = cv2.cvtColor(color_heightmap_pad, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
color_heightmap_pad[mask == 255] = [118, 183, 178] # cyan
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(
3, 2, 0, 1
)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(
3, 2, 0, 1
)
# Pass input data through model
output_prob = self.grasp_model(input_color_data, input_depth_data, True, -1, False)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
grasp_predictions = (
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
:,
:,
]
)
else:
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
:,
:,
],
),
axis=0,
)
# post process, only grasp one object, focus on blue object
temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
mask_bg = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask_bg_pad = np.pad(mask_bg, IMAGE_PAD_WIDTH, "constant", constant_values=255)
# focus on blue
for rotate_idx in range(len(grasp_predictions)):
grasp_predictions[rotate_idx][mask_pad != 255] = 0
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = grasp_predictions[0].shape[0] - IMAGE_PAD_WIDTH
# only grasp one object
kernel_big = np.ones(
(GRIPPER_GRASP_SAFE_WIDTH_PIXEL, GRIPPER_GRASP_INNER_DISTANCE_PIXEL), dtype=np.uint8
)
        if IS_REAL:  # due to the color/depth sensor and lighting, objects look a bit smaller
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 7
threshold_small = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 15
else:
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
threshold_small = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 20
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1])
for rotate_idx in range(len(grasp_predictions)):
color_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
color_mask[color_mask == 0] = 1
color_mask[color_mask == 255] = 0
no_target_mask = color_mask
bg_mask = utils.rotate(mask_bg_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
no_target_mask[bg_mask == 255] = 0
# only grasp one object
invalid_mask = cv2.filter2D(no_target_mask, -1, kernel_big)
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
grasp_predictions[rotate_idx][invalid_mask > threshold_small] = (
grasp_predictions[rotate_idx][invalid_mask > threshold_small] / 2
)
grasp_predictions[rotate_idx][invalid_mask > threshold_big] = 0
        # collision checking (only valid when objects lie in a single layer)
if post_checking:
mask = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask = 255 - mask
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
check_kernel = np.ones(
(GRIPPER_GRASP_WIDTH_PIXEL, GRIPPER_GRASP_OUTER_DISTANCE_PIXEL), dtype=np.uint8
)
left_bound = math.floor(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
right_bound = (
math.ceil(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL + GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
+ 1
)
check_kernel[:, left_bound:right_bound] = 0
for rotate_idx in range(len(grasp_predictions)):
object_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = cv2.filter2D(object_mask, -1, check_kernel)
invalid_mask[invalid_mask > 5] = 255
invalid_mask = utils.rotate(
invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True
)
grasp_predictions[rotate_idx][invalid_mask > 128] = 0
grasp_predictions = grasp_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
grasp_q_value = grasp_predictions[best_pix_ind]
return grasp_q_value, best_pix_ind, grasp_predictions
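    # Usage sketch (variable names illustrative; mirrors the loop in mcts_main.py):
    #
    #     color, depth, _ = utils.get_true_heightmap(env)
    #     q, (rot, row, col), maps = helper.get_grasp_q(color, depth, post_checking=True)
    #
    # a grasp is only executed when q exceeds GRASP_Q_GRASP_THRESHOLD.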
def get_prediction_vis(self, predictions, color_heightmap, best_pix_ind):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap(
(prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET
)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis,
(int(best_pix_ind[2]), int(best_pix_ind[1])),
7,
(0, 0, 255),
2,
)
prediction_vis = utils.rotate(prediction_vis, rotate_idx * (360.0 / num_rotations))
if rotate_idx == best_pix_ind[0]:
center = np.array([[[int(best_pix_ind[2]), int(best_pix_ind[1])]]])
M = cv2.getRotationMatrix2D(
(
prediction_vis.shape[1] // 2,
prediction_vis.shape[0] // 2,
),
rotate_idx * (360.0 / num_rotations),
1,
)
center = cv2.transform(center, M)
center = np.transpose(center[0])
prediction_vis = cv2.rectangle(
prediction_vis,
(max(0, int(center[0]) - GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2), max(0, int(center[1]) - GRIPPER_GRASP_WIDTH_PIXEL // 2)),
(min(prediction_vis.shape[1], int(center[0]) + GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2), min(prediction_vis.shape[0], int(center[1]) + GRIPPER_GRASP_WIDTH_PIXEL // 2)),
(100, 255, 0),
1
)
prediction_vis = cv2.rectangle(
prediction_vis,
(max(0, int(center[0]) - GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2), max(0, int(center[1]) - GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2)),
(min(prediction_vis.shape[1], int(center[0]) + GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2), min(prediction_vis.shape[0], int(center[1]) + GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2)),
(100, 100, 155),
1,
)
background_image = utils.rotate(
color_heightmap, rotate_idx * (360.0 / num_rotations)
)
prediction_vis = (
0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis
).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
| 29,449 | 43.961832 | 198 | py |
more | more-main/trainer.py | import os
import numpy as np
import math
import cv2
import torch
from torch.autograd import Variable
from models import reinforcement_net
from scipy import ndimage
from constants import (
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
DEPTH_MIN,
IMAGE_PAD_WIDTH,
NUM_ROTATION,
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
TARGET_LOWER,
TARGET_UPPER,
BG_THRESHOLD,
GRIPPER_GRASP_WIDTH_PIXEL,
IMAGE_PAD_SIZE,
IMAGE_PAD_DIFF,
IMAGE_SIZE,
)
import utils
class Trainer(object):
def __init__(
self,
method,
push_rewards,
future_reward_discount,
is_testing,
load_snapshot,
snapshot_file,
force_cpu,
is_baseline=False,
):
self.method = method
self.is_baseline = is_baseline
# Check if CUDA can be used
if torch.cuda.is_available() and not force_cpu:
print("CUDA detected. Running with GPU acceleration.")
self.use_cuda = True
elif force_cpu:
print("CUDA detected, but overriding with option '--cpu'. Running with only CPU.")
self.use_cuda = False
else:
print("CUDA is *NOT* detected. Running with only CPU.")
self.use_cuda = False
# Fully convolutional classification network for supervised learning
if self.method == "reinforcement":
self.model = reinforcement_net()
self.push_rewards = push_rewards
self.future_reward_discount = future_reward_discount
# Initialize Huber loss
self.push_criterion = torch.nn.SmoothL1Loss(reduction="none") # Huber loss
self.grasp_criterion = torch.nn.SmoothL1Loss(reduction="none") # Huber loss
if self.use_cuda:
self.push_criterion = self.push_criterion.cuda()
self.grasp_criterion = self.grasp_criterion.cuda()
# Load pre-trained model
if load_snapshot:
states = torch.load(snapshot_file)
if "model" in states:
self.model.load_state_dict(states["model"])
else:
self.model.load_state_dict(states)
print("Pre-trained model snapshot loaded from: %s" % (snapshot_file))
# Convert model from CPU to GPU
if self.use_cuda:
self.model = self.model.cuda()
# Set model to training mode
self.model.train()
# Initialize optimizer
self.iteration = 0
if is_testing:
if is_baseline:
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=1e-5, momentum=0.9, weight_decay=2e-5
)
else:
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=1e-5, momentum=0.9, weight_decay=2e-5
)
else:
if is_baseline:
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=1e-5, momentum=0.9, weight_decay=2e-5
)
else:
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=5e-5, momentum=0.9, weight_decay=2e-5
)
if is_baseline:
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer, step_size=5000, gamma=0.5
)
else:
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer, step_size=2000, gamma=0.5
)
# Initialize lists to save execution info and RL variables
self.executed_action_log = []
self.label_value_log = []
self.reward_value_log = []
self.predicted_value_log = []
self.use_heuristic_log = []
self.is_exploit_log = []
self.clearance_log = []
self.loss_log = []
if is_testing:
self.batch_size = 2
else:
self.batch_size = 12
self.loss_list = []
# Pre-load execution info and RL variables
def preload(self, transitions_directory):
self.executed_action_log = np.loadtxt(
os.path.join(transitions_directory, "executed-action.log.txt"), delimiter=" "
)
self.iteration = self.executed_action_log.shape[0] - 2
self.executed_action_log = self.executed_action_log[0 : self.iteration, :]
self.executed_action_log = self.executed_action_log.tolist()
self.label_value_log = np.loadtxt(
os.path.join(transitions_directory, "label-value.log.txt"), delimiter=" "
)
self.label_value_log = self.label_value_log[0 : self.iteration]
self.label_value_log.shape = (self.iteration, 1)
self.label_value_log = self.label_value_log.tolist()
self.predicted_value_log = np.loadtxt(
os.path.join(transitions_directory, "predicted-value.log.txt"), delimiter=" "
)
self.predicted_value_log = self.predicted_value_log[0 : self.iteration]
self.predicted_value_log.shape = (self.iteration, 1)
self.predicted_value_log = self.predicted_value_log.tolist()
self.reward_value_log = np.loadtxt(
os.path.join(transitions_directory, "reward-value.log.txt"), delimiter=" "
)
self.reward_value_log = self.reward_value_log[0 : self.iteration]
self.reward_value_log.shape = (self.iteration, 1)
self.reward_value_log = self.reward_value_log.tolist()
self.use_heuristic_log = np.loadtxt(
os.path.join(transitions_directory, "use-heuristic.log.txt"), delimiter=" "
)
self.use_heuristic_log = self.use_heuristic_log[0 : self.iteration]
self.use_heuristic_log.shape = (self.iteration, 1)
self.use_heuristic_log = self.use_heuristic_log.tolist()
self.is_exploit_log = np.loadtxt(
os.path.join(transitions_directory, "is-exploit.log.txt"), delimiter=" "
)
self.is_exploit_log = self.is_exploit_log[0 : self.iteration]
self.is_exploit_log.shape = (self.iteration, 1)
self.is_exploit_log = self.is_exploit_log.tolist()
self.clearance_log = np.loadtxt(
os.path.join(transitions_directory, "clearance.log.txt"), delimiter=" "
)
self.clearance_log.shape = (self.clearance_log.shape[0], 1)
self.clearance_log = self.clearance_log.tolist()
# Compute forward pass through model to compute affordances/Q
def forward(
self,
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=-1,
use_push=True,
):
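        """Forward pass through the reinforcement model.

        Returns (push_predictions, grasp_predictions) as numpy arrays with one
        (H, W) map per evaluated rotation, rotation padding cropped off;
        push_predictions is 0 when use_push is False.
        """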
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(
3, 2, 0, 1
)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(
3, 2, 0, 1
)
# Pass input data through model
output_prob = self.model(
input_color_data, input_depth_data, is_volatile, specific_rotation, use_push
)
if self.method == "reinforcement":
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
if not use_push:
push_predictions = 0
grasp_predictions = (
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
]
)
else:
push_predictions = (
output_prob[rotate_idx][0]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
]
)
grasp_predictions = (
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
]
)
else:
if not use_push:
push_predictions = 0
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
],
),
axis=0,
)
else:
push_predictions = np.concatenate(
(
push_predictions,
output_prob[rotate_idx][0]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
],
),
axis=0,
)
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
],
),
axis=0,
)
return push_predictions, grasp_predictions
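    # Shape sketch (variable names illustrative): the network consumes a padded
    # (1, C, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE) batch and each returned map is
    # cropped back to the unpadded heightmap size, e.g.
    #
    #     push_q, grasp_q = trainer.forward(color, depth, is_volatile=True)
    #     best = np.unravel_index(np.argmax(grasp_q), grasp_q.shape)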
def focus_on_target(
self, color_heightmap, depth_heightmap, grasp_predictions, target_lower, target_upper
):
"""Should match push_predictor"""
# focus on target object
temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
mask_bg = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask_bg_pad = np.pad(mask_bg, IMAGE_PAD_WIDTH, "constant", constant_values=255)
# focus on blue
for rotate_idx in range(len(grasp_predictions)):
grasp_predictions[rotate_idx][mask != 255] = 0
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = grasp_predictions[0].shape[0] + IMAGE_PAD_WIDTH
# only grasp one object
kernel_big = np.ones(
(GRIPPER_GRASP_SAFE_WIDTH_PIXEL, GRIPPER_GRASP_INNER_DISTANCE_PIXEL), dtype=np.float32
)
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
threshold_small = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 20
depth_heightmap_pad = np.pad(
depth_heightmap, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
for rotate_idx in range(len(grasp_predictions)):
color_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
color_mask[color_mask == 0] = 1
color_mask[color_mask == 255] = 0
no_target_mask = color_mask
bg_mask = utils.rotate(mask_bg_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
no_target_mask[bg_mask == 255] = 0
# only grasp one object
invalid_mask = cv2.filter2D(no_target_mask, -1, kernel_big)
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = invalid_mask[
padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
grasp_predictions[rotate_idx][invalid_mask > threshold_small] = (
grasp_predictions[rotate_idx][invalid_mask > threshold_small] / 2
)
grasp_predictions[rotate_idx][invalid_mask > threshold_big] = 0
# collision checking
mask = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask = 255 - mask
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
check_kernel = np.ones(
(GRIPPER_GRASP_WIDTH_PIXEL, GRIPPER_GRASP_OUTER_DISTANCE_PIXEL), dtype=np.uint8
)
left_bound = math.ceil(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
right_bound = (
math.ceil((GRIPPER_GRASP_OUTER_DISTANCE_PIXEL + GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2)
+ 1
)
check_kernel[:, left_bound:right_bound] = 0
for rotate_idx in range(len(grasp_predictions)):
object_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = cv2.filter2D(object_mask, -1, check_kernel)
invalid_mask[invalid_mask > 5] = 255
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = invalid_mask[
padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
grasp_predictions[rotate_idx][invalid_mask > 128] = 0
return grasp_predictions
def get_label_value(
self,
primitive_action,
push_success,
grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
next_color_heightmap,
next_depth_heightmap,
prev_depth_heightmap,
use_push=True,
):
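        """Compute the training label for the executed action.

        A successful grasp earns reward 1. A push earns reward only if it
        raises the best grasp Q by more than 10% or spreads objects out
        (delta_area > 300); pushes that pack objects together
        (delta_area < -100) are zeroed. Returns (expected_reward, current_reward).
        """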
if self.method == "reinforcement":
# Compute current reward
current_reward = 0
if primitive_action == "push":
if change_detected:
current_reward = 0.0
elif primitive_action == "grasp":
if grasp_success:
current_reward = 1.0
# Compute future reward
if not change_detected and not grasp_success:
future_reward = 0
else:
future_reward = 0 # no future reward
if primitive_action == "push":
_, next_grasp_predictions = self.forward(
next_color_heightmap,
next_depth_heightmap,
is_volatile=True,
use_push=use_push,
)
if np.max(next_grasp_predictions) > np.max(prev_grasp_predictions) * 1.1:
current_reward = (
np.max(next_grasp_predictions) + np.max(prev_grasp_predictions)
) / 2
else:
future_reward = 0
print(
"Prediction:",
np.max(prev_grasp_predictions),
np.max(next_grasp_predictions),
)
delta_area = self.push_change_area(prev_depth_heightmap, next_depth_heightmap)
if delta_area > 300: # 300 can be changed
if current_reward < 0.8:
current_reward = 0.8
elif delta_area < -100: # -100 can be changed
current_reward = 0
future_reward = 0
print("Current reward: %f" % (current_reward))
print("Future reward: %f" % (future_reward))
if primitive_action == "push" and not self.push_rewards:
expected_reward = self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (0.0, self.future_reward_discount, future_reward, expected_reward)
)
else:
expected_reward = current_reward + self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (current_reward, self.future_reward_discount, future_reward, expected_reward)
)
return expected_reward, current_reward
def get_label_value_base(
self,
primitive_action,
push_success,
grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
next_color_heightmap,
next_depth_heightmap,
use_push=True,
target=None,
prev_color_img=None,
prev_depth_img=None,
):
"""As baseline label value function"""
if self.method == "reinforcement":
# Compute current reward
current_reward = 0
if primitive_action == "push":
if change_detected:
current_reward = 0.0
elif primitive_action == "grasp":
if grasp_success:
crop = prev_color_img[
max(0, target[0] - 2) : min(target[0] + 3, IMAGE_SIZE),
max(0, target[1] - 2) : min(target[1] + 3, IMAGE_SIZE),
]
crop = cv2.cvtColor(crop, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(crop, TARGET_LOWER, TARGET_UPPER)
if np.sum(mask) > 0:
current_reward = 20
else:
current_reward = 0
# Compute future reward
if not change_detected and not grasp_success:
future_reward = 0
else:
next_push_predictions, next_grasp_predictions = self.forward(
next_color_heightmap, next_depth_heightmap, is_volatile=True, use_push=use_push
)
target_next_grasp_predictions = self.focus_on_target(
next_color_heightmap,
next_depth_heightmap,
next_grasp_predictions,
TARGET_LOWER,
TARGET_UPPER,
)
target_prev_grasp_predictions = self.focus_on_target(
prev_color_img,
prev_depth_img,
prev_grasp_predictions,
TARGET_LOWER,
TARGET_UPPER,
)
future_reward = max(np.max(next_push_predictions), np.max(next_grasp_predictions))
if primitive_action == "push":
if (
np.max(target_next_grasp_predictions)
> np.max(target_prev_grasp_predictions) * 1.1
):
current_reward = 1
print(
"Prediction:",
np.max(prev_grasp_predictions),
np.max(next_grasp_predictions),
)
delta_area = self.push_change_area(prev_depth_img, next_depth_heightmap)
if delta_area > 300: # 300 can be changed
if current_reward < 1:
current_reward = 0.5
elif delta_area < -100: # -100 can be changed
current_reward = 0
future_reward = 0
print("Current reward: %f" % (current_reward))
print("Future reward: %f" % (future_reward))
if primitive_action == "push" and not self.push_rewards:
expected_reward = self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (0.0, self.future_reward_discount, future_reward, expected_reward)
)
else:
expected_reward = current_reward + self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (current_reward, self.future_reward_discount, future_reward, expected_reward)
)
return expected_reward, current_reward
def get_neg(self, depth_heightmap, label, best_pix_ind):
"""Should match train_foreground"""
depth_heightmap_pad = np.copy(depth_heightmap)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
depth_heightmap_pad = utils.rotate(
depth_heightmap_pad, best_pix_ind * (360.0 / NUM_ROTATION)
)
label = ndimage.rotate(
label, best_pix_ind * (360.0 / NUM_ROTATION), axes=(2, 1), reshape=False
)
label = np.round(label)
x_y_idx = np.argwhere(label > 0)
for idx in x_y_idx:
_, x, y = tuple(idx)
left_area = depth_heightmap_pad[
max(0, x - math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2)) : min(
depth_heightmap_pad.shape[0],
x + math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2) + 1,
),
max(0, y - math.ceil(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL / 2)) : max(
0, y - math.ceil(GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 2) + 1
),
]
right_area = depth_heightmap_pad[
max(0, x - math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2)) : min(
depth_heightmap_pad.shape[0],
x + math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2) + 1,
),
min(
depth_heightmap_pad.shape[1] - 1,
y + math.ceil(GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 2),
) : min(
depth_heightmap_pad.shape[1],
y + math.ceil(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL / 2) + 1,
),
]
if (
np.sum(left_area > DEPTH_MIN) > 0
and np.sum((left_area - depth_heightmap_pad[x, y]) > -0.05) > 0
) or (
np.sum(right_area > DEPTH_MIN) > 0
and np.sum((right_area - depth_heightmap_pad[x, y]) > -0.05) > 0
):
label[0, x, y] = 0
label = ndimage.rotate(
label, -best_pix_ind * (360.0 / NUM_ROTATION), axes=(2, 1), reshape=False
)
label = np.round(label)
return label
# Compute labels and backpropagate
def backprop(
self,
color_heightmap,
depth_heightmap,
primitive_action,
best_pix_ind,
label_value,
use_push=True,
):
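        """Accumulate the loss for the executed action and step the optimizer.

        Losses are buffered in self.loss_list and backpropagated as one batch
        once it reaches self.batch_size; grasps are additionally trained on
        the opposite rotation and on hard-negative rotations.
        """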
if self.method == "reinforcement":
            batch_loss = -1
# Compute labels
label = np.zeros((1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE))
action_area = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
action_area[best_pix_ind[1]][best_pix_ind[2]] = 1
tmp_label = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
tmp_label[action_area > 0] = label_value
label[0, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF] = tmp_label
# Compute label mask
label_weights = np.zeros(label.shape)
            tmp_label_weights = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
tmp_label_weights[action_area > 0] = 1
label_weights[
0, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
] = tmp_label_weights
# Compute loss and backward pass
if len(self.loss_list) == 0:
self.optimizer.zero_grad()
if primitive_action == "grasp" and label_value > 0:
neg_loss = []
for i in range(self.model.num_rotations):
if i != best_pix_ind[0]:
neg_label = self.get_neg(depth_heightmap, label.copy(), i)
if (
neg_label[
0, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
][best_pix_ind[1]][best_pix_ind[2]]
== 0
):
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=i,
use_push=use_push,
)
loss = self.grasp_criterion(
self.model.output_prob[0][1].view(
1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE
),
torch.from_numpy(neg_label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda(),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
)
loss = loss.sum()
neg_loss.append(loss)
if len(neg_loss) > 0:
self.loss_list.append(sum(neg_loss) / len(neg_loss))
if primitive_action == "push":
if not self.is_baseline:
if label_value > 0:
                        label_weights *= 2  # compensate for the smaller number of push samples
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=best_pix_ind[0],
use_push=use_push,
)
if self.use_cuda:
loss = self.push_criterion(
self.model.output_prob[0][0].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(
torch.from_numpy(label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda(),
requires_grad=False,
)
else:
loss = self.push_criterion(
self.model.output_prob[0][0].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(torch.from_numpy(label).float()),
) * Variable(torch.from_numpy(label_weights).float(), requires_grad=False)
loss = loss.sum()
self.loss_list.append(loss)
if len(self.loss_list) >= self.batch_size:
total_loss = sum(self.loss_list)
print("Batch Loss:", total_loss.cpu().item())
self.loss_log.append([self.iteration, total_loss.cpu().item()])
mean_loss = total_loss / len(self.loss_list)
mean_loss.backward()
self.loss_list = []
# loss.backward()
loss_value = loss.cpu().data.numpy()
elif primitive_action == "grasp":
if label_value > 0:
if self.is_baseline:
label_weights *= 4
else:
label_weights *= 2
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=best_pix_ind[0],
use_push=use_push,
)
if self.use_cuda:
loss1 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(
torch.from_numpy(label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
)
else:
loss1 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(torch.from_numpy(label).float()),
) * Variable(torch.from_numpy(label_weights).float())
loss1 = loss1.sum()
self.loss_list.append(loss1)
# loss.backward()
loss_value = loss1.detach().cpu().data.numpy()
                opposite_rotate_idx = (
                    best_pix_ind[0] + self.model.num_rotations // 2
                ) % self.model.num_rotations
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=opposite_rotate_idx,
use_push=use_push,
)
if self.use_cuda:
loss2 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(
torch.from_numpy(label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
)
else:
loss2 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(torch.from_numpy(label).float()),
) * Variable(torch.from_numpy(label_weights).float())
loss2 = loss2.sum()
self.loss_list.append(loss2)
                if len(self.loss_list) >= self.batch_size:
                    total_loss = sum(self.loss_list)
                    batch_len = len(self.loss_list)  # capture before clearing
                    mean_loss = total_loss / batch_len
                    mean_loss.backward()
                    self.loss_list = []
                    total_loss = total_loss.detach().cpu().item()
                    print("Batch Loss:", total_loss, batch_len)
                    self.loss_log.append([self.iteration, total_loss])
# loss.backward()
loss_value += loss2.detach().cpu().data.numpy()
loss_value = loss_value / 2
if len(self.loss_list) == 0:
print("Training loss: %f" % (loss_value.sum()))
self.optimizer.step()
self.lr_scheduler.step()
def get_prediction_vis(self, predictions, color_heightmap, best_pix_ind):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
# prediction_vis[prediction_vis < 0] = 0 # assume probability
# prediction_vis[prediction_vis > 1] = 1 # assume probability
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap(
(prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET
)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis,
(int(best_pix_ind[2]), int(best_pix_ind[1])),
7,
(0, 0, 255),
2,
)
prediction_vis = utils.rotate(prediction_vis, rotate_idx * (360.0 / num_rotations))
background_image = utils.rotate(
color_heightmap, rotate_idx * (360.0 / num_rotations)
)
prediction_vis = (
0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis
).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
def push_change_area(self, prev_depth_img, next_depth_img):
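        """Estimate how much the free space around objects changed after a push.

        Each depth map is binarized at DEPTH_MIN and dilated with an 11x11 box
        filter; the area of the dilation fringe (the band around objects) is
        compared before and after, so a positive return means the objects are
        more spread out.
        """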
kernel = np.ones((11, 11))
# kernel_num = np.ones((5, 5))
depth_img = np.copy(prev_depth_img)
depth_img_copy = np.copy(depth_img)
depth_img_copy[depth_img_copy <= DEPTH_MIN] = 0
depth_img_copy[depth_img_copy > DEPTH_MIN] = 1
prev_area = cv2.filter2D(depth_img_copy, -1, kernel)
prev_area[prev_area <= 1] = 0
prev_area[prev_area > 1] = 1
prev_area = np.sum(prev_area - depth_img_copy)
depth_img = np.copy(next_depth_img)
depth_img_copy = np.copy(depth_img)
depth_img_copy[depth_img_copy <= DEPTH_MIN] = 0
depth_img_copy[depth_img_copy > DEPTH_MIN] = 1
next_area = cv2.filter2D(depth_img_copy, -1, kernel)
next_area[next_area <= 1] = 0
next_area[next_area > 1] = 1
next_area = np.sum(next_area - depth_img_copy)
print("Prev Area %d" % (prev_area))
print("Next Area %d" % (next_area))
return next_area - prev_area
| 39,013 | 41.222944 | 100 | py |
more | more-main/mcts_main.py | """Run MCTS-based push-and-grasp test cases."""
import glob
import gc
import os
import time
import datetime
import pybullet as p
import cv2
import numpy as np
from graphviz import Digraph
import argparse
import random
import torch
import pandas as pd
from mcts_utils import MCTSHelper
from mcts.search import MonteCarloTreeSearch
from mcts.nodes import PushSearchNode
from mcts.push import PushState
import utils
from constants import (
MCTS_EARLY_ROLLOUTS,
PIXEL_SIZE,
WORKSPACE_LIMITS,
TARGET_LOWER,
TARGET_UPPER,
NUM_ROTATION,
GRASP_Q_PUSH_THRESHOLD,
GRASP_Q_GRASP_THRESHOLD,
IS_REAL,
MCTS_MAX_LEVEL,
MCTS_ROLLOUTS,
)
from environment_sim import Environment
class SearchCollector:
def __init__(self, cases):
# Create directory to save data
timestamp = time.time()
timestamp_value = datetime.datetime.fromtimestamp(timestamp)
name = ""
for case in cases:
name = name + case.split("/")[-1].split(".")[0] + "-"
name = name[:-1]
self.base_directory = os.path.join(
os.path.abspath("logs_grasp"),
"mcts-" + timestamp_value.strftime("%Y-%m-%d-%H-%M-%S") + "-" + name,
)
print("Creating data logging session: %s" % (self.base_directory))
self.color_heightmaps_directory = os.path.join(
self.base_directory, "data", "color-heightmaps"
)
self.depth_heightmaps_directory = os.path.join(
self.base_directory, "data", "depth-heightmaps"
)
self.mask_directory = os.path.join(self.base_directory, "data", "masks")
self.prediction_directory = os.path.join(self.base_directory, "data", "predictions")
self.visualizations_directory = os.path.join(self.base_directory, "visualizations")
self.transitions_directory = os.path.join(self.base_directory, "transitions")
self.executed_action_log = []
self.label_value_log = []
self.consecutive_log = []
self.time_log = []
self.mcts_directory = os.path.join(self.base_directory, "mcts")
self.mcts_color_directory = os.path.join(self.base_directory, "mcts", "color")
self.mcts_depth_directory = os.path.join(self.base_directory, "mcts", "depth")
self.mcts_mask_directory = os.path.join(self.base_directory, "mcts", "mask")
self.mcts_child_image_directory = os.path.join(self.base_directory, "mcts", "child_image")
self.idx = 0
self.record_image_idx = []
self.record_action = []
self.record_label = []
self.record_num_visits = []
self.record_child_image_idx = []
self.record_data = {
"image_idx": self.record_image_idx,
"action": self.record_action,
"label": self.record_label,
"num_visits": self.record_num_visits,
"child_image_idx": self.record_child_image_idx,
}
if not os.path.exists(self.color_heightmaps_directory):
os.makedirs(self.color_heightmaps_directory)
if not os.path.exists(self.depth_heightmaps_directory):
os.makedirs(self.depth_heightmaps_directory)
if not os.path.exists(self.mask_directory):
os.makedirs(self.mask_directory)
if not os.path.exists(self.prediction_directory):
os.makedirs(self.prediction_directory)
if not os.path.exists(self.visualizations_directory):
os.makedirs(self.visualizations_directory)
if not os.path.exists(self.transitions_directory):
os.makedirs(os.path.join(self.transitions_directory))
if not os.path.exists(self.mcts_directory):
os.makedirs(os.path.join(self.mcts_directory))
if not os.path.exists(self.mcts_color_directory):
os.makedirs(os.path.join(self.mcts_color_directory))
if not os.path.exists(self.mcts_depth_directory):
os.makedirs(os.path.join(self.mcts_depth_directory))
if not os.path.exists(self.mcts_mask_directory):
os.makedirs(os.path.join(self.mcts_mask_directory))
if not os.path.exists(self.mcts_child_image_directory):
os.makedirs(os.path.join(self.mcts_child_image_directory))
def save_heightmaps(self, iteration, color_heightmap, depth_heightmap, mode=0):
color_heightmap = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(
os.path.join(self.color_heightmaps_directory, "%06d.%s.color.png" % (iteration, mode)),
color_heightmap,
)
depth_heightmap = np.round(depth_heightmap * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(
os.path.join(self.depth_heightmaps_directory, "%06d.%s.depth.png" % (iteration, mode)),
depth_heightmap,
)
def write_to_log(self, log_name, log):
np.savetxt(
os.path.join(self.transitions_directory, "%s.log.txt" % log_name), log, delimiter=" "
)
def save_predictions(self, iteration, pred, name="push"):
cv2.imwrite(
os.path.join(self.prediction_directory, "%06d.png" % (iteration)), pred,
)
def save_visualizations(self, iteration, affordance_vis, name):
cv2.imwrite(
os.path.join(self.visualizations_directory, "%06d.%s.png" % (iteration, name)),
affordance_vis,
)
def _save_mcts_image(self, env, file_id, node, is_child=False):
env.restore_objects(node.state.object_states)
color_image, depth_image, mask_image = utils.get_true_heightmap(env)
mask_image = utils.relabel_mask(env, mask_image)
file_name = f"{file_id:06d}"
if is_child:
file_name += f"-{node.prev_move}"
# color
color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
if is_child:
cv2.imwrite(
os.path.join(self.mcts_child_image_directory, f"{file_name}.color.png"),
color_image,
)
else:
cv2.imwrite(
os.path.join(self.mcts_color_directory, f"{file_name}.color.png"), color_image,
)
# depth
depth_image = np.round(depth_image * 100000).astype(np.uint16) # Save depth in 1e-5 meters
if is_child:
cv2.imwrite(
os.path.join(self.mcts_child_image_directory, f"{file_name}.depth.png"),
depth_image,
)
else:
cv2.imwrite(
os.path.join(self.mcts_depth_directory, f"{file_name}.depth.png"), depth_image,
)
# mask
if is_child:
cv2.imwrite(
os.path.join(self.mcts_child_image_directory, f"{file_name}.mask.png"), mask_image,
)
else:
cv2.imwrite(
os.path.join(self.mcts_mask_directory, f"{file_name}.mask.png"), mask_image,
)
return file_name
def save_mcts_data(self, mcts_helper, env, root, best_action, best_idx):
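        """Dump the search tree as training data (breadth-first traversal).

        For every expanded node, the simulator is restored to that state and
        color/depth/mask images are saved; each child's push action is
        recorded with max(child.q) as its label in records.csv.
        """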
backup_state = env.save_objects()
search_list = [root]
while len(search_list) > 0:
current_node = search_list.pop(0)
if current_node.has_children:
save_image = False
for i in range(len(current_node.children)):
child_node = current_node.children[i]
action = child_node.prev_move
# child_q = sum(sorted(child_node.q)[-MCTS_TOP:]) / min(child_node.n, MCTS_TOP)
# child_q = sum(child_node.q) / child_node.n
child_q = max(child_node.q)
self.record_image_idx.append(self.idx)
self.record_action.append(
[action.pos0[1], action.pos0[0], action.pos1[1], action.pos1[0]]
)
label = child_q
self.record_label.append(label)
self.record_num_visits.append(child_node.n)
child_idx = self._save_mcts_image(env, self.idx, child_node, is_child=True)
self.record_child_image_idx.append(child_idx)
save_image = True
if save_image:
self._save_mcts_image(env, self.idx, current_node, is_child=False)
self.idx += 1
search_list.extend(current_node.children)
df = pd.DataFrame(self.record_data, columns=list(self.record_data.keys()))
df.to_csv(os.path.join(self.mcts_directory, "records.csv"), index=False, header=True)
env.restore_objects(backup_state)
def plot_mcts(self, env, root, iteration):
backup_state = env.save_objects()
files = glob.glob("tree_plot/*")
for f in files:
os.remove(f)
dot = Digraph(
"mcts",
filename=f"tree_plot/mcts{iteration}.gv",
node_attr={
"shape": "box",
"fontcolor": "white",
"fontsize": "3",
"labelloc": "b",
"fixedsize": "true",
},
)
search_list = [root]
while len(search_list) > 0:
current_node = search_list.pop(0)
node_name = current_node.state.uid
# node_name_label = f"Q: {(sum(sorted(current_node.q)[-MCTS_TOP:]) / min(current_node.n, MCTS_TOP)):.3f}, N: {current_node.n}, Grasp Q: {current_node.state.q_value:.3f}"
node_name_label = f"Q: {(sum(sorted(current_node.q)[-1:]) / min(current_node.n, 1)):.3f}, N: {current_node.n}, Grasp Q: {current_node.state.q_value:.3f}"
env.restore_objects(current_node.state.object_states)
color_image, depth_image, _ = utils.get_true_heightmap(env)
node_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
if current_node.prev_move is not None:
node_action = str(current_node.prev_move).split("_")
cv2.arrowedLine(
node_image,
(int(node_action[1]), int(node_action[0])),
(int(node_action[3]), int(node_action[2])),
(255, 0, 255),
2,
tipLength=0.4,
)
image_name = f"tree_plot/{node_name}.png"
cv2.imwrite(image_name, node_image)
depthimage_name = f"tree_plot/{node_name}-depth.png"
depth_image = np.round(depth_image * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(depthimage_name, depth_image)
image_name = f"{node_name}.png"
image_size = str(
max(
0.6,
# sum(sorted(current_node.q)[-MCTS_TOP:]) / min(current_node.n, MCTS_TOP) * 2,
sum(sorted(current_node.q)[-1:]) / min(current_node.n, 1) * 2,
)
)
dot.node(
node_name,
label=node_name_label,
image=image_name,
width=image_size,
height=image_size,
)
if current_node.parent is not None:
                node_parent_name = current_node.parent.state.uid
                dot.edge(node_parent_name, node_name)
untracked_states = [current_node.state]
last_node_used = False
while len(untracked_states) > 0:
current_state = untracked_states.pop()
last_state_name = current_state.uid
if last_node_used:
actions = current_state.get_actions()
else:
if len(current_node.children) == 0:
actions = current_state.get_actions()
else:
actions = current_node.untried_actions
last_node_used = True
for _, move in enumerate(actions):
key = current_state.uid + str(move)
if key in current_state.mcts_helper.simulation_recorder:
(
object_states,
new_image_q,
) = current_state.mcts_helper.simulation_recorder[key]
node_name = f"{current_state.uid}.{current_state.level}-{move}"
node_name_label = f"Grasp Q: {new_image_q:.3f}"
env.restore_objects(object_states)
color_image, depth_image, _ = utils.get_true_heightmap(env)
node_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
node_action = str(move).split("_")
if len(node_action) > 1:
cv2.arrowedLine(
node_image,
(int(node_action[1]), int(node_action[0])),
(int(node_action[3]), int(node_action[2])),
(255, 0, 255),
2,
tipLength=0.4,
)
image_name = f"tree_plot/{node_name}.png"
cv2.imwrite(image_name, node_image)
depthimage_name = f"tree_plot/{node_name}-depth.png"
depth_image = np.round(depth_image * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(depthimage_name, depth_image)
image_name = f"{node_name}.png"
image_size = str(max(0.6, new_image_q * 2))
dot.node(
node_name,
label=node_name_label,
image=image_name,
width=image_size,
height=image_size,
)
dot.edge(last_state_name, node_name)
new_state, _, _, _ = current_state.move(move)
if new_state is not None:
untracked_states.append(new_state)
search_list.extend(current_node.children)
dot.view()
env.restore_objects(backup_state)
# input("wait for key")
def parse_args():
parser = argparse.ArgumentParser(description="MCTS DIPN")
parser.add_argument("--test_case", action="store", help="File for testing")
parser.add_argument("--test_cases", nargs="+", help="Files for testing")
parser.add_argument(
"--max_test_trials",
action="store",
type=int,
default=5,
help="maximum number of test runs per case/scenario",
)
parser.add_argument("--switch", action="store", type=int, help="Switch target")
parser.add_argument("--plot", action="store_true")
parser.add_argument("--test", action="store_true", default=False)
args = parser.parse_args()
return args
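# Example invocation (file path is illustrative):
#   python mcts_main.py --test_case test-cases/example.txt --max_test_trials 5 --plot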
if __name__ == "__main__":
# set seed
random.seed(1234)
torch.manual_seed(1234)
np.random.seed(1234)
iteration = 0
args = parse_args()
case = args.test_case
cases = args.test_cases
switch = args.switch
test = args.test
if switch is not None:
print(f"Target ID has been switched to {switch}")
if cases:
repeat_num = len(cases)
else:
repeat_num = args.max_test_trials
cases = [case] * repeat_num
    collector = SearchCollector(cases)
env = Environment(gui=False)
env_sim = Environment(gui=False)
mcts_helper = MCTSHelper(env_sim, "logs_grasp/snapshot-post-020000.reinforcement.pth")
is_plot = args.plot
for repeat_idx in range(repeat_num):
if not IS_REAL:
success = False
while not success:
env.reset()
env_sim.reset()
success = env.add_object_push_from_file(cases[repeat_idx], switch)
success &= env_sim.add_object_push_from_file(cases[repeat_idx], switch)
print(f"Reset environment at iteration {iteration} of repeat times {repeat_idx}")
else:
print(f"Reset environment at iteration {iteration} of repeat times {repeat_idx}")
obj_num = input("Reset manually!!! Enter the number of objects")
# push_start = [4.120000000000000329e-01, -1.999999999999999001e-02, 1.000000000000000021e-02]
# push_end = [5.080000000000000071e-01, -4.800000000000001488e-02, 1.000000000000000021e-02]
# push_start = [4.879999999999999893e-01, -1.239999999999999991e-01, 1.000000000000000021e-02]
# push_end = [5.180000000000000160e-01, -2.799999999999999711e-02, 1.000000000000000021e-02]
# push_start = [
# 82 * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
# 76 * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
# 1.000000000000000021e-02,
# ]
# push_end = [
# 112 * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
# 116 * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
# 1.000000000000000021e-02,
# ]
# env_sim.push(push_start, push_end)
# env.push(push_start, push_end)
# push_start = [4.800000000000000377e-01, 4.400000000000001132e-02, 1.000000000000000021e-02]
# push_end = [5.800000000000000711e-01, 4.400000000000001132e-02, 1.000000000000000021e-02]
# env.push(push_start, push_end)
# input("test")
start_time = time.time()
while True:
color_image, depth_image, _ = utils.get_true_heightmap(env)
temp = cv2.cvtColor(color_image, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
print(f"Target on the table (value: {np.sum(mask) / 255}) at iteration {iteration}")
if np.sum(mask) / 255 < 10:
break
q_value, best_pix_ind, grasp_predictions = mcts_helper.get_grasp_q(
color_image, depth_image, post_checking=True
)
print(f"Max grasp Q value: {q_value}")
# record
collector.save_heightmaps(iteration, color_image, depth_image)
grasp_pred_vis = mcts_helper.get_prediction_vis(
grasp_predictions, color_image, best_pix_ind
)
collector.save_visualizations(iteration, grasp_pred_vis, "grasp")
# Grasp >>>>>
if q_value > GRASP_Q_GRASP_THRESHOLD:
best_rotation_angle = np.deg2rad(best_pix_ind[0] * (360.0 / NUM_ROTATION))
primitive_position = [
best_pix_ind[1] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
best_pix_ind[2] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
depth_image[best_pix_ind[1]][best_pix_ind[2]] + WORKSPACE_LIMITS[2][0],
]
if not IS_REAL:
success = env.grasp(primitive_position, best_rotation_angle)
else:
                    grasp_success = env.grasp(primitive_position, best_rotation_angle)
                    success = grasp_success
# record
reward_value = 1 if success else 0
collector.executed_action_log.append(
[
1, # grasp
primitive_position[0],
primitive_position[1],
primitive_position[2],
best_rotation_angle,
-1,
-1,
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
if success:
break
else:
continue
# Grasp <<<<<
# Search >>>>>
object_states = env.save_objects()
initial_state = PushState("root", object_states, q_value, 0, mcts_helper, max_q=GRASP_Q_PUSH_THRESHOLD, max_level=MCTS_MAX_LEVEL)
root = PushSearchNode(initial_state)
mcts = MonteCarloTreeSearch(root)
best_node = mcts.best_action(MCTS_ROLLOUTS, MCTS_EARLY_ROLLOUTS, test)
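            # best_node is the chosen child of the root; its prev_move stores the
            # push start/end in heightmap pixel coordinates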
print("best node:")
print(best_node.state.uid)
print(best_node.state.q_value)
print(best_node.prev_move)
print(len(root.children))
node = best_node
# env.restore_objects(node.state.object_states)
# color_image, _, _ = utils.get_true_heightmap(env)
node_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
node_action = str(node.prev_move).split("_")
cv2.arrowedLine(
node_image,
(int(node_action[1]), int(node_action[0])),
(int(node_action[3]), int(node_action[2])),
(255, 0, 255),
2,
tipLength=0.4,
)
collector.save_predictions(iteration, node_image)
# env.restore_objects(object_states)
# Search <<<<<
# Push >>>>>
push_start = best_node.prev_move.pos0
push_end = best_node.prev_move.pos1
push_start = [
push_start[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_start[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
push_end = [
push_end[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_end[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
env.push(push_start, push_end)
# record
reward_value = 0
collector.executed_action_log.append(
[
0, # push
push_start[0],
push_start[1],
push_start[2],
push_end[0],
push_end[1],
push_end[2],
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
# Push <<<<<
# Plot
if is_plot:
collector.plot_mcts(env_sim, root, iteration)
# Save tree for training, BFS
# best_action = best_node.prev_move
# collector.record_image_idx.append(collector.idx)
# collector.record_action.append(
# [best_action.pos0[1], best_action.pos0[0], best_action.pos1[1], best_action.pos1[0]]
# )
# label = 2
# collector.record_label.append(label)
if not test:
collector.save_mcts_data(mcts_helper, env_sim, root, best_node.prev_move, collector.idx)
# clean up for memory
del initial_state
del mcts
del root
del best_node
del push_start
del push_end
mcts_helper.reset()
gc.collect()
end_time = time.time()
collector.time_log.append(end_time - start_time)
collector.write_to_log("executed-time", collector.time_log)
| 23,696 | 40.793651 | 183 | py |
more | more-main/vision/backbone_utils.py | from collections import OrderedDict
from torch import nn
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool
import torch.nn.functional as F
from torchvision.ops import misc as misc_nn_ops
from ._utils import IntermediateLayerGetter
from . import resnet
from constants import GRIPPER_GRASP_OUTER_DISTANCE_PIXEL, GRIPPER_GRASP_SAFE_WIDTH_PIXEL
class FCNHead(nn.Sequential):
def __init__(self, in_channels, channels, kernel_size=3, padding=1, last=True):
inter_channels = in_channels // 2
if last:
layers = [
nn.Conv2d(in_channels, inter_channels, kernel_size, padding=padding, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(),
nn.Conv2d(inter_channels, channels, 1),
]
else:
layers = [
nn.Conv2d(in_channels, inter_channels, kernel_size, padding=padding, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(),
nn.Conv2d(inter_channels, channels, 1),
nn.BatchNorm2d(channels),
nn.ReLU(),
]
super(FCNHead, self).__init__(*layers)
class BackboneWithFPNAndHeadPush(nn.Module):
"""
    Adds an FPN on top of a model.
    Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
    extract a submodel that returns the feature maps specified in return_layers.
    The same limitations of IntermediateLayerGetter apply here.
Arguments:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(self, backbone, return_layers, in_channels_list, out_channels, is_real=False):
super().__init__()
# self.backbone = backbone
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
self.conv0 = nn.Conv2d(256, 256, kernel_size=3, stride=1, bias=False)
self.bn0 = nn.BatchNorm2d(256)
self.conv1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, bias=False)
self.bn1 = nn.BatchNorm2d(128)
self.conv2 = nn.Conv2d(128, 32, kernel_size=1, stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 1, kernel_size=1, stride=1, bias=False)
# self.head = nn.Sequential(
# OrderedDict(
# [
# ("push-head-conv0", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
# ("head-relu0", nn.ReLU(inplace=True)),
# ("push-head-conv1", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
# ]
# )
# )
inplanes = 256 # the channels of 'out' layer.
final_out_channels = 1
self.classifier1 = FCNHead(inplanes, 64, last=False)
self.classifier2 = FCNHead(64, final_out_channels, last=True)
self.out_channels = out_channels
def forward(self, x):
input_shape_half = (x.shape[-2] // 2, x.shape[-1] // 2)
input_shape = x.shape[-2:]
# x = self.body(x)
# x = self.fpn(x)
# x = x["0"]
# x = self.classifier1(x)
# x = F.interpolate(x, size=input_shape_half, mode="nearest")
# x = self.classifier2(x)
# x = F.interpolate(x, size=input_shape, mode="nearest")
# x = self.backbone(x)
x = self.body(x)
x = self.fpn(x)
x = x["0"]
x = self.conv0(x)
x = self.bn0(x)
x = F.relu(x)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = F.interpolate(x, size=input_shape_half, mode="bilinear", align_corners=True)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=True)
x = self.conv3(x)
return x
class BackboneWithFPNAndHeadGrasp(nn.Module):
"""
    Adds an FPN on top of a model.
    Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
    extract a submodel that returns the feature maps specified in return_layers.
    The same limitations of IntermediateLayerGetter apply here.
Arguments:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(self, backbone, return_layers, in_channels_list, out_channels, is_real=False):
super().__init__()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
self.head = nn.Sequential(
OrderedDict(
[
(
"grasp-head-conv000",
nn.Conv2d(
1,
1,
kernel_size=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
),
padding=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2,
),
bias=False,
),
),
("grasp-head-relu000", nn.ReLU(inplace=True)),
(
"grasp-head-conv0000",
nn.Conv2d(
1,
1,
kernel_size=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
),
padding=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2,
),
bias=False,
),
),
("grasp-head-relu0000", nn.ReLU(inplace=True)),
("grasp-head-conv1", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
("grasp-head-relu1", nn.ReLU(inplace=True)),
("grasp-head-conv2", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
]
)
)
inplanes = 256 # the channels of 'out' layer.
final_out_channels = 1
self.classifier1 = FCNHead(inplanes, 64, last=False)
self.classifier2 = FCNHead(64, final_out_channels, last=False)
self.out_channels = out_channels
def forward(self, x):
input_shape_half = (x.shape[-2] // 2, x.shape[-1] // 2)
input_shape = x.shape[-2:]
x = self.body(x)
x = self.fpn(x)
x = x["0"]
x = self.classifier1(x)
x = F.interpolate(x, size=input_shape_half, mode="nearest")
x = self.classifier2(x)
x = F.interpolate(x, size=input_shape, mode="nearest")
x = self.head(x)
return x
class BackboneWithFPN(nn.Module):
"""
    Adds an FPN on top of a model.
    Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
    extract a submodel that returns the feature maps specified in return_layers.
    The same limitations of IntermediateLayerGetter apply here.
Arguments:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(self, backbone, return_layers, in_channels_list, out_channels):
super(BackboneWithFPN, self).__init__()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
self.out_channels = out_channels
def forward(self, x):
x = self.body(x)
x = self.fpn(x)
return x
def resnet_fpn_net(
backbone_name,
norm_layer=misc_nn_ops.FrozenBatchNorm2d,
trainable_layers=5,
grasp=True,
is_real=False,
pretrained=False,
input_channels=4,
):
    """
    Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone.
    Examples::
        >>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
        >>> backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=3)
        >>> # get some dummy image
        >>> x = torch.rand(1,3,64,64)
        >>> # compute the output
        >>> output = backbone(x)
        >>> print([(k, v.shape) for k, v in output.items()])
        >>> # returns
        >>> [('0', torch.Size([1, 256, 16, 16])),
        >>>  ('1', torch.Size([1, 256, 8, 8])),
        >>>  ('2', torch.Size([1, 256, 4, 4])),
        >>>  ('3', torch.Size([1, 256, 2, 2])),
        >>>  ('pool', torch.Size([1, 256, 1, 1]))]
    Arguments:
        backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
            'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
        norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
            (https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)
        pretrained (bool): If True, returns a model with backbone pre-trained on Imagenet
        trainable_layers (int): number of trainable (not frozen) resnet layers starting from final block.
            Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
    """
    backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained, norm_layer=norm_layer, input_channels=input_channels
    )
    # select layers that won't be frozen
assert trainable_layers <= 5 and trainable_layers >= 0
layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
# freeze layers only if pretrained backbone is used
for name, parameter in backbone.named_parameters():
if all([not name.startswith(layer) for layer in layers_to_train]):
parameter.requires_grad_(False)
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
]
out_channels = 256
if grasp:
return BackboneWithFPNAndHeadGrasp(
backbone, return_layers, in_channels_list, out_channels, is_real
)
else:
return BackboneWithFPNAndHeadPush(
backbone, return_layers, in_channels_list, out_channels, is_real
)
def resent_backbone(
backbone_name, pretrained, num_classes, input_channels, norm_layer=misc_nn_ops.FrozenBatchNorm2d
):
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained,
input_channels=input_channels,
num_classes=num_classes,
norm_layer=norm_layer,
)
return backbone
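def _resent_backbone_sketch():
    # Editor's sketch (hypothetical usage, not called elsewhere): a plain
    # classification resnet with a 4-channel (RGB-D) input, e.g. as a small
    # scalar-output head.
    import torch
    net = resent_backbone("resnet18", pretrained=False, num_classes=1, input_channels=4)
    return net(torch.rand(2, 4, 64, 64)).shape  # torch.Size([2, 1])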
| 13,140 | 38.821212 | 118 | py |
more | more-main/vision/_utils.py | from collections import OrderedDict
import torch
from torch import nn
from torch.jit.annotations import Dict
from torch.nn import functional as F
class IntermediateLayerGetter(nn.ModuleDict):
"""
Module wrapper that returns intermediate layers from a model
It has a strong assumption that the modules have been registered
into the model in the same order as they are used.
This means that one should **not** reuse the same nn.Module
twice in the forward if you want this to work.
Additionally, it is only able to query submodules that are directly
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Arguments:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
Examples::
>>> m = torchvision.models.resnet18(pretrained=True)
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
>>> {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
"""
_version = 2
__annotations__ = {
"return_layers": Dict[str, str],
}
def __init__(self, model, return_layers):
if not set(return_layers).issubset([name for name, _ in model.named_children()]):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
return_layers = {str(k): str(v) for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in model.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
super(IntermediateLayerGetter, self).__init__(layers)
self.return_layers = orig_return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.items():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
| 2,641 | 37.289855 | 89 | py |
more | more-main/vision/resnet.py | import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
__all__ = [
"ResNet",
"resnet10",
"resnet18",
"resnet34",
"resnet50",
"resnet101",
"resnet152",
"resnext50_32x4d",
"resnext101_32x8d",
"wide_resnet50_2",
"wide_resnet101_2",
]
model_urls = {
"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
"resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
"resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
"resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
"resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
"resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
"wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
"wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
input_channels=3,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
# color + depth = 4
self.conv1 = nn.Conv2d(
input_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
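def _partial_pretrained_sketch():
    # Editor's sketch: with input_channels != 3, the ImageNet conv1 weights no
    # longer match, so a strict load fails. A common workaround (an assumption,
    # not something this repo does) is to drop shape-mismatched keys:
    model = ResNet(BasicBlock, [2, 2, 2, 2], input_channels=4)
    state_dict = load_state_dict_from_url(model_urls["resnet18"], progress=True)
    own = model.state_dict()
    filtered = {k: v for k, v in state_dict.items() if k in own and v.shape == own[k].shape}
    model.load_state_dict(filtered, strict=False)
    return model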
def resnet10(pretrained=False, progress=True, **kwargs):
r"""ResNet-10 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet10", BasicBlock, [1, 1, 1, 1], pretrained, progress, **kwargs)
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _resnet("resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _resnet("resnet34", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
return _resnet("resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet("resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet("wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet("wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
| 14,901 | 34.229314 | 107 | py |
more | more-main/vision/coco_utils.py | import copy
import os
from PIL import Image
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
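def _poly_to_mask_sketch():
    # Editor's sketch: one square polygon in COCO [x0, y0, x1, y1, ...] order,
    # rasterized into a (1, H, W) uint8 mask.
    square = [[10.0, 10.0, 40.0, 10.0, 40.0, 40.0, 10.0, 40.0]]
    masks = convert_coco_poly_to_mask([square], height=64, width=64)
    return masks.shape  # torch.Size([1, 64, 64])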
class ConvertCocoPolysToMask(object):
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
min_keypoints_per_image = 10
def _has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
        # keypoint tasks have a slightly different criterion for considering
        # whether an annotation is valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
assert isinstance(dataset, torchvision.datasets.CocoDetection)
ids = []
for ds_idx, img_id in enumerate(dataset.ids):
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = dataset.coco.loadAnns(ann_ids)
if cat_list:
anno = [obj for obj in anno if obj["category_id"] in cat_list]
if _has_valid_annotation(anno):
ids.append(ds_idx)
dataset = torch.utils.data.Subset(dataset, ids)
return dataset
def convert_to_coco_api(ds):
coco_ds = COCO()
# annotation IDs need to start at 1, not 0, see torchvision issue #1530
ann_id = 1
dataset = {'images': [], 'categories': [], 'annotations': []}
categories = set()
for img_idx in range(len(ds)):
# find better way to get target
# targets = ds.get_annotations(img_idx)
img, targets = ds[img_idx]
if targets['num_obj'].item() == 0: continue
image_id = targets["image_id"].item()
img_dict = {}
img_dict['id'] = image_id
img_dict['height'] = img.shape[-2]
img_dict['width'] = img.shape[-1]
dataset['images'].append(img_dict)
bboxes = targets["boxes"]
bboxes[:, 2:] -= bboxes[:, :2]
bboxes = bboxes.tolist()
labels = targets['labels'].tolist()
areas = targets['area'].tolist()
iscrowd = targets['iscrowd'].tolist()
if 'masks' in targets:
masks = targets['masks']
# make masks Fortran contiguous for coco_mask
masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
if 'keypoints' in targets:
keypoints = targets['keypoints']
keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
num_objs = len(bboxes)
for i in range(num_objs):
ann = {}
ann['image_id'] = image_id
ann['bbox'] = bboxes[i]
ann['category_id'] = labels[i]
categories.add(labels[i])
ann['area'] = areas[i]
ann['iscrowd'] = iscrowd[i]
ann['id'] = ann_id
if 'masks' in targets:
ann["segmentation"] = coco_mask.encode(masks[i].numpy())
if 'keypoints' in targets:
ann['keypoints'] = keypoints[i]
ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
dataset['annotations'].append(ann)
ann_id += 1
dataset['categories'] = [{'id': i} for i in sorted(categories)]
coco_ds.dataset = dataset
coco_ds.createIndex()
return coco_ds
def get_coco_api_from_dataset(dataset):
for _ in range(10):
if isinstance(dataset, torchvision.datasets.CocoDetection):
break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
return convert_to_coco_api(dataset)
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
| 7,759 | 34.272727 | 83 | py |
more | more-main/vision/coco_eval.py | import json
import tempfile
import numpy as np
import copy
import time
import torch
import torch._six
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from collections import defaultdict
import old_utils as utils
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
coco_dt = loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
'keypoints': keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
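def _convert_to_xywh_sketch():
    # Editor's sketch: COCO result files expect [x, y, width, height] boxes.
    boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])  # xyxy
    return convert_to_xywh(boxes)  # tensor([[10., 20., 40., 60.]])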
def merge(img_ids, eval_imgs):
all_img_ids = utils.all_gather(img_ids)
all_eval_imgs = utils.all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
# Ideally, pycocotools wouldn't have hard-coded prints
# so that we could avoid copy-pasting those two functions
def createIndex(self):
# create index
# print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
# print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
maskUtils = mask_util
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# print('Loading and preparing results...')
# tic = time.time()
if isinstance(resFile, torch._six.string_classes):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
    assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if 'segmentation' not in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if 'bbox' not in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x2 - x1) * (y2 - y1)
ann['id'] = id + 1
ann['bbox'] = [x1, y1, x2 - x1, y2 - y1]
# print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
createIndex(res)
return res
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
################################################################# | 12,012 | 33.421203 | 107 | py |
more | more-main/vision/transforms.py | import random
import torch
from torchvision.transforms import functional as F
def _flip_coco_person_keypoints(kps, width):
flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
flipped_data = kps[:, flip_inds]
flipped_data[..., 0] = width - flipped_data[..., 0]
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = flipped_data[..., 2] == 0
flipped_data[inds] = 0
return flipped_data
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob and target['num_obj'].item() > 0:
height, width = image.shape[-2:]
image = image.flip(-1)
bbox = target["boxes"]
bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
target["boxes"] = bbox
if "masks" in target:
target["masks"] = target["masks"].flip(-1)
return image, target
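def _paired_transforms_sketch(image, target):
    # Editor's sketch: unlike torchvision transforms, these operate on
    # (image, target) pairs so boxes/masks stay consistent with the flip.
    # ToTensor (defined below) normally comes first in the chain.
    tfm = Compose([ToTensor(), RandomHorizontalFlip(0.5)])
    return tfm(image, target)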
class ToTensor(object):
def __call__(self, image, target):
image = F.to_tensor(image)
return image, target | 1,358 | 28.543478 | 74 | py |
RWP | RWP-main/utils.py | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models_imagenet
import numpy as np
import random
import os
import time
import models
import sys
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
import os.path
import pickle
from PIL import Image
def set_seed(seed=1):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Logger(object):
def __init__(self,fileN ="Default.log"):
self.terminal = sys.stdout
self.log = open(fileN,"a")
def write(self,message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
self.terminal.flush()
self.log.flush()
################################ datasets #######################################
from torch.utils.data import DataLoader, Subset
from torchvision.datasets import CIFAR10, CIFAR100, ImageFolder
class Cutout:
def __init__(self, size=16, p=0.5):
self.size = size
self.half_size = size // 2
self.p = p
def __call__(self, image):
if torch.rand([1]).item() > self.p:
return image
left = torch.randint(-self.half_size, image.size(1) - self.half_size, [1]).item()
top = torch.randint(-self.half_size, image.size(2) - self.half_size, [1]).item()
right = min(image.size(1), left + self.size)
bottom = min(image.size(2), top + self.size)
image[:, max(0, left): right, max(0, top): bottom] = 0
return image
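def _cutout_sketch():
    # Editor's sketch: Cutout zeroes one random (up to size x size) patch per
    # image with probability p; it is applied after ToTensor/normalize in
    # get_datasets_cutout below.
    img = torch.rand(3, 32, 32)
    return Cutout(size=16, p=1.0)(img)  # same shape, one patch zeroed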
def get_datasets(args):
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'ImageNet':
traindir = os.path.join('/home/datasets/ILSVRC2012/', 'train')
valdir = os.path.join('/home/datasets/ILSVRC2012/', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
return train_loader, val_loader
def get_datasets_ddp(args):
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader
def get_datasets_cutout(args):
print ('cutout!')
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader
def get_datasets_cutout_ddp(args):
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler, drop_last=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler, drop_last=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader
def get_model(args):
print('Model: {}'.format(args.arch))
if args.datasets == 'ImageNet':
return models_imagenet.__dict__[args.arch]()
if args.datasets == 'CIFAR10':
num_classes = 10
elif args.datasets == 'CIFAR100':
num_classes = 100
model_cfg = getattr(models, args.arch)
return model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs)
class SAM(torch.optim.Optimizer):
def __init__(self, params, base_optimizer, rho=0.05, adaptive=False, **kwargs):
assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
defaults = dict(rho=rho, adaptive=adaptive, **kwargs)
super(SAM, self).__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
self.defaults.update(self.base_optimizer.defaults)
@torch.no_grad()
def first_step(self, zero_grad=False):
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = group["rho"] / (grad_norm + 1e-12)
for p in group["params"]:
if p.grad is None: continue
self.state[p]["old_p"] = p.data.clone()
e_w = (torch.pow(p, 2) if group["adaptive"] else 1.0) * p.grad * scale.to(p)
p.add_(e_w) # climb to the local maximum "w + e(w)"
if zero_grad: self.zero_grad()
@torch.no_grad()
def second_step(self, zero_grad=False):
for group in self.param_groups:
for p in group["params"]:
if p.grad is None: continue
p.data = self.state[p]["old_p"] # get back to "w" from "w + e(w)"
self.base_optimizer.step() # do the actual "sharpness-aware" update
if zero_grad: self.zero_grad()
@torch.no_grad()
def step(self, closure=None):
assert closure is not None, "Sharpness Aware Minimization requires closure, but it was not provided"
closure = torch.enable_grad()(closure) # the closure should do a full forward-backward pass
self.first_step(zero_grad=True)
closure()
self.second_step()
def _grad_norm(self):
shared_device = self.param_groups[0]["params"][0].device # put everything on the same device, in case of model parallelism
norm = torch.norm(
torch.stack([
((torch.abs(p) if group["adaptive"] else 1.0) * p.grad).norm(p=2).to(shared_device)
for group in self.param_groups for p in group["params"]
if p.grad is not None
]),
p=2
)
return norm
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
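def _sam_usage_sketch(model, loader, criterion):
    # Editor's sketch of the standard two-step SAM training loop (the common
    # usage pattern for this optimizer; it is not invoked elsewhere in this file).
    optimizer = SAM(model.parameters(), torch.optim.SGD, rho=0.05, lr=0.1, momentum=0.9)
    for inputs, targets in loader:
        # first forward-backward pass at w, then climb to w + e(w)
        criterion(model(inputs), targets).backward()
        optimizer.first_step(zero_grad=True)
        # second forward-backward pass at the perturbed point, then update w
        criterion(model(inputs), targets).backward()
        optimizer.second_step(zero_grad=True)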
| 14,352 | 38.215847 | 173 | py |
RWP | RWP-main/train_rwp_parallel.py | import argparse
from torch.nn.modules.batchnorm import _BatchNorm
import os
import time
import numpy as np
import random
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from utils import *
# Parse arguments
parser = argparse.ArgumentParser(description='DDP RWP training')
parser.add_argument('--EXP', metavar='EXP', help='experiment name', default='SGD')
parser.add_argument('--arch', '-a', metavar='ARCH',
help='The architecture of the model')
parser.add_argument('--datasets', metavar='DATASETS', default='CIFAR10', type=str,
help='The training datasets')
parser.add_argument('--optimizer', metavar='OPTIMIZER', default='sgd', type=str,
help='The optimizer for training')
parser.add_argument('--schedule', metavar='SCHEDULE', default='step', type=str,
help='The schedule for training')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
                    metavar='N', help='print frequency (default: 100 iterations)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--wandb', dest='wandb', action='store_true',
                    help='use wandb to monitor statistics')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='save_temp', type=str)
parser.add_argument('--log-dir', dest='log_dir',
help='The directory used to save the log',
default='save_temp', type=str)
parser.add_argument('--log-name', dest='log_name',
help='The log file name',
default='log', type=str)
parser.add_argument('--randomseed',
help='Randomseed for training and initialization',
type=int, default=1)
parser.add_argument('--cutout', dest='cutout', action='store_true',
help='use cutout data augmentation')
parser.add_argument('--alpha', default=0.5, type=float,
metavar='A', help='alpha for mixing gradients')
parser.add_argument('--gamma', default=0.01, type=float,
metavar='gamma', help='Perturbation magnitude gamma for RWP')
parser.add_argument("--local_rank", default=-1, type=int)
best_prec1 = 0
# Record training statistics
train_loss = []
train_err = []
test_loss = []
test_err = []
arr_time = []
args = parser.parse_args()
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
dist.init_process_group(backend='nccl')
args.world_size = torch.distributed.get_world_size()
args.workers = int((args.workers + args.world_size - 1) / args.world_size)
if args.local_rank == 0:
print ('world size: {} workers per GPU: {}'.format(args.world_size, args.workers))
device = torch.device("cuda", local_rank)
if args.wandb:
import wandb
wandb.init(project="TWA", entity="nblt")
date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
wandb.run.name = args.EXP + date
def get_model_param_vec(model):
# Return the model parameters as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.data.detach().reshape(-1))
return torch.cat(vec, 0)
def get_model_grad_vec(model):
# Return the model gradient as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.grad.detach().reshape(-1))
return torch.cat(vec, 0)
def update_grad(model, grad_vec):
idx = 0
for name,param in model.named_parameters():
arr_shape = param.grad.shape
size = param.grad.numel()
param.grad.data = grad_vec[idx:idx+size].reshape(arr_shape).clone()
idx += size
def update_param(model, param_vec):
idx = 0
for name,param in model.named_parameters():
arr_shape = param.data.shape
size = param.data.numel()
param.data = param_vec[idx:idx+size].reshape(arr_shape).clone()
idx += size
def print_param_shape(model):
for name,param in model.named_parameters():
print (name, param.data.shape)
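def _param_vector_roundtrip_sketch(model):
    # Editor's sketch: the helpers above flatten parameters (or gradients)
    # into a single vector and scatter them back; this round trip is a no-op.
    vec = get_model_param_vec(model)
    update_param(model, vec)
    return vec.numel()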
def main():
global args, best_prec1, p0
global train_loss, train_err, test_loss, test_err, arr_time, running_weight
set_seed(args.randomseed)
# Check the save_dir exists or not
if args.local_rank == 0:
print ('save dir:', args.save_dir)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# Check the log_dir exists or not
if args.local_rank == 0:
print ('log dir:', args.log_dir)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
sys.stdout = Logger(os.path.join(args.log_dir, args.log_name))
# Define model
# model = torch.nn.DataParallel(get_model(args))
model = get_model(args).to(device)
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
# print_param_shape(model)
# Optionally resume from a checkpoint
if args.resume:
# if os.path.isfile(args.resume):
if os.path.isfile(os.path.join(args.save_dir, args.resume)):
# model.load_state_dict(torch.load(os.path.join(args.save_dir, args.resume)))
print ("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
print ('from ', args.start_epoch)
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print ("=> loaded checkpoint '{}' (epoch {})"
.format(args.evaluate, checkpoint['epoch']))
else:
print ("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Prepare Dataloader
print ('cutout:', args.cutout)
if args.cutout:
train_loader, val_loader = get_datasets_cutout_ddp(args)
else:
train_loader, val_loader = get_datasets_ddp(args)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().to(device)
if args.half:
model.half()
criterion.half()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Double the training epochs since each iteration will consume two batches of data for calculating g and g_s
args.epochs = args.epochs * 2
if args.schedule == 'step':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(args.epochs * 0.5), int(args.epochs * 0.75)], last_epoch=args.start_epoch - 1)
elif args.schedule == 'cosine':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
if args.evaluate:
validate(val_loader, model, criterion)
return
is_best = 0
print ('Start training: ', args.start_epoch, '->', args.epochs)
print ('gamma:', args.gamma)
print ('len(train_loader):', len(train_loader))
for epoch in range(args.start_epoch, args.epochs):
train_loader.sampler.set_epoch(epoch)
# train for one epoch
if args.local_rank == 0:
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
train(train_loader, model, criterion, optimizer, epoch)
lr_scheduler.step()
if epoch % 2 == 0: continue
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if args.local_rank == 0:
save_checkpoint({
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'model.th'))
if args.local_rank == 0:
print ('train loss: ', train_loss)
print ('train err: ', train_err)
print ('test loss: ', test_loss)
print ('test err: ', test_err)
print ('time: ', arr_time)
def train(train_loader, model, criterion, optimizer, epoch):
"""
Run one train epoch
"""
global train_loss, train_err, arr_time
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
total_loss, total_err = 0, 0
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.to(device)
input_var = input.to(device)
target_var = target
if args.half:
input_var = input_var.half()
if args.local_rank % 2 == 1:
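            # RWP (editor's note): odd-ranked workers compute the gradient at a
            # randomly perturbed point w + e, where e is drawn per layer with a
            # filter-wise std of gamma * ||w_filter||; even-ranked workers use
            # the clean weights. The branch weights 2*alpha and 2*(1 - alpha)
            # make DDP's gradient averaging over the two halves equal to
            # alpha * g_perturbed + (1 - alpha) * g_clean.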
weight = args.alpha * 2
with torch.no_grad():
noise = []
for mp in model.parameters():
if len(mp.shape) > 1:
sh = mp.shape
sh_mul = np.prod(sh[1:])
temp = mp.view(sh[0], -1).norm(dim=1, keepdim=True).repeat(1, sh_mul).view(mp.shape)
temp = torch.normal(0, args.gamma*temp).to(mp.data.device)
else:
temp = torch.empty_like(mp, device=mp.data.device)
temp.normal_(0, args.gamma*(mp.view(-1).norm().item() + 1e-16))
noise.append(temp)
mp.data.add_(noise[-1])
else:
weight = (1 - args.alpha) * 2
# compute output
output = model(input_var)
loss = criterion(output, target_var) * weight
optimizer.zero_grad()
loss.backward()
if args.local_rank % 2 == 1:
            # remove the noise to return to the unperturbed weights w
with torch.no_grad():
for mp, n in zip(model.parameters(), noise):
mp.data.sub_(n)
optimizer.step()
total_loss += loss.item() * input_var.shape[0] / weight
total_err += (output.max(dim=1)[1] != target_var).sum().item()
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (i % args.print_freq == 0 or i == len(train_loader) - 1):
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
if args.local_rank == 0:
print ('Total time for epoch [{0}] : {1:.3f}'.format(epoch, batch_time.sum))
tloss = total_loss / len(train_loader.dataset) * args.world_size
terr = total_err / len(train_loader.dataset) * args.world_size
train_loss.append(tloss)
train_err.append(terr)
print ('train loss | acc', tloss, 1 - terr)
if args.wandb:
            wandb.log({"train loss": tloss})
            wandb.log({"train acc": 1 - terr})
arr_time.append(batch_time.sum)
def validate(val_loader, model, criterion, add=True):
"""
Run evaluation
"""
global test_err, test_loss
total_loss = 0
total_err = 0
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
target = target.to(device)
input_var = input.to(device)
target_var = target.to(device)
if args.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
output = output.float()
loss = loss.float()
total_loss += loss.item() * input_var.shape[0]
total_err += (output.max(dim=1)[1] != target_var).sum().item()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and add:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
if add:
print(' * Prec@1 {top1.avg:.3f}'
.format(top1=top1))
test_loss.append(total_loss / len(val_loader.dataset))
test_err.append(total_err / len(val_loader.dataset))
if args.wandb:
wandb.log({"test loss": total_loss / len(val_loader.dataset)})
wandb.log({"test acc": 1 - total_err / len(val_loader.dataset)})
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""
Save the training model
"""
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 16,489 | 34.310493 | 165 | py |
RWP | RWP-main/train_rwp_imagenet.py | import argparse
import os
import random
import shutil
import time
import warnings
import os
import numpy as np
import pickle
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from utils import *
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--alpha', default=0.5, type=float,
metavar='AA', help='alpha for mixing gradients')
parser.add_argument('--gamma', default=0.01, type=float,
metavar='GAMMA', help='gamma for noise')
parser.add_argument('-p', '--print-freq', default=1000, type=int,
                    metavar='N', help='print frequency (default: 1000)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=42, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='save_temp', type=str)
parser.add_argument('--log-dir', dest='log_dir',
help='The directory used to save the log',
default='save_temp', type=str)
parser.add_argument('--log-name', dest='log_name',
help='The log file name',
default='log', type=str)
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc1 = 0
param_vec = []
# Record training statistics
train_loss = []
train_acc = []
test_loss = []
test_acc = []
arr_time = []
def get_model_grad_vec(model):
# Return the model gradient as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.grad.detach().reshape(-1))
return torch.cat(vec, 0)
def update_grad(model, grad_vec):
idx = 0
for name,param in model.named_parameters():
arr_shape = param.grad.shape
size = 1
for i in range(len(list(arr_shape))):
size *= arr_shape[i]
param.grad.data = grad_vec[idx:idx+size].reshape(arr_shape).clone()
idx += size
iters = 0
def get_model_param_vec(model):
# Return the model parameters as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.detach().cpu().reshape(-1).numpy())
return np.concatenate(vec, 0)
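# Illustrative round trip (not part of the original flow): after loss.backward(),
# vec = get_model_grad_vec(model) followed by update_grad(model, vec) leaves
# every parameter's .grad unchanged, which is a quick way to sanity-check the
# flatten/unflatten pair above.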
def main():
global train_loss, train_acc, test_loss, test_acc, arr_time
args = parser.parse_args()
    print('gamma:', args.gamma)
save_dir = 'save_' + args.arch
if not os.path.exists(save_dir):
os.makedirs(save_dir)
args.save_dir = save_dir
# Check the log_dir exists or not
# if args.rank == 0:
    print('log dir:', args.log_dir)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
sys.stdout = Logger(os.path.join(args.log_dir, args.log_name))
    print('log dir:', args.log_dir)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
sample_idx = 0
def main_worker(gpu, ngpus_per_node, args):
global train_loss, train_acc, test_loss, test_acc, arr_time
global best_acc1, param_vec, sample_idx
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# Double the training epochs since each iteration will consume two batches of data for calculating g and g_s
args.epochs = args.epochs * 2
args.batch_size = args.batch_size * 2
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
torch.save(model.state_dict(), 'save_' + args.arch + '/' + str(sample_idx)+'.pt')
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node)
lr_scheduler.step()
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
torch.save(model, os.path.join(args.save_dir, 'model.pt'))
    print('train loss: ', train_loss)
    print('train acc: ', train_acc)
    print('test loss: ', test_loss)
    print('test acc: ', test_acc)
    print('time: ', arr_time)
def train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node):
global iters, param_vec, sample_idx
global train_loss, train_acc, test_loss, test_acc, arr_time
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
epoch_start = end
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
if args.rank % 2 == 1:
weight = args.alpha * 2
##################### grw #############################
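            # Odd-rank workers perturb the weights before the forward pass:
            # each output filter of a >=2-D weight receives Gaussian noise with
            # std = gamma * (that filter's norm), while 1-D parameters (biases,
            # BatchNorm scales) use gamma * (whole-tensor norm) instead.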
noise = []
for mp in model.parameters():
if len(mp.shape) > 1:
sh = mp.shape
sh_mul = np.prod(sh[1:])
temp = mp.view(sh[0], -1).norm(dim=1, keepdim=True).repeat(1, sh_mul).view(mp.shape)
temp = torch.normal(0, args.gamma*temp).to(mp.data.device)
else:
temp = torch.empty_like(mp, device=mp.data.device)
temp.normal_(0, args.gamma*(mp.view(-1).norm().item() + 1e-16))
noise.append(temp)
mp.data.add_(noise[-1])
else:
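            # Even-rank workers keep the clean weights. Because DDP averages
            # gradients across ranks, weighting the two groups' losses by
            # 2*alpha and 2*(1-alpha) makes the synchronized gradient roughly
            # alpha * g_perturbed + (1 - alpha) * g_clean.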
weight = (1 - args.alpha) * 2
# compute output
output = model(images)
loss = criterion(output, target) * weight
optimizer.zero_grad()
loss.backward()
if args.rank % 2 == 1:
            # remove the injected noise to restore the unperturbed weights
with torch.no_grad():
for mp, n in zip(model.parameters(), noise):
mp.data.sub_(n)
optimizer.step()
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item() / weight, images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
if i % args.print_freq == 0:
progress.display(i)
if i > 0 and i % 1000 == 0 and i < 5000:
sample_idx += 1
# torch.save(model.state_dict(), 'save_' + args.arch + '/'+str(sample_idx)+'.pt')
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
sample_idx += 1
# torch.save(model.state_dict(), 'save_' + args.arch + '/'+str(sample_idx)+'.pt')
arr_time.append(time.time() - epoch_start)
train_loss.append(losses.avg)
train_acc.append(top1.avg)
def validate(val_loader, model, criterion, args):
global train_loss, train_acc, test_loss, test_acc, arr_time
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
test_acc.append(top1.avg)
test_loss.append(losses.avg)
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main() | 21,710 | 36.890052 | 118 | py |
RWP | RWP-main/models/resnet.py | """resnet in pytorch
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385v1
"""
import torch
import torch.nn as nn
class BasicBlock(nn.Module):
"""Basic Block for resnet 18 and resnet 34
"""
#BasicBlock and BottleNeck block
#have different output size
#we use class attribute expansion
#to distinct
expansion = 1
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
#residual function
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
#shortcut
self.shortcut = nn.Sequential()
#the shortcut output dimension is not the same with residual function
#use 1*1 convolution to match the dimension
if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class BottleNeck(nn.Module):
"""Residual block for resnet over 50 layers
"""
expansion = 4
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, stride=stride, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BottleNeck.expansion, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion),
)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BottleNeck.expansion, stride=stride, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class ResNet(nn.Module):
def __init__(self, block, num_block, num_classes=100):
super().__init__()
self.in_channels = 64
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True))
        #we use a different input size than the original paper
#so conv2_x's stride is 1
self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, out_channels, num_blocks, stride):
"""make resnet layers(by layer i didnt mean this 'layer' was the
same as a neuron netowork layer, ex. conv layer), one layer may
contain more than one residual block
Args:
block: block type, basic block or bottle neck block
out_channels: output depth channel number of this layer
num_blocks: how many blocks per layer
stride: the stride of the first block of this layer
Return:
return a resnet layer
"""
        # we have num_blocks blocks per layer; the stride of the first block
        # could be 1 or 2, and all remaining blocks use stride 1
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
output = self.conv1(x)
output = self.conv2_x(output)
output = self.conv3_x(output)
output = self.conv4_x(output)
output = self.conv5_x(output)
output = self.avg_pool(output)
output = output.view(output.size(0), -1)
output = self.fc(output)
return output
class resnet18:
base = ResNet
args = list()
kwargs = {'block': BasicBlock, 'num_block': [2, 2, 2, 2]}
# def resnet18():
# """ return a ResNet 18 object
# """
# kwargs = {}
# return ResNet(BasicBlock, [2, 2, 2, 2])
def resnet34():
""" return a ResNet 34 object
"""
return ResNet(BasicBlock, [3, 4, 6, 3])
def resnet50():
""" return a ResNet 50 object
"""
return ResNet(BottleNeck, [3, 4, 6, 3])
def resnet101():
""" return a ResNet 101 object
"""
return ResNet(BottleNeck, [3, 4, 23, 3])
def resnet152():
""" return a ResNet 152 object
"""
return ResNet(BottleNeck, [3, 8, 36, 3])
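# Minimal smoke test (added for illustration; not part of the original file).
if __name__ == '__main__':
    net = resnet18.base(*resnet18.args, **resnet18.kwargs)
    out = net(torch.randn(2, 3, 32, 32))
    assert out.shape == (2, 100)  # default num_classes=100 (CIFAR-100 style)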
| 5,620 | 32.064706 | 118 | py |
RWP | RWP-main/models/vgg.py | """
VGG model definition
ported from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
"""
import math
import torch.nn as nn
import torchvision.transforms as transforms
__all__ = ['VGG16', 'VGG16BN', 'VGG19', 'VGG19BN']
def make_layers(cfg, batch_norm=False):
layers = list()
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
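# Note (added for clarity): cfg[16] above expands to 13 convolutional layers
# interleaved with five 'M' max-pool stages, so a 32x32 CIFAR input is reduced
# to a 512x1x1 feature map before the 512-unit classifier.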
class VGG(nn.Module):
def __init__(self, num_classes=10, depth=16, batch_norm=False):
super(VGG, self).__init__()
self.features = make_layers(cfg[depth], batch_norm)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, num_classes),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class Base:
base = VGG
args = list()
kwargs = dict()
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
class VGG16(Base):
pass
class VGG16BN(Base):
kwargs = {'batch_norm': True}
class VGG19(Base):
kwargs = {'depth': 19}
class VGG19BN(Base):
kwargs = {'depth': 19, 'batch_norm': True} | 2,502 | 25.913978 | 97 | py |
RWP | RWP-main/models/wide_resnet.py | """
WideResNet model definition
ported from https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py
"""
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
__all__ = ['WideResNet28x10', 'WideResNet16x8']
from collections import OrderedDict
import torch
class BasicUnit(nn.Module):
def __init__(self, channels: int, dropout: float):
super(BasicUnit, self).__init__()
self.block = nn.Sequential(OrderedDict([
("0_normalization", nn.BatchNorm2d(channels)),
("1_activation", nn.ReLU(inplace=True)),
("2_convolution", nn.Conv2d(channels, channels, (3, 3), stride=1, padding=1, bias=False)),
("3_normalization", nn.BatchNorm2d(channels)),
("4_activation", nn.ReLU(inplace=True)),
("5_dropout", nn.Dropout(dropout, inplace=True)),
("6_convolution", nn.Conv2d(channels, channels, (3, 3), stride=1, padding=1, bias=False)),
]))
def forward(self, x):
return x + self.block(x)
class DownsampleUnit(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int, dropout: float):
super(DownsampleUnit, self).__init__()
self.norm_act = nn.Sequential(OrderedDict([
("0_normalization", nn.BatchNorm2d(in_channels)),
("1_activation", nn.ReLU(inplace=True)),
]))
self.block = nn.Sequential(OrderedDict([
("0_convolution", nn.Conv2d(in_channels, out_channels, (3, 3), stride=stride, padding=1, bias=False)),
("1_normalization", nn.BatchNorm2d(out_channels)),
("2_activation", nn.ReLU(inplace=True)),
("3_dropout", nn.Dropout(dropout, inplace=True)),
("4_convolution", nn.Conv2d(out_channels, out_channels, (3, 3), stride=1, padding=1, bias=False)),
]))
self.downsample = nn.Conv2d(in_channels, out_channels, (1, 1), stride=stride, padding=0, bias=False)
def forward(self, x):
x = self.norm_act(x)
return self.block(x) + self.downsample(x)
class Block(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int, depth: int, dropout: float):
super(Block, self).__init__()
self.block = nn.Sequential(
DownsampleUnit(in_channels, out_channels, stride, dropout),
*(BasicUnit(out_channels, dropout) for _ in range(depth))
)
def forward(self, x):
return self.block(x)
class WideResNet(nn.Module):
def __init__(self, depth: int, width_factor: int, dropout: float, in_channels: int, num_classes: int):
super(WideResNet, self).__init__()
self.filters = [16, 1 * 16 * width_factor, 2 * 16 * width_factor, 4 * 16 * width_factor]
self.block_depth = (depth - 4) // (3 * 2)
self.f = nn.Sequential(OrderedDict([
("0_convolution", nn.Conv2d(in_channels, self.filters[0], (3, 3), stride=1, padding=1, bias=False)),
("1_block", Block(self.filters[0], self.filters[1], 1, self.block_depth, dropout)),
("2_block", Block(self.filters[1], self.filters[2], 2, self.block_depth, dropout)),
("3_block", Block(self.filters[2], self.filters[3], 2, self.block_depth, dropout)),
("4_normalization", nn.BatchNorm2d(self.filters[3])),
("5_activation", nn.ReLU(inplace=True)),
("6_pooling", nn.AvgPool2d(kernel_size=8)),
("7_flattening", nn.Flatten()),
("8_classification", nn.Linear(in_features=self.filters[3], out_features=num_classes)),
]))
self._initialize()
def _initialize(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode="fan_in", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.zero_()
m.bias.data.zero_()
def forward(self, x):
return self.f(x)
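# Illustrative check (not part of the original file): with depth=28 each Block
# stacks (28 - 4) // 6 = 4 BasicUnits after its DownsampleUnit, and
#   WideResNet(28, 10, 0.0, 3, 10)(torch.randn(2, 3, 32, 32)).shape
# evaluates to torch.Size([2, 10]) for CIFAR-sized inputs.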
class WideResNet28x10:
base = WideResNet
args = list()
kwargs = {'depth': 28, 'width_factor': 10, 'dropout': 0, 'in_channels': 3}
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
class WideResNet16x8:
base = WideResNet
args = list()
kwargs = {'depth': 16, 'width_factor': 8, 'dropout': 0, 'in_channels': 3}
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]) | 5,426 | 38.904412 | 114 | py |
RandomNeuralField | RandomNeuralField-main/train.py | import sys
from pathlib import Path
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader, TensorDataset
from torchvision import datasets, transforms
from src.utils.factory import read_yaml
from src.models.networks import read_model
from src.utils.factory import calc_acc
def create_loader(phase):
bs = 4096
transform = transforms.Compose(
[transforms.ToTensor()]
)
dataset = datasets.MNIST(
root='data',
train=True if phase == 'train' else False,
download=True, transform=transform
)
dataloader = DataLoader(dataset)
X, y = [], []
for img, label in dataloader:
label_list = [-0.1 for _ in range(10)]
img = img.numpy()
label_list[label] = 0.9
X.append(img / np.linalg.norm(img))
y.append(label_list)
X, y = np.array(X).squeeze(axis=1), np.array(y, dtype='float32')
if phase == 'train':
train_id, val_id = train_test_split(
np.arange(50000),
test_size=0.2,
random_state=47
)
X_train, X_val = X[train_id], X[val_id]
y_train, y_val = y[train_id], y[val_id]
X_train, X_val = torch.tensor(X_train), torch.tensor(X_val)
y_train, y_val = torch.tensor(y_train), torch.tensor(y_val)
train_tensor = TensorDataset(X_train, y_train)
val_tensor = TensorDataset(X_val, y_val)
train_loader = DataLoader(train_tensor, batch_size=bs)
val_loader = DataLoader(val_tensor, batch_size=bs)
return train_loader, val_loader
elif phase == 'test':
X_test, y_test = torch.tensor(X), torch.tensor(y)
test_tensor = TensorDataset(X_test, y_test)
return DataLoader(test_tensor, batch_size=64)
else:
        raise NotImplementedError
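# Note (added for clarity): labels are encoded as soft targets (0.9 for the
# true class, -0.1 elsewhere) and each image is scaled to unit L2 norm, a
# common setup for MSE-loss experiments in the NTK regime.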
def train_one_epoch(cfg, net, train_loader, optimizer, criterion):
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
running_loss, running_acc = 0., 0.
for i, (imgs, labels) in enumerate(train_loader):
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
optimizer.zero_grad()
outputs = net(imgs)
loss = criterion(outputs, labels) / 2
acc = calc_acc(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
running_acc += acc
return running_loss / (i+1), running_acc / (i+1)
def train(cfg, net, lr, train_loader, val_loader):
n_epochs = cfg.GENERAL.EPOCH
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
init_name = cfg.INITIALIZER.TYPE
# define the loss function and optimizer
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(net.parameters(), lr)
best_val_loss = 1e10
keys = ['train/loss', 'train/acc', 'val/loss', 'val/acc']
for epoch in range(n_epochs):
net.train()
avg_train_loss, avg_train_acc = train_one_epoch(
cfg, net, train_loader, optimizer, criterion
)
net.eval()
with torch.no_grad():
running_vloss, running_vacc = 0.0, 0.0
for i, (imgs, labels) in enumerate(val_loader):
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
outputs = net(imgs)
val_loss = criterion(outputs, labels) / 2
val_acc = calc_acc(outputs, labels)
running_vloss += val_loss.item()
running_vacc += val_acc
avg_val_loss = running_vloss / (i+1)
avg_val_acc = running_vacc / (i+1)
vals = [avg_train_loss, avg_train_acc, avg_val_loss, avg_val_acc]
file_name = Path('output') / f'{init_name}_result.csv'
x = {k: v for k, v in zip(keys, vals)}
n_cols = len(x) + 1
header = '' if file_name.exists() else (('%20s,' * n_cols % tuple(['epoch'] + keys)).rstrip(',') + '\n')
with open(file_name, 'a') as f:
f.write(header + ('%20.5g,' * n_cols % tuple([epoch] + vals)).rstrip(',') + '\n')
if (epoch + 1) % 1000 == 0:
print(
'Epoch[{}/{}], TrainLoss: {:.5f}, ValLoss: {:.5f}, ValAcc: {:.5f}'
.format(epoch+1, n_epochs, vals[0], vals[2], vals[3])
)
if avg_val_loss < best_val_loss:
best_val_loss = avg_val_loss
torch.save(net.state_dict(), f'pretrained/{init_name}_best.pth')
def main():
cfg = read_yaml(fpath='src/config/config.yaml')
cfg.GENERAL.EPOCH = 50000
train_loader, val_loader = create_loader(phase='train')
# init_types = ['vanilla', 'gaussian', 'withmp', 'mexican', 'matern']
init_types = ['withmp']
for it in init_types:
if it == 'gaussian':
cfg.INITIALIZER.R_SIGMA = 0.5
cfg.INITIALIZER.S_SIGMA = 0.01
elif it == 'withmp':
cfg.INITIALIZER.R_SIGMA = 0.5
cfg.INITIALIZER.S_SIGMA = 0.01
elif it == 'mexican':
cfg.INITIALIZER.M_SIGMA = 0.01
cfg.INITIALIZER.S_SIGMA = 0.01
elif it == 'matern':
cfg.INITIALIZER.R_SIGMA = 0.5
cfg.INITIALIZER.S_SIGMA = 0.01
cfg.INITIALIZER.TYPE = it
net = read_model(cfg)
train(cfg, net, 0.5, train_loader, val_loader)
if __name__ == '__main__':
main() | 5,899 | 30.382979 | 112 | py |
RandomNeuralField | RandomNeuralField-main/src/tools/relative_frob.py | import sys
from os.path import join, dirname
import torch
import torch.nn as nn
from torch import optim
sys.path.append(join(dirname(__file__), "../.."))
from src.ntk.generate import generate_ntk
from src.utils.factory import calc_diff_frob, calc_acc
def calc_ntk_frob(cfg, net, lr, train_loader, test_loader):
n_epochs = cfg.GENERAL.EPOCH
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
# define the loss function and optimizer
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(net.parameters(), lr)
for epoch in range(n_epochs):
train_loss = 0
net.train()
for imgs, labels in train_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
optimizer.zero_grad()
if epoch == 0:
ntk_0, _ = generate_ntk(net, 0, imgs, imgs, cfg, calc_lr=True)
outputs = net(imgs)
train_loss = criterion(outputs, labels) / 2
train_acc = calc_acc(outputs, labels)
train_loss.backward()
optimizer.step()
if epoch == n_epochs - 1:
ntk_t, _ = generate_ntk(net, 0, imgs, imgs, cfg, calc_lr=True)
net.eval()
with torch.no_grad():
for imgs, labels in test_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
outputs = net(imgs)
test_loss = criterion(outputs, labels) / 2
outputs = outputs.cpu().detach().numpy()
test_acc = calc_acc(outputs, labels)
ntk_diff_frob = calc_diff_frob(ntk_0, ntk_t)
return ntk_diff_frob, train_acc, test_acc | 1,949 | 31.5 | 78 | py |
RandomNeuralField | RandomNeuralField-main/src/tools/train.py | import sys
from os.path import join, dirname
import torch
import torch.nn as nn
from torch import optim
sys.path.append(join(dirname(__file__), "../.."))
from src.utils.factory import calc_acc
def train(cfg, net, lr, database):
n_epochs = cfg.GENERAL.EPOCH
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
# define the loss function and optimizer
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(net.parameters(), lr)
train_loader, test_loader = database.get_loader()
results = {
'train_losses': [], 'test_losses': [],
'train_accs': [], 'test_accs': [],
'train_outputs': [], 'test_outputs': []
}
for epoch in range(n_epochs):
train_loss, test_loss = 0, 0
net.train()
for imgs, labels in train_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
optimizer.zero_grad()
outputs = net(imgs)
train_loss = criterion(outputs, labels) / 2
outputs = outputs.cpu().detach().numpy()
train_acc = calc_acc(outputs, labels)
train_loss.backward()
optimizer.step()
if epoch == 0:
f_train_0 = outputs
results['train_losses'].append(train_loss.cpu().detach().numpy())
results['train_accs'].append(train_acc)
results['train_outputs'].append(outputs)
net.eval()
with torch.no_grad():
for imgs, labels in test_loader:
imgs = imgs.view(-1, input_shape).to(f'cuda:{device_id[0]}')
labels = labels.float().to(f'cuda:{device_id[0]}')
outputs = net(imgs)
test_loss = criterion(outputs, labels) / 2
outputs = outputs.cpu().detach().numpy()
test_acc = calc_acc(outputs, labels)
if epoch == 0:
f_test_0 = outputs
results['test_losses'].append(test_loss.cpu().detach().numpy())
results['test_accs'].append(test_acc)
results['test_outputs'].append(outputs)
print('Epoch[{}/{}], TrainLoss: {:.4f}, TestLoss: {:.4f}, TestAcc: {:.4f}'
.format(epoch+1, n_epochs, train_loss, test_loss, test_acc))
return f_train_0, f_test_0, results | 2,554 | 32.181818 | 82 | py |
RandomNeuralField | RandomNeuralField-main/src/dataset/dataset.py | import numpy as np
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import DataLoader, TensorDataset
from torchvision import datasets, transforms
class MakeDataset:
def __init__(self, cfg):
self.cfg = cfg
self.dataset_name = cfg.DATA.NAME
self.n_class = cfg.DATA.CLASS
self.data_num = cfg.DATA.DATA_NUM
self.test_ratio = cfg.DATA.SPLIT_RATIO
self.dataloader = self.loader_setup()
def loader_setup(self):
transform = transforms.Compose(
[transforms.ToTensor()]
)
if self.dataset_name == 'mnist':
dataset = datasets.MNIST(
root='../data', train=True, download=True, transform=transform
)
elif self.dataset_name == 'fashion':
dataset = datasets.FashionMNIST(
root='../data', train=True, download=True, transform=transform
)
else:
            raise NotImplementedError
dataloader = DataLoader(dataset)
return dataloader
def get_array(self):
X, y = [], []
for i, (img, label) in enumerate(self.dataloader):
label_list = [-0.1 for _ in range(self.n_class)]
img = img.numpy()
label_list[label] = 0.9
X.append(img / np.linalg.norm(img))
y.append(label_list)
if i == self.data_num - 1:
break
X, y = np.array(X).squeeze(axis=1), np.array(y, dtype='float32')
train_id, test_id = train_test_split(
np.arange(self.data_num),
test_size=self.test_ratio,
random_state=47
)
X_train, X_test = X[train_id], X[test_id]
y_train, y_test = y[train_id], y[test_id]
return X_train, X_test, y_train, y_test
def get_tensor(self):
X_train, X_test, y_train, y_test = self.get_array()
X_train, X_test = torch.tensor(X_train), torch.tensor(X_test)
y_train, y_test = torch.tensor(y_train), torch.tensor(y_test)
return X_train, X_test, y_train, y_test
def get_loader(self):
X_train, X_test, y_train, y_test = self.get_tensor()
train_tensor = TensorDataset(X_train, y_train)
test_tensor = TensorDataset(X_test, y_test)
train_loader = DataLoader(train_tensor, batch_size=self.data_num)
test_loader = DataLoader(test_tensor, batch_size=self.data_num)
return train_loader, test_loader | 2,678 | 30.151163 | 78 | py |
RandomNeuralField | RandomNeuralField-main/src/ntk/generate.py | from tqdm.auto import tqdm
import numpy as np
import torch
from torch.autograd import grad
def generate_ntk(net, label, train, test, cfg, calc_lr=False):
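    # Empirical NTK for one output coordinate `label`: each entry is
    # K[i, j] = <d f(x_i)[label]/d theta, d f(x_j)[label]/d theta>.
    # Only the lower triangle is computed below and then mirrored,
    # since the Gram matrix is symmetric.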
input_shape = cfg.MODEL.INPUT_FEATURES
device_id = cfg.GENERAL.GPUS
if len(train.size()) > 2:
train = train.view(-1, input_shape)
test = test.view(-1, input_shape)
if torch.cuda.is_available():
train = train.to(f'cuda:{device_id[0]}')
test = test.to(f'cuda:{device_id[0]}')
f_train = net(train)
train_grads = []
for i in range(len(f_train)):
train_grads.append(
grad(f_train[i][label], net.parameters(), retain_graph=True)
)
K_train = torch.zeros((len(f_train), len(f_train)))
for i in tqdm(range(len(f_train))):
grad_i = train_grads[i]
for j in range(i+1):
grad_j = train_grads[j]
K_train[i, j] = sum([torch.sum(
torch.mul(grad_i[k], grad_j[k])
) for k in range(len(grad_j))])
K_train = K_train.cpu().numpy()
K_train = K_train + K_train.T - np.diag(K_train.diagonal())
if calc_lr:
NTK_train = np.kron(K_train, np.eye(cfg.DATA.CLASS))
vals = np.linalg.eigvalsh(NTK_train)
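        # 2 / lambda_max is the classical stability threshold for full-batch
        # gradient descent on a quadratic (MSE) loss in the linearized regime.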
lr = 2 / (max(vals) + 1e-12)
return NTK_train, lr
else:
f_test = net(test)
K_test = torch.zeros((len(f_test), len(f_train)))
test_grads = []
for i in tqdm(range(len(f_test))):
test_grads.append(
grad(f_test[i][label], net.parameters(), retain_graph=True)
)
for j, train_grad in enumerate(train_grads):
for k, test_grad in enumerate(test_grads):
K_test[k, j] = sum([torch.sum(
torch.mul(train_grad[u], test_grad[u])
) for u in range(len(test_grad))])
K_test = K_test.cpu().numpy()
return K_train, K_test | 1,997 | 31.754098 | 75 | py |
RandomNeuralField | RandomNeuralField-main/src/models/initializers.py | import sys
from os.path import join, dirname
import numpy as np
import torch.nn as nn
from torch import Tensor
sys.path.append(join(dirname(__file__), "../.."))
from src.models.utils import sym_mat, receptive_mat, weight_correlation, matern_kernel
class Initializers(nn.Module):
def __init__(self, cfg):
self.type = cfg.INITIALIZER.TYPE
self.r_sigma = cfg.INITIALIZER.R_SIGMA
self.s_sigma = cfg.INITIALIZER.S_SIGMA
self.m_sigma = cfg.INITIALIZER.M_SIGMA
self.nu = cfg.INITIALIZER.NU
def get_initializer(self, in_features, out_features):
if self.type == 'gaussian' or self.type == 'withmp':
init_weight = self.get_gaussian_type(in_features, out_features)
elif self.type == 'mexican':
init_weight = self.get_mexican_type(in_features, out_features)
elif self.type == 'matern':
init_weight = self.get_matern_type(in_features, out_features)
else:
            raise NotImplementedError
return init_weight
def get_gaussian_type(self, in_features, out_features):
if in_features != out_features:
R = np.sqrt(
np.exp(
- receptive_mat(in_features,
out_features,
self.r_sigma)
)
)
elif in_features == out_features:
R = np.sqrt(
np.exp(
- sym_mat(
in_features
) / (in_features*self.r_sigma)**2
)
)
weight_correlated = weight_correlation(
in_features, out_features, self.s_sigma
)
init_weight = R * weight_correlated
return Tensor(init_weight)
def get_mexican_type(self, in_features, out_features):
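        # Mexican-hat (Ricker) profile: with mh holding squared scaled
        # distances, the envelope below is c * (1 - mh) * exp(-mh / 2), where
        # c = 2 / (sqrt(3 * sigma) * pi^(1/4)) is the usual Ricker
        # normalization constant.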
coef = 2 / (np.sqrt(3*self.m_sigma) * pow(np.pi, 1/4))
if in_features != out_features:
mh = receptive_mat(in_features, out_features, self.m_sigma)
elif in_features == out_features:
mh = sym_mat(in_features) / self.m_sigma**2
M = coef * (np.ones((out_features, in_features)) - mh) * np.sqrt(np.exp(-mh))
weight_correlated = weight_correlation(
in_features, out_features, self.s_sigma
)
init_weight = M * weight_correlated
return Tensor(init_weight)
def get_matern_type(self, in_features, out_features):
if in_features != out_features:
R = np.sqrt(
np.exp(
- receptive_mat(in_features,
out_features,
self.r_sigma)
)
)
elif in_features == out_features:
R = np.sqrt(
np.exp(
- sym_mat(
in_features
) / (2 * (in_features * self.r_sigma)**2)
)
)
init_mk = matern_kernel(
in_features, out_features, self.s_sigma, self.nu
)
init_weight = R * init_mk
return Tensor(init_weight) | 3,358 | 31.931373 | 86 | py |
RandomNeuralField | RandomNeuralField-main/src/models/networks.py | import sys
from os.path import join, dirname
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.append(join(dirname(__file__), "../.."))
from src.models.initializers import Initializers
def read_model(cfg):
init_type = cfg.INITIALIZER.TYPE
device_id = cfg.GENERAL.GPUS
# define the type of network
if init_type == 'vanilla':
net = VanillaNet(cfg)
else:
net = Networks(cfg)
if torch.cuda.is_available():
net.to(f'cuda:{device_id[0]}')
return net
class LinearNTK(nn.Linear):
def __init__(self, in_features, out_features, b_sig, w_sig=2, bias=True):
super(LinearNTK, self).__init__(in_features, out_features)
self.reset_parameters()
self.b_sig = b_sig
self.w_sig = w_sig
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=1)
if self.bias is not None:
nn.init.normal_(self.bias, mean=0, std=1)
def forward(self, input):
return F.linear(input,
self.w_sig * self.weight / np.sqrt(self.in_features),
self.b_sig * self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}, b_sig={}'.format(
self.in_features, self.out_features, self.bias is not None, self.b_sig
)
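# Note (added for clarity): LinearNTK implements the NTK parameterization.
# Weights and biases are drawn from N(0, 1) and rescaled at forward time by
# w_sig / sqrt(in_features) and b_sig respectively, so activations stay O(1)
# regardless of layer width.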
class VanillaNet(nn.Module):
def __init__(self, cfg):
super(VanillaNet, self).__init__()
self.visualize = cfg.MODEL.VISUALIZE
in_features = cfg.MODEL.INPUT_FEATURES
mid_features = cfg.MODEL.MID_FEATURES
out_features = cfg.DATA.CLASS
b_sig = cfg.MODEL.B_SIGMA
self.l1 = LinearNTK(in_features, mid_features, b_sig)
self.l2 = LinearNTK(mid_features, mid_features, b_sig)
self.l3 = LinearNTK(mid_features, out_features, b_sig)
def forward(self, x):
if self.visualize:
return self.l1(x)
else:
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
h3 = self.l3(h2)
return h3
class Networks(nn.Module):
def __init__(self, cfg):
super(Networks, self).__init__()
self.init_type = cfg.INITIALIZER.TYPE
self.mid_features = cfg.MODEL.MID_FEATURES
self.visualize = cfg.MODEL.VISUALIZE
in_features = cfg.MODEL.INPUT_FEATURES
out_features = cfg.DATA.CLASS
b_sig = cfg.MODEL.B_SIGMA
init_weight = Initializers(cfg).get_initializer(in_features, self.mid_features)
self.l1 = LinearNTK(in_features, self.mid_features, b_sig)
self.l1.weight.data = init_weight
if self.init_type == 'withmp':
self.l2 = nn.MaxPool1d(kernel_size=3, stride=1, padding=1)
else:
self.l2 = LinearNTK(self.mid_features, self.mid_features, b_sig)
self.l3 = LinearNTK(self.mid_features, out_features, b_sig)
def forward(self, x):
if self.visualize:
return self.l1(x)
else:
h1 = F.relu(self.l1(x))
if self.init_type == 'withmp':
h2 = self.l2(h1)
h3 = self.l3(h2)
else:
h2 = F.relu(self.l2(h1))
h3 = self.l3(h2)
return h3 | 3,468 | 28.905172 | 87 | py |
DivMF | DivMF-master/src/main.py | '''
Top-K Diversity Regularizer
This software may be used only for research evaluation purposes.
For other purposes (e.g., commercial), please contact the authors.
'''
import time
import math
import click
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from model import BPR
from utils import *
# Slice the given list into chunks of size n.
def list_chunk(lst, n):
return [lst[i:i+n] for i in range(0, len(lst), n)]
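# Example (illustrative): list_chunk([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]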
@click.command()
@click.option('--data', type=str, default='ml-1m', help='Select Dataset')
@click.option('--seed', type=int, default=0, help='Set random seed')
@click.option('--reg', type=bool, default=True, help='Use TDR if True')
@click.option('--unmask', type=bool, default=False, help='Use unmask scheme if True')
@click.option('--ut', type=int, default=0, help='Number of unmasking top items')
@click.option('--ur', type=int, default=0, help='Number of unmasking random items')
@click.option('--ep', type=int, default=200, help='Number of total epoch')
@click.option('--reclen', type=int, default=30, help='Number of epoch with reccommendation loss')
@click.option('--dim', type=int, default=32, help='Number of latent factors')
@click.option('--cpu', type=bool, default=False, help='Use CPU while TDR')
@click.option('--dut', type=float, default=0, help='Change on the number of unmasking top items per epoch')
@click.option('--dur', type=float, default=0, help='Change on the number of unmasking random items per epoch')
@click.option('--rbs', type=int, default=0, help='Number of rows in mini batch')
@click.option('--cbs', type=int, default=0, help='Number of columns in mini batch')
def main(data, seed, reg, unmask, ut, ur, ep, reclen, dim, cpu, dut, dur, rbs, cbs):
set_seed(seed)
device = DEVICE
# set hyperparameters
config = {
'lr': 1e-3,
'decay': 1e-4,
'latent_dim': dim,
'batch_size': 4096,
'epochs': ep,
'ks': [5, 10],
'trn_neg': 4,
'test_neg': 99
}
print(config)
torch.multiprocessing.set_sharing_strategy('file_system')
# load data
trn_path = f'../data/{data}/train'
vld_path = f'../data/{data}/validation'
test_path = f'../data/{data}/test'
train_data, test_data, user_num, item_num, train_mat = load_all(trn_path, test_path)
train_dataset = BPRData(
train_data, item_num, train_mat, config['trn_neg'], True)
test_dataset = BPRData(
test_data, item_num, train_mat, 0, False)
train_loader = DataLoader(train_dataset,
batch_size=config['batch_size'], shuffle=True,
num_workers=4)
test_loader = DataLoader(test_dataset,
batch_size=config['test_neg'] + 1,
shuffle=False, num_workers=0)
# define model and optimizer
model = BPR(user_num, item_num, config['latent_dim'])
model.to(device)
optimizer = optim.Adam(
model.parameters(), lr=config['lr'], weight_decay=config['decay'])
# show dataset stat
print('user:', user_num, ' item:', item_num, ' tr len:', len(train_data))
header = f'Epoch | '
for k in config['ks']:
header += f'Recall@{k:2d} NDCG@{k:2d} C@{k:2d} G@{k:2d} E@{k:2d} | '
header += f'Duration (sec)'
print(header)
# obtain items in training set and ground truth items from data
train_data = [[] for _ in range(user_num)]
gt = []
with open(test_path, 'r') as fd:
line = fd.readline()
while line != None and line != '':
arr = line.split('\t')
gt.append(eval(arr[0])[1])
line = fd.readline()
init_time = time.time()
# start model training
for epoch in range(1, config['epochs']+1):
model.train()
start_time = time.time()
train_loader.dataset.ng_sample()
if epoch == 1:
num_batch = 0
# train with recommendation loss
if epoch <= reclen:
for user, item_i, item_j in train_loader:
if epoch == 1:
for u, i in zip(user, item_i):
train_data[u].append(i)
user = user.to(device)
item_i = item_i.to(device)
item_j = item_j.to(device)
# recommendation loss
model.zero_grad()
prediction_i, prediction_j = model(user, item_i, item_j)
rec_loss = - (prediction_i - prediction_j).sigmoid().log().sum()
rec_loss.backward()
optimizer.step()
if epoch == 1:
num_batch += 1
# move model to cpu if option cpu is true
if epoch == reclen and cpu:
device = torch.device('cpu')
model = model.to(device)
optimizer_to(optimizer, device)
# train with diversity regularizer
if reg and epoch > reclen:
# top-k inference
k = config['ks'][1]
if rbs == 0:
row_batch_size = user_num
else:
row_batch_size = rbs
row_batch = list_chunk(torch.randperm(user_num).tolist(), row_batch_size)
if cbs == 0:
col_batch_size = item_num
else:
col_batch_size = cbs
col_batch = list_chunk(torch.randperm(item_num).tolist(), col_batch_size)
# calculate number of unmasking items for each mini batch
bk = math.ceil(k / len(col_batch))
bur = math.ceil(max(ur + int((epoch-reclen-1)*dur), 0) / len(col_batch))
but = math.ceil(max(ut + int((epoch-reclen-1)*dut), 0) / len(col_batch))
for rb in row_batch:
for cb in col_batch:
# inference top-k recommendation lists
model.zero_grad()
scores = []
items = torch.LongTensor(cb).to(device)
for u in rb:
u = torch.tensor([u]).to(device)
score, _ = model(u, items, items)
scores.append(score)
scores = torch.stack(scores)
scores = torch.softmax(scores, dim=1)
# unmasking mechanism
if unmask:
k_ = len(cb) - (bk+but)
else:
k_ = len(cb) - bk
                mask_idx = torch.topk(-scores, k=k_)[1]  # indices of the lowest-scoring entries, to be masked out (set to 0)
if unmask:
for u in range(len(rb)):
idx = torch.randperm(mask_idx.shape[1])
mask_idx[u] = mask_idx[u][idx]
if bur > 0:
mask_idx = mask_idx[:, :-bur]
mask = torch.zeros(size=scores.shape, dtype=torch.bool)
mask[torch.arange(mask.size(0)).unsqueeze(1), mask_idx] = True
topk_scores = scores.masked_fill(mask.to(device), 0)
# coverage regularizer
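                # Minimizing -sum_i log(sum_u s_{u,i} + eps) rewards spreading
                # top-k probability mass over every item in the mini batch;
                # epsilon keeps the log finite for items that appear in no
                # user's unmasked top-k.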
scores_sum = torch.sum(topk_scores, dim=0, keepdim=False)
epsilon = 0.01
scores_sum += epsilon
d_loss = -torch.sum(torch.log(scores_sum))
# skewness regularizer
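                # sum_i p_i log p_i is the negative entropy of each user's
                # normalized top-k score distribution, so adding it to the
                # loss flattens (de-skews) the per-user top-k scores.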
topk_scores = torch.topk(scores, k=k)[0]
norm_scores = topk_scores / torch.sum(topk_scores, dim=1, keepdim=True)
e_loss = torch.sum(torch.sum(norm_scores * torch.log(norm_scores), dim=1))
# sum of losses
regularizations = d_loss + e_loss
regularizations.backward()
optimizer.step()
# evaluate metrics
model.eval()
HRs, NDCGs, coverages, Gs, Es = [], [], [], [], []
for k in config['ks']:
rec_list = make_rec_list(model, k, user_num, item_num, train_data, device)
        HR, NDCG, coverage, gini, entropy = metrics_from_list(rec_list, item_num, gt)
HRs.append(HR)
NDCGs.append(NDCG)
coverages.append(coverage)
        Gs.append(gini)
Es.append(entropy)
epoch_elapsed_time = time.time() - start_time
total_elapsed_time = time.time() - init_time
# print evaluated metrics to console
content = f'{epoch:6d} | '
for hr, ndcg, coverage, g, e in zip(HRs, NDCGs, coverages, Gs, Es):
content += f'{hr:.4f} {ndcg:.4f} {coverage:.4f} {g:.4f} {e:.4f} | '
content += f'{epoch_elapsed_time:.1f} {total_elapsed_time:.1f}'
print(content)
if __name__ == '__main__':
main()
| 8,840 | 37.947137 | 110 | py |
DivMF | DivMF-master/src/utils.py | '''
Top-K Diversity Regularizer
This software may be used only for research evaluation purposes.
For other purposes (e.g., commercial), please contact the authors.
'''
import random
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
import torch.utils.data as data
from tqdm import tqdm
CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda' if CUDA else 'cpu')
def set_seed(seed):
'''
Set pytorch random seed as seed.
'''
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if CUDA:
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def hit(gt_item, pred_items):
'''
Check whether given recommendation list hits or not.
gt_item : ground truth item
pred_items : list of recommended items
'''
if gt_item in pred_items:
return 1
return 0
def ndcg(gt_item, pred_items):
'''
Calculate nDCG
gt_item : ground truth item
pred_items : list of recommended items
'''
if gt_item in pred_items:
index = pred_items.index(gt_item)
return np.reciprocal(np.log2(index+2))
return 0
def metrics_from_list(R, item_num, gt):
'''
Calculate all metrics from recommendation list
return average Hit Ratio, nDCG, coverage, gini index, entropy of R
R : list of recommendation lists
item_num : number of items in dataset
gt : list of ground truth items
'''
HR, NDCG = [], []
rec_items = []
cnt = [0 for i in range(item_num)]
for r, gt_item in zip(R, gt):
HR.append(hit(gt_item, r))
NDCG.append(ndcg(gt_item, r))
rec_items += r
for i in r:
cnt[i] += 1
coverage = len(set(rec_items))/item_num
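    # Gini index via the Lorenz curve: sort per-item recommendation counts,
    # accumulate them, and compare the area under the cumulative curve with
    # the perfectly even "fair" triangle; 0 means equal exposure across items,
    # values near 1 mean recommendations concentrate on a few items.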
    cnt.sort()
    height, area = 0, 0
    for c in cnt:
        height += c
        area += height - c/2
    fair_area = height*item_num/2
    gini = (fair_area-area)/fair_area
    a = torch.FloatTensor(cnt)
    a /= sum(a)
    entropy = torch.distributions.Categorical(probs=a).entropy()
    return np.mean(HR), np.mean(NDCG), coverage, gini, entropy
def make_rec_list(model, top_k, user_num, item_num, train_data, device=DEVICE):
'''
Build recommendation lists from the model
model : recommendation model
top_k : length of a recommendation list
user_num : number of users in dataset
item_num : number of items in dataset
train_data : lists of items that a user interacted in training dataset
device : device where the model mounted on
'''
rtn = []
for u in range(user_num):
items = torch.tensor(list(set(range(item_num))-set(train_data[u]))).to(device)
u = torch.tensor([u]).to(device)
score, _ = model(u, items, items)
_, indices = torch.topk(score, top_k)
recommends = torch.take(items, indices).cpu().numpy().tolist()
rtn.append(recommends)
return rtn
def load_all(trn_path, test_neg, test_num=100):
""" We load all the three file here to save time in each epoch. """
'''
Load dataset from given path
trn_path : path of training dataset
test_neg : path of test dataset
'''
train_data = pd.read_csv(
trn_path,
sep='\t', header=None, names=['user', 'item'],
usecols=[0, 1], dtype={0: np.int32, 1: np.int32})
user_num = train_data['user'].max() + 1
item_num = train_data['item'].max() + 1
train_data = train_data.values.tolist()
# load ratings as a dok matrix
train_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)
for x in train_data:
train_mat[x[0], x[1]] = 1.0
test_data = []
with open(test_neg, 'r') as fd:
line = fd.readline()
while line != None and line != '':
arr = line.split('\t')
u = eval(arr[0])[0]
test_data.append([u, eval(arr[0])[1]])
for i in arr[1:]:
test_data.append([u, int(i)])
line = fd.readline()
return train_data, test_data, user_num, item_num, train_mat
class BPRData(data.Dataset):
def __init__(self, features,
num_item, train_mat=None, num_ng=0, is_training=None):
super(BPRData, self).__init__()
""" Note that the labels are only useful when training, we thus
add them in the ng_sample() function.
features : data
num_item : number of items
train_mat : interaction matrix
num_ng : number of negative samples
is_training : is model training
"""
self.features = features
self.num_item = num_item
self.train_mat = train_mat
self.num_ng = num_ng
self.is_training = is_training
def ng_sample(self):
'''
Sample negative items for BPR
'''
        assert self.is_training, 'no need to sample when testing'
self.features_fill = []
for x in self.features:
u, i = x[0], x[1]
for t in range(self.num_ng):
j = np.random.randint(self.num_item)
while (u, j) in self.train_mat:
j = np.random.randint(self.num_item)
self.features_fill.append([u, i, j])
def __len__(self):
'''
Number of instances.
'''
return self.num_ng * len(self.features) if \
self.is_training else len(self.features)
def __getitem__(self, idx):
'''
Grab an instance.
'''
features = self.features_fill if \
self.is_training else self.features
user = features[idx][0]
item_i = features[idx][1]
item_j = features[idx][2] if \
self.is_training else features[idx][1]
return user, item_i, item_j
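# Typical training usage (a sketch under the usual BPR setup, not part of the
# original file; num_epochs and the batch size are placeholders): negatives
# must be re-drawn every epoch before iterating the loader.
# train_dataset = BPRData(train_data, item_num, train_mat, num_ng=4, is_training=True)
# train_loader = data.DataLoader(train_dataset, batch_size=4096, shuffle=True)
# for epoch in range(num_epochs):
#     train_dataset.ng_sample()  # refresh the (user, pos_item, neg_item) triples
#     for user, item_i, item_j in train_loader:
#         ...  # compute the BPR loss on (user, item_i, item_j)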
def optimizer_to(optim, device):
'''
Move optimizer to target device
optim : optimizer
device : target device
'''
for param in optim.state.values():
# Not sure there are any global tensors in the state dict
if isinstance(param, torch.Tensor):
param.data = param.data.to(device)
if param._grad is not None:
param._grad.data = param._grad.data.to(device)
elif isinstance(param, dict):
for subparam in param.values():
if isinstance(subparam, torch.Tensor):
subparam.data = subparam.data.to(device)
if subparam._grad is not None:
subparam._grad.data = subparam._grad.data.to(device) | 6,732 | 30.316279 | 86 | py |
DivMF | DivMF-master/src/model.py | '''
Top-K Diversity Regularizer
This software may be used only for research evaluation purposes.
For other purposes (e.g., commercial), please contact the authors.
'''
import torch.nn as nn
class BPR(nn.Module):
def __init__(self, user_num, item_num, factor_num):
super(BPR, self).__init__()
"""
user_num: number of users;
item_num: number of items;
factor_num: number of predictive factors.
"""
self.embed_user = nn.Embedding(user_num, factor_num)
self.embed_item = nn.Embedding(item_num, factor_num)
nn.init.normal_(self.embed_user.weight, std=0.01)
nn.init.normal_(self.embed_item.weight, std=0.01)
def forward(self, user, item_i, item_j):
'''
Calculate prediction scores of a user for two item lists.
'''
user = self.embed_user(user)
item_i = self.embed_item(item_i)
item_j = self.embed_item(item_j)
prediction_i = (user * item_i).sum(dim=-1)
prediction_j = (user * item_j).sum(dim=-1)
return prediction_i, prediction_j
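# A minimal sketch of the pairwise BPR objective this model is usually trained
# with (standard formulation, not part of this file; kept commented out):
# prediction_i, prediction_j = model(user, item_i, item_j)
# loss = -(prediction_i - prediction_j).sigmoid().log().sum()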
| 972 | 25.297297 | 66 | py |
3SD | 3SD-main/data_loader.py | # data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
#==========================dataset load==========================
class RescaleT(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
# img = transform.resize(image,(new_h,new_w),mode='constant')
# lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
img = transform.resize(image,(self.output_size,self.output_size),mode='constant')
lbl = transform.resize(label,(self.output_size,self.output_size),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
class Rescale(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image,(new_h,new_w),mode='constant')
lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
tmpLbl = np.zeros(label.shape)
image = image/np.max(image)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
# change the r,g,b to b,r,g from [0,255] to [0,1]
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self,flag=0):
self.flag = flag
def __call__(self, sample):
imidx, image, label =sample['imidx'], sample['image'], sample['label']
tmpLbl = np.zeros(label.shape)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0],image.shape[1],6))
tmpImgt = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImgt[:,:,0] = image[:,:,0]
tmpImgt[:,:,1] = image[:,:,0]
tmpImgt[:,:,2] = image[:,:,0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
            # normalize image to range [0,1]
tmpImg[:,:,0] = (tmpImgt[:,:,0]-np.min(tmpImgt[:,:,0]))/(np.max(tmpImgt[:,:,0])-np.min(tmpImgt[:,:,0]))
tmpImg[:,:,1] = (tmpImgt[:,:,1]-np.min(tmpImgt[:,:,1]))/(np.max(tmpImgt[:,:,1])-np.min(tmpImgt[:,:,1]))
tmpImg[:,:,2] = (tmpImgt[:,:,2]-np.min(tmpImgt[:,:,2]))/(np.max(tmpImgt[:,:,2])-np.min(tmpImgt[:,:,2]))
tmpImg[:,:,3] = (tmpImgtl[:,:,0]-np.min(tmpImgtl[:,:,0]))/(np.max(tmpImgtl[:,:,0])-np.min(tmpImgtl[:,:,0]))
tmpImg[:,:,4] = (tmpImgtl[:,:,1]-np.min(tmpImgtl[:,:,1]))/(np.max(tmpImgtl[:,:,1])-np.min(tmpImgtl[:,:,1]))
tmpImg[:,:,5] = (tmpImgtl[:,:,2]-np.min(tmpImgtl[:,:,2]))/(np.max(tmpImgtl[:,:,2])-np.min(tmpImgtl[:,:,2]))
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
tmpImg[:,:,3] = (tmpImg[:,:,3]-np.mean(tmpImg[:,:,3]))/np.std(tmpImg[:,:,3])
tmpImg[:,:,4] = (tmpImg[:,:,4]-np.mean(tmpImg[:,:,4]))/np.std(tmpImg[:,:,4])
tmpImg[:,:,5] = (tmpImg[:,:,5]-np.mean(tmpImg[:,:,5]))/np.std(tmpImg[:,:,5])
elif self.flag == 1: #with Lab color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImg[:,:,0] = image[:,:,0]
tmpImg[:,:,1] = image[:,:,0]
tmpImg[:,:,2] = image[:,:,0]
else:
tmpImg = image
tmpImg = color.rgb2lab(tmpImg)
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.min(tmpImg[:,:,0]))/(np.max(tmpImg[:,:,0])-np.min(tmpImg[:,:,0]))
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.min(tmpImg[:,:,1]))/(np.max(tmpImg[:,:,1])-np.min(tmpImg[:,:,1]))
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.min(tmpImg[:,:,2]))/(np.max(tmpImg[:,:,2])-np.min(tmpImg[:,:,2]))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
else: # with rgb color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
image = image/np.max(image)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
# change the r,g,b to b,r,g from [0,255] to [0,1]
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class SalObjDataset(Dataset):
def __init__(self,img_name_list,lbl_name_list,transform=None):
# self.root_dir = root_dir
# self.image_name_list = glob.glob(image_dir+'*.png')
# self.label_name_list = glob.glob(label_dir+'*.png')
self.image_name_list = img_name_list
self.label_name_list = lbl_name_list
self.transform = transform
def __len__(self):
return len(self.image_name_list)
def __getitem__(self,idx):
# image = Image.open(self.image_name_list[idx])#io.imread(self.image_name_list[idx])
# label = Image.open(self.label_name_list[idx])#io.imread(self.label_name_list[idx])
image = io.imread(self.image_name_list[idx])
imname = self.image_name_list[idx]
imidx = np.array([idx])
if(0==len(self.label_name_list)):
label_3 = np.zeros(image.shape)
else:
label_3 = io.imread(self.label_name_list[idx])
label = np.zeros(label_3.shape[0:2])
if(3==len(label_3.shape)):
label = label_3[:,:,0]
elif(2==len(label_3.shape)):
label = label_3
if(3==len(image.shape) and 2==len(label.shape)):
label = label[:,:,np.newaxis]
elif(2==len(image.shape) and 2==len(label.shape)):
image = image[:,:,np.newaxis]
label = label[:,:,np.newaxis]
sample = {'imidx':imidx, 'image':image, 'label':label}
if self.transform:
sample = self.transform(sample)
return sample
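# A minimal usage sketch (hypothetical paths): the transforms above are meant
# to be composed in this order, mirroring the training scripts in this repo.
# dataset = SalObjDataset(
#     img_name_list=sorted(glob.glob('train/img/*.jpg')),
#     lbl_name_list=sorted(glob.glob('train/gt/*.png')),
#     transform=transforms.Compose([RescaleT(352), RandomCrop(320), ToTensorLab(flag=0)]))
# loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=1)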
| 9,040 | 32.609665 | 113 | py |
3SD | 3SD-main/new_data_loader.py | # data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
#==========================dataset load==========================
class RescaleT(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label, edge = sample['imidx'], sample['image'],sample['label'],sample['edge']
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
# img = transform.resize(image,(new_h,new_w),mode='constant')
# lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
img = transform.resize(image,(self.output_size,self.output_size),mode='constant')
lbl = transform.resize(label,(self.output_size,self.output_size),mode='constant', order=0, preserve_range=True)
edge = transform.resize(edge, (self.output_size, self.output_size), mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl,'edge':edge}
class Rescale(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label, edge = sample['imidx'], sample['image'],sample['label'],sample['edge']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
edge = edge[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image,(new_h,new_w),mode='constant')
lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
edge = transform.resize(edge, (new_h, new_w), mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl,'edge':edge}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label, edge = sample['imidx'], sample['image'], sample['label'], sample['edge']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
edge = edge[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
edge = edge[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label, 'edge':edge}
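# Note (comment added for clarity): RandomCrop applies a single coin flip and a
# single crop window to image, label, and edge together, so the three tensors
# stay pixel-aligned through the augmentation.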
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
imidx, image, label, edge = sample['imidx'], sample['image'], sample['label'], sample['edge']
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
tmpLbl = np.zeros(label.shape)
tmpedge = np.zeros(edge.shape)
image = image/np.max(image)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
        if np.max(edge) >= 1e-6:  # guard against all-zero edge maps
            edge = edge / np.max(edge)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
tmpedge[:, :, 0] = edge[:, :, 0]
# change the r,g,b to b,r,g from [0,255] to [0,1]
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
tmpedge = edge.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl), 'edge': torch.from_numpy(tmpedge)}
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self,flag=0):
self.flag = flag
def __call__(self, sample):
imidx, image, label, edge =sample['imidx'], sample['image'], sample['label'], sample['edge']
tmpLbl = np.zeros(label.shape)
tmpedge = np.zeros(edge.shape)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
        if np.max(edge) >= 1e-6:  # guard against all-zero edge maps
            edge = edge / np.max(edge)
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0],image.shape[1],6))
tmpImgt = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImgt[:,:,0] = image[:,:,0]
tmpImgt[:,:,1] = image[:,:,0]
tmpImgt[:,:,2] = image[:,:,0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
            # normalize image to range [0,1]
tmpImg[:,:,0] = (tmpImgt[:,:,0]-np.min(tmpImgt[:,:,0]))/(np.max(tmpImgt[:,:,0])-np.min(tmpImgt[:,:,0]))
tmpImg[:,:,1] = (tmpImgt[:,:,1]-np.min(tmpImgt[:,:,1]))/(np.max(tmpImgt[:,:,1])-np.min(tmpImgt[:,:,1]))
tmpImg[:,:,2] = (tmpImgt[:,:,2]-np.min(tmpImgt[:,:,2]))/(np.max(tmpImgt[:,:,2])-np.min(tmpImgt[:,:,2]))
tmpImg[:,:,3] = (tmpImgtl[:,:,0]-np.min(tmpImgtl[:,:,0]))/(np.max(tmpImgtl[:,:,0])-np.min(tmpImgtl[:,:,0]))
tmpImg[:,:,4] = (tmpImgtl[:,:,1]-np.min(tmpImgtl[:,:,1]))/(np.max(tmpImgtl[:,:,1])-np.min(tmpImgtl[:,:,1]))
tmpImg[:,:,5] = (tmpImgtl[:,:,2]-np.min(tmpImgtl[:,:,2]))/(np.max(tmpImgtl[:,:,2])-np.min(tmpImgtl[:,:,2]))
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
tmpImg[:,:,3] = (tmpImg[:,:,3]-np.mean(tmpImg[:,:,3]))/np.std(tmpImg[:,:,3])
tmpImg[:,:,4] = (tmpImg[:,:,4]-np.mean(tmpImg[:,:,4]))/np.std(tmpImg[:,:,4])
tmpImg[:,:,5] = (tmpImg[:,:,5]-np.mean(tmpImg[:,:,5]))/np.std(tmpImg[:,:,5])
elif self.flag == 1: #with Lab color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImg[:,:,0] = image[:,:,0]
tmpImg[:,:,1] = image[:,:,0]
tmpImg[:,:,2] = image[:,:,0]
else:
tmpImg = image
tmpImg = color.rgb2lab(tmpImg)
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.min(tmpImg[:,:,0]))/(np.max(tmpImg[:,:,0])-np.min(tmpImg[:,:,0]))
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.min(tmpImg[:,:,1]))/(np.max(tmpImg[:,:,1])-np.min(tmpImg[:,:,1]))
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.min(tmpImg[:,:,2]))/(np.max(tmpImg[:,:,2])-np.min(tmpImg[:,:,2]))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
else: # with rgb color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
image = image/np.max(image)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
tmpedge[:, :, 0] = edge[:, :, 0]
# change the r,g,b to b,r,g from [0,255] to [0,1]
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
tmpedge = edge.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl), 'edge': torch.from_numpy(tmpedge)}
class SalObjDataset(Dataset):
def __init__(self,img_name_list,lbl_name_list,edge_name_list,transform=None):
# self.root_dir = root_dir
# self.image_name_list = glob.glob(image_dir+'*.png')
# self.label_name_list = glob.glob(label_dir+'*.png')
self.image_name_list = img_name_list
self.label_name_list = lbl_name_list
self.edge_name_list = edge_name_list
self.transform = transform
def __len__(self):
return len(self.image_name_list)
def __getitem__(self,idx):
# image = Image.open(self.image_name_list[idx])#io.imread(self.image_name_list[idx])
# label = Image.open(self.label_name_list[idx])#io.imread(self.label_name_list[idx])
image = io.imread(self.image_name_list[idx])
imname = self.image_name_list[idx]
imidx = np.array([idx])
if(0==len(self.label_name_list)):
label_3 = np.zeros(image.shape)
else:
label_3 = io.imread(self.label_name_list[idx])
label = np.zeros(label_3.shape[0:2])
if(3==len(label_3.shape)):
label = label_3[:,:,0]
elif(2==len(label_3.shape)):
label = label_3
if (0 == len(self.edge_name_list)):
edge_3 = np.zeros(image.shape)
else:
edge_3 = io.imread(self.edge_name_list[idx])
edge = np.zeros(edge_3.shape[0:2])
if (3 == len(edge_3.shape)):
edge = edge_3[:, :, 0]
elif (2 == len(edge_3.shape)):
edge = edge_3
if(3==len(image.shape) and 2==len(label.shape) and 2==len(edge.shape)):
label = label[:,:,np.newaxis]
edge = edge[:, :, np.newaxis]
elif(2==len(image.shape) and 2==len(label.shape) and 2==len(edge.shape)):
image = image[:,:,np.newaxis]
label = label[:,:,np.newaxis]
edge = edge[:, :, np.newaxis]
sample = {'imidx':imidx, 'image':image, 'label':label, 'edge':edge}
if self.transform:
sample = self.transform(sample)
return sample
| 10,287 | 33.756757 | 147 | py |
3SD | 3SD-main/basenet_train.py | import os
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
import torchvision.transforms as standard_transforms
import numpy as np
import random
import glob
import copy
from new_data_loader import Rescale
from new_data_loader import RescaleT
from new_data_loader import RandomCrop
from new_data_loader import ToTensor
from new_data_loader import ToTensorLab
from new_data_loader import SalObjDataset
from functools import wraps, partial
import smoothness
from model import U2NET
from model import U2NETP
import pdb
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
# ------- util tool functions ----------
def exists(val):
return val is not None
def default(val, default):
return val if exists(val) else default
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
class L2Norm(nn.Module):
def forward(self, x, eps = 1e-6):
norm = x.norm(dim = 1, keepdim = True).clamp(min = eps)
return x / norm
#normalize camp map
def norm_cam_map(input_cam,bag_map,pred_class):
B, C, H, W = input_cam.shape
bag_map = F.upsample(bag_map, size=[H,W], mode='bilinear')
cam_map = torch.zeros(B,1,H,W).cuda()
probs = pred_class.softmax(dim = -1)
for idx in range(B):
tmp_cam_vec = input_cam[idx,:,:,:].view( C, H * W).softmax(dim = -1)
tmp_cam_vec = tmp_cam_vec[torch.argmax(probs[idx,:]),:]
tmp_cam_vec = tmp_cam_vec - tmp_cam_vec.min()
tmp_cam_vec = tmp_cam_vec / (tmp_cam_vec.max())
tmp_vec = tmp_cam_vec
tmp_vec = tmp_vec.view(1, H, W)
cam_map[idx,:,:,:] = tmp_vec
cam_map = F.upsample(cam_map, size=[320,320], mode='bilinear')
return cam_map
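# What norm_cam_map does, in short (comment added for clarity): per image, it
# softmaxes the CAM over spatial positions, keeps the channel of the class the
# classifier finds most probable, min-max normalizes that map to [0, 1], and
# upsamples it to the 320x320 training resolution. bag_map is resized but not
# otherwise used by this function.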
# ------- 1. define loss function --------
bce_loss = nn.BCELoss(size_average=True)
def muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v):
eps = 0.000000001
loss0 = bce_loss(d0,labels_v)
loss1 = bce_loss(d1,labels_v)
loss2 = bce_loss(d2,labels_v)
loss3 = bce_loss(d3,labels_v)
loss4 = bce_loss(d4,labels_v)
loss5 = bce_loss(d5,labels_v)
loss6 = bce_loss(d6,labels_v)
loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6
print("l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"%(loss0.data.item(),loss1.data.item(),loss2.data.item(),loss3.data.item(),loss4.data.item(),loss5.data.item(),loss6.data.item()))
return loss0, loss
def gated_edge(pred,edge):
kernel = np.ones((11, 11)) / 121.0
kernel_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel, 0), 0)) # size: (1, 1, 11,11)
if torch.cuda.is_available():
kernel_tensor = Variable(kernel_tensor.type(torch.FloatTensor).cuda(), requires_grad=False)
dilated_pred = torch.clamp(torch.nn.functional.conv2d(pred, kernel_tensor, padding=(5, 5)), 0, 1) # performing dilation
gated_edge_out = edge *dilated_pred
'''B, C, H, W = gated_edge_out.shape
gated_edge_out = gated_edge_out.view(B, C * H * W)
gated_edge_out = gated_edge_out / (gated_edge_out.max(dim=1)[0].view(B, 1))
gated_edge_out = gated_edge_out.view(B, C, H, W)'''
return gated_edge_out
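# The dilation trick used above, isolated (a sketch with hypothetical tensors,
# kept commented out): convolving a {0,1} mask with an all-ones 11x11 box
# kernel and clamping to [0, 1] marks every pixel within 5 px of the mask,
# i.e. a cheap morphological dilation, so only edges near the predicted
# saliency survive the elementwise product.
# mask = (torch.rand(1, 1, 320, 320) > 0.5).float()
# dilated = torch.clamp(F.conv2d(mask, torch.ones(1, 1, 11, 11) / 121.0, padding=5), 0, 1)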
def dino_loss_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = (student_logits / student_temp).softmax(dim = -1)
teacher_probs = ((teacher_logits-centers) / teacher_temp).softmax(dim = -1)
return - (teacher_probs * torch.log(student_probs + eps)).sum(dim = -1).mean()
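# In equation form (comment added for clarity), with s = student logits,
# t = teacher logits, c = running teacher center:
#   L = - sum_k softmax((t - c) / teacher_temp)_k * log softmax(s / student_temp)_k
# i.e. a cross-entropy from the sharpened, centered teacher distribution to
# the student distribution, as in DINO.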
def dino_loss_bag_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = student_logits
teacher_probs = ((teacher_logits-centers))
# creating positive and negative pairs
student_global = F.upsample(student_logits, size=[1,1], mode='bilinear')
B,C,H,W = student_logits.shape
student_probs = student_probs.view(B,C,H*W).transpose(1,2)
student_global = student_global.view(B,C,1)
student_global = student_global/student_global.norm(dim=1).view(B,1,1)
student_probs = student_probs/student_probs.norm(dim=-1).view(B,H*W,1)
sim_student = torch.bmm(student_probs,student_global)
pos_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
pos_student_mask[sim_student>0.95*sim_student.data.detach().max()] = 1
neg_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
neg_student_mask[sim_student<1.1*sim_student.data.detach().min()] = 1
neg_student_mask = torch.bmm(pos_student_mask,neg_student_mask.transpose(1,2))
teacher_global = F.upsample(teacher_probs, size=[1,1], mode='bilinear')
teacher_probs = teacher_probs.view(B,C,H*W).transpose(1,2)
teacher_global = teacher_global.view(B,C,1)
teacher_global = teacher_global/teacher_global.norm(dim=1).view(B,1,1)
teacher_probs = teacher_probs/teacher_probs.norm(dim=-1).view(B,H*W,1)
sim_teacher = torch.bmm(teacher_probs,teacher_global)
pos_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
pos_teacher_mask[sim_teacher>0.95*sim_teacher.data.detach().max()] = 1
pos_teacher_mask = torch.bmm(pos_student_mask,pos_teacher_mask.transpose(1,2))
neg_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
neg_teacher_mask[sim_teacher<1.1*sim_teacher.data.detach().min()] = 1
neg_teacher_mask = torch.bmm(pos_student_mask,neg_teacher_mask.transpose(1,2))
pos_student_mask = torch.bmm(pos_student_mask,pos_student_mask.transpose(1,2))
sim_student = torch.exp(torch.bmm(student_probs,student_probs.transpose(1,2))/student_temp)
sim_teacher = torch.exp(torch.bmm(student_probs,teacher_probs.transpose(1,2))/teacher_temp)
denom = (pos_student_mask+neg_student_mask)*sim_student + (pos_teacher_mask+neg_teacher_mask)*sim_teacher
denom = denom.sum(dim=-1).view(B,H*W,1) +0.000001
loss = pos_student_mask*sim_student/denom + (1-pos_student_mask)
loss = -1*pos_student_mask*torch.log(loss) -1*pos_teacher_mask*torch.log(pos_teacher_mask*sim_teacher/denom + (1-pos_teacher_mask))
return 0.003*loss.mean()
# ------- 2. set the directory of training dataset --------
model_name = 'u2net' #'u2netp'
data_dir = './data/training/DUTS/'#os.path.join(os.getcwd(), 'train_data' + os.sep)
tra_image_dir = 'img/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'im_aug' + os.sep)
tra_label_dir = 'gt/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
tra_edge_dir = 'edge/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
image_ext = '.jpg'
label_ext = '.png'
model_dir = os.path.join(os.getcwd(), 'saved_models', 'fullysup_patch32_' + model_name + os.sep)
if (os.path.isdir(model_dir)==False):
os.mkdir(model_dir)
epoch_num = 100000
batch_size_train = 10
batch_size_val = 1
train_num = 0
val_num = 0
tra_img_name_list = list(glob.glob(data_dir + tra_image_dir + '*' + image_ext))
tra_lbl_name_list = []
tra_edge_name_list = []
for img_path in tra_img_name_list:
img_name = img_path.split(os.sep)[-1]
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
tra_lbl_name_list.append(data_dir + tra_label_dir + imidx + label_ext)
tra_edge_name_list.append(data_dir + tra_edge_dir + imidx + label_ext)
print("---")
print("train images: ", len(tra_img_name_list))
print("train labels: ", len(tra_lbl_name_list))
print("train edges: ", len(tra_edge_name_list))
print("---")
train_num = len(tra_img_name_list)
salobj_dataset = SalObjDataset(
img_name_list=tra_img_name_list,
lbl_name_list=tra_lbl_name_list,
edge_name_list=tra_edge_name_list,
transform=transforms.Compose([
RescaleT(352),
RandomCrop(320),
ToTensorLab(flag=0)]))
salobj_dataloader = DataLoader(salobj_dataset, batch_size=batch_size_train, shuffle=True, num_workers=1)
# ------- 3. dino model and pseudo label generation --------
class Dino(nn.Module):
def __init__(
self,
net,
image_size,
patch_size = 16,
num_classes_K = 200,
student_temp = 0.9,
teacher_temp = 0.04,
local_upper_crop_scale = 0.4,
global_lower_crop_scale = 0.5,
moving_average_decay = 0.9,
center_moving_average_decay = 0.9,
augment_fn = None,
augment_fn2 = None
):
super().__init__()
self.net = net
# default BYOL augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, DEFAULT_AUG)
DEFAULT_AUG_BAG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p=0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p=0.2
),
)
self.augment_bag = default(None, DEFAULT_AUG_BAG)
# local and global crops
self.local_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.05, local_upper_crop_scale))
self.local_crop_bag = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.3, 0.6))
self.global_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (global_lower_crop_scale, 1.))
self.student_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
self.teacher_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
if torch.cuda.is_available():
self.student_encoder = torch.nn.DataParallel(self.student_encoder)
self.teacher_encoder = torch.nn.DataParallel(self.teacher_encoder)
self.teacher_ema_updater = EMA(moving_average_decay)
self.register_buffer('teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('last_teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('teacher_centers_bag', torch.zeros(1,num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.register_buffer('last_teacher_centers_bag', torch.zeros(1, num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
#print(self.teacher_centers_bag.shape)
self.teacher_centering_ema_updater = EMA(center_moving_average_decay)
self.student_temp = student_temp
self.teacher_temp = teacher_temp
# get device of network and make wrapper same device
#device = get_module_device(net)
if torch.cuda.is_available():
self.cuda()
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, 320,320).cuda())
@singleton('teacher_encoder')
def _get_teacher_encoder(self):
teacher_encoder = copy.deepcopy(self.student_encoder)
set_requires_grad(teacher_encoder, False)
return teacher_encoder
def reset_moving_average(self):
del self.teacher_encoder
self.teacher_encoder = None
def update_moving_average(self):
assert self.teacher_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder)
new_teacher_centers = self.teacher_centering_ema_updater.update_average(self.teacher_centers, self.last_teacher_centers)
self.teacher_centers.copy_(new_teacher_centers)
#pdb.set_trace()
new_teacher_centers_bag = self.teacher_centering_ema_updater.update_average(self.teacher_centers_bag,self.last_teacher_centers_bag)
self.teacher_centers_bag.copy_(new_teacher_centers_bag)
def forward(
self,
x,
return_embedding = False,
return_projection = True,
student_temp = None,
teacher_temp = None
):
if return_embedding:
return self.student_encoder(x, return_projection = return_projection)
image_one, image_two = self.augment1(x), self.augment2(x)
local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-1]
student_proj_two = self.student_encoder(local_image_two)[-1]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)[-1]
teacher_proj_two = teacher_encoder(global_image_two)[-1]
#print(teacher_proj_one.shape)
loss_fn_ = partial(
dino_loss_fn,
student_temp = default(student_temp, self.student_temp),
teacher_temp = default(teacher_temp, self.teacher_temp),
centers = self.teacher_centers
)
teacher_logits_avg = torch.cat((teacher_proj_one, teacher_proj_two)).mean(dim = 0)
self.last_teacher_centers.copy_(teacher_logits_avg)
loss = (loss_fn_(teacher_proj_one, student_proj_two) + loss_fn_(teacher_proj_two, student_proj_one)) / 2
return loss
def bag_loss(self, x, return_embedding = False,return_projection = True,student_temp = None,teacher_temp = None):
if return_embedding:
return self.student_encoder(x, return_projection=return_projection)
image_one, image_two = self.augment_bag(x), self.augment_bag(x)
local_image_one, local_image_two = self.local_crop_bag(image_one), self.local_crop_bag(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-2]
student_proj_two = self.student_encoder(local_image_two)[-2]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)
teacher_proj_two = teacher_encoder(global_image_two)
#pdb.set_trace()
teacher_logits_avg = torch.cat((teacher_proj_one[-2], teacher_proj_two[-2])).mean(dim=0)
self.last_teacher_centers_bag.copy_(teacher_logits_avg)
student_proj_two_glb = student_proj_two.mean(dim=-1).mean(dim=-1)
student_proj_one_glb = student_proj_one.mean(dim=-1).mean(dim=-1)
loss_fn_bag = partial(
dino_loss_bag_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers_bag
)
loss_fn_ = partial(
dino_loss_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers
)
loss = (loss_fn_bag(teacher_proj_one[-2], student_proj_two) + loss_fn_bag(teacher_proj_two[-2],
student_proj_one)) / 4
loss += (loss_fn_(teacher_proj_one[-1], student_proj_two_glb) + loss_fn_(teacher_proj_two[-1],
student_proj_one_glb)) / 4
return loss
# ------- 4. define model --------
# define the net
'''if(model_name=='u2net'):
net = U2NET(3, 1)
elif(model_name=='u2netp'):
net = U2NETP(3,1)'''
dino = Dino(model_name,[320],32)
if torch.cuda.is_available():
dino.cuda()
#dino = torch.nn.DataParallel(dino)
# ------- 5. define optimizer --------
print("---define optimizer...")
optimizer = optim.Adam(dino.parameters(), lr=0.0006, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
dino_optimizer = optim.Adam(dino.parameters(), lr=0.0003, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
# ------- 6. training process --------
print("---start training...")
ite_num = 0
running_loss = 0.0
running_tar_loss = 0.0
ite_num4val = 0
save_frq = 10000 # save the model every 10000 iterations
sm_loss_weight = 0.3
smooth_loss = smoothness.smoothness_loss(size_average=True)
for epoch in range(0,epoch_num):
#net.train()
dino.train()
for i, data in enumerate(salobj_dataloader):
ite_num = ite_num + 1
ite_num4val = ite_num4val + 1
inputs, labels, edges = data['image'], data['label'], data['edge']
inputs = inputs.type(torch.FloatTensor)
labels = labels.type(torch.FloatTensor)
edges = edges.type(torch.FloatTensor)
# wrap them in Variable
if torch.cuda.is_available():
inputs_v, labels_v, edges_v = Variable(inputs.cuda(), requires_grad=False), Variable(labels.cuda(),requires_grad=False), Variable(edges.cuda(),requires_grad=False)
else:
inputs_v, labels_v, edges_v = Variable(inputs, requires_grad=False), Variable(labels, requires_grad=False), Variable(edges, requires_grad=False)
        # zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
loss = 0
loss2 = 0
pseudo_label_gts = 0
d0, d1, d2, d3, d4, d5, d6, pred_edges, cam_map, bag_map, pred_class = dino.student_encoder(inputs_v)
loss2, loss = muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6 , labels_v)
smoothLoss_cur1 = sm_loss_weight * smooth_loss(d0, T.Grayscale()(inputs_v))
edge_loss = bce_loss(gated_edge(labels_v,pred_edges), gated_edge(labels_v,edges_v))
loss += edge_loss + smoothLoss_cur1
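        # NaN guard (comment added for clarity): a NaN tensor fails `x == x`,
        # so the checks below skip the update whenever the loss is non-finite.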
if loss == loss:
loss.backward()
optimizer.step()
        # print statistics
if loss == loss:
running_loss += loss.data.item()
if loss2 >0:
running_tar_loss += loss2.data.item()
# del temporary outputs and loss
        del d0, d1, d2, d3, d4, d5, d6, loss2, loss, cam_map, pred_edges, edge_loss, pseudo_label_gts, pred_class
print("[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f " % (
epoch + 1, epoch_num, (i + 1) * batch_size_train, train_num, ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
if ite_num % save_frq == 0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_itr_%d_train_%3f_tar_%3f.pth" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
running_loss = 0.0
running_tar_loss = 0.0
dino.train() # resume train
ite_num4val = 0
if (epoch+1) % 10 ==0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_epoch_%d_train.pth" % (epoch))
torch.save(dino.state_dict(), model_dir + model_name+"_bce_epoch_%d_train_fulldino.pth" % (epoch))
| 21,206 | 37.279783 | 204 | py |
3SD | 3SD-main/3SD_train.py | import os
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
import torchvision.transforms as standard_transforms
import numpy as np
import random
import glob
import copy
from new_data_loader import Rescale
from new_data_loader import RescaleT
from new_data_loader import RandomCrop
from new_data_loader import ToTensor
from new_data_loader import ToTensorLab
from new_data_loader import SalObjDataset
from functools import wraps, partial
import smoothness
from model import U2NET
from model import U2NETP
import pdb
#from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
#from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
#from pytorch_grad_cam.utils.image import show_cam_on_image
# ------- util tool functions ----------
def exists(val):
return val is not None
def default(val, default):
return val if exists(val) else default
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
class L2Norm(nn.Module):
def forward(self, x, eps = 1e-6):
norm = x.norm(dim = 1, keepdim = True).clamp(min = eps)
return x / norm
#normalize camp map
def norm_cam_map(input_cam,bag_map,pred_class):
B, C, H, W = input_cam.shape
bag_map = F.upsample(bag_map, size=[H,W], mode='bilinear')
cam_map = torch.zeros(B,1,H,W).cuda()
probs = pred_class.softmax(dim = -1)
for idx in range(B):
tmp_cam_vec = input_cam[idx,:,:,:].view( C, H * W).softmax(dim = -1)
tmp_cam_vec = tmp_cam_vec[torch.argmax(probs[idx,:]),:]
tmp_cam_vec = tmp_cam_vec - tmp_cam_vec.min()
tmp_cam_vec = tmp_cam_vec / (tmp_cam_vec.max())
tmp_vec = tmp_cam_vec
tmp_vec = tmp_vec.view(1, H, W)
cam_map[idx,:,:,:] = tmp_vec
cam_map = F.upsample(cam_map, size=[320,320], mode='bilinear')
return cam_map
# ------- 1. define loss function --------
bce_loss = nn.BCELoss(size_average=True)
def muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v):
eps = 0.000000001
loss0 = bce_loss(d0,labels_v)
loss1 = bce_loss(d1,labels_v)
loss2 = bce_loss(d2,labels_v)
loss3 = bce_loss(d3,labels_v)
loss4 = bce_loss(d4,labels_v)
loss5 = bce_loss(d5,labels_v)
loss6 = bce_loss(d6,labels_v)
loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6
print("l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"%(loss0.data.item(),loss1.data.item(),loss2.data.item(),loss3.data.item(),loss4.data.item(),loss5.data.item(),loss6.data.item()))
return loss0, loss
def gated_edge(pred,edge):
kernel = np.ones((11, 11)) / 121.0
kernel_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel, 0), 0)) # size: (1, 1, 11,11)
if torch.cuda.is_available():
kernel_tensor = Variable(kernel_tensor.type(torch.FloatTensor).cuda(), requires_grad=False)
dilated_pred = torch.clamp(torch.nn.functional.conv2d(pred, kernel_tensor, padding=(5, 5)), 0, 1) # performing dilation
gated_edge_out = edge *dilated_pred
'''B, C, H, W = gated_edge_out.shape
gated_edge_out = gated_edge_out.view(B, C * H * W)
gated_edge_out = gated_edge_out / (gated_edge_out.max(dim=1)[0].view(B, 1))
gated_edge_out = gated_edge_out.view(B, C, H, W)'''
return gated_edge_out
def dino_loss_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = (student_logits / student_temp).softmax(dim = -1)
teacher_probs = ((teacher_logits-centers) / teacher_temp).softmax(dim = -1)
return - (teacher_probs * torch.log(student_probs + eps)).sum(dim = -1).mean()
def dino_loss_bag_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = student_logits
teacher_probs = ((teacher_logits-centers))
# creating positive and negative pairs
student_global = F.upsample(student_logits, size=[1,1], mode='bilinear')
B,C,H,W = student_logits.shape
student_probs = student_probs.view(B,C,H*W).transpose(1,2)
student_global = student_global.view(B,C,1)
student_global = student_global/student_global.norm(dim=1).view(B,1,1)
student_probs = student_probs/student_probs.norm(dim=-1).view(B,H*W,1)
sim_student = torch.bmm(student_probs,student_global)
pos_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
pos_student_mask[sim_student>0.95*sim_student.data.detach().max()] = 1
neg_student_mask = Variable(torch.zeros(sim_student.shape).cuda(),requires_grad=False)
neg_student_mask[sim_student<1.1*sim_student.data.detach().min()] = 1
neg_student_mask = torch.bmm(pos_student_mask,neg_student_mask.transpose(1,2))
teacher_global = F.upsample(teacher_probs, size=[1,1], mode='bilinear')
teacher_probs = teacher_probs.view(B,C,H*W).transpose(1,2)
teacher_global = teacher_global.view(B,C,1)
teacher_global = teacher_global/teacher_global.norm(dim=1).view(B,1,1)
teacher_probs = teacher_probs/teacher_probs.norm(dim=-1).view(B,H*W,1)
sim_teacher = torch.bmm(teacher_probs,teacher_global)
pos_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
pos_teacher_mask[sim_teacher>0.95*sim_teacher.data.detach().max()] = 1
pos_teacher_mask = torch.bmm(pos_student_mask,pos_teacher_mask.transpose(1,2))
neg_teacher_mask = Variable(torch.zeros(sim_teacher.shape).cuda(),requires_grad=False)
neg_teacher_mask[sim_teacher<1.1*sim_teacher.data.detach().min()] = 1
neg_teacher_mask = torch.bmm(pos_student_mask,neg_teacher_mask.transpose(1,2))
pos_student_mask = torch.bmm(pos_student_mask,pos_student_mask.transpose(1,2))
sim_student = torch.exp(torch.bmm(student_probs,student_probs.transpose(1,2))/student_temp)
sim_teacher = torch.exp(torch.bmm(student_probs,teacher_probs.transpose(1,2))/teacher_temp)
denom = (neg_student_mask)*sim_student + (neg_teacher_mask)*sim_teacher
denom = denom.sum(dim=-1).view(B,H*W,1) +0.000001
loss = pos_student_mask*sim_student/denom + (1-pos_student_mask)
loss = -1*pos_student_mask*torch.log(loss) -1*pos_teacher_mask*torch.log(pos_teacher_mask*sim_teacher/denom + (1-pos_teacher_mask))
return 0.003*loss.mean()
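# Rough reading of dino_loss_bag_fn (comment added for clarity): patch features
# are compared to their image-level, globally pooled feature by cosine
# similarity; patches above 0.95 of the max similarity are treated as
# positives, patches below 1.1x the min as negatives, and an InfoNCE-style
# ratio exp(sim_pos/temp) / sum(exp(sim_neg/temp)) is minimized in -log form,
# tying student patches to both the student and the teacher global features.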
# ------- 2. set the directory of training dataset --------
model_name = 'u2net' #'u2netp'
data_dir = './data/training/DUTS/'#os.path.join(os.getcwd(), 'train_data' + os.sep)
tra_image_dir = 'img/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'im_aug' + os.sep)
tra_label_dir = 'gt/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
tra_edge_dir = 'edge/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
#syn_data_dir = './data/training/DUTS-TR/'#os.path.join(os.getcwd(), 'train_data' + os.sep)
#syn_tra_image_dir = 'img/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'im_aug' + os.sep)
#syn_tra_label_dir = 'gt/'#os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep)
image_ext = '.jpg'
label_ext = '.png'
model_dir = os.path.join(os.getcwd(), 'saved_models', 'final_patch32_pseudo_dino_edge_pre_trans_' + model_name + os.sep)
if (os.path.isdir(model_dir)==False):
os.mkdir(model_dir)
epoch_num = 100000
batch_size_train = 10
batch_size_val = 1
train_num = 0
val_num = 0
tra_img_name_list = list(glob.glob(data_dir + tra_image_dir + '*' + image_ext))
tra_lbl_name_list = []
tra_edge_name_list = []
for img_path in tra_img_name_list:
img_name = img_path.split(os.sep)[-1]
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
tra_lbl_name_list.append(data_dir + tra_label_dir + imidx + label_ext)
tra_edge_name_list.append(data_dir + tra_edge_dir + imidx + label_ext)
#syn_tra_img_name_list = list(glob.glob(syn_data_dir + syn_tra_image_dir + '*' + label_ext))
#pdb.set_trace()
#syn_tra_lbl_name_list = []
#for img_path in syn_tra_img_name_list:
# img_name = img_path.split(os.sep)[-1]
# aaa = img_name.split(".")
# bbb = aaa[0:-1]
# imidx = bbb[0]
# for i in range(1,len(bbb)):
# imidx = imidx + "." + bbb[i]
# syn_tra_lbl_name_list.append(syn_data_dir + syn_tra_label_dir + imidx + label_ext)
#pdb.set_trace()
#tra_img_name_list += syn_tra_img_name_list
#tra_lbl_name_list += syn_tra_lbl_name_list
print("---")
print("train images: ", len(tra_img_name_list))
print("train labels: ", len(tra_lbl_name_list))
print("train edges: ", len(tra_edge_name_list))
print("---")
train_num = len(tra_img_name_list)
salobj_dataset = SalObjDataset(
img_name_list=tra_img_name_list,
lbl_name_list=tra_lbl_name_list,
edge_name_list=tra_edge_name_list,
transform=transforms.Compose([
RescaleT(352),
RandomCrop(320),
ToTensorLab(flag=0)]))
salobj_dataloader = DataLoader(salobj_dataset, batch_size=batch_size_train, shuffle=True, num_workers=1)
# ------- 3. dino model and pseudo label generation --------
class Dino(nn.Module):
def __init__(
self,
net,
image_size,
patch_size = 16,
num_classes_K = 200,
student_temp = 0.9,
teacher_temp = 0.04,
local_upper_crop_scale = 0.4,
global_lower_crop_scale = 0.5,
moving_average_decay = 0.9,
center_moving_average_decay = 0.9,
augment_fn = None,
augment_fn2 = None
):
super().__init__()
self.net = net
# default BYOL augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, DEFAULT_AUG)
DEFAULT_AUG_BAG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p=0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p=0.2
),
)
self.augment_bag = default(None, DEFAULT_AUG_BAG)
# local and global crops
self.local_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.05, local_upper_crop_scale))
self.local_crop_bag = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.3, 0.6))
self.global_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (global_lower_crop_scale, 1.))
self.student_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
self.teacher_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
if torch.cuda.is_available():
self.student_encoder = torch.nn.DataParallel(self.student_encoder)
self.teacher_encoder = torch.nn.DataParallel(self.teacher_encoder)
self.teacher_ema_updater = EMA(moving_average_decay)
self.register_buffer('teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('last_teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('teacher_centers_bag', torch.zeros(1,num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.register_buffer('last_teacher_centers_bag', torch.zeros(1, num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
#print(self.teacher_centers_bag.shape)
self.teacher_centering_ema_updater = EMA(center_moving_average_decay)
self.student_temp = student_temp
self.teacher_temp = teacher_temp
# get device of network and make wrapper same device
#device = get_module_device(net)
if torch.cuda.is_available():
self.cuda()
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, 320,320).cuda())
@singleton('teacher_encoder')
def _get_teacher_encoder(self):
teacher_encoder = copy.deepcopy(self.student_encoder)
set_requires_grad(teacher_encoder, False)
return teacher_encoder
def reset_moving_average(self):
del self.teacher_encoder
self.teacher_encoder = None
def update_moving_average(self):
assert self.teacher_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder)
new_teacher_centers = self.teacher_centering_ema_updater.update_average(self.teacher_centers, self.last_teacher_centers)
self.teacher_centers.copy_(new_teacher_centers)
#pdb.set_trace()
new_teacher_centers_bag = self.teacher_centering_ema_updater.update_average(self.teacher_centers_bag,self.last_teacher_centers_bag)
self.teacher_centers_bag.copy_(new_teacher_centers_bag)
def forward(
self,
x,
return_embedding = False,
return_projection = True,
student_temp = None,
teacher_temp = None
):
if return_embedding:
return self.student_encoder(x, return_projection = return_projection)
image_one, image_two = self.augment1(x), self.augment2(x)
local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-1]
student_proj_two = self.student_encoder(local_image_two)[-1]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)[-1]
teacher_proj_two = teacher_encoder(global_image_two)[-1]
#print(teacher_proj_one.shape)
loss_fn_ = partial(
dino_loss_fn,
student_temp = default(student_temp, self.student_temp),
teacher_temp = default(teacher_temp, self.teacher_temp),
centers = self.teacher_centers
)
teacher_logits_avg = torch.cat((teacher_proj_one, teacher_proj_two)).mean(dim = 0)
self.last_teacher_centers.copy_(teacher_logits_avg)
loss = (loss_fn_(teacher_proj_one, student_proj_two) + loss_fn_(teacher_proj_two, student_proj_one)) / 2
return loss
def bag_loss(self, x, return_embedding = False,return_projection = True,student_temp = None,teacher_temp = None):
if return_embedding:
return self.student_encoder(x, return_projection=return_projection)
image_one, image_two = self.augment_bag(x), self.augment_bag(x)
local_image_one, local_image_two = self.local_crop_bag(image_one), self.local_crop_bag(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-2]
student_proj_two = self.student_encoder(local_image_two)[-2]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)
teacher_proj_two = teacher_encoder(global_image_two)
#pdb.set_trace()
teacher_logits_avg = torch.cat((teacher_proj_one[-2], teacher_proj_two[-2])).mean(dim=0)
self.last_teacher_centers_bag.copy_(teacher_logits_avg)
student_proj_two_glb = student_proj_two.mean(dim=-1).mean(dim=-1)
student_proj_one_glb = student_proj_one.mean(dim=-1).mean(dim=-1)
loss_fn_bag = partial(
dino_loss_bag_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers_bag
)
loss_fn_ = partial(
dino_loss_fn,
student_temp=default(student_temp, self.student_temp),
teacher_temp=default(teacher_temp, self.teacher_temp),
centers=self.teacher_centers
)
loss = (loss_fn_bag(teacher_proj_one[-2], student_proj_two) + loss_fn_bag(teacher_proj_two[-2],
student_proj_one)) / 4
loss += (loss_fn_(teacher_proj_one[-1], student_proj_two_glb) + loss_fn_(teacher_proj_two[-1],
student_proj_one_glb)) / 4
return loss
# ------- 4. define model --------
# define the net
'''if(model_name=='u2net'):
net = U2NET(3, 1)
elif(model_name=='u2netp'):
net = U2NETP(3,1)'''
dino = Dino(model_name,[320],32)
if torch.cuda.is_available():
dino.cuda()
#dino = torch.nn.DataParallel(dino)
# ------- 5. define optimizer --------
print("---define optimizer...")
optimizer = optim.Adam(dino.parameters(), lr=0.0006, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
dino_optimizer = optim.Adam(dino.parameters(), lr=0.0003, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
# ------- 6. training process --------
print("---start training...")
ite_num = 0
running_loss = 0.0
running_tar_loss = 0.0
ite_num4val = 0
save_frq = 10000 # save the model every 10000 iterations
sm_loss_weight = 0.3
smooth_loss = smoothness.smoothness_loss(size_average=True)
for epoch in range(0,epoch_num):
#net.train()
dino.train()
for i, data in enumerate(salobj_dataloader):
ite_num = ite_num + 1
ite_num4val = ite_num4val + 1
inputs, labels, edges = data['image'], data['label'], data['edge']
inputs = inputs.type(torch.FloatTensor)
labels = labels.type(torch.FloatTensor)
edges = edges.type(torch.FloatTensor)
# wrap them in Variable
if torch.cuda.is_available():
inputs_v, labels_v, edges_v = Variable(inputs.cuda(), requires_grad=False), Variable(labels.cuda(),requires_grad=False), Variable(edges.cuda(),requires_grad=False)
else:
inputs_v, labels_v, edges_v = Variable(inputs, requires_grad=False), Variable(labels, requires_grad=False), Variable(edges, requires_grad=False)
        # zero the parameter gradients
dino_optimizer.zero_grad()
# forward + backward + optimize
dino_loss = 0
if (ite_num % 2 == 0):
dino_loss = dino(inputs_v)
            if dino_loss == dino_loss and dino_loss != 0:  # NaN guard: NaN != NaN
print("dino_loss : %3f "%(dino_loss))
dino_loss.backward()
dino_optimizer.step()
dino.update_moving_average()
dino_optimizer.zero_grad()
dino_optimizer.zero_grad()
dino_bag_loss = 0
if (ite_num % 2 == 1):
dino_bag_loss = dino.bag_loss(inputs_v)
            if dino_bag_loss == dino_bag_loss and dino_bag_loss != 0:  # NaN guard: NaN != NaN
print("dino_bag_loss : %3f " % (dino_bag_loss))
dino_bag_loss.backward()
dino_optimizer.step()
dino.update_moving_average()
dino_optimizer.zero_grad()
optimizer.zero_grad()
loss = 0
loss2 = 0
pseudo_label_gts = 0
d0, d1, d2, d3, d4, d5, d6, pred_edges, cam_map, bag_map, pred_class = dino.student_encoder(inputs_v)
edge_loss = 0
if epoch>=120:
norm_cam = norm_cam_map(cam_map.detach().data,bag_map.detach().data,pred_class)
norm_cam[norm_cam<0.5] = 0
pseudo_label_gts = gated_edge(norm_cam,norm_cam+edges_v-(norm_cam*edges_v))
B, C, H, W = labels_v.shape
pseudo_label_gts = pseudo_label_gts.view(B, C * H * W)
            pseudo_label_gts = (pseudo_label_gts - pseudo_label_gts.min(dim=1)[0].view(B, 1)) / (pseudo_label_gts.max(dim=1)[0].view(B, 1) - pseudo_label_gts.min(dim=1)[0].view(B, 1) + 1e-8)  # min-max normalise each map to [0, 1]
pseudo_label_gts = pseudo_label_gts.view(B, C, H, W).detach().data
loss2, loss = muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6 , pseudo_label_gts)
edge_loss = bce_loss(gated_edge(pseudo_label_gts,pred_edges), gated_edge(pseudo_label_gts,edges_v))
smoothLoss_cur1 = sm_loss_weight * smooth_loss(d0, T.Grayscale()(inputs_v))
loss += edge_loss + smoothLoss_cur1
        if loss == loss:  # skip the update when the loss is NaN (NaN != NaN)
loss.backward()
optimizer.step()
# # print statistics
        if loss == loss:  # accumulate statistics only when the loss is finite
running_loss += loss.data.item()
if loss2 >0:
running_tar_loss += loss2.data.item()
# del temporary outputs and loss
del d0, d1, d2, d3, d4, d5, d6, loss2, loss, cam_map, pred_edges, edge_loss, pseudo_label_gts, pred_class, dino_loss, dino_bag_loss
print("[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f " % (
epoch + 1, epoch_num, (i + 1) * batch_size_train, train_num, ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
if ite_num % save_frq == 0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_itr_%d_train_%3f_tar_%3f.pth" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))
running_loss = 0.0
running_tar_loss = 0.0
dino.train() # resume train
ite_num4val = 0
if (epoch+1) % 10 ==0:
torch.save(dino.student_encoder.state_dict(), model_dir + model_name+"_bce_epoch_%d_train.pth" % (epoch))
torch.save(dino.state_dict(), model_dir + model_name+"_bce_epoch_%d_train_fulldino.pth" % (epoch))
| 23,488 | 38.018272 | 204 | py |
3SD | 3SD-main/compute_and_plot.py | import os
import torch
from sklearn.metrics import f1_score, precision_score, recall_score
'''from sklearn.metrics import (precision_recall_curve, PrecisionRecallDisplay)
from sklearn.metrics import precision_recall_curve'''
import cv2
import pdb
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib as mpl
import glob
def Sobel_op(img):
    kernel_x = np.array([[1,0,-1],[2,0,-2],[1,0,-1]])
    kernel_x_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel_x, 0), 0)) # size: (1, 1, 3, 3)
    kernel_x_tensor = Variable(kernel_x_tensor.type(torch.FloatTensor), requires_grad=False)
    kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    kernel_y_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel_y, 0), 0)) # size: (1, 1, 3, 3)
    kernel_y_tensor = Variable(kernel_y_tensor.type(torch.FloatTensor), requires_grad=False)
Gx = torch.nn.functional.conv2d(img, kernel_x_tensor, padding=(1, 1))
Gy = torch.nn.functional.conv2d(img, kernel_y_tensor, padding=(1, 1))
G = torch.sqrt(Gx*Gx + Gy*Gy)
G = F.tanh(G)
    kernel = np.ones((3, 3)) / 9.0  # 3x3 mean filter used to dilate the edge map
    kernel_tensor = torch.Tensor(np.expand_dims(np.expand_dims(kernel, 0), 0)) # size: (1, 1, 3, 3)
kernel_tensor = Variable(kernel_tensor.type(torch.FloatTensor), requires_grad=False)
dilated_G = torch.clamp(torch.nn.functional.conv2d(G, kernel_tensor, padding=(1,1)), 0, 1)
return dilated_G
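# Hedged usage sketch (my addition, not part of the original script): Sobel_op
# expects a (1, 1, H, W) float tensor and returns a dilated, tanh-squashed
# gradient-magnitude map clamped to [0, 1]. The toy square and the _demo_
# name are assumptions for illustration only.
def _demo_sobel_op():
    toy = torch.zeros(1, 1, 32, 32)
    toy[:, :, 8:24, 8:24] = 1.0                   # white square on black background
    edge_map = Sobel_op(toy)
    print(edge_map.shape, float(edge_map.max()))  # torch.Size([1, 1, 32, 32]) and a value in (0, 1]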
def B_measure(gt,target):
h, w = gt.shape
gt = gt.astype(np.float32)
target = target.astype(np.float32)
gt = torch.from_numpy(gt)
target = torch.from_numpy(target)
G_gt = Sobel_op(gt.view(1,1,h,w))
G_target = Sobel_op(target.view(1, 1, h, w))
B = 1 - (2*(torch.sum(G_gt*G_target))/(torch.sum(G_target*G_target)+torch.sum(G_gt*G_gt)))
return B
def E_measure(gt,target):
    #pdb.set_trace()
    phi_gt = np.subtract(gt, gt.mean())
    phi_target = np.subtract(target, target.mean())
    numerator = 2*phi_gt*phi_target
    deno = phi_gt*phi_gt + phi_target*phi_target
    phi = numerator/(deno + 1e-20)  # eps avoids 0/0 where both maps equal their means
Enhance_phi = 0.25*(1+phi)**2
Em = Enhance_phi.mean()
return Em
def files(path):
for file in os.listdir(path):
if os.path.isfile(os.path.join(path,file)):
yield file
def object_s(pred, gt):
temp = pred[gt == 1]
x = temp.mean()
sigma_x = temp.std()
score = 2.0 * x / (x * x + 1.0 + sigma_x + 1e-20)
return score
def S_object(pred, gt):
fg = torch.where(gt == 0, torch.zeros_like(pred), pred)
bg = torch.where(gt == 1, torch.zeros_like(pred), 1 - pred)
o_fg = object_s(fg, gt)
o_bg = object_s(bg, 1 - gt)
u = gt.mean()
Q = u * o_fg + (1 - u) * o_bg
return Q
def centroid(gt):
rows, cols = gt.size()[-2:]
gt = gt.view(rows, cols)
cuda = False
if gt.sum() == 0:
if cuda:
X = torch.eye(1).cuda() * round(cols / 2)
Y = torch.eye(1).cuda() * round(rows / 2)
else:
X = torch.eye(1) * round(cols / 2)
Y = torch.eye(1) * round(rows / 2)
else:
total = gt.sum()
if cuda:
i = torch.from_numpy(np.arange(0, cols)).cuda().float()
j = torch.from_numpy(np.arange(0, rows)).cuda().float()
else:
i = torch.from_numpy(np.arange(0, cols)).float()
j = torch.from_numpy(np.arange(0, rows)).float()
X = torch.round((gt.sum(dim=0) * i).sum() / total + 1e-20)
Y = torch.round((gt.sum(dim=1) * j).sum() / total + 1e-20)
return X.long(), Y.long()
def divideGT(gt, X, Y):
h, w = gt.size()[-2:]
area = h * w
gt = gt.view(h, w)
LT = gt[:Y, :X]
RT = gt[:Y, X:w]
LB = gt[Y:h, :X]
RB = gt[Y:h, X:w]
X = X.float()
Y = Y.float()
w1 = X * Y / area
w2 = (w - X) * Y / area
w3 = X * (h - Y) / area
w4 = 1 - w1 - w2 - w3
return LT, RT, LB, RB, w1, w2, w3, w4
def dividePrediction(pred, X, Y):
h, w = pred.size()[-2:]
pred = pred.view(h, w)
LT = pred[:Y, :X]
RT = pred[:Y, X:w]
LB = pred[Y:h, :X]
RB = pred[Y:h, X:w]
return LT, RT, LB, RB
def ssim(pred, gt):
gt = gt.float()
h, w = pred.size()[-2:]
N = h * w
x = pred.mean()
y = gt.mean()
sigma_x2 = ((pred - x) * (pred - x)).sum() / (N - 1 + 1e-20)
sigma_y2 = ((gt - y) * (gt - y)).sum() / (N - 1 + 1e-20)
sigma_xy = ((pred - x) * (gt - y)).sum() / (N - 1 + 1e-20)
    alpha = 4 * x * y * sigma_xy
    beta = (x * x + y * y) * (sigma_x2 + sigma_y2)
    if alpha != 0:
        Q = alpha / (beta + 1e-20)
    elif alpha == 0 and beta == 0:
Q = 1.0
else:
Q = 0
return Q
def S_region(pred, gt):
X, Y = centroid(gt)
gt1, gt2, gt3, gt4, w1, w2, w3, w4 = divideGT(gt, X, Y)
p1, p2, p3, p4 = dividePrediction(pred, X, Y)
Q1 = ssim(p1, gt1)
Q2 = ssim(p2, gt2)
Q3 = ssim(p3, gt3)
Q4 = ssim(p4, gt4)
Q = w1 * Q1 + w2 * Q2 + w3 * Q3 + w4 * Q4
return Q
def S_measure(target,gt):
alpha = 0.5
h, w = gt.shape
gt = torch.from_numpy(gt).type(torch.FloatTensor)
target = torch.from_numpy(target).type(torch.FloatTensor)
gt = gt.view(1,1,h,w)
target = target.view(1,1,h,w)
Q = alpha * S_object(target, gt) + (1 - alpha) * S_region(target, gt)
return Q
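# Hedged sanity check (my addition): a prediction identical to the ground truth
# should score close to 1 on both S-measure and E-measure. The 16x16 toy mask
# and the _demo_ name are assumptions, not part of the original protocol.
def _demo_measures():
    toy_gt = np.zeros((16, 16), dtype=np.float32)
    toy_gt[4:12, 4:12] = 1.0
    print(float(S_measure(toy_gt.copy(), toy_gt.copy())))  # ~1.0
    print(float(E_measure(toy_gt, toy_gt.copy())))         # ~1.0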
gt_path = './testing/gt/'
target_path = './testing/output_u2net_results/'
test_datasets = ['DUTS']
output_dir = './plots/'
Num_th = 20
Threshold = 0.5
Flag_figs = 0
for dataset in test_datasets:
name = 'exp' + '_' + dataset
precision_list = np.zeros((Num_th, 1))
recall_list = np.zeros((Num_th, 1))
F_score = np.zeros((Num_th, 1))
f1_score_list = []
MAE_list = []
Emeasure_list = []
Bmeasure_list = []
Smeasure_list = []
count = 0
print("----------------------------------------------------------------------------------------")
img_name_list = list(glob.glob(gt_path + dataset + '/*' + '.jpg')) + list(glob.glob(gt_path + dataset + '/*' + '.png'))
print("{} dataset starting, Total image : {} ".format(name,len(img_name_list)))
for file in files(gt_path + dataset):
gt_name = os.path.join(gt_path,dataset,file)
target_name = os.path.join(target_path,dataset,file)
# pdb.set_trace()
# print(target_name)#,precision_list,recall_list)
Gt = cv2.imread(gt_name,0)
pred = cv2.imread(target_name,0)
h, w = Gt.shape
# print(w,h,pred.shape)
pred = cv2.resize(pred,(w,h))
        Gt = Gt.astype(np.float32) / 255.0      # scale 8-bit maps to [0, 1] so the thresholds below are meaningful
        pred = pred.astype(np.float32) / 255.0
Bmeasure_list.append(B_measure(Gt, pred))
gt = np.zeros(Gt.shape)
target = np.zeros(pred.shape)
gt[Gt<Threshold] = 0
gt[Gt>=Threshold] = 1
target[pred<Threshold] = 0
target[pred>=Threshold] = 1
Emeasure_list.append(E_measure(gt, target))
MAE_list.append(np.absolute(np.subtract(gt, target)).mean())
Smeasure_list.append(S_measure(target, gt))
        f1_score_list.append(f1_score(gt.reshape(h*w), target.reshape(h*w), average='binary'))
if Flag_figs == 1:
t_count = 0
for th in np.linspace(0.001, 0.99, Num_th):
gt = np.zeros(Gt.shape)
target = np.zeros(pred.shape)
gt[Gt < th] = 0
gt[Gt >= th] = 1
target[pred < th] = 0
target[pred >= th] = 1
precision_list[t_count] += precision_score(gt.reshape(h*w),target.reshape(h*w))
recall_list[t_count] += recall_score(gt.reshape(h*w),target.reshape(h*w))
#F_score[t_count] += f1_score(gt.reshape(h*w),target.reshape(h*w),labels='binary')
t_count +=1
count +=1
if count%500==0:
print(count)
# print("{} : F1_score : {} gtsum : {} pred sum : {} ".format(file,f1_score_list[-1],gt.sum(),target.sum()))
# pdb.set_trace()
precision_list = precision_list/count
recall_list = recall_list/count
F_score = F_score/count
MAE = sum(MAE_list)/len(MAE_list)
F_mu = sum(f1_score_list)/len(f1_score_list)
E_mu = sum(Emeasure_list)/len(Emeasure_list)
B_mu = sum(Bmeasure_list)/len(Bmeasure_list)
S_mu = sum(Smeasure_list) / len(Smeasure_list)
np.savez('%s/%s.npz' % (output_dir, name), precision_list=precision_list, recall_list=recall_list, F_score=F_score, MAE=MAE, F_mu=F_mu, E_mu=E_mu, B_mu=B_mu, S_mu=S_mu)
print("Dataset:{} Mean F1_Score : {}".format(dataset,F_mu))
print("Dataset:{} Mean MAE : {}".format(dataset,MAE))
print("Dataset:{} Mean E_measure : {}".format(dataset,E_mu))
print("Dataset:{} Mean B_measure : {}".format(dataset,B_mu))
print("Dataset:{} Mean S_measure : {}".format(dataset, S_mu))
print("{} dataset done".format(dataset))
print("----------------------------------------------------------------------------------------")
#print("Mean precision_Score : {}".format(sum(precision_list)/len(precision_list)))
#print("Mean recall_Score : {}".format(sum(recall_list)/len(recall_list)))
#pr_display = PrecisionRecallDisplay(precision=precision_list, recall=recall_list).plot()
#mpl.use('tkagg')
plt.plot(recall_list,precision_list)
plt.savefig(output_dir + name+'_'+'Precision_recall.png')
plt.clf()
    plt.plot(np.linspace(0.001, 0.99, Num_th), F_score)  # x-axis matches the thresholds swept above
plt.savefig(output_dir + name+'_'+'Fscore.png')
plt.clf()
| 9,521 | 33.625455 | 172 | py |
3SD | 3SD-main/u2net_test_pseudo_dino_final.py | import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
# import torch.optim as optim
from functools import wraps, partial
import pdb
import numpy as np
from PIL import Image
import glob
import random
import copy  # needed by Dino._get_teacher_encoder (copy.deepcopy)
from data_loader import RescaleT
from data_loader import ToTensor
from data_loader import ToTensorLab
from data_loader import SalObjDataset
from model import U2NET # full size version 173.6 MB
from model import U2NETP # small version u2net 4.7 MB
import smoothness
# ------- util tool functions ----------
def exists(val):
return val is not None
def default(val, default):
return val if exists(val) else default
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
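# Hedged illustration (my addition): `singleton` memoises the wrapped builder
# in the attribute named by `cache_key`, so repeated calls return the same
# object -- this is how the teacher encoder below gets built exactly once.
# The _CachedDemo name is an assumption for illustration only.
class _CachedDemo:
    def __init__(self):
        self._expensive = None          # cache slot read by the decorator
    @singleton('_expensive')
    def build(self):
        return object()                 # constructed on the first call only
# d = _CachedDemo(); d.build() is d.build()  ->  True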
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
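# Hedged numeric sketch (my addition): with beta = 0.9 the running average
# moves 10% of the way toward each new value; `None` seeds it directly.
def _demo_ema():
    ema = EMA(0.9)
    avg = ema.update_average(None, torch.tensor(0.0))  # seeded with 0.0
    avg = ema.update_average(avg, torch.tensor(1.0))   # 0.9*0.0 + 0.1*1.0 = 0.1
    avg = ema.update_average(avg, torch.tensor(1.0))   # 0.9*0.1 + 0.1*1.0 = 0.19
    print(float(avg))                                  # 0.19 (up to float error)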
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
class L2Norm(nn.Module):
def forward(self, x, eps = 1e-6):
norm = x.norm(dim = 1, keepdim = True).clamp(min = eps)
return x / norm
def dino_loss_fn(
teacher_logits,
student_logits,
teacher_temp,
student_temp,
centers,
eps = 1e-20
):
teacher_logits = teacher_logits.detach()
student_probs = (student_logits / student_temp).softmax(dim = -1)
teacher_probs = ((teacher_logits-centers) / teacher_temp).softmax(dim = -1)
return - (teacher_probs * torch.log(student_probs + eps)).sum(dim = -1).mean()
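# Hedged usage sketch (my addition): the DINO loss is the cross-entropy of the
# student softmax against a centered, sharply-tempered teacher softmax. The
# toy sizes and zero centers below are assumptions for illustration only.
def _demo_dino_loss():
    logits = torch.randn(4, 16)
    loss = dino_loss_fn(logits, logits,
                        teacher_temp=0.04, student_temp=0.9,
                        centers=torch.zeros(1, 16))
    print(float(loss))  # a non-negative scalar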
# normalize the predicted SOD probability map
def normPRED(d):
ma = torch.max(d)
mi = torch.min(d)
dn = (d-mi)/(ma-mi)
return dn
def save_output(image_name,pred,d_dir):
predict = pred
predict = predict.squeeze()
predict_np = predict.cpu().data.numpy()
im = Image.fromarray(predict_np*255).convert('RGB')
img_name = image_name.split(os.sep)[-1]
image = io.imread(image_name)
imo = im.resize((image.shape[1],image.shape[0]),resample=Image.BILINEAR)
pb_np = np.array(imo)
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
imo.save(d_dir+imidx+'.png')
# ------- 3. dino model and pseudo label generation --------
class Dino(nn.Module):
def __init__(
self,
net,
image_size,
patch_size = 16,
num_classes_K = 200,
student_temp = 0.9,
teacher_temp = 0.04,
local_upper_crop_scale = 0.4,
global_lower_crop_scale = 0.5,
moving_average_decay = 0.9,
center_moving_average_decay = 0.9,
augment_fn = None,
augment_fn2 = None
):
super().__init__()
self.net = net
# default BYOL augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, DEFAULT_AUG)
DEFAULT_AUG_BAG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p=0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p=0.2
),
)
self.augment_bag = default(None, DEFAULT_AUG_BAG)
# local and global crops
self.local_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.05, local_upper_crop_scale))
self.local_crop_bag = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (0.3, 0.6))
self.global_crop = T.RandomResizedCrop((image_size[0], image_size[0]), scale = (global_lower_crop_scale, 1.))
self.student_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
self.teacher_encoder = U2NET(3, 1,image_size,patch_size) if (self.net=='u2net') else U2NETP(3, 1)
if torch.cuda.is_available():
self.student_encoder = torch.nn.DataParallel(self.student_encoder)
self.teacher_encoder = torch.nn.DataParallel(self.teacher_encoder)
self.teacher_ema_updater = EMA(moving_average_decay)
self.register_buffer('teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('last_teacher_centers', torch.zeros(1, num_classes_K))
self.register_buffer('teacher_centers_bag', torch.zeros(1,num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.register_buffer('last_teacher_centers_bag', torch.zeros(1, num_classes_K,image_size[0]//patch_size,image_size[0]//patch_size))
self.teacher_centering_ema_updater = EMA(center_moving_average_decay)
self.student_temp = student_temp
self.teacher_temp = teacher_temp
# get device of network and make wrapper same device
#device = get_module_device(net)
if torch.cuda.is_available():
self.cuda()
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, 320,320).cuda())
@singleton('teacher_encoder')
def _get_teacher_encoder(self):
teacher_encoder = copy.deepcopy(self.student_encoder)
set_requires_grad(teacher_encoder, False)
return teacher_encoder
def reset_moving_average(self):
del self.teacher_encoder
self.teacher_encoder = None
def update_moving_average(self):
assert self.teacher_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder)
new_teacher_centers = self.teacher_centering_ema_updater.update_average(self.teacher_centers, self.last_teacher_centers)
self.teacher_centers.copy_(new_teacher_centers)
#pdb.set_trace()
new_teacher_centers_bag = self.teacher_centering_ema_updater.update_average(self.teacher_centers_bag,self.last_teacher_centers_bag)
self.teacher_centers_bag.copy_(new_teacher_centers_bag)
def forward(
self,
x,
return_embedding = False,
return_projection = True,
student_temp = None,
teacher_temp = None
):
if return_embedding:
return self.student_encoder(x, return_projection = return_projection)
image_one, image_two = self.augment1(x), self.augment2(x)
local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two)
global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two)
student_proj_one = self.student_encoder(local_image_one)[-1]
student_proj_two = self.student_encoder(local_image_two)[-1]
with torch.no_grad():
teacher_encoder = self._get_teacher_encoder()
teacher_proj_one = teacher_encoder(global_image_one)[-1]
teacher_proj_two = teacher_encoder(global_image_two)[-1]
#print(teacher_proj_one.shape)
loss_fn_ = partial(
dino_loss_fn,
student_temp = default(student_temp, self.student_temp),
teacher_temp = default(teacher_temp, self.teacher_temp),
centers = self.teacher_centers
)
teacher_logits_avg = torch.cat((teacher_proj_one, teacher_proj_two)).mean(dim = 0)
self.last_teacher_centers.copy_(teacher_logits_avg)
loss = (loss_fn_(teacher_proj_one, student_proj_two) + loss_fn_(teacher_proj_two, student_proj_one)) / 2
return loss
def main():
# --------- 1. get image path and name ---------
model_name='u2net'#u2netp
test_datasets = ['DUTS_Test','HKU-IS','DUT','THUR']
for dataset in test_datasets:
image_dir = os.path.join(os.getcwd(), './../testing/', 'img',dataset)
folder_pred = os.path.join(os.getcwd(), '../testing/','output_' + model_name + '_results' + os.sep)
prediction_dir = os.path.join(os.getcwd(), '../testing/', 'output_' + model_name + '_results' , dataset+ os.sep)
model_dir = os.path.join(os.getcwd(), 'saved_models', 'final_patch32_pseudo_dino_edge_pre_trans_' + model_name, model_name + '_bce_epoch_139_train_fulldino.pth')
if (os.path.exists(folder_pred) == False):
os.mkdir(folder_pred)
if (os.path.exists(prediction_dir)==False):
os.mkdir(prediction_dir)
img_name_list = list(glob.glob(image_dir + '/*'+'.jpg')) + list(glob.glob(image_dir + '/*'+'.png'))
#print(img_name_list)
# --------- 2. dataloader ---------
#1. dataloader
test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
lbl_name_list = [],
transform=transforms.Compose([RescaleT(320),
ToTensorLab(flag=0)])
)
test_salobj_dataloader = DataLoader(test_salobj_dataset,
batch_size=1,
shuffle=False,
num_workers=1)
# --------- 3. model define ---------
dino = Dino(model_name, [320],32)
if torch.cuda.is_available():
dino.load_state_dict(torch.load(model_dir))
dino.cuda()
else:
            dino.load_state_dict(torch.load(model_dir, map_location='cpu'))
dino.train()
# --------- 4. inference for each image ---------
for i_test, data_test in enumerate(test_salobj_dataloader):
#print("inferencing:",img_name_list[i_test].split(os.sep)[-1])
inputs_test = data_test['image']
inputs_test = inputs_test.type(torch.FloatTensor)
if torch.cuda.is_available():
inputs_test = Variable(inputs_test.cuda())
else:
inputs_test = Variable(inputs_test)
with torch.no_grad():
#loss = dino(inputs_test)
d1,d2,d3,d4,d5,d6,d7,edge,cam_map,bag_map,pred_class = dino.student_encoder(inputs_test)
#pdb.set_trace()
# normalization
pred = d1[:,0,:,:]
pred = normPRED(pred)
# save results to test_results folder
if not os.path.exists(prediction_dir):
os.makedirs(prediction_dir, exist_ok=True)
save_output(img_name_list[i_test],pred,prediction_dir)
print("inferencing:",img_name_list[i_test].split(os.sep)[-1])
del d1,d2,d3,d4,d5,d6,d7
if __name__ == "__main__":
main()
| 12,334 | 34.14245 | 169 | py |
3SD | 3SD-main/u2net_test.py | import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
# import torch.optim as optim
import numpy as np
from PIL import Image
import glob
from data_loader import RescaleT
from data_loader import ToTensor
from data_loader import ToTensorLab
from data_loader import SalObjDataset
from model import U2NET # full size version 173.6 MB
from model import U2NETP # small version u2net 4.7 MB
# normalize the predicted SOD probability map
def normPRED(d):
ma = torch.max(d)
mi = torch.min(d)
dn = (d-mi)/(ma-mi)
return dn
def save_output(image_name,pred,d_dir):
predict = pred
predict = predict.squeeze()
predict_np = predict.cpu().data.numpy()
im = Image.fromarray(predict_np*255).convert('RGB')
img_name = image_name.split(os.sep)[-1]
image = io.imread(image_name)
imo = im.resize((image.shape[1],image.shape[0]),resample=Image.BILINEAR)
pb_np = np.array(imo)
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
imo.save(d_dir+imidx+'.png')
def main():
# --------- 1. get image path and name ---------
model_name='u2net'#u2netp
test_datasets = ['ECSSD', 'PASCAL', 'DUTS_Test', 'HKU-IS', 'DUT', 'THUR']
for dataset in test_datasets:
image_dir = os.path.join(os.getcwd(), './../testing/', 'img',dataset)
prediction_dir = os.path.join(os.getcwd(), '../testing/','class_' + model_name + '_results' , dataset + os.sep)
model_dir = os.path.join(os.getcwd(), 'saved_models', 'trans_syn_' + model_name, model_name + '_bce_epoch_229_train.pth')
if (os.path.exists(prediction_dir)==False):
os.mkdir(prediction_dir)
img_name_list = list(glob.glob(image_dir + '/*'+'.jpg')) + list(glob.glob(image_dir + '/*'+'.png'))
#print(img_name_list)
# --------- 2. dataloader ---------
#1. dataloader
test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
lbl_name_list = [],
transform=transforms.Compose([RescaleT(320),
ToTensorLab(flag=0)])
)
test_salobj_dataloader = DataLoader(test_salobj_dataset,
batch_size=1,
shuffle=False,
num_workers=1)
# --------- 3. model define ---------
if(model_name=='u2net'):
print("...load U2NET---173.6 MB")
net = U2NET(3,1)
elif(model_name=='u2netp'):
print("...load U2NEP---4.7 MB")
net = U2NETP(3,1)
if torch.cuda.is_available():
net = torch.nn.DataParallel(net)
net.load_state_dict(torch.load(model_dir))
net.cuda()
else:
net.load_state_dict(torch.load(model_dir, map_location='cpu'))
net.eval()
# --------- 4. inference for each image ---------
for i_test, data_test in enumerate(test_salobj_dataloader):
print("inferencing:",img_name_list[i_test].split(os.sep)[-1])
inputs_test = data_test['image']
inputs_test = inputs_test.type(torch.FloatTensor)
if torch.cuda.is_available():
inputs_test = Variable(inputs_test.cuda())
else:
inputs_test = Variable(inputs_test)
with torch.no_grad():
d1,d2,d3,d4,d5,d6,d7= net(inputs_test)
# normalization
pred = d1[:,0,:,:]
pred = normPRED(pred)
# save results to test_results folder
if not os.path.exists(prediction_dir):
os.makedirs(prediction_dir, exist_ok=True)
save_output(img_name_list[i_test],pred,prediction_dir)
del d1,d2,d3,d4,d5,d6,d7
if __name__ == "__main__":
main()
| 4,238 | 32.642857 | 129 | py |
3SD | 3SD-main/smoothness/__init__.py | import torch
import torch.nn.functional as F
# from torch.autograd import Variable
# import numpy as np
def laplacian_edge(img):
laplacian_filter = torch.Tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
filter = torch.reshape(laplacian_filter, [1, 1, 3, 3])
filter = filter.cuda()
lap_edge = F.conv2d(img, filter, stride=1, padding=1)
return lap_edge
def gradient_x(img):
sobel = torch.Tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
filter = torch.reshape(sobel,[1,1,3,3])
filter = filter.cuda()
gx = F.conv2d(img, filter, stride=1, padding=1)
return gx
def gradient_y(img):
sobel = torch.Tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
filter = torch.reshape(sobel, [1, 1,3,3])
filter = filter.cuda()
gy = F.conv2d(img, filter, stride=1, padding=1)
return gy
def charbonnier_penalty(s):
cp_s = torch.pow(torch.pow(s, 2) + 0.001**2, 0.5)
return cp_s
def get_saliency_smoothness(pred, gt, size_average=True):
alpha = 10
s1 = 10
s2 = 1
## first oder derivative: sobel
sal_x = torch.abs(gradient_x(pred))
sal_y = torch.abs(gradient_y(pred))
gt_x = gradient_x(gt)
gt_y = gradient_y(gt)
w_x = torch.exp(torch.abs(gt_x) * (-alpha))
w_y = torch.exp(torch.abs(gt_y) * (-alpha))
cps_x = charbonnier_penalty(sal_x * w_x)
cps_y = charbonnier_penalty(sal_y * w_y)
cps_xy = cps_x + cps_y
## second order derivative: laplacian
lap_sal = torch.abs(laplacian_edge(pred))
lap_gt = torch.abs(laplacian_edge(gt))
weight_lap = torch.exp(lap_gt * (-alpha))
weighted_lap = charbonnier_penalty(lap_sal*weight_lap)
smooth_loss = s1*torch.mean(cps_xy) + s2*torch.mean(weighted_lap)
return smooth_loss
class smoothness_loss(torch.nn.Module):
def __init__(self, size_average = True):
super(smoothness_loss, self).__init__()
self.size_average = size_average
def forward(self, pred, target):
return get_saliency_smoothness(pred, target, self.size_average)
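# Hedged usage sketch (my addition): the criterion expects a (B, 1, H, W)
# saliency prediction and a single-channel guidance image; both must live on
# the GPU, because the Sobel/Laplacian kernels above are moved to CUDA.
# Shapes and the _demo_ name are assumptions for illustration only.
def _demo_smoothness():
    if not torch.cuda.is_available():
        return
    criterion = smoothness_loss()
    pred = torch.rand(2, 1, 64, 64).cuda()
    gray = torch.rand(2, 1, 64, 64).cuda()
    print(criterion(pred, gray).item())  # scalar smoothness penalty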
| 2,014 | 30.484375 | 78 | py |
3SD | 3SD-main/model/u2net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class REBNCONV(nn.Module):
def __init__(self,in_ch=3,out_ch=3,dirate=1):
super(REBNCONV,self).__init__()
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self,x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src,tar):
src = F.upsample(src,size=tar.shape[2:],mode='bilinear')
return src
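# Hedged shape sketch (my addition): decoder features are resized to the skip
# connection's spatial size before concatenation. Toy shapes are assumptions.
def _demo_upsample_like():
    src = torch.randn(1, 64, 10, 10)
    tar = torch.randn(1, 64, 20, 20)
    print(_upsample_like(src, tar).shape)  # torch.Size([1, 64, 20, 20])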
### RSU-7 ###
class RSU7(nn.Module):#UNet07DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU7,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
hx6dup = _upsample_like(hx6d,hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):#UNet06DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU6,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):#UNet05DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):#UNet04DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4F,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
return hx1d + hxin
##### U^2-Net ####
class U2NET(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NET,self).__init__()
self.stage1 = RSU7(in_ch,32,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,32,128)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(128,64,256)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(256,128,512)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(512,256,512)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(512,256,512)
# decoder
self.stage5d = RSU4F(1024,256,512)
self.stage4d = RSU4(1024,128,256)
self.stage3d = RSU5(512,64,128)
self.stage2d = RSU6(256,32,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(128,out_ch,3,padding=1)
self.side4 = nn.Conv2d(256,out_ch,3,padding=1)
self.side5 = nn.Conv2d(512,out_ch,3,padding=1)
self.side6 = nn.Conv2d(512,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#-------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
return F.sigmoid(d0), F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6)
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NETP,self).__init__()
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,64)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(64,16,64)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(64,16,64)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(64,16,64)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(64,16,64)
# decoder
self.stage5d = RSU4F(128,16,64)
self.stage4d = RSU4(128,16,64)
self.stage3d = RSU5(128,16,64)
self.stage2d = RSU6(128,16,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#decoder
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
return F.sigmoid(d0), F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6)
| 14,719 | 26.984791 | 118 | py |
3SD | 3SD-main/model/u2net_transformer_pseudo_dino_final.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
from model.utils import trunc_normal_
import pdb
class REBNCONV(nn.Module):
def __init__(self,in_ch=3,out_ch=3,dirate=1):
super(REBNCONV,self).__init__()
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self,x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src,tar):
src = F.upsample(src,size=tar.shape[2:],mode='bilinear')
return src
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
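# Hedged numeric check (my addition): drop_path zeroes whole samples with
# probability drop_prob and rescales survivors by 1/keep_prob, so the batch
# mean is preserved in expectation; at eval time it is the identity.
def _demo_drop_path():
    x = torch.ones(1000, 8)
    y = drop_path(x, drop_prob=0.2, training=True)
    print(float(y.mean()))                                              # ~1.0 in expectation
    print(torch.equal(drop_path(x, drop_prob=0.2, training=False), x))  # True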
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
#pdb.set_trace()
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
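# Hedged shape sketch (my addition): Attention returns both the projected
# tokens and the (B, heads, N, N) attention map that
# get_last_selfattention exposes below. Toy dimensions are assumptions.
def _demo_attention():
    attn_layer = Attention(dim=64, num_heads=8)
    tokens = torch.randn(2, 17, 64)   # e.g. 1 CLS token + 16 patch tokens
    out, attn = attn_layer(tokens)
    print(out.shape, attn.shape)      # (2, 17, 64) and (2, 8, 17, 17)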
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
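# Hedged shape sketch (my addition): a Block is pre-norm self-attention plus an
# MLP, each wrapped in a residual connection, so token shape is preserved.
def _demo_block():
    blk = Block(dim=64, num_heads=8)
    tokens = torch.randn(2, 17, 64)
    print(blk(tokens).shape)                         # torch.Size([2, 17, 64])
    print(blk(tokens, return_attention=True).shape)  # torch.Size([2, 8, 17, 17])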
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=400, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x)
#print(x.shape)
x = x.flatten(2).transpose(1, 2)
#print(self.num_patches)
return x
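# Hedged shape sketch (my addition): PatchEmbed turns an image into
# (H/patch)*(W/patch) tokens of width embed_dim via one strided convolution.
# The toy sizes are assumptions for illustration only.
def _demo_patch_embed():
    embed = PatchEmbed(img_size=64, patch_size=16, in_chans=3, embed_dim=32)
    tokens = embed(torch.randn(2, 3, 64, 64))
    print(tokens.shape, embed.num_patches)  # torch.Size([2, 16, 32]) 16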
### RSU-7 ###
class RSU7(nn.Module):#UNet07DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU7,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
hx6dup = _upsample_like(hx6d,hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):#UNet06DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU6,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):#UNet05DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):#UNet04DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4F,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
return hx1d + hxin
class Edge_Dec(nn.Module):
def __init__(self, mid_ch=8, out_ch=1):
super(Edge_Dec,self).__init__()
self.side1 = nn.Conv2d(64, mid_ch, 3, padding=1)
self.side2 = nn.Conv2d(128, mid_ch, 3, padding=1)
self.side3 = nn.Conv2d(256, mid_ch, 3, padding=1)
self.side4 = nn.Conv2d(512, mid_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, mid_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, mid_ch, 3, padding=1)
self.outconv = nn.Conv2d(6 * mid_ch, out_ch, 1)
self.relu = nn.ReLU(inplace=True)
def forward(self,hx1,hx2,hx3,hx4,hx5,hx6):
d1 = self.relu(self.side1(hx1))
d2 = self.relu(self.side2(hx2))
d2 = _upsample_like(d2, d1)
d3 = self.relu(self.side3(hx3))
d3 = _upsample_like(d3, d1)
d4 = self.relu(self.side4(hx4))
d4 = _upsample_like(d4, d1)
d5 = self.relu(self.side5(hx5))
d5 = _upsample_like(d5, d1)
d6 = self.relu(self.side6(hx6))
d6 = _upsample_like(d6, d1)
d0 = F.sigmoid(self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1)))
return d0
class Class_Dec(nn.Module):
def __init__(self, input_ch, Num_classes):
super(Class_Dec,self).__init__()
self.fc_layer1 = nn.Conv2d(input_ch, Num_classes, kernel_size=1, stride=1,padding=0, bias=False)
self.fc_layer_bag = nn.Conv2d(input_ch, Num_classes, kernel_size=1, stride=1, padding=0, bias=False)
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
def forward(self,class_input):
#print("class")
self.global_pool = F.upsample(class_input, size=[1,1], mode='bilinear')
output = self.fc_layer1(self.global_pool)
B,C,H,W = output.shape
output = output.view(B,C*H*W)
cam_map = self.fc_layer1(class_input)
#bag output
bag_output = self.fc_layer_bag(class_input)
return output,cam_map,bag_output
##### U^2-Net ####
class U2NET(nn.Module):
def __init__(self,in_ch=3,out_ch=1,img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=384, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super(U2NET,self).__init__()
### Transformfer encoder ###
self.image_size = img_size[0]
self.num_features = self.embed_dim = embed_dim
self.preprocess = RSU7(in_ch,16,8)
self.patch_size = patch_size
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=8, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
### U^2Net encoder branch for local task ###
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,128)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(128,32,256)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(256,64,512)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(512,128,512)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(512,128,512)
# decoder
self.stage5d = RSU4F(1408,256,512)
self.stage4d = RSU4(1024,128,256)
self.stage3d = RSU5(512,64,128)
self.stage2d = RSU6(256,32,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(65,out_ch,3,padding=1)
self.side2 = nn.Conv2d(65,out_ch,3,padding=1)
self.side3 = nn.Conv2d(129,out_ch,3,padding=1)
self.side4 = nn.Conv2d(257,out_ch,3,padding=1)
self.side5 = nn.Conv2d(513,out_ch,3,padding=1)
self.side6 = nn.Conv2d(513,out_ch,3,padding=1)
# edge decoder
self.edge_dec = Edge_Dec(8,out_ch)
self.outconv = nn.Conv2d(7*out_ch,out_ch,1)
# classification decoder
self.class_dec = Class_Dec(896,200)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
"""def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]"""
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def forward(self,x):
hx = x
#pdb.set_trace()
tx = F.upsample(x,size=[self.image_size,self.image_size],mode='bilinear')
tx = self.prepare_tokens(self.preprocess(tx))
for blk in self.blocks:
tx = blk(tx)
tx = self.norm(tx)
tx = tx.transpose(1,2)
B, N, C = tx.shape
#print(B,N,C)
tx = F.upsample(tx.reshape(B,N,C,1),size=[C-1,1],mode='bilinear')
tx = tx.reshape(B,N,self.image_size//self.patch_size ,self.image_size//self.patch_size )
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#-------------------- class decoder -------------------
        txc = tx  # alternatively: F.interpolate(tx, size=hx5.shape[2:], mode='bilinear')
        class_input = torch.cat((F.interpolate(hx6, size=txc.shape[2:], mode='bilinear'), txc), 1)
        pred_class, cam_map, bag_output = self.class_dec(class_input)
        txd = F.interpolate(tx, size=hx5.shape[2:], mode='bilinear')
#-------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up,hx5,txd),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
edge = self.edge_dec(hx1,hx2,hx3,hx4,hx5,hx6)
d1 = self.side1(torch.cat((hx1d,edge),1))
d2 = self.side2(torch.cat((hx2d,_upsample_like(edge,hx2d)),1))
d2 = _upsample_like(d2,d1)
d3 = self.side3(torch.cat((hx3d,_upsample_like(edge,hx3d)),1))
d3 = _upsample_like(d3,d1)
d4 = self.side4(torch.cat((hx4d,_upsample_like(edge,hx4d)),1))
d4 = _upsample_like(d4,d1)
d5 = self.side5(torch.cat((hx5d,_upsample_like(edge,hx5d)),1))
d5 = _upsample_like(d5,d1)
d6 = self.side6(torch.cat((hx6,_upsample_like(edge,hx6)),1))
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6,edge),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6), edge, cam_map, bag_output, pred_class
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NETP,self).__init__()
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,64)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(64,16,64)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(64,16,64)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(64,16,64)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(64,16,64)
# decoder
self.stage5d = RSU4F(128,16,64)
self.stage4d = RSU4(128,16,64)
self.stage3d = RSU5(128,16,64)
self.stage2d = RSU6(128,16,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#decoder
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
| 33,498 | 32.499 | 155 | py |
3SD | 3SD-main/model/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Misc functions.
Mostly copy-paste from torchvision references or other public repos like DETR:
https://github.com/facebookresearch/detr/blob/master/util/misc.py
"""
import os
import sys
import time
import math
import random
import argparse
import datetime
import warnings
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
class GaussianBlur(object):
"""
Apply Gaussian Blur to the PIL image.
"""
def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
self.prob = p
self.radius_min = radius_min
self.radius_max = radius_max
def __call__(self, img):
do_it = random.random() <= self.prob
if not do_it:
return img
return img.filter(
ImageFilter.GaussianBlur(
radius=random.uniform(self.radius_min, self.radius_max)
)
)
class Solarization(object):
"""
Apply Solarization to the PIL image.
"""
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
else:
print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
url = None
if model_name == "vit_small" and patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
elif model_name == "vit_small" and patch_size == 8:
url = "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth"
elif model_name == "vit_base" and patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
elif model_name == "vit_base" and patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
if url is not None:
print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
model.load_state_dict(state_dict, strict=True)
else:
print("There is no reference weights available for this model => We use random weights.")
def clip_gradients(model, clip):
norms = []
for name, p in model.named_parameters():
if p.grad is not None:
param_norm = p.grad.data.norm(2)
norms.append(param_norm.item())
clip_coef = clip / (param_norm + 1e-6)
if clip_coef < 1:
p.grad.data.mul_(clip_coef)
return norms
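# Minimal usage sketch for clip_gradients: it is meant to run after
# backward() and before optimizer.step(). The tiny model, data, and clip
# value below are hypothetical stand-ins, not part of the DINO pipeline.
def _demo_clip_gradients(clip=3.0):
    model = nn.Linear(8, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    x, y = torch.randn(4, 8), torch.randn(4, 2)
    loss = ((model(x) - y) ** 2).mean()
    optimizer.zero_grad()
    loss.backward()
    norms = clip_gradients(model, clip)  # clips grads in place, returns per-parameter norms
    optimizer.step()
    return norms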
def cancel_gradients_last_layer(epoch, model, freeze_last_layer):
if epoch >= freeze_last_layer:
return
for n, p in model.named_parameters():
if "last_layer" in n:
p.grad = None
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
"""
Re-start from checkpoint
"""
if not os.path.isfile(ckp_path):
return
print("Found checkpoint at {}".format(ckp_path))
# open checkpoint file
checkpoint = torch.load(ckp_path, map_location="cpu")
# key is what to look for in the checkpoint file
# value is the object to load
# example: {'state_dict': model}
for key, value in kwargs.items():
if key in checkpoint and value is not None:
try:
msg = value.load_state_dict(checkpoint[key], strict=False)
print("=> loaded '{}' from checkpoint '{}' with msg {}".format(key, ckp_path, msg))
except TypeError:
try:
msg = value.load_state_dict(checkpoint[key])
print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path))
except ValueError:
print("=> failed to load '{}' from checkpoint: '{}'".format(key, ckp_path))
else:
print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path))
# re load variable important for the run
if run_variables is not None:
for var_name in run_variables:
if var_name in checkpoint:
run_variables[var_name] = checkpoint[var_name]
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
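# Minimal sketch of how the schedule is consumed: one value per training
# iteration, indexed as schedule[it] inside the loop. The numbers below are
# arbitrary placeholders.
def _demo_cosine_scheduler():
    sched = cosine_scheduler(base_value=0.5, final_value=1e-6,
                             epochs=10, niter_per_ep=100, warmup_epochs=1)
    assert len(sched) == 10 * 100
    # linear warmup reaches base_value at iteration 99, then the cosine decay
    # brings the value down to final_value by the last iteration
    return sched[0], sched[99], sched[-1]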
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def fix_random_seeds(seed=31):
"""
Fix random seeds.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.6f} ({global_avg:.6f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.6f}')
data_time = SmoothedValue(fmt='{avg:.6f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.6f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
# launched with torch.distributed.launch
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
# launched with submitit on a slurm cluster
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
# launched naively with `python main_dino.py`
# we manually add MASTER_ADDR and MASTER_PORT to env variables
elif torch.cuda.is_available():
print('Will run the code on one GPU.')
args.rank, args.gpu, args.world_size = 0, 0, 1
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
else:
print('Does not support training without GPU.')
sys.exit(1)
dist.init_process_group(
backend="nccl",
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.cuda.set_device(args.gpu)
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
dist.barrier()
setup_for_distributed(args.rank == 0)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
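# Minimal sketch: accuracy() expects raw logits of shape (batch, num_classes)
# and integer class targets; it returns one percentage tensor per requested k.
# The toy logits below are made up.
def _demo_accuracy():
    output = torch.tensor([[2.0, 1.0, 0.1],
                           [0.2, 0.3, 2.5]])
    target = torch.tensor([0, 2])
    top1, top2 = accuracy(output, target, topk=(1, 2))
    # both samples are predicted correctly at k=1, so top1 is 100.0
    return top1.item(), top2.item()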
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
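# Minimal sketch: trunc_normal_ fills a tensor in place with samples from
# N(mean, std) truncated to the absolute interval [a, b]. The shape and std
# below mirror a typical transformer weight init but are placeholders.
def _demo_trunc_normal():
    w = torch.empty(256, 256)
    trunc_normal_(w, std=.02)
    assert w.min() >= -2. and w.max() <= 2.  # clamped to the default [a, b]
    return w.std()  # close to 0.02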
class LARS(torch.optim.Optimizer):
"""
Almost copy-paste from https://github.com/facebookresearch/barlowtwins/blob/main/main.py
"""
def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, eta=0.001,
weight_decay_filter=None, lars_adaptation_filter=None):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
eta=eta, weight_decay_filter=weight_decay_filter,
lars_adaptation_filter=lars_adaptation_filter)
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
for g in self.param_groups:
for p in g['params']:
dp = p.grad
if dp is None:
continue
if p.ndim != 1:
dp = dp.add(p, alpha=g['weight_decay'])
if p.ndim != 1:
param_norm = torch.norm(p)
update_norm = torch.norm(dp)
one = torch.ones_like(param_norm)
q = torch.where(param_norm > 0.,
torch.where(update_norm > 0,
(g['eta'] * param_norm / update_norm), one), one)
dp = dp.mul(q)
param_state = self.state[p]
if 'mu' not in param_state:
param_state['mu'] = torch.zeros_like(p)
mu = param_state['mu']
mu.mul_(g['momentum']).add_(dp)
p.add_(mu, alpha=-g['lr'])
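# Minimal usage sketch for LARS: in practice it wraps parameter groups (see
# get_params_groups below) and its lr is driven by an external schedule; the
# model and hyperparameters here are hypothetical stand-ins.
def _demo_lars():
    model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
    optimizer = LARS(model.parameters(), lr=0.3, weight_decay=1e-6)
    loss = model(torch.randn(4, 8)).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()  # layer-wise adaptive update, momentum kept in state['mu']
    return optimizer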
class MultiCropWrapper(nn.Module):
"""
Perform forward pass separately on each resolution input.
The inputs corresponding to a single resolution are clubbed and single
forward is run on the same resolution inputs. Hence we do several
forward passes = number of different resolutions used. We then
concatenate all the output features and run the head forward on these
concatenated features.
"""
def __init__(self, backbone, head):
super(MultiCropWrapper, self).__init__()
# disable layers dedicated to ImageNet labels classification
backbone.fc, backbone.head = nn.Identity(), nn.Identity()
self.backbone = backbone
self.head = head
def forward(self, x):
# convert to list
if not isinstance(x, list):
x = [x]
idx_crops = torch.cumsum(torch.unique_consecutive(
torch.tensor([inp.shape[-1] for inp in x]),
return_counts=True,
)[1], 0)
start_idx = 0
for end_idx in idx_crops:
_out = self.backbone(torch.cat(x[start_idx: end_idx]))
if start_idx == 0:
output = _out
else:
output = torch.cat((output, _out))
start_idx = end_idx
# Run the head forward on the concatenated features.
return self.head(output)
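# Minimal sketch of the wrapper contract: crops sharing a resolution must be
# adjacent in the input list (as the DINO augmentation produces them). The
# tiny backbone and head below are hypothetical stand-ins.
def _demo_multicrop():
    class TinyBackbone(nn.Module):
        def __init__(self):
            super().__init__()
            self.pool = nn.AdaptiveAvgPool2d(1)
        def forward(self, x):
            return self.pool(x).flatten(1)  # (B, C) global features
    wrapper = MultiCropWrapper(TinyBackbone(), head=nn.Linear(3, 5))
    crops = [torch.randn(2, 3, 32, 32)] * 2 + [torch.randn(2, 3, 16, 16)] * 4
    out = wrapper(crops)  # two backbone passes, one per distinct resolution
    assert out.shape == (2 * 6, 5)
    return out.shape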
def get_params_groups(model):
regularized = []
not_regularized = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# we do not regularize biases nor Norm parameters
if name.endswith(".bias") or len(param.shape) == 1:
not_regularized.append(param)
else:
regularized.append(param)
return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}]
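# Minimal sketch: the two groups returned above let the optimizer apply
# weight decay only to weight matrices; biases and norm parameters keep
# weight_decay=0. The optimizer choice and values below are placeholders.
def _demo_params_groups():
    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))
    optimizer = torch.optim.AdamW(get_params_groups(model), lr=1e-3, weight_decay=0.05)
    return optimizer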
def has_batchnorms(model):
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
for name, module in model.named_modules():
if isinstance(module, bn_types):
return True
return False
| 21,117 | 33.06129 | 115 | py |
3SD | 3SD-main/model/u2net_refactor.py | import torch
import torch.nn as nn
import math
__all__ = ['U2NET_full', 'U2NET_lite']
def _upsample_like(x, size):
return nn.Upsample(size=size, mode='bilinear', align_corners=False)(x)
def _size_map(x, height):
# {height: size} for Upsample
size = list(x.shape[-2:])
sizes = {}
for h in range(1, height):
sizes[h] = size
size = [math.ceil(w / 2) for w in size]
return sizes
class REBNCONV(nn.Module):
def __init__(self, in_ch=3, out_ch=3, dilate=1):
super(REBNCONV, self).__init__()
self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dilate, dilation=1 * dilate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu_s1(self.bn_s1(self.conv_s1(x)))
class RSU(nn.Module):
def __init__(self, name, height, in_ch, mid_ch, out_ch, dilated=False):
super(RSU, self).__init__()
self.name = name
self.height = height
self.dilated = dilated
self._make_layers(height, in_ch, mid_ch, out_ch, dilated)
def forward(self, x):
sizes = _size_map(x, self.height)
x = self.rebnconvin(x)
# U-Net like symmetric encoder-decoder structure
def unet(x, height=1):
if height < self.height:
x1 = getattr(self, f'rebnconv{height}')(x)
if not self.dilated and height < self.height - 1:
x2 = unet(getattr(self, 'downsample')(x1), height + 1)
else:
x2 = unet(x1, height + 1)
x = getattr(self, f'rebnconv{height}d')(torch.cat((x2, x1), 1))
return _upsample_like(x, sizes[height - 1]) if not self.dilated and height > 1 else x
else:
return getattr(self, f'rebnconv{height}')(x)
return x + unet(x)
def _make_layers(self, height, in_ch, mid_ch, out_ch, dilated=False):
self.add_module('rebnconvin', REBNCONV(in_ch, out_ch))
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
        self.add_module('rebnconv1', REBNCONV(out_ch, mid_ch))
        self.add_module('rebnconv1d', REBNCONV(mid_ch * 2, out_ch))
for i in range(2, height):
dilate = 1 if not dilated else 2 ** (i - 1)
self.add_module(f'rebnconv{i}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
self.add_module(f'rebnconv{i}d', REBNCONV(mid_ch * 2, mid_ch, dilate=dilate))
dilate = 2 if not dilated else 2 ** (height - 1)
self.add_module(f'rebnconv{height}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
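# Minimal usage sketch: an RSU block is residual, so its output keeps the
# input spatial size while `height` controls the depth of the inner U-Net
# (dilated=True replaces downsampling with dilated convolutions). The sizes
# below are hypothetical stand-ins.
def _demo_rsu():
    block = RSU('En_1', height=4, in_ch=3, mid_ch=8, out_ch=16).eval()
    with torch.no_grad():
        y = block(torch.randn(1, 3, 64, 64))
    assert y.shape == (1, 16, 64, 64)
    return y.shape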
class U2NET(nn.Module):
def __init__(self, cfgs, out_ch):
super(U2NET, self).__init__()
self.out_ch = out_ch
self._make_layers(cfgs)
def forward(self, x):
sizes = _size_map(x, self.height)
maps = [] # storage for maps
# side saliency map
def unet(x, height=1):
if height < 6:
x1 = getattr(self, f'stage{height}')(x)
x2 = unet(getattr(self, 'downsample')(x1), height + 1)
x = getattr(self, f'stage{height}d')(torch.cat((x2, x1), 1))
side(x, height)
return _upsample_like(x, sizes[height - 1]) if height > 1 else x
else:
x = getattr(self, f'stage{height}')(x)
side(x, height)
return _upsample_like(x, sizes[height - 1])
def side(x, h):
# side output saliency map (before sigmoid)
x = getattr(self, f'side{h}')(x)
x = _upsample_like(x, sizes[1])
maps.append(x)
def fuse():
# fuse saliency probability maps
maps.reverse()
x = torch.cat(maps, 1)
x = getattr(self, 'outconv')(x)
maps.insert(0, x)
return [torch.sigmoid(x) for x in maps]
unet(x)
maps = fuse()
return maps
def _make_layers(self, cfgs):
self.height = int((len(cfgs) + 1) / 2)
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
for k, v in cfgs.items():
# build rsu block
self.add_module(k, RSU(v[0], *v[1]))
if v[2] > 0:
# build side layer
self.add_module(f'side{v[0][-1]}', nn.Conv2d(v[2], self.out_ch, 3, padding=1))
# build fuse layer
self.add_module('outconv', nn.Conv2d(int(self.height * self.out_ch), self.out_ch, 1))
def U2NET_full():
full = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, 3, 32, 64), -1],
'stage2': ['En_2', (6, 64, 32, 128), -1],
'stage3': ['En_3', (5, 128, 64, 256), -1],
'stage4': ['En_4', (4, 256, 128, 512), -1],
'stage5': ['En_5', (4, 512, 256, 512, True), -1],
'stage6': ['En_6', (4, 512, 256, 512, True), 512],
'stage5d': ['De_5', (4, 1024, 256, 512, True), 512],
'stage4d': ['De_4', (4, 1024, 128, 256), 256],
'stage3d': ['De_3', (5, 512, 64, 128), 128],
'stage2d': ['De_2', (6, 256, 32, 64), 64],
'stage1d': ['De_1', (7, 128, 16, 64), 64],
}
return U2NET(cfgs=full, out_ch=1)
def U2NET_lite():
lite = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, 3, 16, 64), -1],
'stage2': ['En_2', (6, 64, 16, 64), -1],
'stage3': ['En_3', (5, 64, 16, 64), -1],
'stage4': ['En_4', (4, 64, 16, 64), -1],
'stage5': ['En_5', (4, 64, 16, 64, True), -1],
'stage6': ['En_6', (4, 64, 16, 64, True), 64],
'stage5d': ['De_5', (4, 128, 16, 64, True), 64],
'stage4d': ['De_4', (4, 128, 16, 64), 64],
'stage3d': ['De_3', (5, 128, 16, 64), 64],
'stage2d': ['De_2', (6, 128, 16, 64), 64],
'stage1d': ['De_1', (7, 128, 16, 64), 64],
}
return U2NET(cfgs=lite, out_ch=1)
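# Minimal usage sketch: both factories build a model whose forward returns
# seven sigmoid saliency maps (the fused map first, then the six side
# outputs), each at the input resolution. The input size is a placeholder.
def _demo_u2net():
    net = U2NET_lite().eval()
    with torch.no_grad():
        maps = net(torch.randn(1, 3, 320, 320))
    assert len(maps) == 7 and maps[0].shape == (1, 1, 320, 320)
    return [m.shape for m in maps]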
| 6,097 | 35.08284 | 101 | py |
3SD | 3SD-main/model/u2net_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
from model.utils import trunc_normal_
import pdb
class REBNCONV(nn.Module):
def __init__(self,in_ch=3,out_ch=3,dirate=1):
super(REBNCONV,self).__init__()
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self,x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size as tensor 'tar'
def _upsample_like(src, tar):
    # F.upsample is deprecated; F.interpolate is the drop-in replacement
    src = F.interpolate(src, size=tar.shape[2:], mode='bilinear')
    return src
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
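# Minimal sketch of what stochastic depth does: during training it zeroes the
# residual branch for a random subset of samples and rescales the survivors
# by 1/keep_prob, keeping the expected output unchanged. The input is a toy.
def _demo_drop_path():
    x = torch.ones(8, 4)
    y = drop_path(x, drop_prob=0.5, training=True)
    # each row of y is either all zeros or all 1/0.5 == 2.0
    return y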
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=400, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
    def forward(self, x):
        B, C, H, W = x.shape
        x = self.proj(x)
        # (B, embed_dim, H/P, W/P) -> (B, num_patches, embed_dim)
        x = x.flatten(2).transpose(1, 2)
        return x
### RSU-7 ###
class RSU7(nn.Module):#UNet07DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU7,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
hx6dup = _upsample_like(hx6d,hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):#UNet06DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU6,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):#UNet05DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):#UNet04DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4F,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
return hx1d + hxin
##### U^2-Net ####
class U2NET(nn.Module):
def __init__(self,in_ch=3,out_ch=1,img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=384, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super(U2NET,self).__init__()
        ### Transformer encoder ###
        self.num_features = self.embed_dim = embed_dim
self.preprocess = RSU7(in_ch,16,8)
self.img_size = img_size[0]
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=8, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
### U^2Net encoder branch for local task ###
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,32,128)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(128,64,256)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(256,128,512)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(512,256,512)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(512,256,512)
# decoder
self.stage5d = RSU4F(1408,256,512)
self.stage4d = RSU4(1024,128,256)
self.stage3d = RSU5(512,64,128)
self.stage2d = RSU6(256,32,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(128,out_ch,3,padding=1)
self.side4 = nn.Conv2d(256,out_ch,3,padding=1)
self.side5 = nn.Conv2d(512,out_ch,3,padding=1)
self.side6 = nn.Conv2d(512,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
"""def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]"""
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def forward(self,x):
        hx = x
        tx = F.interpolate(x, size=[self.img_size, self.img_size], mode='bilinear')
        tx = self.prepare_tokens(self.preprocess(tx))
        for blk in self.blocks:
            tx = blk(tx)
        tx = self.norm(tx)
        tx = tx.transpose(1,2)
        B, N, C = tx.shape
        # resample the token axis from C to C-1 so the (CLS + patch) tokens
        # can be reshaped onto the square patch grid
        tx = F.interpolate(tx.reshape(B, N, C, 1), size=[C - 1, 1], mode='bilinear')
        tx = tx.reshape(B, N, self.img_size // 16, self.img_size // 16)
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
        txd = F.interpolate(tx, size=hx5.shape[2:], mode='bilinear')
#-------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up,hx5,txd),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6), tx
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NETP,self).__init__()
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,64)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(64,16,64)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(64,16,64)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(64,16,64)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(64,16,64)
# decoder
self.stage5d = RSU4F(128,16,64)
self.stage4d = RSU4(128,16,64)
self.stage3d = RSU5(128,16,64)
self.stage2d = RSU6(128,16,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#decoder
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
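# Minimal usage sketch: the three factories differ only in width and head
# count; with num_classes=0 the model returns the normalized [CLS] embedding.
# The input size below is a hypothetical stand-in.
def _demo_vit():
    model = vit_small(patch_size=16, img_size=[224]).eval()
    with torch.no_grad():
        feats = model(torch.randn(2, 3, 224, 224))
    assert feats.shape == (2, 384)
    return feats.shape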
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
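# Minimal usage sketch: the head maps backbone features through the MLP and
# an L2-normalized bottleneck before the weight-normalized prototype layer.
# The dimensions below are hypothetical stand-ins.
def _demo_dino_head():
    head = DINOHead(in_dim=384, out_dim=1024)
    logits = head(torch.randn(2, 384))
    assert logits.shape == (2, 1024)
    return logits.shape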
| 30,678 | 31.917382 | 124 | py |
STFTgrad | STFTgrad-main/classifier/classifier_adaptive.py | """
Code for the adaptive classifier with the differentiable STFT front-end.
It is trained on our test input signal: alternating sinusoids of two frequencies.
"""
# Dependencies
import numpy as np
from tqdm import tqdm
import haiku as hk
import jax.numpy as jnp
import jax
import optax
from dstft import diff_stft
import sys
# Order of input arguments:
"""
1 : list of N to initialize classifier with
2 : learning rate
3 : number of epochs
"""
n = len(sys.argv[1])
a = sys.argv[1][1:n-1]
a = a.split(',')
list_N = [int(i) for i in a]
lr = float(sys.argv[2])
nepochs = int(sys.argv[3])
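# Example invocation (hypothetical values; the list is passed as a single argument):
#   python classifier_adaptive.py "[10,25,45]" 1e-3 2000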
# Construct the test signal to classify:
# Sampling rate
fs = 200
# Durations and frequencies of the 2 sines
dur_sins = [0.2,0.2]
freqs = [20,80]
Ns = [int(fs*i) for i in dur_sins]
# adding some noise in the sine to prevent the classifier from overfitting
list_sin = [(np.sin(2*np.pi*(freqs[i]/fs)*np.arange(Ns[i])) + 0.2*np.random.randn(Ns[i])) for i in range(len(dur_sins))]
one_period = np.concatenate(list_sin)
# Repeat this Nr times
Nr = 20
signal = np.tile(one_period,Nr)
P = sum(dur_sins)
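# Onset times (in seconds) of the 20 Hz and 80 Hz segments in the tiled signal;
# each STFT frame is labeled by whichever onset grid it lies closest to.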
I1 = np.arange(0,Nr*P,P)
I2 = np.arange(0.2,Nr*P,P)
# Input dimension to the classifier after the differentiable STFT (it is zero-padded to ensure this dimension)
Nzp = 50
# MLP classifier head; the differentiable STFT (diff_stft) acts as its front-end pre-processor
def forward(x):
mlp = hk.Sequential([
hk.Linear(2), jax.nn.softmax
])
return mlp(x)
net = hk.without_apply_rng(hk.transform(forward))
def loss_fn(param_dict, signal):
params = param_dict["nn"]
sigma = param_dict["s"]
hf = 1
N = int(jnp.round(6*sigma))
# Adding some more noise during training to prevent classifier from overfitting on irrelevant aspects of the spectra
signal = signal + 0.2*np.random.randn(signal.shape[0])
x = diff_stft(signal, s = sigma,hf = hf)
li = []
l1 = jnp.array([[1,0]])
l2 = jnp.array([[0,1]])
l_c = []
for i in range(x.shape[1]):
timi = i*int(hf*N)/fs
d1 = np.min(np.abs(I1 - timi))
d2 = np.min(np.abs(I2 - timi))
if(d1 < d2):
li.append(1)
l_c.append(l1)
else:
li.append(2)
l_c.append(l2)
li = np.array(li)
l_c = np.concatenate(l_c,axis = 0).T
xzp = jnp.concatenate([x,jnp.zeros((Nzp - (N//2 + 1),x.shape[1]))],axis = 0)
logits = net.apply(params,xzp.T)
    # Regularized loss: a cross-entropy-style term (softmax outputs are used
    # directly, without a log) plus a 0.1/sigma penalty discouraging small windows
cel = -jnp.mean(logits*l_c.T) + (0.1/sigma)
return cel
def update(
param_dict,
opt_state,
signal
):
grads = jax.grad(loss_fn)(param_dict, signal)
updates, opt_state = opt.update(grads, opt_state)
new_params = optax.apply_updates(param_dict, updates)
return new_params, opt_state
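# Note: `update` is left un-jitted: loss_fn converts the traced window size to a
# Python int via int(jnp.round(6*sigma)), which works with the concrete values
# seen by plain jax.grad but would fail under jax.jit.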
# Training the Classifier
nH_evol_fin = []
# list_N = [10,15,20,25,30,45,55,65,70]
opt = optax.adam(lr)
rng = jax.random.PRNGKey(42)
for Ni in list_N:
params = net.init(rng,np.random.randn(1,Nzp))
sinit = (Ni/6)
param_dict = {"nn":params,"s":sinit}
opt_state = opt.init(param_dict)
pfdict = 0
nH = []
for t in tqdm(range(nepochs)):
param_dict, opt_state = update(param_dict, opt_state, signal)
pfdict = param_dict
nH.append(6*param_dict["s"])
nH_evol_fin.append(nH)
# Plotting the evolution of the window length across epochs for different initializations
import matplotlib.pyplot as pyp
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
pyp.figure()
pyp.title('Convergence from varying initial values')
pyp.xlabel('Epoch')
pyp.ylabel('Window Length (N)')
for l,i in enumerate(nH_evol_fin):
pyp.plot(i,'b')
pyp.show() | 3,684 | 25.510791 | 120 | py |
STFTgrad | STFTgrad-main/classifier/classifier_ordinary.py | """
Code for a normal classifier (to obtain the loss function as a function of the window length)
This will be trained on our test input signal, alternating sinusoids of 2 frequencies
"""
# Dependencies
import numpy as np
from tqdm import tqdm
import haiku as hk
import jax.numpy as jnp
import jax
import optax
from dstft import diff_stft
import sys
# Order of input arguments:
"""
1 : list of N to initialize classifier with
2 : learning rate
3 : number of epochs
"""
n = len(sys.argv[1])
a = sys.argv[1][1:n-1]
a = a.split(',')
list_N = [int(i) for i in a]
lr = float(sys.argv[2])
nepochs = int(sys.argv[3])
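# Example invocation (hypothetical values): sweep several fixed window lengths.
#   python classifier_ordinary.py "[10,20,30,45,65]" 1e-3 1000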
# Construct the test signal to classify:
# Sampling rate
fs = 200
# Durations and frequencies of the 2 sines
dur_sins = [0.2,0.2]
freqs = [20,80]
Ns = [int(fs*i) for i in dur_sins]
# adding some noise in the sine to prevent the classifier from overfitting
list_sin = [(np.sin(2*np.pi*(freqs[i]/fs)*np.arange(Ns[i])) + 0.2*np.random.randn(Ns[i])) for i in range(len(dur_sins))]
one_period = np.concatenate(list_sin)
# Repeat this Nr times
Nr = 20
signal = np.tile(one_period,Nr)
P = sum(dur_sins)
I1 = np.arange(0,Nr*P,P)
I2 = np.arange(0.2,Nr*P,P)
# Constructing the classifier
def forward(x):
mlp = hk.Sequential([
hk.Linear(2), jax.nn.softmax
])
return mlp(x)
net = hk.without_apply_rng(hk.transform(forward))
def loss_fn(params, inp, labels):
logits = net.apply(params,inp)
cel = -jnp.mean(logits * labels)
return cel
def update(
params: hk.Params,
opt_state,
x,l
):
grads = jax.grad(loss_fn)(params, x,l)
updates, opt_state = opt.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
# Training the classifier
loss_arrs_N = []
loss_fin = []
N_sweep = list_N
opt = optax.adam(lr)
rng = jax.random.PRNGKey(42)
for N in N_sweep:
Nc = N
Nzp = 50
hf = 1
signal = signal + 0.1*np.random.randn(signal.shape[0])
x = diff_stft(signal, s = Nc/6,hf = hf)
li = []
l1 = jnp.array([[1,0]])
l2 = jnp.array([[0,1]])
l_c = []
for i in range(x.shape[1]):
timi = i*int(hf*Nc)/fs
d1 = np.min(np.abs(I1 - timi))
d2 = np.min(np.abs(I2 - timi))
if(d1 < d2):
li.append(1)
l_c.append(l1)
else:
li.append(2)
l_c.append(l2)
li = np.array(li)
l_c = np.concatenate(l_c,axis = 0)
xzp = jnp.concatenate([x,jnp.zeros((Nzp - (Nc//2 + 1),x.shape[1]))],axis = 0)
params = net.init(rng,np.random.randn(1,Nzp))
opt_state = opt.init(params)
paramsf = 0
liter = []
for t in tqdm(range(nepochs)):
params, opt_state = update(params, opt_state, xzp.T, l_c)
paramsf = params
liter.append(loss_fn(paramsf,xzp.T, l_c) + (0.6/N))
loss_arrs_N.append(liter)
loss_fin.append(loss_fn(paramsf,xzp.T, l_c))
# Plotting the spectrograms and final loss for the different N's
costs_fin = loss_fin
import matplotlib.pyplot as pyp
import matplotlib
from matplotlib.pylab import register_cmap
cdict = {
'red': ((0.0, 1.0, 1.0), (1.0, 0.0, 0.0)),
'green': ((0.0, 1.0, 1.0), (1.0, .15, .15)),
'blue': ((0.0, 1.0, 1.0), (1.0, 0.4, 0.4)),
'alpha': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))}
register_cmap(name='InvBlueA', data=cdict)
matplotlib.rcParams.update({'font.size': 16})
def plot_various_window_size(sigi):
pyp.figure(figsize=(22, 4))
szs = N_sweep
for i in range(len(szs)):
sz, hp = szs[i], szs[i]
a = diff_stft(sigi,s = szs[i]*1.0/6,hf = 1)
pyp.gcf().add_subplot(1, len(szs), i + 1), pyp.gca().pcolorfast(a,cmap = "InvBlueA")
pyp.gca().set_title(f'FFT size: {sz}, \n Loss: {costs_fin[i]:.5f}')
pyp.xlabel('Time Frame')
pyp.ylabel('Frequency Bin')
pyp.gcf().tight_layout()
plot_various_window_size(signal[:5*one_period.shape[0]]) | 3,916 | 25.828767 | 120 | py |
STFTgrad | STFTgrad-main/classifier/dstft.py | """
Code for the differentiable STFT front-end
As explained in our paper, we use a Gaussian Window STFT, with N = floor(6\sigma)
"""
# Dependencies
import jax.numpy as jnp
import jax
def diff_stft(xinp,s,hf = 0.5):
"""
Inputs
------
xinp: jnp.array
Input audio signal in time domain
s: jnp.float
The standard deviation of the Gaussian window to be used
    hf: jnp.float
        The hop size as a fraction of the window length (consecutive frames
        overlap by a fraction 1 - hf of the window)
Outputs
-------
a: jnp.array
The computed magnitude spectrogram
"""
# Effective window length of Gaussian is 6\sigma
sz = s * 6
hp = hf*sz
# Truncating to integers for use in jnp functions
intsz = int(jnp.round(sz))
inthp = int(jnp.round(hp))
m = jnp.arange(0, intsz, dtype=jnp.float32)
# Obtaining the "differentiable" window function by using the real valued \sigma
window = jnp.exp(-0.5 * jnp.power((m - sz / 2) / (s + 1e-5), 2))
window_norm = window/jnp.sum(window)
# Computing the STFT, and taking its magnitude
stft = jnp.sqrt(1/(2*window_norm.shape[0] + 1))*jnp.stack([jnp.fft.rfft(window_norm * xinp[i:i+intsz]) for i in range(0, len(xinp) - intsz, inthp)],1)
a = jnp.abs(stft)
return a
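# A minimal usage sketch (hypothetical values), kept under a main guard so that
# `from dstft import diff_stft` stays side-effect free:
if __name__ == "__main__":
    import numpy as np
    fs = 200
    sig = jnp.asarray(np.sin(2 * np.pi * (20 / fs) * np.arange(4 * fs)))
    spec = diff_stft(sig, s=30 / 6, hf=1.0)  # roughly an N = 30 sample Gaussian window
    print(spec.shape)  # (N//2 + 1, number of frames)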
| 1,290 | 26.468085 | 154 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/adaptive_stft.py | import math
from tqdm import trange
import sys
import pathlib
import torch.autograd
import torch
import numpy as np
import torch.optim
import torch.nn as nn
from celluloid import Camera
import matplotlib.colors
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import torch.nn.functional as F
from adaptive_stft_utils.operators import dithering_int, Sign, InvSign
from adaptive_stft_utils.mappings import IdxToWindow, make_find_window_configs
from adaptive_stft_utils.losses import kurtosis
class NumericalError(Exception):
def __init__(self, message, grad_hist=None, window_times_signal_grads=None, f_grad=None):
self.message = message
self.grad_hist = grad_hist
self.window_times_signal_grads = window_times_signal_grads
self.f_grad = f_grad
def __str__(self):
if self.message:
return 'NumericalError, {0} '.format(self.message)
else:
return 'NumericalError'
#COLOR_MAP = 'GnBu'
COLOR_MAP = None
def optimize_stft(
s,
lr=1e-4,
num_windows=None,
sgd=False,
num_epochs=9000,
score_fn=kurtosis,
window_shape='trapezoid',
make_animation=True,
name_for_saving='',
):
if window_shape not in ['trapezoid', 'triangle']:
raise RuntimeError(f'Unknown window shape {window_shape}')
kur_hist = []
fig_width = int(s.size(0) / 27000 * 14 * 10) / 10
anim_fig = Figure(figsize=(fig_width, 7))
preview_fig = Figure(figsize=(fig_width, 7))
from IPython.core.display import display
preview_handle = display(preview_fig, display_id=True)
camera = Camera(anim_fig)
matrix_fig = plt.figure(figsize=(22, 15))
s = s.cuda()
# make (num_windows - 1) points, in addition to start and end (0, signal_len)
assert len(s.shape) == 1
last_sample = s.size(0)
if num_windows is None:
assert s.size(0) > 512
num_windows = s.size(0) // 512
idx_to_window = IdxToWindow(
signal_len=last_sample, num_windows=num_windows)
idx_to_window = idx_to_window.cuda()
if sgd:
optimizer = torch.optim.SGD(idx_to_window.parameters(), lr=lr)
else:
optimizer = torch.optim.AdamW(
idx_to_window.parameters(), lr=lr, amsgrad=True, weight_decay=1e-6)
find_window_configs = make_find_window_configs(idx_to_window, last_sample=last_sample)
xs = None
ys = None
with trange(num_epochs + 1) as t:
for ep in t:
optimizer.zero_grad()
# window_configs excludes 0 and sample_length
configs = find_window_configs()
xs, ys, s_ext, extend_left = make_window_extend_signal(configs, s, window_shape=window_shape)
rffts_not_detached = []
rffts = []
len_xs = xs.size(0)
assert len_xs > 1, f"xs: {xs}, ys: {ys}, slope: {idx_to_window.slope.item()}"
for i in range(len_xs - 1):
rfft = apply_adaptive_window(s_ext, xs[i], ys[i], xs[i + 1], ys[i + 1], window_shape=window_shape)
rfft_sq = rfft[..., 0] ** 2 + rfft[..., 1] ** 2
rffts_not_detached.append(rfft_sq)
rffts.append(rfft_sq.detach().cpu().numpy())
n_wnd = len(rffts_not_detached)
score = score_fn(rffts_not_detached)
t.set_postfix(score=score.item(),
slope=idx_to_window.slope.item(), n_wnd=n_wnd)
if (torch.isnan(score).any() or torch.isinf(score).any()):
raise NumericalError(
f'score become NaN at iteration {ep}')
(-score).backward()
torch.nn.utils.clip_grad_norm_(
idx_to_window.parameters(), max_norm=1)
optimizer.step()
kur_hist.append(score.item())
plot_width = int(768 * fig_width / 7)
def get_scaled_fft_plots():
# since each window has different size, stretch the FFT frequency to fit the largest
max_size = np.max([x.shape[0] for x in rffts])
scaled_fft_plots = np.zeros(
(max_size, last_sample), dtype=np.float32)
i = 0
from scipy.interpolate import interp1d
for i, fft in enumerate(rffts):
bins = np.linspace(0, max_size, fft.shape[0])
f_out = interp1d(bins, fft, axis=0, kind='nearest')
new_bins = np.linspace(0, max_size, max_size)
fft_out = f_out(new_bins)
fft_out /= np.max(fft_out)
if i == 0:
start_point = int(max(ys[i] - extend_left, 0))
else:
start_point = int(max((xs[i] + ys[i]) / 2 - extend_left, 0))
if i < len(rffts) - 1:
end_point = int((xs[i + 1] + ys[i + 1]) / 2) - extend_left
else:
end_point = last_sample
scaled_fft_plots[:, start_point:end_point] = np.expand_dims(fft_out, -1)
import cv2
scaled_fft_plots = np.power(scaled_fft_plots, 0.5)
return cv2.resize(scaled_fft_plots, dsize=(plot_width, 768))
if ep % (num_epochs // 8) == 0:
outfile = f'{name_for_saving}_plot_data_{ep}.npz'
model_path = f'{name_for_saving}_mapping_model_{ep}.pth'
scaled_fft_plots = get_scaled_fft_plots()
np.savez(outfile, spectro=scaled_fft_plots,
x=xs.cpu().detach().numpy(),
y=ys.cpu().detach().numpy(),
extend_left=extend_left,
sample_length=last_sample,
sample=s.cpu().detach().numpy(),
sample_extended=s_ext.cpu().detach().numpy())
torch.save(idx_to_window.state_dict(), model_path)
plt.gcf().add_subplot(3, 3, ep // (num_epochs // 8) + 1)
plt.gca().pcolormesh(scaled_fft_plots, norm=matplotlib.colors.Normalize(), linewidth=0, cmap=COLOR_MAP)
plt.gca().set_title(f'ep: {ep}, score: {score.item():.5f}')
for i in range(xs.size(0)):
inter_window_line = (xs[i] + ys[i]).item() / 2 - extend_left
if inter_window_line <= 0 or inter_window_line >= last_sample:
continue
plt.gca().axvline(inter_window_line / last_sample * plot_width,
linewidth=0.5, antialiased=True)
if ep % 15 == 0:
scaled_fft_plots = get_scaled_fft_plots()
def draw(fig):
fig.gca().pcolormesh(scaled_fft_plots, norm=matplotlib.colors.Normalize(), linewidth=0, cmap=COLOR_MAP)
fig.gca().text(0.3, 1.01, f'ep: {ep}, score: {score.item():.5f}', transform=fig.gca().transAxes)
for i in range(xs.size(0)):
inter_window_line = (xs[i] + ys[i]).item() / 2 - extend_left
if inter_window_line <= 0 or inter_window_line >= last_sample:
continue
fig.gca().axvline(inter_window_line / last_sample * plot_width,
linewidth=0.5, antialiased=True)
if make_animation:
draw(anim_fig)
camera.snap()
preview_fig.gca().clear()
draw(preview_fig)
# Show image on notebook
preview_fig.canvas.draw()
preview_handle.update(preview_fig)
if ep % 30 == 0:
import gc
gc.collect()
if make_animation:
ani = camera.animate(interval=33.3, blit=True)
else:
ani = None
preview_handle.update(plt.figure())
matrix_fig.tight_layout()
return idx_to_window, kur_hist, ani
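# A minimal usage sketch (hypothetical signal; assumes a CUDA device, since
# optimize_stft moves the signal to the GPU, and an IPython display for previews):
#   s = torch.randn(27000)
#   mapping, score_history, _ = optimize_stft(s, num_epochs=80, make_animation=False)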
def make_window_extend_signal(configs, s: torch.Tensor, window_shape: str):
last_sample: int = s.size(0)
xs = [x for (x, _) in configs]
xs[0] = torch.clamp(xs[0], -last_sample + 1, 0)
xs[-1] = torch.clamp(xs[-1], 0, last_sample * 2 - 1)
if window_shape == 'trapezoid':
ys = [xs[i + 1] - (xs[i + 1] - xs[i]) * configs[i][1]
for i in range(len(xs) - 1)]
ys.insert(0, xs[0] - (xs[1] - xs[0]) * configs[0][1])
elif window_shape == 'triangle':
ys = [xs[i] for i in range(len(xs))]
# pick x values that are in sample range
xs.pop(0)
else:
raise RuntimeError(f'Unknown window shape {window_shape}')
ys[0] = torch.clamp(ys[0], -last_sample + 1, 0)
xs = torch.cat([x.view(1) for x in xs])
# extend the signal both ways via zero padding
offset = -int(torch.floor(ys[0]))
assert offset >= 0
extend_left = offset
assert extend_left <= last_sample
extend_right = int(torch.ceil(xs[-1])) - last_sample + 1
assert extend_right >= 0
assert extend_right <= last_sample
s_left_pad = torch.zeros_like(s[:extend_left])
s_right_pad = torch.zeros_like(s[last_sample - extend_right:])
s_ext = torch.cat((s_left_pad, s, s_right_pad))
xs = xs + extend_left
ys = torch.cat([y.view(1) for y in ys])
ys = ys + extend_left
return xs, ys, s_ext, extend_left
def apply_adaptive_window(
s_ext: torch.Tensor,
x_i: torch.Tensor,
y_i: torch.Tensor,
x_next: torch.Tensor,
y_next: torch.Tensor,
window_shape: str
) -> torch.Tensor:
if window_shape == 'trapezoid':
# three parts
left_trig_start = dithering_int(y_i)
left_trig_end = dithering_int(x_i)
right_trig_start = dithering_int(y_next)
right_trig_end = dithering_int(x_next)
rect_start = left_trig_end
rect_end = right_trig_start
m = torch.arange(0, left_trig_end - left_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = m / (x_i - y_i)
left_ramp_times_signal = ramp * \
s_ext[left_trig_start:left_trig_end]
m = torch.arange(0, right_trig_end - right_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = 1 - (m / (x_next - y_next))
right_ramp_times_signal = ramp * s_ext[right_trig_start:right_trig_end]
rect_signal = s_ext[rect_start:rect_end]
# rect_signal = rect_signal * Sign.apply(y_next) * InvSign.apply(x_i)
window_times_signal = torch.cat(
(left_ramp_times_signal, rect_signal, right_ramp_times_signal), dim=-1)
elif window_shape == 'triangle':
left_trig_start = dithering_int(y_i)
left_trig_end = dithering_int(x_i)
right_trig_start = dithering_int(x_i)
right_trig_end = dithering_int(x_next)
m = torch.arange(0, left_trig_end - left_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = m / (x_i - y_i)
left_ramp_times_signal = ramp * \
s_ext[left_trig_start:left_trig_end]
m = torch.arange(0, right_trig_end - right_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = 1 - (m / (x_next - y_next))
right_ramp_times_signal = ramp * s_ext[right_trig_start:right_trig_end]
window_times_signal = torch.cat(
(left_ramp_times_signal, right_ramp_times_signal), dim=-1)
else:
raise RuntimeError(f'unknown window shape {window_shape}')
assert window_times_signal.size(0) > 1, f"x: {x_i}, y: {y_i}"
rfft = torch.rfft(window_times_signal,
signal_ndim=1, normalized=True)
return rfft
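# Compatibility note: torch.rfft was deprecated in PyTorch 1.7 and removed in 1.8.
# On newer versions an equivalent of the call above (the same one-sided,
# orthonormal transform) would be:
#   spec = torch.fft.rfft(window_times_signal, norm="ortho")
#   rfft = torch.view_as_real(spec)  # (..., 2) real/imag layout of the old API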
| 11,853 | 39.875862 | 123 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/MNISTExperiment.py | from models import UMNNMAFFlow
import torch
from lib import dataloader as dl
import lib as transform
import lib.utils as utils
import numpy as np
import os
import pickle
from timeit import default_timer as timer
import torchvision
from tensorboardX import SummaryWriter
writer = SummaryWriter()
def train_mnist(dataset, load=None, gen_image=False, save=None, temperature=.5, real_images=False, nb_iter=5,
nb_steps=50, solver="CC", hidden_embeding=[1024, 1024, 1024], hidden_derivative=[100, 50, 50, 50, 50],
embeding_size=30, nb_images=5, conditionnal=False, nb_flow=5, lr=1e-3, weight_decay=1e-2,
nb_epoch=500, L=1., batch_size=100):
cuda = 0 if torch.cuda.is_available() else -1
device = "cuda:0" if torch.cuda.is_available() else "cpu"
save_name = dataset + "/" + str(nb_steps) if save is None else save
if save is not None or gen_image:
if not (os.path.isdir(save_name)):
os.makedirs(save_name)
logger = utils.get_logger(logpath=os.path.join(save_name, 'logs'), filepath=os.path.abspath(__file__),
saving=save is not None)
cond_in = 10 if conditionnal else 0
nb_in = 28**2
model = UMNNMAFFlow(nb_flow=nb_flow, nb_in=nb_in, hidden_derivative=hidden_derivative,
hidden_embedding=hidden_embeding, embedding_s=embeding_size, nb_steps=nb_steps, device=device,
solver=solver, cond_in=cond_in).to(device)
if save is not None:
with open(save + "/model.txt", "w") as f:
f.write(str(model))
opt = torch.optim.Adam(model.parameters(), lr, weight_decay=weight_decay)
if nb_steps > 0:
max_forward = min(int(3000/(nb_steps/nb_steps * nb_flow * hidden_derivative[0]/100)*784/nb_in), batch_size)
logger.info("Max forward: %d" % max_forward)
random_steps = nb_steps <= 0
if conditionnal:
train_loader, valid_loader, test_loader = dl.dataloader(dataset, batch_size, cuda=cuda, conditionnal=True)
else:
train_loader, valid_loader, test_loader = dl.dataloader(dataset, batch_size, cuda=cuda)
if load is not None:
logger.info("Loading model")
model.load_state_dict(torch.load(load + '/model.pt'))
model.eval()
with torch.no_grad():
# Compute Test loss
i = 0
ll_test = 0.
bpp_avg = 0.
start = end = timer()
for batch_idx, (cur_x, target) in enumerate(test_loader):
if conditionnal:
bpp, ll_tmp, z_est = 0, 0, 0
for j in range(10):
y = target.view(-1, 1)*0 + j
y_one_hot = torch.zeros(y.shape[0], 10).scatter(1, y, 1)
context = y_one_hot.to(device)
bpp_i, ll_tmp_i, z_est_i = model.compute_bpp(cur_x.view(-1, nb_in).to(device), context=context)
bpp += bpp_i/10
ll_tmp += ll_tmp_i/10
else:
context = None
bpp, ll_tmp, z_est = model.compute_bpp(cur_x.view(-1, nb_in).to(device))
i += 1
ll_test -= ll_tmp.mean()
bpp_avg += bpp.mean()
if i == 5 and nb_epoch > 0:
break
end = timer()
logger.info("{:d} :Test loss: {:4f} - BPP: {:4f} - Elapsed time per epoch {:4f}".format(
i, -ll_test.detach().cpu().item()/i, -bpp_avg.detach().cpu().item() / i, end - start))
logger.info("{:d} :Test loss: {:4f} - BPP: {:4f} - Elapsed time per epoch {:4f}".format(
i, -ll_test.detach().cpu().item() / i, -bpp_avg.detach().cpu().item() / i, end - start))
nb_sample = nb_images
# Generate and save images
if gen_image:
if real_images:
logger.info("Regenerate real images")
x, y = next(iter(test_loader))
y = y.view(-1, 1)
context = torch.zeros(y.shape[0], 10).scatter(1, y, 1).to(device) if conditionnal else None
nb_sample = 100
z = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_sample, nb_in])).to(
device) * torch.arange(0.1, 1.1, .1).unsqueeze(0).expand(int(nb_sample / 10), -1).transpose(0,
1) \
.contiguous().view(-1).unsqueeze(1).expand(-1, 784).to(device)
else:
logger.info("Generate random images")
z = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_sample, nb_in])).to(
device) * temperature
z_true = z[:nb_sample, :]
if conditionnal:
if real_images:
nb_sample = 100
z_true = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_sample, nb_in])).to(
device) * torch.arange(0.1, 1.1, .1).unsqueeze(0).expand(int(nb_sample/10), -1).transpose(0, 1)\
.contiguous().view(-1).unsqueeze(1).expand(-1, 784).to(device)
digit = (torch.arange(nb_sample) % 10).float().view(-1, 1)
else:
digit = (torch.arange(nb_sample) % 10).float().view(-1, 1)
logger.info("Creation of: " + str(digit))
context = torch.zeros(digit.shape[0], 10).scatter(1, digit.long(), 1).to(device)
x_est = model.invert(z_true, nb_iter, context=context)
bpp, ll, _ = model.compute_bpp(x_est, context=context)
logger.info("Bpp of generated data is: {:4f}".format(bpp.mean().item()))
logger.info("ll of generated data is: {:4f}".format(ll.mean().item()))
x = transform.logit_back(x_est.detach().cpu(), 1e-6).view(x_est.shape[0], 1, 28, 28)
torchvision.utils.save_image(x, save_name + '/' + str(temperature) + 'images.png', nrow=10,
padding=1)
exit()
with open(load + '/losses.pkl', 'rb') as f:
losses_train, losses_test = pickle.load(f)
cur_epoch = len(losses_test)
else:
losses_train = []
losses_test = []
cur_epoch = 0
for epoch in range(cur_epoch, cur_epoch + nb_epoch):
ll_tot = 0
i = 0
start = timer()
for batch_idx, (cur_x, target) in enumerate(train_loader):
if conditionnal:
y = target.view(-1, 1)
y_one_hot = torch.zeros(y.shape[0], 10).scatter(1, y, 1)
context = y_one_hot.to(device)
else:
cur_x = cur_x.view(-1, nb_in).to(device)
context = None
if random_steps:
nb_steps = np.random.randint(5, 50)*2
max_forward = min(int(1500 / nb_steps), batch_size)
model.set_steps_nb(nb_steps)
cur_x = cur_x.to(device)
ll = 0.
opt.zero_grad()
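            # Gradient accumulation: the batch is processed in chunks of at most
            # max_forward samples; each chunk's loss is scaled by its share of the
            # batch and backward() sums the gradients before the single opt.step().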
for cur_su_batch in range(0, batch_size, max_forward):
ll, z = model.compute_ll(cur_x.view(-1, nb_in)[cur_su_batch:cur_su_batch+max_forward], context=context)
ll = -ll.mean()/(batch_size/z.shape[0])
ll.backward()
ll_tot += ll.detach()
opt.step()
if L > 0:
model.forceLipshitz(L)
i += 1
if i % 10 == 0:
time_tot = timer()
logger.info("{:d} cur_loss - {:4f} - Average time elapsed per batch {:4f}".format(
i, ll_tot.item() / i, (time_tot - start) / i))
if save:
torch.save(model.state_dict(), save_name + '/model.pt')
ll_tot /= i
losses_train.append(ll_tot.detach().cpu())
with torch.no_grad():
# Generate and save images
if gen_image and epoch % 10 == 0:
z = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_images, nb_in])).to(device)*temperature
if conditionnal:
digit = (torch.arange(nb_sample) % 10).float().view(-1, 1)
logger.info("Creation of: " + str(digit))
context = torch.zeros(digit.shape[0], 10).scatter(1, digit.long(), 1).to(device)
x = model.invert(z, nb_iter, context=context)
logger.info("Inversion error: {:4f}".format(torch.abs(z - model.forward(x, context=context)).mean().item()))
x = x.detach().cpu()
x = transform.logit_back(x, 1e-6).view(x.shape[0], 1, 28, 28)
writer.add_image('data/images', torchvision.utils.make_grid(x, nrow=4), epoch)
torchvision.utils.save_image(x, save_name + '/epoch_{:04d}.png'.format(epoch), nrow=4, padding=1)
model.set_steps_nb(nb_steps)
# Compute Test loss
i = 0
ll_test = 0.
for batch_idx, (cur_x, target) in enumerate(valid_loader):
if conditionnal:
y = target.view(-1, 1)
y_one_hot = torch.zeros(y.shape[0], 10).scatter(1, y, 1)
context = y_one_hot.to(device)
else:
context = None
ll_tmp, _ = model.compute_ll(cur_x.view(-1, nb_in).to(device), context=context)
i += 1
ll_test -= ll_tmp.mean()
ll_test /= i
losses_test.append(ll_test.detach().cpu())
writer.add_scalars('data/' + save_name + "/losses", {"Valid": ll_test.detach().cpu().item(),
"Train": ll_tot.detach().cpu().item()}, epoch)
# Save losses
if save:
if epoch % 5 == 0:
if not (os.path.isdir(save_name + '/models')):
os.makedirs(save_name + '/models')
torch.save(model.state_dict(), save_name + '/models/model_{:04d}.pt'.format(epoch))
with open(save_name + '/losses.pkl', 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump([losses_train, losses_test], f)
logger.info("epoch: {:d} - Train loss: {:4f} - Test loss: {:4f} - L: {:4f}".format(
epoch, ll_tot.detach().cpu().item(), ll_test.detach().cpu().item(), model.computeLipshitz(10).detach()))
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument("-load", default=None, help="where to load")
parser.add_argument("-gen", default=False, action="store_true", help="where to store results")
parser.add_argument("-save", default=None, help="where to store results")
parser.add_argument("-steps", default=50, type=int, help="number of integration steps")
parser.add_argument("-temperature", default=.5, type=float, help="Temperature for sample")
parser.add_argument("-solver", default="CC", help="Temperature for sample")
parser.add_argument("-hidden_embedding", nargs='+', type=int, default=[1024, 1024, 1024], help="Nb neurons for emebding")
parser.add_argument("-hidden_derivative", nargs='+', type=int, default=[100, 50, 50, 50, 50], help="Nb neurons for derivative")
parser.add_argument("-embedding_size", type=int, default=30, help="Size of embedding part")
parser.add_argument("-real_images", type=bool, default=False, help="Generate real images")
parser.add_argument("-dataset", type=str, default="MNIST", help="Dataset")
parser.add_argument("-nb_images", type=int, default=5, help="Number of images to be generated")
parser.add_argument("-conditionnal", type=bool, default=False, help="Conditionning on class or not")
parser.add_argument("-nb_flow", type=int, default=5, help="Number of nets in the flow")
parser.add_argument("-weight_decay", type=float, default=1e-2, help="Weight Decay")
parser.add_argument("-lr", type=float, default=1e-3, help="Learning rate")
parser.add_argument("-nb_epoch", type=int, default=500, help="Number of epoch")
parser.add_argument("-nb_iter", type=int, default=500, help="Number of iter for inversion")
parser.add_argument("-Lipshitz", type=float, default=0, help="Lipshitz constant max of linear layer in derivative net")
parser.add_argument("-b_size", type=int, default=100, help="Number of samples per batch")
args = parser.parse_args()
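# Example invocation (hypothetical): MNIST training with a 5-net flow.
#   python MNISTExperiment.py -dataset MNIST -nb_flow 5 -steps 50 -save run1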
dataset = args.dataset
dir_save = None if args.save is None else dataset + "/" + args.save
dir_load = None if args.load is None else dataset + "/" + args.load
train_mnist(dataset=dataset, load=dir_load, gen_image=args.gen, save=dir_save, nb_steps=args.steps,
temperature=args.temperature, solver=args.solver, hidden_embeding=args.hidden_embedding,
hidden_derivative=args.hidden_derivative, real_images=args.real_images, nb_images=args.nb_images,
conditionnal=args.conditionnal, nb_flow=args.nb_flow, weight_decay=args.weight_decay, lr=args.lr,
nb_epoch=args.nb_epoch, L=args.Lipshitz, nb_iter=args.nb_iter,
batch_size=args.b_size)
| 13,329 | 49.492424 | 127 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/ToyExperiments.py | from models import UMNNMAFFlow
import torch
import lib.toy_data as toy_data
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import os
import lib.utils as utils
import lib.visualize_flow as vf
green = '#e15647'
black = '#2d5468'
white_bg = '#ececec'
def summary_plots(x, x_test, folder, epoch, model, ll_tot, ll_test):
fig = plt.figure(figsize=(7, 7))
ax = plt.subplot(1, 1, 1, aspect="equal")
vf.plt_flow(model.compute_ll, ax)
#ax = plt.subplot(1, 3, 2, aspect="equal")
#vf.plt_samples(toy_data.inf_train_gen(toy, batch_size=50000), ax, npts=500)
#ax = plt.subplot(1, 3, 3, aspect="equal")
#samples = model.invert(torch.distributions.Normal(0., 1.).sample([5000, 2]), 8, "Binary")
#vf.plt_samples(samples.detach().numpy(), ax, title="$x\sim q(x)$")
plt.savefig("%s/flow_%d.pdf" % (folder + toy, epoch))
plt.savefig("%s/flow_%d.png" % (folder + toy, epoch))
plt.close(fig)
fig = plt.figure()
z = torch.distributions.Normal(0., 1.).sample(x_test.shape)
plt.figure(figsize=(7, 7))
plt.xlim(-4.5, 4.5)
plt.ylim(-4.5, 4.5)
plt.xlabel("$z_1$", fontsize=20)
plt.ylabel("$z_2$", fontsize=20)
plt.scatter(z[:, 0], z[:, 1], alpha=.2, color=green)
x_min = z.min(0)[0] - .5
x_max = z.max(0)[0] + .5
ticks = [1, 1]
plt.xticks([-4, 0, 4])
plt.yticks([-4, 0, 4])
#plt.grid(True)
ax = plt.gca()
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_facecolor(white_bg)
ax.tick_params(axis='x', colors=black)
ax.tick_params(axis='y', colors=black)
ax.spines['bottom'].set_color(black)
ax.spines['left'].set_color(black)
#plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
#plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
plt.tight_layout()
plt.savefig("noise.png", transparent=True)
z_pred = model.forward(x_test)
z_pred = z_pred.detach().cpu().numpy()
#plt.subplot(221)
plt.figure()
plt.title("z pred")
plt.scatter(z_pred[:, 0], z_pred[:, 1], alpha=.2)
plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
plt.savefig("test2.png")
start = timer()
z = torch.distributions.Normal(0., 1.).sample((10000, 2))
x_pred = model.invert(z, 5, "ParallelSimpler")
end = timer()
print("Inversion time: {:4f}s".format(end - start))
plt.subplot(223)
#plt.title("x pred")
x_pred = x_pred.detach().cpu().numpy()
plt.scatter(x_pred[:, 0], x_pred[:, 1], alpha=.2)
x_min = x.min(0)[0] - .5
x_max = x.max(0)[0] + .5
ticks = [1, 1]
plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
#plt.subplot(224)
plt.figure(figsize=(7, 7))
plt.xlim(-4.5, 4.5)
plt.ylim(-4.5, 4.5)
#cmap = matplotlib.cm.get_cmap(None)
#ax.set_facecolor(cmap(0.))
# ax.invert_yaxis()
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.xticks([-4, 0, 4])
plt.yticks([-4, 0, 4])
plt.xlabel("$x_1$", fontsize=20)
plt.ylabel("$x_2$", fontsize=20)
plt.scatter(x[:, 0], x[:, 1], alpha=.2, color='#e15647')
#plt.xticks(np.arange(-5, 5.1, 2))
#plt.yticks(np.arange(-5, 5.1, 2))
#plt.grid(True)
ax = plt.gca()
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_facecolor(white_bg)
ax.tick_params(axis='x', colors=black)
ax.tick_params(axis='y', colors=black)
ax.spines['bottom'].set_color(black)
ax.spines['left'].set_color(black)
#plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
#plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
plt.tight_layout()
plt.savefig("8gaussians.png", transparent=True)
plt.suptitle(str(("epoch: ", epoch, "Train loss: ", ll_tot.item(), "Test loss: ", ll_test.item())))
plt.savefig("%s/%d.png" % (folder + toy, epoch))
plt.close(fig)
def train_toy(toy, load=True, nb_steps=20, nb_flow=1, folder=""):
device = "cpu"
logger = utils.get_logger(logpath=os.path.join(folder, toy, 'logs'), filepath=os.path.abspath(__file__))
logger.info("Creating model...")
model = UMNNMAFFlow(nb_flow=nb_flow, nb_in=2, hidden_derivative=[100, 100, 100, 100], hidden_embedding=[100, 100, 100, 100],
embedding_s=10, nb_steps=nb_steps, device=device).to(device)
logger.info("Model created.")
opt = torch.optim.Adam(model.parameters(), 1e-3, weight_decay=1e-5)
if load:
logger.info("Loading model...")
model.load_state_dict(torch.load(folder + toy+'/model.pt'))
model.train()
opt.load_state_dict(torch.load(folder + toy+'/ADAM.pt'))
logger.info("Model loaded.")
nb_samp = 100
batch_size = 100
x_test = torch.tensor(toy_data.inf_train_gen(toy, batch_size=1000)).to(device)
x = torch.tensor(toy_data.inf_train_gen(toy, batch_size=1000)).to(device)
for epoch in range(10000):
ll_tot = 0
start = timer()
for j in range(0, nb_samp, batch_size):
cur_x = torch.tensor(toy_data.inf_train_gen(toy, batch_size=batch_size)).to(device)
ll, z = model.compute_ll(cur_x)
ll = -ll.mean()
ll_tot += ll.detach()/(nb_samp/batch_size)
loss = ll
opt.zero_grad()
loss.backward()
opt.step()
end = timer()
ll_test, _ = model.compute_ll(x_test)
ll_test = -ll_test.mean()
logger.info("epoch: {:d} - Train loss: {:4f} - Test loss: {:4f} - Elapsed time per epoch {:4f} (seconds)".
format(epoch, ll_tot.item(), ll_test.item(), end-start))
if (epoch % 100) == 0:
summary_plots(x, x_test, folder, epoch, model, ll_tot, ll_test)
torch.save(model.state_dict(), folder + toy + '/model.pt')
torch.save(opt.state_dict(), folder + toy + '/ADAM.pt')
import argparse
datasets = ["8gaussians", "swissroll", "moons", "pinwheel", "cos", "2spirals", "checkerboard", "line", "line-noisy",
"circles", "joint_gaussian"]
parser = argparse.ArgumentParser(description='')
parser.add_argument("-dataset", default=None, choices=datasets, help="Which toy problem ?")
parser.add_argument("-load", default=False, action="store_true", help="Load a model ?")
parser.add_argument("-folder", default="", help="Folder")
args = parser.parse_args()
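# Example invocation (hypothetical):
#   python ToyExperiments.py -dataset 8gaussians -folder results/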
if args.dataset is None:
toys = datasets
else:
toys = [args.dataset]
for toy in toys:
if not(os.path.isdir(args.folder + toy)):
os.makedirs(args.folder + toy)
train_toy(toy, load=args.load, folder=args.folder)
| 7,250 | 37.775401 | 128 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/MonotonicMLP.py | import torch
import argparse
import torch.nn as nn
import matplotlib.pyplot as plt
from models.UMNN import MonotonicNN, IntegrandNN
def f(x_1, x_2, x_3):
return .001*(x_1**3 + x_1) + x_2 ** 2 + torch.sin(x_3)
def create_dataset(n_samples):
x = torch.randn(n_samples, 3)
y = f(x[:, 0], x[:, 1], x[:, 2])
return x, y
class MLP(nn.Module):
def __init__(self, in_d, hidden_layers):
super(MLP, self).__init__()
self.net = []
hs = [in_d] + hidden_layers + [1]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net = nn.Sequential(*self.net)
def forward(self, x, h):
return self.net(torch.cat((x, h), 1))
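# For contrast with the free-form MLP above, MonotonicNN (models.UMNN) is
# monotonic in its first argument by construction: it integrates a strictly
# positive derivative network over x_1, conditioned on the remaining inputs h.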
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument("-nb_train", default=10000, type=int, help="Number of training samples")
parser.add_argument("-nb_test", default=1000, type=int, help="Number of testing samples")
parser.add_argument("-nb_epoch", default=200, type=int, help="Number of training epochs")
parser.add_argument("-load", default=False, action="store_true", help="Load a model ?")
parser.add_argument("-folder", default="", help="Folder")
args = parser.parse_args()
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model_monotonic = MonotonicNN(3, [100, 100, 100], nb_steps=100, dev=device).to(device)
model_mlp = MLP(3, [200, 200, 200]).to(device)
optim_monotonic = torch.optim.Adam(model_monotonic.parameters(), 1e-3, weight_decay=1e-5)
optim_mlp = torch.optim.Adam(model_mlp.parameters(), 1e-3, weight_decay=1e-5)
train_x, train_y = create_dataset(args.nb_train)
test_x, test_y = create_dataset(args.nb_test)
b_size = 100
for epoch in range(0, args.nb_epoch):
# Shuffle
idx = torch.randperm(args.nb_train)
train_x = train_x[idx].to(device)
train_y = train_y[idx].to(device)
avg_loss_mon = 0.
avg_loss_mlp = 0.
for i in range(0, args.nb_train-b_size, b_size):
# Monotonic
x = train_x[i:i + b_size].requires_grad_()
y = train_y[i:i + b_size].requires_grad_()
y_pred = model_monotonic(x[:, [0]], x[:, 1:])[:, 0]
loss = ((y_pred - y)**2).sum()
optim_monotonic.zero_grad()
loss.backward()
optim_monotonic.step()
avg_loss_mon += loss.item()
# MLP
y_pred = model_mlp(x[:, [0]], x[:, 1:])[:, 0]
loss = ((y_pred - y) ** 2).sum()
optim_mlp.zero_grad()
loss.backward()
optim_mlp.step()
avg_loss_mlp += loss.item()
print(epoch)
print("\tMLP: ", avg_loss_mlp/args.nb_train)
print("\tMonotonic: ", avg_loss_mon / args.nb_train)
# <<TEST>>
x = torch.arange(-5, 5, .1).unsqueeze(1).to(device)
h = torch.zeros(x.shape[0], 2).to(device)
y = f(x[:, 0], h[:, 0], h[:, 1]).detach().cpu().numpy()
y_mon = model_monotonic(x, h)[:, 0].detach().cpu().numpy()
y_mlp = model_mlp(x, h)[:, 0].detach().cpu().numpy()
x = x.detach().cpu().numpy()
plt.plot(x, y_mon, label="Monotonic model")
plt.plot(x, y_mlp, label="MLP model")
plt.plot(x, y, label="groundtruth")
plt.legend()
plt.show()
plt.savefig("Monotonicity.png")
| 3,487 | 35.715789 | 96 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/UCIExperiments.py | from models import UMNNMAFFlow
import torch
import numpy as np
import os
import pickle
import lib.utils as utils
import datasets
from timeit import default_timer as timer
from tensorboardX import SummaryWriter
writer = SummaryWriter()
def batch_iter(X, batch_size, shuffle=False):
"""
X: feature tensor (shape: num_instances x num_features)
"""
if shuffle:
idxs = torch.randperm(X.shape[0])
else:
idxs = torch.arange(X.shape[0])
if X.is_cuda:
idxs = idxs.cuda()
for batch_idxs in idxs.split(batch_size):
yield X[batch_idxs]
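# Example (hypothetical tensors): iterate shuffled 128-sample minibatches.
#   for xb in batch_iter(torch.randn(1000, 8), batch_size=128, shuffle=True):
#       ...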
def load_data(name):
if name == 'bsds300':
return datasets.BSDS300()
elif name == 'power':
return datasets.POWER()
elif name == 'gas':
return datasets.GAS()
elif name == 'hepmass':
return datasets.HEPMASS()
elif name == 'miniboone':
return datasets.MINIBOONE()
else:
raise ValueError('Unknown dataset')
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def train_uci(dataset, load=None, test=False, save=None, nb_steps=50, solver="CC", hidden_embeding=[300, 300, 300, 300],
hidden_derivative=[100, 50, 50, 50, 50], embeding_size=30, nb_flow=5, lr=1e-3, weight_decay=1e-2,
nb_epoch=500, L=1., batch_size = 100, scheduler_rate=.99, scheduler_patience=500, optim="adam"):
cuda = 0 if torch.cuda.is_available() else -1
device = "cuda:0" if torch.cuda.is_available() else "cpu"
save_name = "ExperimentsResults/UCIExperiments/" + dataset + "/" + str(nb_steps) if save is None else save
logger = utils.get_logger(logpath=os.path.join(save_name, 'logs'), filepath=os.path.abspath(__file__), saving=save is not None)
logger.info("Loading data...")
data = load_data(dataset)
data.trn.x = torch.from_numpy(data.trn.x).to(device)
nb_in = data.trn.x.shape[1]
data.val.x = torch.from_numpy(data.val.x).to(device)
data.tst.x = torch.from_numpy(data.tst.x).to(device)
logger.info("Data loaded.")
logger.info("Creating model...")
model = UMNNMAFFlow(nb_flow=nb_flow, nb_in=nb_in, hidden_derivative=hidden_derivative,
hidden_embedding=hidden_embeding, embedding_s=embeding_size, nb_steps=nb_steps, device=device,
solver=solver).to(device)
logger.info("Model created.")
if save is not None:
with open(save + "/model.txt", "w") as f:
f.write(str(model))
if optim == "adam":
opt = torch.optim.Adam(model.parameters(), lr, weight_decay=weight_decay)
elif optim == "sgd":
opt = torch.optim.SGD(model.parameters(), lr, weight_decay=weight_decay, momentum=.9)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, factor=scheduler_rate, patience=scheduler_patience,
threshold=1e-2)
random_steps = nb_steps <= 0
if load is not None:
logger.info("Loading model...")
if cuda >= 0:
model.load_state_dict(torch.load(load + '/model_best_train.pt'))
else:
model.load_state_dict(torch.load(load + '/model_best_train.pt', map_location='cpu'))
logger.info("Model loaded.")
if test:
model.eval()
with torch.no_grad():
# Compute Test loss
i = 0
ll_test = 0.
if random_steps:
model.set_steps_nb(100)
for cur_x in batch_iter(data.tst.x, shuffle=True, batch_size=batch_size):
ll_tmp, z = model.compute_ll(cur_x)
i += 1
ll_test -= ll_tmp.mean()
logger.info("Test loss: {:4f}".format(ll_test.detach().cpu().data/i))
ll_test /= i
logger.info("Number of parameters: {:d} - Test loss: {:4f}".format(len(_flatten(model.parameters())),
ll_test.detach().cpu().data))
with open(load + '/losses.pkl', 'rb') as f:
losses_train, losses_test = pickle.load(f)
cur_epoch = len(losses_test)
else:
losses_train = []
losses_test = []
cur_epoch = 0
best_valid = np.inf
best_train = np.inf
for epoch in range(cur_epoch, cur_epoch + nb_epoch):
ll_tot = 0
i = 0
start = timer()
for cur_x in batch_iter(data.trn.x, shuffle=True, batch_size=batch_size):
if random_steps:
nb_steps = np.random.randint(5, 50)*2
model.set_steps_nb(nb_steps)
opt.zero_grad()
#Useful to split batch into smaller sub-batches
max_forward = batch_size
for cur_su_batch in range(0, batch_size, max_forward):
ll, z = model.compute_ll(cur_x.view(-1, nb_in)[cur_su_batch:cur_su_batch+max_forward])
ll = -ll.mean()/(batch_size/max_forward)
ll.backward()
ll_tot += ll.detach()
torch.nn.utils.clip_grad.clip_grad_value_(model.parameters(), 1.)
opt.step()
if L > 0:
                model.forceLipshitz(L)
i += 1
if i % 100 == 0:
time_tot = timer()
logger.info("{:d} cur_loss {:4f} - Average time elapsed per batch {:4f}".format(i, ll_tot / i, (time_tot-start)/i))
if save:
torch.save(model.state_dict(), save_name + '/model.pt')
ll_tot /= i
time_tot = timer()
losses_train.append(ll_tot.detach().cpu())
with torch.no_grad():
# Compute Test loss
i = 0
ll_val = 0.
for cur_x in batch_iter(data.val.x, shuffle=True, batch_size=batch_size):
                ll_tmp, _ = model.compute_ll(cur_x.view(-1, nb_in).to(device))
i += 1
ll_val -= ll_tmp.mean()
ll_val /= i
losses_test.append(ll_val.detach().cpu())
writer.add_scalars('data/' + save_name + "/losses", {"Valid": ll_val.detach().cpu().item(),
"Train": ll_tot.detach().cpu().item()}, epoch)
scheduler.step(ll_val)
            if ll_val.detach().cpu().item() < best_valid:
                best_valid = ll_val.detach().cpu().item()
                torch.save(model.state_dict(), save_name + '/model_best_valid.pt')
                if ll_tot.detach().cpu().item() < best_train:
                    torch.save(model.state_dict(), save_name + '/model_best_train_valid.pt')
            if ll_tot.detach().cpu().item() < best_train:
                best_train = ll_tot.detach().cpu().item()
                torch.save(model.state_dict(), save_name + '/model_best_train.pt')
# Save losses
if save:
if epoch % 5 == 0:
if not (os.path.isdir(save_name + '/models')):
os.makedirs(save_name + '/models')
torch.save(model.state_dict(), save_name + '/models/model_{:04d}.pt'.format(epoch))
with open(save_name + '/losses.pkl', 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump([losses_train, losses_test], f)
logger.info("epoch: {:d} - Train loss: {:4f} - Valid loss: {:4f} - Time elapsed per epoch {:4f}".format(
epoch, ll_tot.detach().cpu().item(), ll_val.detach().cpu().item(), time_tot-start))
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument("-load", default=None, help="where to load")
parser.add_argument("-test", default=False, action="store_true", help="Only test")
parser.add_argument("-save", default=None, help="where to store results")
parser.add_argument("-steps", default=50, type=int, help="number of integration steps")
parser.add_argument("-solver", choices=["CC", "CCParallel"], default="CC", help="Solver to use")
parser.add_argument("-hidden_embedding", nargs='+', type=int, default=[512, 512], help="Nb neurons for emebding")
parser.add_argument("-hidden_derivative", nargs='+', type=int, default=[50, 50, 50, 50], help="Nb neurons for derivative")
parser.add_argument("-embedding_size", type=int, default=30, help="Size of embedding part")
parser.add_argument("-nb_flow", type=int, default=5, help="Number of nets in the flow")
parser.add_argument("-weight_decay", type=float, default=1e-2, help="Weight Decay")
parser.add_argument("-lr", type=float, default=1e-3, help="Learning rate")
parser.add_argument("-s_rate", type=float, default=.5, help="LR Scheduling rate")
parser.add_argument("-nb_epoch", type=int, default=500, help="Number of epoch")
parser.add_argument("-b_size", type=int, default=500, help="Number of samples per batch")
parser.add_argument("-s_patience", type=int, default=5, help="Number of epoch with no improvement for lr scheduling")
parser.add_argument(
'--data', choices=['power', 'gas', 'hepmass', 'miniboone', 'bsds300'], type=str, default='miniboone'
)
parser.add_argument("-Lipshitz", type=float, default=0, help="Lipshitz constant max of linear layer in derivative net")
parser.add_argument("-Optim", choices=["adamBNAF", "sgd", "adam"], type=str, default="adam", help="Optimizer")
args = parser.parse_args()
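# Example invocation (hypothetical): 5-flow UMNN-MAF on POWER with 50 CC steps.
#   python UCIExperiments.py --data power -nb_flow 5 -steps 50 -solver CC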
dataset = args.data
dir_save = None if args.save is None else dataset + "/" + args.save
dir_load = None if args.load is None else dataset + "/" + args.load
if dir_save is not None:
if not (os.path.isdir(dir_save)):
os.makedirs(dir_save)
with open(dir_save + "/args.txt", "w") as f:
f.write(str(args))
train_uci(dataset=dataset, load=dir_load, test=args.test, save=dir_save, nb_steps=args.steps, solver=args.solver,
hidden_embeding=args.hidden_embedding, hidden_derivative=args.hidden_derivative, nb_flow=args.nb_flow,
weight_decay=args.weight_decay, lr=args.lr, nb_epoch=args.nb_epoch, L=args.Lipshitz, batch_size=args.b_size,
scheduler_patience=args.s_patience, scheduler_rate=args.s_rate, optim=args.Optim,
embeding_size=args.embedding_size)
| 10,219 | 41.941176 | 131 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/TrainVaeFlow.py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import time
import torch
import torch.utils.data
import torch.optim as optim
import numpy as np
import math
import random
import os
import datetime
import lib.utils as utils
from models.vae_lib.models import VAE
from models.vae_lib.optimization.training import train, evaluate
from models.vae_lib.utils.load_data import load_dataset
from models.vae_lib.utils.plotting import plot_training_curve
from tensorboardX import SummaryWriter
writer = SummaryWriter()
SOLVERS = ["CC", "CCParallel", "Simpson"]
parser = argparse.ArgumentParser(description='PyTorch VAE Normalizing flows')
parser.add_argument(
'-d', '--dataset', type=str, default='mnist', choices=['mnist', 'freyfaces', 'omniglot', 'caltech'],
metavar='DATASET', help='Dataset choice.'
)
parser.add_argument(
'-freys', '--freyseed', type=int, default=123, metavar='FREYSEED',
help="""Seed for shuffling frey face dataset for test split. Ignored for other datasets.
Results in paper are produced with seeds 123, 321, 231"""
)
parser.add_argument('-nc', '--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.')
parser.add_argument(
'-li', '--log_interval', type=int, default=10, metavar='LOG_INTERVAL',
help='how many batches to wait before logging training status'
)
parser.add_argument(
'-od', '--out_dir', type=str, default='snapshots', metavar='OUT_DIR',
help='output directory for model snapshots etc.'
)
# optimization settings
parser.add_argument(
'-e', '--epochs', type=int, default=2000, metavar='EPOCHS', help='number of epochs to train (default: 2000)'
)
parser.add_argument(
'-es', '--early_stopping_epochs', type=int, default=35, metavar='EARLY_STOPPING',
help='number of early stopping epochs'
)
parser.add_argument(
'-bs', '--batch_size', type=int, default=100, metavar='BATCH_SIZE', help='input batch size for training'
)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.0005, metavar='LEARNING_RATE', help='learning rate')
parser.add_argument(
'-w', '--warmup', type=int, default=100, metavar='N',
help='number of epochs for warm-up. Set to 0 to turn warmup off.'
)
parser.add_argument('--max_beta', type=float, default=1., metavar='MB', help='max beta for warm-up')
parser.add_argument('--min_beta', type=float, default=0.0, metavar='MB', help='min beta for warm-up')
parser.add_argument(
'-f', '--flow', type=str, default='no_flow', choices=[
'planar', 'iaf', 'householder', 'orthogonal', 'triangular', 'MMAF', 'no_flow'
], help="""Type of flows to use, no flows can also be selected"""
)
parser.add_argument('-r', '--rank', type=int, default=1)
parser.add_argument(
'-nf', '--num_flows', type=int, default=4, metavar='NUM_FLOWS',
help='Number of flow layers, ignored in absence of flows'
)
parser.add_argument(
'-nv', '--num_ortho_vecs', type=int, default=8, metavar='NUM_ORTHO_VECS',
help=""" For orthogonal flow: How orthogonal vectors per flow do you need.
Ignored for other flow types."""
)
parser.add_argument(
'-nh', '--num_householder', type=int, default=8, metavar='NUM_HOUSEHOLDERS',
help=""" For Householder Sylvester flow: Number of Householder matrices per flow.
Ignored for other flow types."""
)
parser.add_argument(
'-mhs', '--made_h_size', type=int, default=320, metavar='MADEHSIZE',
help='Width of mades for iaf and MMAF. Ignored for all other flows.'
)
parser.add_argument('--z_size', type=int, default=64, metavar='ZSIZE', help='how many stochastic hidden units')
# gpu/cpu
parser.add_argument('--gpu_num', type=int, default=0, metavar='GPU', help='choose GPU to run on.')
# MMAF settings
parser.add_argument("-steps", default=50, type=int, help="number of integration steps")
parser.add_argument("-solver", default="CC", help="Solver used")
parser.add_argument("-hidden_embedding", nargs='+', type=int, default=[512, 512], help="Nb neurons for emebding")
parser.add_argument("-hidden_derivative", nargs='+', type=int, default=[50, 50, 50, 50], help="Nb neurons for derivative")
parser.add_argument("-embedding_size", type=int, default=30, help="Size of embedding part")
parser.add_argument("-Lipshitz", type=float, default=0, help="Lipshitz constant max of linear layer in derivative net")
# evaluation
parser.add_argument('--evaluate', type=eval, default=False, choices=[True, False])
parser.add_argument('--model_path', type=str, default='')
parser.add_argument('--retrain_encoder', type=eval, default=False, choices=[True, False])
args = parser.parse_args()
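# Example invocation (hypothetical): a 4-flow UMNN-MAF ("MMAF") VAE on MNIST.
#   python TrainVaeFlow.py -d mnist -f MMAF -nf 4 -steps 30 -bs 100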
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.manual_seed is None:
args.manual_seed = random.randint(1, 100000)
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
np.random.seed(args.manual_seed)
if args.cuda:
# gpu device number
torch.cuda.set_device(args.gpu_num)
kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}
def run(args, kwargs):
# ==================================================================================================================
# SNAPSHOTS
# ==================================================================================================================
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, 'vae_' + args.dataset + '_')
snap_dir = snapshots_path + args.flow
if args.flow != 'no_flow':
snap_dir += '_' + 'num_flows_' + str(args.num_flows)
if args.flow == 'orthogonal':
snap_dir = snap_dir + '_num_vectors_' + str(args.num_ortho_vecs)
elif args.flow == 'orthogonalH':
snap_dir = snap_dir + '_num_householder_' + str(args.num_householder)
elif args.flow == 'iaf':
snap_dir = snap_dir + '_madehsize_' + str(args.made_h_size)
elif args.flow == "MMAF":
snap_dir = snap_dir + 'MMAF'
elif args.flow == 'permutation':
snap_dir = snap_dir + '_' + 'kernelsize_' + str(args.kernel_size)
elif args.flow == 'mixed':
snap_dir = snap_dir + '_' + 'num_householder_' + str(args.num_householder)
if args.retrain_encoder:
snap_dir = snap_dir + '_retrain-encoder_'
elif args.evaluate:
snap_dir = snap_dir + '_evaluate_'
snap_dir = snap_dir + '__' + args.model_signature + '/'
args.snap_dir = snap_dir
if not os.path.exists(snap_dir):
os.makedirs(snap_dir)
# logger
utils.makedirs(args.snap_dir)
logger = utils.get_logger(logpath=os.path.join(args.snap_dir, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
# SAVING
torch.save(args, snap_dir + args.flow + '.config')
# ==================================================================================================================
# LOAD DATA
# ==================================================================================================================
train_loader, val_loader, test_loader, args = load_dataset(args, **kwargs)
if not args.evaluate:
# ==============================================================================================================
# SELECT MODEL
# ==============================================================================================================
# flow parameters and architecture choice are passed on to model through args
if args.flow == 'no_flow':
model = VAE.VAE(args)
elif args.flow == 'planar':
model = VAE.PlanarVAE(args)
elif args.flow == 'iaf':
model = VAE.IAFVAE(args)
elif args.flow == 'orthogonal':
model = VAE.OrthogonalSylvesterVAE(args)
elif args.flow == 'householder':
model = VAE.HouseholderSylvesterVAE(args)
elif args.flow == 'triangular':
model = VAE.TriangularSylvesterVAE(args)
elif args.flow == 'MMAF':
model = VAE.MMAVAE(args)
else:
raise ValueError('Invalid flow choice')
if args.retrain_encoder:
logger.info(f"Initializing decoder from {args.model_path}")
dec_model = torch.load(args.model_path)
dec_sd = {}
for k, v in dec_model.state_dict().items():
if 'p_x' in k:
dec_sd[k] = v
model.load_state_dict(dec_sd, strict=False)
if args.cuda:
logger.info("Model on GPU")
model.cuda()
logger.info(model)
if args.retrain_encoder:
parameters = []
logger.info('Optimizing over:')
for name, param in model.named_parameters():
if 'p_x' not in name:
logger.info(name)
parameters.append(param)
else:
parameters = model.parameters()
optimizer = optim.Adamax(parameters, lr=args.learning_rate, eps=1.e-7)
# ==================================================================================================================
# TRAINING
# ==================================================================================================================
train_loss = []
val_loss = []
# for early stopping
best_loss = np.inf
best_bpd = np.inf
e = 0
epoch = 0
train_times = []
for epoch in range(1, args.epochs + 1):
t_start = time.time()
tr_loss = train(epoch, train_loader, model, optimizer, args, logger)
train_loss.append(tr_loss)
train_times.append(time.time() - t_start)
logger.info('One training epoch took %.2f seconds' % (time.time() - t_start))
v_loss, v_bpd = evaluate(val_loader, model, args, logger, epoch=epoch)
val_loss.append(v_loss)
writer.add_scalars('data/' + args.snap_dir + "/losses", {"Valid": v_loss,
"Train": tr_loss.sum() / len(train_loader)}, epoch)
# early-stopping
if v_loss < best_loss:
e = 0
best_loss = v_loss
if args.input_type != 'binary':
best_bpd = v_bpd
logger.info('->model saved<-')
torch.save(model, snap_dir + args.flow + '.model')
# torch.save(model, snap_dir + args.flow + '_' + args.architecture + '.model')
elif (args.early_stopping_epochs > 0) and (epoch >= args.warmup):
e += 1
if e > args.early_stopping_epochs:
break
if args.input_type == 'binary':
logger.info(
'--> Early stopping: {}/{} (BEST: loss {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss)
)
else:
logger.info(
'--> Early stopping: {}/{} (BEST: loss {:.4f}, bpd {:.4f})\n'.
format(e, args.early_stopping_epochs, best_loss, best_bpd)
)
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
train_loss = np.hstack(train_loss)
val_loss = np.array(val_loss)
plot_training_curve(train_loss, val_loss, fname=snap_dir + '/training_curve_%s.pdf' % args.flow)
# training time per epoch
train_times = np.array(train_times)
mean_train_time = np.mean(train_times)
std_train_time = np.std(train_times, ddof=1)
logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))
# ==================================================================================================================
# EVALUATION
# ==================================================================================================================
logger.info(args)
logger.info('Stopped after %d epochs' % epoch)
logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))
final_model = torch.load(snap_dir + args.flow + '.model')
validation_loss, validation_bpd = evaluate(val_loader, final_model, args, logger)
else:
validation_loss = "N/A"
validation_bpd = "N/A"
logger.info(f"Loading model from {args.model_path}")
final_model = torch.load(args.model_path)
test_loss, test_bpd = evaluate(test_loader, final_model, args, logger, testing=False)
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL): {:.4f}'.format(validation_loss))
        logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST): {:.4f}'.format(test_loss))
if args.input_type != 'binary':
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL) BPD : {:.4f}'.format(validation_bpd))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST) BPD: {:.4f}'.format(test_bpd))
return
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL): {:.4f}'.format(validation_loss))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST): {:.4f}'.format(test_loss))
if args.input_type != 'binary':
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL) BPD : {:.4f}'.format(validation_bpd))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST) BPD: {:.4f}'.format(test_bpd))
if __name__ == "__main__":
run(args, kwargs)
| 13,750 | 39.444118 | 124 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/VAE.py | from __future__ import print_function
import torch
import torch.nn as nn
from ...vae_lib.models import flows
from ...vae_lib.models.layers import GatedConv2d, GatedConvTranspose2d
class VAE(nn.Module):
"""
The base VAE class containing gated convolutional encoder and decoder architecture.
    Can be used as a base class for VAEs with normalizing flows.
"""
def __init__(self, args):
super(VAE, self).__init__()
# extract model settings from args
self.z_size = args.z_size
self.input_size = args.input_size
self.input_type = args.input_type
if self.input_size == [1, 28, 28] or self.input_size == [3, 28, 28]:
self.last_kernel_size = 7
elif self.input_size == [1, 28, 20]:
self.last_kernel_size = (7, 5)
else:
raise ValueError('invalid input size!!')
self.q_z_nn, self.q_z_mean, self.q_z_var = self.create_encoder()
self.p_x_nn, self.p_x_mean = self.create_decoder()
self.q_z_nn_output_dim = 256
# auxiliary
if args.cuda:
self.FloatTensor = torch.cuda.FloatTensor
else:
self.FloatTensor = torch.FloatTensor
# log-det-jacobian = 0 without flows
self.log_det_j = self.FloatTensor(1).zero_()
def create_encoder(self):
"""
Helper function to create the elemental blocks for the encoder. Creates a gated convnet encoder.
        The encoder expects data as input of shape (batch_size, num_channels, width, height).
"""
if self.input_type == 'binary':
q_z_nn = nn.Sequential(
GatedConv2d(self.input_size[0], 32, 5, 1, 2),
GatedConv2d(32, 32, 5, 2, 2),
GatedConv2d(32, 64, 5, 1, 2),
GatedConv2d(64, 64, 5, 2, 2),
GatedConv2d(64, 64, 5, 1, 2),
GatedConv2d(64, 256, self.last_kernel_size, 1, 0),
)
q_z_mean = nn.Linear(256, self.z_size)
q_z_var = nn.Sequential(
nn.Linear(256, self.z_size),
nn.Softplus(),
)
return q_z_nn, q_z_mean, q_z_var
elif self.input_type == 'multinomial':
act = None
q_z_nn = nn.Sequential(
GatedConv2d(self.input_size[0], 32, 5, 1, 2, activation=act),
GatedConv2d(32, 32, 5, 2, 2, activation=act),
GatedConv2d(32, 64, 5, 1, 2, activation=act),
GatedConv2d(64, 64, 5, 2, 2, activation=act),
GatedConv2d(64, 64, 5, 1, 2, activation=act),
GatedConv2d(64, 256, self.last_kernel_size, 1, 0, activation=act)
)
q_z_mean = nn.Linear(256, self.z_size)
q_z_var = nn.Sequential(nn.Linear(256, self.z_size), nn.Softplus(), nn.Hardtanh(min_val=0.01, max_val=7.))
return q_z_nn, q_z_mean, q_z_var
def create_decoder(self):
"""
Helper function to create the elemental blocks for the decoder. Creates a gated convnet decoder.
"""
num_classes = 256
if self.input_type == 'binary':
p_x_nn = nn.Sequential(
GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0),
GatedConvTranspose2d(64, 64, 5, 1, 2),
GatedConvTranspose2d(64, 32, 5, 2, 2, 1),
GatedConvTranspose2d(32, 32, 5, 1, 2),
GatedConvTranspose2d(32, 32, 5, 2, 2, 1), GatedConvTranspose2d(32, 32, 5, 1, 2)
)
p_x_mean = nn.Sequential(nn.Conv2d(32, self.input_size[0], 1, 1, 0), nn.Sigmoid())
return p_x_nn, p_x_mean
elif self.input_type == 'multinomial':
act = None
p_x_nn = nn.Sequential(
GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0, activation=act),
GatedConvTranspose2d(64, 64, 5, 1, 2, activation=act),
GatedConvTranspose2d(64, 32, 5, 2, 2, 1, activation=act),
GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act),
GatedConvTranspose2d(32, 32, 5, 2, 2, 1, activation=act),
GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act)
)
p_x_mean = nn.Sequential(
nn.Conv2d(32, 256, 5, 1, 2),
nn.Conv2d(256, self.input_size[0] * num_classes, 1, 1, 0),
# output shape: batch_size, num_channels * num_classes, pixel_width, pixel_height
)
return p_x_nn, p_x_mean
else:
raise ValueError('invalid input type!!')
def reparameterize(self, mu, var):
"""
Samples z from a multivariate Gaussian with diagonal covariance matrix using the
reparameterization trick.
"""
std = var.sqrt()
eps = self.FloatTensor(std.size()).normal_()
z = eps.mul(std).add_(mu)
return z
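    # E.g. for mu, var of shape (batch_size, z_size):
    #   z = mu + sqrt(var) * eps,  eps ~ N(0, I),
    # which keeps z differentiable w.r.t. the encoder outputs.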
def encode(self, x):
"""
Encoder expects following data shapes as input: shape = (batch_size, num_channels, width, height)
"""
h = self.q_z_nn(x)
h = h.view(h.size(0), -1)
mean = self.q_z_mean(h)
var = self.q_z_var(h)
return mean, var
def decode(self, z):
"""
Decoder outputs reconstructed image in the following shapes:
x_mean.shape = (batch_size, num_channels, width, height)
"""
z = z.view(z.size(0), self.z_size, 1, 1)
h = self.p_x_nn(z)
x_mean = self.p_x_mean(h)
return x_mean
def forward(self, x):
"""
Evaluates the model as a whole, encodes and decodes. Note that the log det jacobian is zero
for a plain VAE (without flows), and z_0 = z_k.
"""
# mean and variance of z
z_mu, z_var = self.encode(x)
# sample z
z = self.reparameterize(z_mu, z_var)
x_mean = self.decode(z)
return x_mean, z_mu, z_var, self.log_det_j, z, z
class PlanarVAE(VAE):
"""
Variational auto-encoder with planar flows in the encoder.
"""
def __init__(self, args):
super(PlanarVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Planar
self.num_flows = args.num_flows
# Amortized flow parameters
self.amor_u = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
self.amor_w = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow()
self.add_module('flow_' + str(k), flow_k)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
        # return amortized u and w for all flows
u = self.amor_u(h).view(batch_size, self.num_flows, self.z_size, 1)
w = self.amor_w(h).view(batch_size, self.num_flows, 1, self.z_size)
b = self.amor_b(h).view(batch_size, self.num_flows, 1, 1)
return mean_z, var_z, u, w, b
def forward(self, x):
"""
Forward pass with planar flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, u, w, b = self.encode(x)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
z_k, log_det_jacobian = flow_k(z[k], u[:, k, :, :], w[:, k, :, :], b[:, k, :, :])
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class OrthogonalSylvesterVAE(VAE):
"""
Variational auto-encoder with orthogonal flows in the encoder.
"""
def __init__(self, args):
super(OrthogonalSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Sylvester
self.num_flows = args.num_flows
self.num_ortho_vecs = args.num_ortho_vecs
assert (self.num_ortho_vecs <= self.z_size) and (self.num_ortho_vecs > 0)
# Orthogonalization parameters
if self.num_ortho_vecs == self.z_size:
self.cond = 1.e-5
else:
self.cond = 1.e-6
self.steps = 100
identity = torch.eye(self.num_ortho_vecs, self.num_ortho_vecs)
# Add batch dimension
identity = identity.unsqueeze(0)
# Put identity in buffer so that it will be moved to GPU if needed by any call of .cuda
self.register_buffer('_eye', identity)
self._eye.requires_grad = False
# Masks needed for triangular R1 and R2.
triu_mask = torch.triu(torch.ones(self.num_ortho_vecs, self.num_ortho_vecs), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.num_ortho_vecs).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of R1 * R2 have to satisfy -1 < R1 * R2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs * self.num_ortho_vecs)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation
)
self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_ortho_vecs)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.num_ortho_vecs)
self.add_module('flow_' + str(k), flow_k)
def batch_construct_orthogonal(self, q):
"""
Batch orthogonal matrix construction.
:param q: q contains batches of matrices, shape : (batch_size * num_flows, z_size * num_ortho_vecs)
:return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, num_ortho_vecs)
"""
# Reshape to shape (num_flows * batch_size, z_size * num_ortho_vecs)
q = q.view(-1, self.z_size * self.num_ortho_vecs)
norm = torch.norm(q, p=2, dim=1, keepdim=True)
amat = torch.div(q, norm)
dim0 = amat.size(0)
amat = amat.resize(dim0, self.z_size, self.num_ortho_vecs)
max_norm = 0.
# Iterative orthogonalization
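        # Each step applies the Newton-Schulz-style update A <- A (I + 0.5 (I - A^T A));
        # at a fixed point A^T A = I, i.e. the columns of A are orthonormal.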
for s in range(self.steps):
tmp = torch.bmm(amat.transpose(2, 1), amat)
tmp = self._eye - tmp
tmp = self._eye + 0.5 * tmp
amat = torch.bmm(amat, tmp)
# Testing for convergence
test = torch.bmm(amat.transpose(2, 1), amat) - self._eye
norms2 = torch.sum(torch.norm(test, p=2, dim=2)**2, dim=1)
norms = torch.sqrt(norms2)
max_norm = torch.max(norms).item()
if max_norm <= self.cond:
break
if max_norm > self.cond:
print('\nWARNING WARNING WARNING: orthogonalization not complete')
print('\t Final max norm =', max_norm)
print()
# Reshaping: first dimension is batch_size
amat = amat.view(-1, self.num_flows, self.z_size, self.num_ortho_vecs)
amat = amat.transpose(0, 1)
return amat
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, q, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
full_d = full_d.resize(batch_size, self.num_ortho_vecs, self.num_ortho_vecs, self.num_flows)
diag1 = diag1.resize(batch_size, self.num_ortho_vecs, self.num_flows)
diag2 = diag2.resize(batch_size, self.num_ortho_vecs, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
q = self.amor_q(h)
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
b = b.resize(batch_size, 1, self.num_ortho_vecs, self.num_flows)
return mean_z, var_z, r1, r2, q, b
def forward(self, x):
"""
Forward pass with orthogonal sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, q, b = self.encode(x)
# Orthogonalize all q matrices
q_ortho = self.batch_construct_orthogonal(q)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_ortho[k, :, :, :], b[:, :, :, k])
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class HouseholderSylvesterVAE(VAE):
"""
Variational auto-encoder with householder sylvester flows in the encoder.
"""
def __init__(self, args):
super(HouseholderSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Sylvester
self.num_flows = args.num_flows
self.num_householder = args.num_householder
assert self.num_householder > 0
identity = torch.eye(self.z_size, self.z_size)
# Add batch dimension
identity = identity.unsqueeze(0)
# Put identity in buffer so that it will be moved to GPU if needed by any call of .cuda
self.register_buffer('_eye', identity)
self._eye.requires_grad = False
# Masks needed for triangular r1 and r2.
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.z_size).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_householder)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.z_size)
self.add_module('flow_' + str(k), flow_k)
def batch_construct_orthogonal(self, q):
"""
Batch orthogonal matrix construction.
:param q: q contains batches of matrices, shape : (batch_size, num_flows * z_size * num_householder)
:return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, z_size)
"""
# Reshape to shape (num_flows * batch_size * num_householder, z_size)
q = q.view(-1, self.z_size)
norm = torch.norm(q, p=2, dim=1, keepdim=True) # ||v||_2
v = torch.div(q, norm) # v / ||v||_2
# Calculate Householder Matrices
vvT = torch.bmm(v.unsqueeze(2), v.unsqueeze(1)) # v * v_T : batch_dot( B x L x 1 * B x 1 x L ) = B x L x L
amat = self._eye - 2 * vvT # NOTICE: v is already normalized! so there is no need to calculate vvT/vTv
# Reshaping: first dimension is batch_size * num_flows
amat = amat.view(-1, self.num_householder, self.z_size, self.z_size)
tmp = amat[:, 0]
for k in range(1, self.num_householder):
tmp = torch.bmm(amat[:, k], tmp)
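        # The loop above composes the num_householder reflections H = I - 2 v v^T into a
        # single orthogonal matrix per flow (a product of orthogonal matrices is orthogonal).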
amat = tmp.view(-1, self.num_flows, self.z_size, self.z_size)
amat = amat.transpose(0, 1)
return amat
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, q, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
full_d = full_d.resize(batch_size, self.z_size, self.z_size, self.num_flows)
diag1 = diag1.resize(batch_size, self.z_size, self.num_flows)
diag2 = diag2.resize(batch_size, self.z_size, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
q = self.amor_q(h)
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
b = b.resize(batch_size, 1, self.z_size, self.num_flows)
return mean_z, var_z, r1, r2, q, b
def forward(self, x):
"""
        Forward pass with Householder Sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, q, b = self.encode(x)
# Orthogonalize all q matrices
q_ortho = self.batch_construct_orthogonal(q)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
q_k = q_ortho[k]
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_k, b[:, :, :, k], sum_ldj=True)
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class TriangularSylvesterVAE(VAE):
"""
    Variational auto-encoder with triangular Sylvester flows in the encoder. Alternates between setting
    the orthogonal matrix equal to a permutation matrix and the identity matrix for each flow.
"""
def __init__(self, args):
super(TriangularSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.TriangularSylvester
self.num_flows = args.num_flows
# permuting indices corresponding to Q=P (permutation matrix) for every other flow
flip_idx = torch.arange(self.z_size - 1, -1, -1).long()
self.register_buffer('flip_idx', flip_idx)
# Masks needed for triangular r1 and r2.
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.z_size).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.z_size)
self.add_module('flow_' + str(k), flow_k)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
full_d = full_d.resize(batch_size, self.z_size, self.z_size, self.num_flows)
diag1 = diag1.resize(batch_size, self.z_size, self.num_flows)
diag2 = diag2.resize(batch_size, self.z_size, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
b = b.resize(batch_size, 1, self.z_size, self.num_flows)
return mean_z, var_z, r1, r2, b
def forward(self, x):
"""
        Forward pass with triangular Sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, b = self.encode(x)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
if k % 2 == 1:
                # Alternate with reordering z for triangular flow
permute_z = self.flip_idx
else:
permute_z = None
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], b[:, :, :, k], permute_z, sum_ldj=True)
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class IAFVAE(VAE):
"""
Variational auto-encoder with inverse autoregressive flows in the encoder.
"""
def __init__(self, args):
super(IAFVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
self.h_size = args.made_h_size
self.h_context = nn.Linear(self.q_z_nn_output_dim, self.h_size)
# Flow parameters
self.num_flows = args.num_flows
self.flow = flows.IAF(
z_size=self.z_size, num_flows=self.num_flows, num_hidden=1, h_size=self.h_size, conv2d=False
)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and context h for flows.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
h_context = self.h_context(h)
return mean_z, var_z, h_context
def forward(self, x):
"""
Forward pass with inverse autoregressive flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
# mean and variance of z
z_mu, z_var, h_context = self.encode(x)
# sample z
z_0 = self.reparameterize(z_mu, z_var)
# iaf flows
z_k, self.log_det_j = self.flow(z_0, h_context)
# decode
x_mean = self.decode(z_k)
return x_mean, z_mu, z_var, self.log_det_j, z_0, z_k
class MMAVAE(VAE):
"""
    Variational auto-encoder with monotonic masked autoregressive flows (UMNN-MAF) in the encoder.
"""
def __init__(self, args):
super(MMAVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
self.h_size = args.made_h_size
self.h_context = nn.Linear(self.q_z_nn_output_dim, self.h_size)
# Flow parameters
self.num_flows = args.num_flows
self.device = "cuda:%d" % args.gpu_num if torch.cuda.is_available() else "cpu"
self.flow = flows.MMAF(
z_size=self.z_size, num_flows=self.num_flows, num_hidden=1, h_size=self.h_size, device=self.device, args=args#, conv2d=False
)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and context h for flows.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
h_context = self.h_context(h)
return mean_z, var_z, h_context
def forward(self, x):
"""
        Forward pass with monotonic masked autoregressive flows for the transformation z_0 -> z_1 -> ... -> z_k.
        Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
# mean and variance of z
z_mu, z_var, h_context = self.encode(x)
# sample z
z_0 = self.reparameterize(z_mu, z_var)
        # MMAF flows
z_k, self.log_det_j = self.flow(z_0, h_context)
# decode
x_mean = self.decode(z_k)
return x_mean, z_mu, z_var, self.log_det_j, z_0, z_k
def forceLipshitz(self, L=1.5):
self.flow.forceLipshitz(L)
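if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original training
    # pipeline). Run from the package context, e.g. with `python -m`, since this
    # module uses relative imports. The Namespace fields below are assumptions
    # matching what VAE/PlanarVAE read from `args`.
    from argparse import Namespace
    _args = Namespace(z_size=16, input_size=[1, 28, 28], input_type='binary',
                      cuda=False, num_flows=2)
    _model = PlanarVAE(_args)
    _x = torch.rand(4, 1, 28, 28)
    _x_mean, _z_mu, _z_var, _ldj, _z0, _z_k = _model(_x)
    print(_x_mean.shape, _z_mu.shape, _ldj.shape)  # (4, 1, 28, 28) (4, 16) (4,)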
| 26,921 | 32.949559 | 136 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/CNFVAE.py | import torch
import torch.nn as nn
from train_misc import build_model_tabular
from UMNNMAF import lib as layers
import lib as diffeq_layers
from .VAE import VAE
from lib import NONLINEARITIES
from torchdiffeq import odeint_adjoint as odeint
def get_hidden_dims(args):
return tuple(map(int, args.dims.split("-"))) + (args.z_size,)
def concat_layer_num_params(in_dim, out_dim):
return (in_dim + 1) * out_dim + out_dim
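# E.g. a "concat" layer mapping in_dim=2 -> out_dim=3 consumes a (in_dim + 1, out_dim)
# weight (the +1 row multiplies the concatenated time t) plus an out_dim bias,
# i.e. (2 + 1) * 3 + 3 = 12 amortized parameters.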
class CNFVAE(VAE):
def __init__(self, args):
super(CNFVAE, self).__init__(args)
# CNF model
self.cnf = build_model_tabular(args, args.z_size)
if args.cuda:
self.cuda()
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
return mean_z, var_z
def forward(self, x):
"""
        Forward pass with a continuous normalizing flow (CNF) for the transformation z_0 -> z_K.
        The log-det-jacobian term is obtained by integrating the instantaneous change of variables
        and is returned as -delta_logp.
"""
z_mu, z_var = self.encode(x)
# Sample z_0
z0 = self.reparameterize(z_mu, z_var)
zero = torch.zeros(x.shape[0], 1).to(x)
zk, delta_logp = self.cnf(z0, zero) # run model forward
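        # delta_logp is the change in log-density accumulated along the ODE trajectory;
        # its negative plays the role of the log-det-jacobian returned below.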
x_mean = self.decode(zk)
return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, zk
class AmortizedBiasODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(AmortizedBiasODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
for dim_out in hidden_dims:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns[:-1])
def _unpack_params(self, params):
return [params]
def forward(self, t, y, am_biases):
dx = y
for l, layer in enumerate(self.layers):
dx = layer(t, dx)
this_bias, am_biases = am_biases[:, :dx.size(1)], am_biases[:, dx.size(1):]
dx = dx + this_bias
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
return dx
class AmortizedLowRankODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, rank=1, layer_type="concat", nonlinearity="softplus"):
super(AmortizedLowRankODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
for dim_out in hidden_dims:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns[:-1])
self.rank = rank
def _unpack_params(self, params):
return [params]
def _rank_k_bmm(self, x, u, v):
xu = torch.bmm(x[:, None], u.view(x.shape[0], x.shape[-1], self.rank))
xuv = torch.bmm(xu, v.view(x.shape[0], self.rank, -1))
return xuv[:, 0]
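    # The low-rank path computes x (u v^T) with u: (in_dim, rank) and v: (rank, out_dim),
    # so each layer effectively applies W + u v^T plus an amortized bias.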
def forward(self, t, y, am_params):
dx = y
for l, (layer, in_dim, out_dim) in enumerate(zip(self.layers, self.input_dims, self.output_dims)):
this_u, am_params = am_params[:, :in_dim * self.rank], am_params[:, in_dim * self.rank:]
this_v, am_params = am_params[:, :out_dim * self.rank], am_params[:, out_dim * self.rank:]
this_bias, am_params = am_params[:, :out_dim], am_params[:, out_dim:]
xw = layer(t, dx)
xw_am = self._rank_k_bmm(dx, this_u, this_v)
dx = xw + xw_am + this_bias
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
return dx
class HyperODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(HyperODEnet, self).__init__()
assert layer_type == "concat"
self.input_dim = input_dim
# build layers and add them
activation_fns = []
for dim_out in hidden_dims + (input_dim,):
activation_fns.append(NONLINEARITIES[nonlinearity])
self.activation_fns = nn.ModuleList(activation_fns[:-1])
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
def _pack_inputs(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return ttx
def _unpack_params(self, params):
layer_params = []
for in_dim, out_dim in zip(self.input_dims, self.output_dims):
this_num_params = concat_layer_num_params(in_dim, out_dim)
# get params for this layer
this_params, params = params[:, :this_num_params], params[:, this_num_params:]
# split into weight and bias
bias, weight_params = this_params[:, :out_dim], this_params[:, out_dim:]
weight = weight_params.view(weight_params.size(0), in_dim + 1, out_dim)
layer_params.append(weight)
layer_params.append(bias)
return layer_params
def _layer(self, t, x, weight, bias):
# weights is (batch, in_dim + 1, out_dim)
ttx = self._pack_inputs(t, x) # (batch, in_dim + 1)
ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1)
xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim)
return xw + bias
def forward(self, t, y, *layer_params):
dx = y
for l, (weight, bias) in enumerate(zip(layer_params[::2], layer_params[1::2])):
dx = self._layer(t, dx, weight, bias)
# if not last layer, use nonlinearity
if l < len(layer_params) - 1:
dx = self.activation_fns[l](dx)
return dx
class LyperODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(LyperODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
self.dims = (input_dim,) + hidden_dims
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
for dim_out in hidden_dims[:-1]:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns)
def _pack_inputs(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return ttx
def _unpack_params(self, params):
return [params]
def _am_layer(self, t, x, weight, bias):
# weights is (batch, in_dim + 1, out_dim)
ttx = self._pack_inputs(t, x) # (batch, in_dim + 1)
ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1)
xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim)
return xw + bias
def forward(self, t, x, am_params):
dx = x
for layer, act in zip(self.layers, self.activation_fns):
dx = act(layer(t, dx))
bias, weight_params = am_params[:, :self.dims[-1]], am_params[:, self.dims[-1]:]
weight = weight_params.view(weight_params.size(0), self.dims[-2] + 1, self.dims[-1])
dx = self._am_layer(t, dx, weight, bias)
return dx
def construct_amortized_odefunc(args, z_dim, amortization_type="bias"):
hidden_dims = get_hidden_dims(args)
if amortization_type == "bias":
diffeq = AmortizedBiasODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "hyper":
diffeq = HyperODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "lyper":
diffeq = LyperODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "low_rank":
diffeq = AmortizedLowRankODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
rank=args.rank,
)
odefunc = layers.ODEfunc(
diffeq=diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
return odefunc
class AmortizedCNFVAE(VAE):
h_size = 256
def __init__(self, args):
super(AmortizedCNFVAE, self).__init__(args)
# CNF model
self.odefuncs = nn.ModuleList([
construct_amortized_odefunc(args, args.z_size, self.amortization_type) for _ in range(args.num_blocks)
])
self.q_am = self._amortized_layers(args)
assert len(self.q_am) == args.num_blocks or len(self.q_am) == 0
if args.cuda:
self.cuda()
self.register_buffer('integration_times', torch.tensor([0.0, args.time_length]))
self.atol = args.atol
self.rtol = args.rtol
self.solver = args.solver
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
am_params = [q_am(h) for q_am in self.q_am]
return mean_z, var_z, am_params
def forward(self, x):
self.log_det_j = 0.
z_mu, z_var, am_params = self.encode(x)
# Sample z_0
z0 = self.reparameterize(z_mu, z_var)
delta_logp = torch.zeros(x.shape[0], 1).to(x)
z = z0
for odefunc, am_param in zip(self.odefuncs, am_params):
am_param_unpacked = odefunc.diffeq._unpack_params(am_param)
odefunc.before_odeint()
states = odeint(
odefunc,
(z, delta_logp) + tuple(am_param_unpacked),
self.integration_times.to(z),
atol=self.atol,
rtol=self.rtol,
method=self.solver,
)
z, delta_logp = states[0][-1], states[1][-1]
x_mean = self.decode(z)
return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, z
class AmortizedBiasCNFVAE(AmortizedCNFVAE):
amortization_type = "bias"
def _amortized_layers(self, args):
hidden_dims = get_hidden_dims(args)
bias_size = sum(hidden_dims)
return nn.ModuleList([nn.Linear(self.h_size, bias_size) for _ in range(args.num_blocks)])
class AmortizedLowRankCNFVAE(AmortizedCNFVAE):
amortization_type = "low_rank"
def _amortized_layers(self, args):
out_dims = get_hidden_dims(args)
in_dims = (out_dims[-1],) + out_dims[:-1]
params_size = (sum(in_dims) + sum(out_dims)) * args.rank + sum(out_dims)
return nn.ModuleList([nn.Linear(self.h_size, params_size) for _ in range(args.num_blocks)])
class HypernetCNFVAE(AmortizedCNFVAE):
amortization_type = "hyper"
def _amortized_layers(self, args):
hidden_dims = get_hidden_dims(args)
input_dims = (args.z_size,) + hidden_dims[:-1]
assert args.layer_type == "concat", "hypernets only support concat layers at the moment"
weight_dims = [concat_layer_num_params(in_dim, out_dim) for in_dim, out_dim in zip(input_dims, hidden_dims)]
weight_size = sum(weight_dims)
return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)])
class LypernetCNFVAE(AmortizedCNFVAE):
amortization_type = "lyper"
def _amortized_layers(self, args):
dims = (args.z_size,) + get_hidden_dims(args)
weight_size = concat_layer_num_params(dims[-2], dims[-1])
return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)])
| 14,375 | 33.808717 | 116 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/layers.py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import numpy as np
import torch.nn.functional as F
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class GatedConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None):
super(GatedConv2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class GatedConvTranspose2d(nn.Module):
def __init__(
self, input_channels, output_channels, kernel_size, stride, padding, output_padding=0, dilation=1,
activation=None
):
super(GatedConvTranspose2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.ConvTranspose2d(
input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation
)
self.g = nn.ConvTranspose2d(
input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation
)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class MaskedLinear(nn.Module):
"""
Creates masked linear layer for MLP MADE.
For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
For hidden to output (y) layers:
If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
"""
def __init__(self, in_features, out_features, diagonal_zeros=False, bias=True):
super(MaskedLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
mask = torch.from_numpy(self.build_mask())
if torch.cuda.is_available():
mask = mask.cuda()
self.mask = torch.autograd.Variable(mask, requires_grad=False)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_in % n_out == 0 or n_out % n_in == 0
mask = np.ones((n_in, n_out), dtype=np.float32)
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i + 1:, i * k:(i + 1) * k] = 0
if self.diagonal_zeros:
mask[i:i + 1, i * k:(i + 1) * k] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[(i + 1) * k:, i:i + 1] = 0
if self.diagonal_zeros:
mask[i * k:(i + 1) * k:, i:i + 1] = 0
return mask
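    # E.g. for n_in = n_out = 3 and diagonal_zeros=False the mask is upper triangular
    # (diagonal included), so output j only mixes inputs x_{<=j}.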
def forward(self, x):
output = x.mm(self.mask * self.weight)
if self.bias is not None:
return output.add(self.bias.expand_as(output))
else:
return output
def __repr__(self):
if self.bias is not None:
bias = True
else:
bias = False
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ', diagonal_zeros=' \
+ str(self.diagonal_zeros) + ', bias=' \
+ str(bias) + ')'
class MaskedConv2d(nn.Module):
"""
Creates masked convolutional autoregressive layer for pixelCNN.
For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
For hidden to output (y) layers:
If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
"""
def __init__(self, in_features, out_features, size_kernel=(3, 3), diagonal_zeros=False, bias=True):
super(MaskedConv2d, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.size_kernel = size_kernel
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(torch.FloatTensor(out_features, in_features, *self.size_kernel))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
mask = torch.from_numpy(self.build_mask())
if torch.cuda.is_available():
mask = mask.cuda()
self.mask = torch.autograd.Variable(mask, requires_grad=False)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_out % n_in == 0 or n_in % n_out == 0, "%d - %d" % (n_in, n_out)
# Build autoregressive mask
l = (self.size_kernel[0] - 1) // 2
m = (self.size_kernel[1] - 1) // 2
mask = np.ones((n_out, n_in, self.size_kernel[0], self.size_kernel[1]), dtype=np.float32)
mask[:, :, :l, :] = 0
mask[:, :, l, :m] = 0
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i * k:(i + 1) * k, i + 1:, l, m] = 0
if self.diagonal_zeros:
mask[i * k:(i + 1) * k, i:i + 1, l, m] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[i:i + 1, (i + 1) * k:, l, m] = 0
if self.diagonal_zeros:
mask[i:i + 1, i * k:(i + 1) * k:, l, m] = 0
return mask
def forward(self, x):
output = F.conv2d(x, self.mask * self.weight, bias=self.bias, padding=(1, 1))
return output
def __repr__(self):
if self.bias is not None:
bias = True
else:
bias = False
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ', diagonal_zeros=' \
+ str(self.diagonal_zeros) + ', bias=' \
+ str(bias) + ', size_kernel=' \
+ str(self.size_kernel) + ')'
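if __name__ == "__main__":
    # Illustrative sketch (not part of the original library): print the autoregressive
    # masks of a small MaskedLinear. With diagonal_zeros=False output i may depend on
    # inputs x_{<=i}; with diagonal_zeros=True only on x_{<i}.
    print(MaskedLinear(3, 3, diagonal_zeros=False).mask.cpu().numpy())
    print(MaskedLinear(3, 3, diagonal_zeros=True).mask.cpu().numpy())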
| 7,128 | 32.947619 | 115 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/flows.py | """
Collection of flow strategies
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from ...vae_lib.models.layers import MaskedConv2d, MaskedLinear
import sys
sys.path.append("../../")
from models import UMNNMAFFlow
class Planar(nn.Module):
"""
PyTorch implementation of planar flows as presented in "Variational Inference with Normalizing Flows"
by Danilo Jimenez Rezende, Shakir Mohamed. Model assumes amortized flow parameters.
"""
def __init__(self):
super(Planar, self).__init__()
self.h = nn.Tanh()
self.softplus = nn.Softplus()
def der_h(self, x):
""" Derivative of tanh """
return 1 - self.h(x)**2
def forward(self, zk, u, w, b):
"""
Forward pass. Assumes amortized u, w and b. Conditions on diagonals of u and w for invertibility
        will be satisfied inside this function. Computes the following transformation:
z' = z + u h( w^T z + b)
or actually
z'^T = z^T + h(z^T w + b)u^T
Assumes the following input shapes:
shape u = (batch_size, z_size, 1)
shape w = (batch_size, 1, z_size)
shape b = (batch_size, 1, 1)
shape z = (batch_size, z_size).
"""
zk = zk.unsqueeze(2)
# reparameterize u such that the flow becomes invertible (see appendix paper)
uw = torch.bmm(w, u)
m_uw = -1. + self.softplus(uw)
w_norm_sq = torch.sum(w**2, dim=2, keepdim=True)
u_hat = u + ((m_uw - uw) * w.transpose(2, 1) / w_norm_sq)
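        # m(a) = -1 + softplus(a) > -1, so w^T u_hat = m(w^T u) > -1: the sufficient
        # condition for invertibility of the planar transform with tanh nonlinearity.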
# compute flow with u_hat
wzb = torch.bmm(w, zk) + b
z = zk + u_hat * self.h(wzb)
z = z.squeeze(2)
# compute logdetJ
psi = w * self.der_h(wzb)
log_det_jacobian = torch.log(torch.abs(1 + torch.bmm(psi, u_hat)))
log_det_jacobian = log_det_jacobian.squeeze(2).squeeze(1)
return z, log_det_jacobian
class Sylvester(nn.Module):
"""
Sylvester normalizing flow.
"""
def __init__(self, num_ortho_vecs):
super(Sylvester, self).__init__()
self.num_ortho_vecs = num_ortho_vecs
self.h = nn.Tanh()
triu_mask = torch.triu(torch.ones(num_ortho_vecs, num_ortho_vecs), diagonal=1).unsqueeze(0)
diag_idx = torch.arange(0, num_ortho_vecs).long()
self.register_buffer('triu_mask', Variable(triu_mask))
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x)**2
def _forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
"""
All flow parameters are amortized. Conditions on diagonals of R1 and R2 for invertibility need to be satisfied
outside of this function. Computes the following transformation:
z' = z + QR1 h( R2Q^T z + b)
or actually
z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
:param zk: shape: (batch_size, z_size)
:param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
:param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
:param q_ortho: shape (batch_size, z_size , num_ortho_vecs)
:param b: shape: (batch_size, 1, self.z_size)
:return: z, log_det_j
"""
# Amortized flow parameters
zk = zk.unsqueeze(1)
# Save diagonals for log_det_j
diag_r1 = r1[:, self.diag_idx, self.diag_idx]
diag_r2 = r2[:, self.diag_idx, self.diag_idx]
r1_hat = r1
r2_hat = r2
qr2 = torch.bmm(q_ortho, r2_hat.transpose(2, 1))
qr1 = torch.bmm(q_ortho, r1_hat)
r2qzb = torch.bmm(zk, qr2) + b
z = torch.bmm(self.h(r2qzb), qr1.transpose(2, 1)) + zk
z = z.squeeze(1)
# Compute log|det J|
# Output log_det_j in shape (batch_size) instead of (batch_size,1)
diag_j = diag_r1 * diag_r2
diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
diag_j += 1.
log_diag_j = diag_j.abs().log()
if sum_ldj:
log_det_j = log_diag_j.sum(-1)
else:
log_det_j = log_diag_j
return z, log_det_j
def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
return self._forward(zk, r1, r2, q_ortho, b, sum_ldj)
class TriangularSylvester(nn.Module):
"""
Sylvester normalizing flow with Q=P or Q=I.
"""
def __init__(self, z_size):
super(TriangularSylvester, self).__init__()
self.z_size = z_size
self.h = nn.Tanh()
diag_idx = torch.arange(0, z_size).long()
self.register_buffer('diag_idx', diag_idx)
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x)**2
def _forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True):
"""
        All flow parameters are amortized. Conditions on diagonals of R1 and R2 need to be satisfied
outside of this function.
Computes the following transformation:
z' = z + QR1 h( R2Q^T z + b)
or actually
z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
with Q = P a permutation matrix (equal to identity matrix if permute_z=None)
:param zk: shape: (batch_size, z_size)
:param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
:param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
:param b: shape: (batch_size, 1, self.z_size)
:return: z, log_det_j
"""
# Amortized flow parameters
zk = zk.unsqueeze(1)
# Save diagonals for log_det_j
diag_r1 = r1[:, self.diag_idx, self.diag_idx]
diag_r2 = r2[:, self.diag_idx, self.diag_idx]
if permute_z is not None:
# permute order of z
z_per = zk[:, :, permute_z]
else:
z_per = zk
r2qzb = torch.bmm(z_per, r2.transpose(2, 1)) + b
z = torch.bmm(self.h(r2qzb), r1.transpose(2, 1))
if permute_z is not None:
            # permute order of z back again
z = z[:, :, permute_z]
z += zk
z = z.squeeze(1)
# Compute log|det J|
# Output log_det_j in shape (batch_size) instead of (batch_size,1)
diag_j = diag_r1 * diag_r2
diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
diag_j += 1.
log_diag_j = diag_j.abs().log()
if sum_ldj:
log_det_j = log_diag_j.sum(-1)
else:
log_det_j = log_diag_j
return z, log_det_j
def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
return self._forward(zk, r1, r2, q_ortho, b, sum_ldj)
class IAF(nn.Module):
"""
PyTorch implementation of inverse autoregressive flows as presented in
"Improving Variational Inference with Inverse Autoregressive Flow" by Diederik P. Kingma, Tim Salimans,
Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling.
Inverse Autoregressive Flow with either MADE MLPs or Pixel CNNs. Contains several flows. Each transformation
takes as an input the previous stochastic z, and a context h. The structure of each flow is then as follows:
z <- autoregressive_layer(z) + h, allow for diagonal connections
z <- autoregressive_layer(z), allow for diagonal connections
:
z <- autoregressive_layer(z), do not allow for diagonal connections.
Note that the size of h needs to be the same as h_size, which is the width of the MADE layers.
"""
def __init__(self, z_size, num_flows=2, num_hidden=0, h_size=50, forget_bias=1., conv2d=False):
super(IAF, self).__init__()
self.z_size = z_size
self.num_flows = num_flows
self.num_hidden = num_hidden
self.h_size = h_size
self.conv2d = conv2d
if not conv2d:
ar_layer = MaskedLinear
else:
ar_layer = MaskedConv2d
self.activation = torch.nn.ELU
# self.activation = torch.nn.ReLU
self.forget_bias = forget_bias
self.flows = []
self.param_list = []
# For reordering z after each flow
flip_idx = torch.arange(self.z_size - 1, -1, -1).long()
self.register_buffer('flip_idx', flip_idx)
for k in range(num_flows):
arch_z = [ar_layer(z_size, h_size), self.activation()]
self.param_list += list(arch_z[0].parameters())
z_feats = torch.nn.Sequential(*arch_z)
arch_zh = []
for j in range(num_hidden):
arch_zh += [ar_layer(h_size, h_size), self.activation()]
self.param_list += list(arch_zh[-2].parameters())
zh_feats = torch.nn.Sequential(*arch_zh)
linear_mean = ar_layer(h_size, z_size, diagonal_zeros=True)
linear_std = ar_layer(h_size, z_size, diagonal_zeros=True)
self.param_list += list(linear_mean.parameters())
self.param_list += list(linear_std.parameters())
if torch.cuda.is_available():
z_feats = z_feats.cuda()
zh_feats = zh_feats.cuda()
linear_mean = linear_mean.cuda()
linear_std = linear_std.cuda()
self.flows.append((z_feats, zh_feats, linear_mean, linear_std))
self.param_list = torch.nn.ParameterList(self.param_list)
def forward(self, z, h_context):
logdets = 0.
for i, flow in enumerate(self.flows):
if (i + 1) % 2 == 0 and not self.conv2d:
# reverse ordering to help mixing
z = z[:, self.flip_idx]
h = flow[0](z)
h = h + h_context
h = flow[1](h)
mean = flow[2](h)
gate = F.sigmoid(flow[3](h) + self.forget_bias)
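            # Gated residual update z <- g * z + (1 - g) * m has a triangular Jacobian
            # with the gate on its diagonal, hence log|det J| = sum_i log g_i below.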
z = gate * z + (1 - gate) * mean
logdets += torch.sum(gate.log().view(gate.size(0), -1), 1)
return z, logdets
class MMAF(nn.Module):
def __init__(self, z_size, num_flows=2, num_hidden=0, h_size=50, device='cpu', args=None):
super(MMAF, self).__init__()
self.model = UMNNMAFFlow(nb_flow=num_flows, nb_in=z_size, hidden_derivative=args.hidden_derivative,
hidden_embedding=args.hidden_embedding, embedding_s=args.embedding_size,
nb_steps=args.steps, solver=args.solver, cond_in=h_size, device=device)
self.z_size = z_size
self.num_flows = num_flows
self.num_hidden = num_hidden
self.h_size = h_size
self.steps = args.steps
def forward(self, z, h_context):
if self.steps == 0:
nb_steps = np.random.randint(10, 50) * 2
self.model.set_steps_nb(nb_steps)
return self.model.compute_log_jac_bis(z, h_context)
def forceLipshitz(self, L=1.5):
if L > 0:
self.model.forceLipshitz(L)
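if __name__ == "__main__":
    # Illustrative sketch (not part of the original library): one planar-flow step on
    # random amortized parameters. Run from the package context (relative imports).
    torch.manual_seed(0)
    _flow = Planar()
    _z = torch.randn(4, 6)               # (batch_size, z_size)
    _u = torch.randn(4, 6, 1)            # (batch_size, z_size, 1)
    _w = torch.randn(4, 1, 6)            # (batch_size, 1, z_size)
    _b = torch.randn(4, 1, 1)            # (batch_size, 1, 1)
    _z_k, _log_det_j = _flow(_z, _u, _w, _b)
    print(_z_k.shape, _log_det_j.shape)  # torch.Size([4, 6]) torch.Size([4])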
| 10,990 | 32.306061 | 118 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/optimization/loss.py | from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
from ...vae_lib.utils.distributions import log_normal_diag, log_normal_standard, log_bernoulli
import torch.nn.functional as F
def binary_loss_function(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.):
"""
Computes the binary loss function while summing over batch dimension, not averaged!
:param recon_x: shape: (batch_size, num_channels, pixel_width, pixel_height), bernoulli parameters p(x=1)
    :param x: shape (batch_size, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].
:param z_mu: mean of z_0
:param z_var: variance of z_0
:param z_0: first stochastic latent variable
:param z_k: last stochastic latent variable
:param ldj: log det jacobian
:param beta: beta for kl loss
:return: loss, ce, kl
"""
reconstruction_function = nn.BCELoss(size_average=False)
batch_size = x.size(0)
# - N E_q0 [ ln p(x|z_k) ]
bce = reconstruction_function(recon_x, x)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# N E_q0[ ln q(z_0) - ln p(z_k) ]
summed_logs = torch.sum(log_q_z0 - log_p_zk)
# sum over batches
summed_ldj = torch.sum(ldj)
# ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]
kl = (summed_logs - summed_ldj)
loss = bce + beta * (summed_logs - summed_ldj)
loss /= float(batch_size)
bce /= float(batch_size)
kl /= float(batch_size)
return loss, bce, kl
def multinomial_loss_function(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
Computes the cross entropy loss function while summing over batch dimension, not averaged!
:param x_logit: shape: (batch_size, num_classes * num_channels, pixel_width, pixel_height), real valued logits
    :param x: shape (batch_size, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].
:param z_mu: mean of z_0
:param z_var: variance of z_0
:param z_0: first stochastic latent variable
:param z_k: last stochastic latent variable
:param ldj: log det jacobian
:param args: global parameter settings
:param beta: beta for kl loss
:return: loss, ce, kl
"""
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
# make integer class labels
target = (x * (num_classes - 1)).long()
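    # e.g. a rescaled pixel value of 1.0 maps to class 255 and 0.0 to class 0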
# - N E_q0 [ ln p(x|z_k) ]
# sums over batch dimension (and feature dimension)
ce = cross_entropy(x_logit, target, size_average=False)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# N E_q0[ ln q(z_0) - ln p(z_k) ]
summed_logs = torch.sum(log_q_z0 - log_p_zk)
# sum over batches
summed_ldj = torch.sum(ldj)
# ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]
kl = (summed_logs - summed_ldj)
loss = ce + beta * (summed_logs - summed_ldj)
loss /= float(batch_size)
ce /= float(batch_size)
kl /= float(batch_size)
return loss, ce, kl
def binary_loss_array(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.):
"""
Computes the binary loss without averaging or summing over the batch dimension.
"""
batch_size = x.size(0)
# if not summed over batch_dimension
if len(ldj.size()) > 1:
ldj = ldj.view(ldj.size(0), -1).sum(-1)
# TODO: upgrade to newest pytorch version on master branch, there the nn.BCELoss comes with the option
# reduce, which when set to False, does no sum over batch dimension.
bce = -log_bernoulli(x.view(batch_size, -1), recon_x.view(batch_size, -1), dim=1)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# ln q(z_0) - ln p(z_k) ]
logs = log_q_z0 - log_p_zk
loss = bce + beta * (logs - ldj)
return loss
def multinomial_loss_array(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
    Computes the multinomial (cross-entropy) loss without averaging or summing over the batch dimension.
"""
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
# make integer class labels
target = (x * (num_classes - 1)).long()
# - N E_q0 [ ln p(x|z_k) ]
# computes cross entropy over all dimensions separately:
ce = cross_entropy(x_logit, target, size_average=False, reduce=False)
# sum over feature dimension
ce = ce.view(batch_size, -1).sum(dim=1)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k.view(batch_size, -1), dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(
z_0.view(batch_size, -1), mean=z_mu.view(batch_size, -1), log_var=z_var.log().view(batch_size, -1), dim=1
)
# ln q(z_0) - ln p(z_k) ]
logs = log_q_z0 - log_p_zk
loss = ce + beta * (logs - ldj)
return loss
def cross_entropy(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
r"""
Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes
instead of only (N, C, d_1, d_2) or (N, C).
This criterion combines `log_softmax` and `nll_loss` in a single
function.
See :class:`~torch.nn.CrossEntropyLoss` for details.
Args:
input: Variable :math:`(N, C)` where `C = number of classes`
target: Variable :math:`(N)` where each value is
`0 <= targets[i] <= C-1`
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
sizeAverage is set to False, the losses are instead summed
for each minibatch. Ignored if reduce is False. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When size_average is
True, the loss is averaged over non-ignored targets. Default: -100
reduce (bool, optional): By default, the losses are averaged or summed over
observations for each minibatch depending on size_average. When reduce
is False, returns a loss per batch element instead and ignores
size_average. Default: ``True``
"""
return nll_loss(F.log_softmax(input, 1), target, weight, size_average, ignore_index, reduce)
def nll_loss(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
r"""
Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes
instead of only (N, C, d_1, d_2) or (N, C).
The negative log likelihood loss.
See :class:`~torch.nn.NLLLoss` for details.
Args:
input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K > 1`
in the case of K-dimensional loss.
target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`,
or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K >= 1` for
K-dimensional loss.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. If size_average
is False, the losses are summed for each minibatch. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When size_average is
True, the loss is averaged over non-ignored targets. Default: -100
"""
dim = input.dim()
    if dim == 2 or dim == 4:
        return F.nll_loss(
            input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
        )
elif dim == 3 or dim > 4:
n = input.size(0)
c = input.size(1)
out_size = (n,) + input.size()[2:]
        if target.size()[1:] != input.size()[2:]:
            raise ValueError('Expected target size {}, got {}'.format(out_size, target.size()))
input = input.contiguous().view(n, c, 1, -1)
target = target.contiguous().view(n, 1, -1)
if reduce:
_loss = nn.NLLLoss2d(weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
return _loss(input, target)
out = F.nll_loss(
input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
)
return out.view(out_size)
else:
raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))
def calculate_loss(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
Picks the correct loss depending on the input type.
"""
if args.input_type == 'binary':
loss, rec, kl = binary_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, beta=beta)
bpd = 0.
elif args.input_type == 'multinomial':
loss, rec, kl = multinomial_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=beta)
bpd = loss.data.item() / (np.prod(args.input_size) * np.log(2.))
else:
raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type)
return loss, rec, kl, bpd
def calculate_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args):
"""
Picks the correct loss depending on the input type.
"""
if args.input_type == 'binary':
loss = binary_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj)
elif args.input_type == 'multinomial':
loss = multinomial_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args)
else:
raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type)
return loss
| 10,621 | 38.051471 | 116 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/optimization/training.py | from __future__ import print_function
import time
import torch
from ...vae_lib.optimization.loss import calculate_loss
from ...vae_lib.utils.visual_evaluation import plot_reconstructions
from ...vae_lib.utils.log_likelihood import calculate_likelihood
import numpy as np
def train(epoch, train_loader, model, opt, args, logger):
model.train()
train_loss = np.zeros(len(train_loader))
train_bpd = np.zeros(len(train_loader))
num_data = 0
# set warmup coefficient
beta = min([(epoch * 1.) / max([args.warmup, 1.]), args.max_beta])
logger.info('beta = {:5.4f}'.format(beta))
end = time.time()
for batch_idx, (data, _) in enumerate(train_loader):
if args.cuda:
data = data.cuda()
if args.dynamic_binarization:
data = torch.bernoulli(data)
data = data.view(-1, *args.input_size)
opt.zero_grad()
x_mean, z_mu, z_var, ldj, z0, zk = model(data)
loss, rec, kl, bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args, beta=beta)
loss.backward()
train_loss[batch_idx] = loss.item()
train_bpd[batch_idx] = bpd
opt.step()
if 'MMAF' in args.flow:
if args.Lipshitz > 0:
model.forceLipshitz(args.Lipshitz)
rec = rec.item()
kl = kl.item()
num_data += len(data)
batch_time = time.time() - end
end = time.time()
if batch_idx % args.log_interval == 0:
if args.input_type == 'binary':
perc = 100. * batch_idx / len(train_loader)
log_msg = (
'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | '
'Rec {:11.6f} | KL {:11.6f}'.format(
epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), rec, kl
)
)
else:
perc = 100. * batch_idx / len(train_loader)
                tmp = 'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | Bits/dim {:8.6f}'
                log_msg = tmp.format(epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(),
                                     bpd) + '\trec: {:11.3f}\tkl: {:11.6f}'.format(rec, kl)
            # NFE logging for CNF flows is omitted: f_nfe and b_nfe are never
            # computed in this training loop, so referencing them raised a NameError.
logger.info(log_msg)
if args.input_type == 'binary':
logger.info('====> Epoch: {:3d} Average train loss: {:.4f}'.format(epoch, train_loss.sum() / len(train_loader)))
else:
logger.info(
'====> Epoch: {:3d} Average train loss: {:.4f}, average bpd: {:.4f}'.
format(epoch, train_loss.sum() / len(train_loader), train_bpd.sum() / len(train_loader))
)
return train_loss
def evaluate(data_loader, model, args, logger, testing=False, epoch=0):
model.eval()
if 'MMAF' in args.flow:
prev_steps = model.flow.steps
model.flow.steps = 100
model.flow.model.set_steps_nb(100)
loss = 0.
batch_idx = 0
bpd = 0.
if args.input_type == 'binary':
loss_type = 'elbo'
else:
loss_type = 'bpd'
    # CNF-specific setup removed: override_divergence_fn is neither defined nor
    # imported in this file, so calling it would raise a NameError.
for data, _ in data_loader:
batch_idx += 1
if args.cuda:
data = data.cuda()
with torch.no_grad():
data = data.view(-1, *args.input_size)
x_mean, z_mu, z_var, ldj, z0, zk = model(data)
batch_loss, rec, kl, batch_bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args)
bpd += batch_bpd
loss += batch_loss.item()
# PRINT RECONSTRUCTIONS
if batch_idx == 1 and testing is False:
plot_reconstructions(data, x_mean, batch_loss, loss_type, epoch, args)
loss /= len(data_loader)
bpd /= len(data_loader)
if testing:
logger.info('====> Test set loss: {:.4f}'.format(loss))
# Compute log-likelihood
if testing and not ("cnf" in args.flow): # don't compute log-likelihood for cnf models
with torch.no_grad():
test_data = data_loader.dataset.tensors[0]
if args.cuda:
test_data = test_data.cuda()
logger.info('Computing log-likelihood on test set')
model.eval()
if args.dataset == 'caltech':
log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, S=2000, MB=500)
else:
log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, S=5000, MB=500)
else:
log_likelihood = None
nll_bpd = None
if args.input_type in ['multinomial']:
bpd = loss / (np.prod(args.input_size) * np.log(2.))
if testing and not ("cnf" in args.flow):
logger.info('====> Test set log-likelihood: {:.4f}'.format(log_likelihood))
if args.input_type != 'binary':
logger.info('====> Test set bpd (elbo): {:.4f}'.format(bpd))
logger.info(
'====> Test set bpd (log-likelihood): {:.4f}'.
format(log_likelihood / (np.prod(args.input_size) * np.log(2.)))
)
if 'MMAF' in args.flow:
model.flow.steps = prev_steps
if not testing:
return loss, bpd
else:
return log_likelihood, nll_bpd
| 5,533 | 30.443182 | 120 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/distributions.py | from __future__ import print_function
import torch
import torch.utils.data
import math
MIN_EPSILON = 1e-5
MAX_EPSILON = 1. - 1e-5
PI = torch.FloatTensor([math.pi])
if torch.cuda.is_available():
PI = PI.cuda()
# N(x | mu, var) = 1/sqrt{2pi var} exp[-1/(2 var) (x-mean)(x-mean)]
# log N(x| mu, var) = -log sqrt(2pi) -0.5 log var - 0.5 (x-mean)(x-mean)/var
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -(x - mean) * (x - mean)
log_norm *= torch.reciprocal(2. * log_var.exp())
log_norm += -0.5 * log_var
log_norm += -0.5 * torch.log(2. * PI)
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_standard(x, average=False, reduce=True, dim=None):
log_norm = -0.5 * x * x
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_bernoulli(x, mean, average=False, reduce=True, dim=None):
probs = torch.clamp(mean, min=MIN_EPSILON, max=MAX_EPSILON)
log_bern = x * torch.log(probs) + (1. - x) * torch.log(1. - probs)
if reduce:
if average:
return torch.mean(log_bern, dim)
else:
return torch.sum(log_bern, dim)
else:
return log_bern
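# Sanity-check sketch (added for illustration): the log-densities above omit
# the additive -0.5*log(2*pi) constant per dimension, which cancels in
# differences such as ln q(z_0) - ln p(z_k). They therefore agree with
# torch.distributions.Normal only up to that constant.
def _check_log_normal_diag():
    x = torch.randn(5, 3)
    mean, log_var = torch.zeros(5, 3), torch.zeros(5, 3)
    ours = log_normal_diag(x, mean, log_var, dim=1)
    ref = torch.distributions.Normal(mean, (0.5 * log_var).exp()).log_prob(x).sum(1)
    const = 0.5 * math.log(2 * math.pi) * x.shape[1]
    assert torch.allclose(ours, ref + const, atol=1e-5)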
| 1,768 | 25.80303 | 86 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/load_data.py | from __future__ import print_function
import torch
import torch.utils.data as data_utils
import pickle
from scipy.io import loadmat
import numpy as np
import os
def load_static_mnist(args, **kwargs):
"""
Dataloading function for static mnist. Outputs image data in vectorized form: each image is a vector of size 784
"""
args.dynamic_binarization = False
args.input_type = 'binary'
args.input_size = [1, 28, 28]
# start processing
def lines_to_np_array(lines):
return np.array([[int(i) for i in line.split()] for line in lines])
with open(os.path.join('datasets', 'data', 'binarized_mnist_train.amat')) as f:
lines = f.readlines()
x_train = lines_to_np_array(lines).astype('float32')
with open(os.path.join('datasets', 'data', 'binarized_mnist_valid.amat')) as f:
lines = f.readlines()
x_val = lines_to_np_array(lines).astype('float32')
with open(os.path.join('datasets', 'data', 'binarized_mnist_test.amat')) as f:
lines = f.readlines()
x_test = lines_to_np_array(lines).astype('float32')
# shuffle train data
np.random.shuffle(x_train)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_freyfaces(args, **kwargs):
# set args
args.input_size = [1, 28, 20]
args.input_type = 'multinomial'
args.dynamic_binarization = False
TRAIN = 1565
VAL = 200
TEST = 200
# start processing
with open('data/Freyfaces/freyfaces.pkl', 'rb') as f:
data = pickle.load(f, encoding="latin1")[0]
data = data / 255.
    # NOTE: shuffling is done before splitting into train and test set, so the
    # split is determined by args.freyseed -- a different seed yields a different test set!
    # shuffle data:
    np.random.seed(args.freyseed)
np.random.shuffle(data)
# train images
x_train = data[0:TRAIN].reshape(-1, 28 * 20)
# validation images
x_val = data[TRAIN:(TRAIN + VAL)].reshape(-1, 28 * 20)
# test images
x_test = data[(TRAIN + VAL):(TRAIN + VAL + TEST)].reshape(-1, 28 * 20)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_omniglot(args, **kwargs):
n_validation = 1345
# set args
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = True
# start processing
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F')
omni_raw = loadmat(os.path.join('data', 'OMNIGLOT', 'chardata.mat'))
# train and test data
train_data = reshape_data(omni_raw['data'].T.astype('float32'))
x_test = reshape_data(omni_raw['testdata'].T.astype('float32'))
# shuffle train data
np.random.shuffle(train_data)
# set train and validation data
x_train = train_data[:-n_validation]
x_val = train_data[-n_validation:]
# binarize
if args.dynamic_binarization:
args.input_type = 'binary'
np.random.seed(777)
x_val = np.random.binomial(1, x_val)
x_test = np.random.binomial(1, x_test)
else:
args.input_type = 'gray'
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_caltech101silhouettes(args, **kwargs):
# set args
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = False
# start processing
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F')
caltech_raw = loadmat(os.path.join('data', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat'))
# train, validation and test data
x_train = 1. - reshape_data(caltech_raw['train_data'].astype('float32'))
np.random.shuffle(x_train)
x_val = 1. - reshape_data(caltech_raw['val_data'].astype('float32'))
np.random.shuffle(x_val)
x_test = 1. - reshape_data(caltech_raw['test_data'].astype('float32'))
y_train = caltech_raw['train_labels']
y_val = caltech_raw['val_labels']
y_test = caltech_raw['test_labels']
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_dataset(args, **kwargs):
if args.dataset == 'mnist':
train_loader, val_loader, test_loader, args = load_static_mnist(args, **kwargs)
elif args.dataset == 'caltech':
train_loader, val_loader, test_loader, args = load_caltech101silhouettes(args, **kwargs)
elif args.dataset == 'freyfaces':
train_loader, val_loader, test_loader, args = load_freyfaces(args, **kwargs)
elif args.dataset == 'omniglot':
train_loader, val_loader, test_loader, args = load_omniglot(args, **kwargs)
else:
        raise ValueError('Unknown dataset name: %s' % args.dataset)
return train_loader, val_loader, test_loader, args
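# Usage sketch (illustrative; assumes the dataset files were downloaded first,
# e.g. by download_datasets.py):
# from argparse import Namespace
# args = Namespace(dataset='mnist', batch_size=100)
# train_loader, val_loader, test_loader, args = load_dataset(args)
# # load_dataset also fills in args.input_size / input_type / dynamic_binarization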
| 7,580 | 35.800971 | 116 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/spectral_normalization.py | # Code from https://github.com/christiancosgrove/pytorch-spectral-normalization-gan/blob/master/spectral_normalization.py
import torch
from torch import nn
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
def joint_gaussian(n_samp=1000):
    # A banana-shaped joint: x2 ~ N(0, std=4), x1 | x2 ~ N(x2^2 / 4, std=1).
    # (.sample((n,)) replaces the deprecated .sample_n(n).)
    x2 = torch.distributions.Normal(0., 4.).sample((n_samp,))
    x1 = torch.distributions.Normal(0., 1.).sample((n_samp,)) + x2**2/4
    return torch.cat((x1.unsqueeze(1), x2.unsqueeze(1)), 1)
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1, factor=.8):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
self.factor = factor
if not self._made_params():
self._make_params()
#self._update_u_v()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, self.factor*w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + "_u", u)
self.module.register_parameter(self.name + "_v", v)
self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
#self._update_u_v()
return self.module.forward(*args)
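# Usage sketch (illustrative, not part of the original file): wrapping a layer
# registers weight_u / weight_v / weight_bar. Note that forward() above does
# not call _update_u_v() (the call is commented out), so the weight must be
# (re)materialized explicitly before the wrapped module is used.
def _example_spectral_norm():
    layer = SpectralNorm(nn.Linear(64, 64), power_iterations=1, factor=.8)
    layer._update_u_v()  # sets module.weight to factor * W / sigma_max(W)
    return layer(torch.randn(8, 64))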
| 2,536 | 32.381579 | 121 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/made.py | """
Implements Masked AutoEncoder for Density Estimation, by Germain et al. 2015
Re-implementation by Andrej Karpathy based on https://arxiv.org/abs/1502.03509
Modified by Antoine Wehenkel
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# ------------------------------------------------------------------------------
class MaskedLinear(nn.Linear):
""" same as Linear except has a configurable mask on the weights """
def __init__(self, in_features, out_features, bias=True):
super().__init__(in_features, out_features, bias)
self.register_buffer('mask', torch.ones(out_features, in_features))
def set_mask(self, mask):
self.mask.data.copy_(torch.from_numpy(mask.astype(np.uint8).T))
def forward(self, input):
return F.linear(input, self.mask * self.weight, self.bias)
class MADE(nn.Module):
def __init__(self, nin, hidden_sizes, nout, num_masks=1, natural_ordering=False, random=False, device="cpu"):
"""
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
will be all the means and the second nin will be stds. i.e. output dimensions depend on the
same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
the output of running the tests for this file makes this a bit more clear with examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use random permutations
"""
super().__init__()
self.random = random
self.nin = nin
self.nout = nout
self.device = device
self.pi = torch.tensor(math.pi).to(self.device)
self.hidden_sizes = hidden_sizes
assert self.nout % self.nin == 0, "nout must be integer multiple of nin"
# define a simple MLP neural net
self.net = []
hs = [nin] + hidden_sizes + [nout]
for h0,h1 in zip(hs, hs[1:]):
self.net.extend([
MaskedLinear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net = nn.Sequential(*self.net).to(device)
# seeds for orders/connectivities of the model ensemble
self.natural_ordering = natural_ordering
self.num_masks = num_masks
self.seed = 0 # for cycling through num_masks orderings
self.m = {}
self.update_masks() # builds the initial self.m connectivity
# note, we could also precompute the masks and cache them, but this
# could get memory expensive for large number of masks.
def update_masks(self):
if self.m and self.num_masks == 1: return # only a single seed, skip for efficiency
L = len(self.hidden_sizes)
# fetch the next seed and construct a random stream
rng = np.random.RandomState(self.seed)
self.seed = (self.seed + 1) % self.num_masks
# sample the order of the inputs and the connectivity of all neurons
if self.random:
self.m[-1] = np.arange(self.nin) if self.natural_ordering else rng.permutation(self.nin)
for l in range(L):
self.m[l] = rng.randint(self.m[l-1].min(), self.nin-1, size=self.hidden_sizes[l])
else:
self.m[-1] = np.arange(self.nin)
for l in range(L):
self.m[l] = np.array([self.nin - 1 - (i % self.nin) for i in range(self.hidden_sizes[l])])
# construct the mask matrices
masks = [self.m[l-1][:,None] <= self.m[l][None,:] for l in range(L)]
masks.append(self.m[L-1][:,None] < self.m[-1][None,:])
# handle the case where nout = nin * k, for integer k > 1
if self.nout > self.nin:
k = int(self.nout / self.nin)
# replicate the mask across the other outputs
masks[-1] = np.concatenate([masks[-1]]*k, axis=1)
# set the masks in all MaskedLinear layers
layers = [l for l in self.net.modules() if isinstance(l, MaskedLinear)]
for l,m in zip(layers, masks):
l.set_mask(m)
        # i_map is the inverse permutation of the input ordering m[-1]
self.i_map = self.m[-1].copy()
for k in range(len(self.m[-1])):
self.i_map[self.m[-1][k]] = k
def forward(self, x, context=None):
if self.nout == 2:
transf = self.net(x)
mu, sigma = transf[:, :self.nin], transf[:, self.nin:]
z = (x - mu) * torch.exp(-sigma)
return z
return self.net(x)
def compute_ll(self, x):
# Jac and x of MADE
transf = self.net(x)
mu, sigma = transf[:, :self.nin], transf[:, self.nin:]
z = (x - mu) * torch.exp(-sigma)
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = - sigma.sum(1) + log_prob_gauss
return ll, z
def invert(self, z):
if self.nin != self.nout/2:
return None
# We suppose a Gaussian MADE
        u = torch.zeros_like(z)  # zeros on the same device/dtype as z
for d in range(self.nin):
transf = self.forward(u)
mu, sigma = transf[:, self.i_map[d]], transf[:, self.nin + self.i_map[d]]
u[:, self.i_map[d]] = z[:, self.i_map[d]] * torch.exp(sigma) + mu
return u
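# Round-trip sketch (illustrative): for a Gaussian MADE (nout == 2 * nin),
# compute_ll() returns z = (x - mu) * exp(-sigma) and invert() reconstructs x
# one dimension at a time using the autoregressive structure.
def _example_made_roundtrip():
    made = MADE(nin=3, hidden_sizes=[32], nout=6, natural_ordering=True)
    x = torch.randn(5, 3)
    _, z = made.compute_ll(x)
    x_rec = made.invert(z)
    assert torch.allclose(x, x_rec, atol=1e-4)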
# ------------------------------------------------------------------------------
class ConditionnalMADE(MADE):
def __init__(self, nin, cond_in, hidden_sizes, nout, num_masks=1, natural_ordering=False, random=False, device="cpu"):
"""
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
will be all the means and the second nin will be stds. i.e. output dimensions depend on the
same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
the output of running the tests for this file makes this a bit more clear with examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use random permutations
"""
super().__init__(nin + cond_in, hidden_sizes, nout, num_masks, natural_ordering, random, device)
self.nin_non_cond = nin
self.cond_in = cond_in
def forward(self, x, context):
out = super().forward(torch.cat((context, x), 1))
out = out.contiguous().view(x.shape[0], int(out.shape[1]/self.nin), self.nin)[:, :, self.cond_in:].contiguous().view(x.shape[0], -1)
return out
def computeLL(self, x, context):
# Jac and x of MADE
transf = self.net(torch.cat((context, x), 1))
transf = transf.contiguous().view(x.shape[0], int(transf.shape[1] / self.nin), self.nin)[:, :, self.cond_in:].contiguous().view(x.shape[0], -1)
mu, sigma = transf[:, :self.nin], transf[:, self.nin:]
z = (x - mu) * torch.exp(-sigma)
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = - sigma.sum(1) + log_prob_gauss
return ll, z
def invert(self, z, context):
if self.nin != self.nout / 2:
return None
# We suppose a Gaussian MADE
        u = torch.zeros_like(z)  # zeros on the same device/dtype as z
        for d in range(self.nin):
            transf = self.net(torch.cat((context, u), 1))
mu, sigma = transf[:, self.i_map[d]], transf[:, self.nin + self.i_map[d]]
u[:, self.i_map[d]] = z[:, self.i_map[d]] * torch.exp(sigma) + mu
return u
if __name__ == '__main__':
from torch.autograd import Variable
# run a quick and dirty test for the autoregressive property
D = 10
rng = np.random.RandomState(14)
x = (rng.rand(1, D) > 0.5).astype(np.float32)
configs = [
(D, [], D, False), # test various hidden sizes
(D, [200], D, False),
(D, [200, 220], D, False),
(D, [200, 220, 230], D, False),
(D, [200, 220], D, True), # natural ordering test
(D, [200, 220], 2*D, True), # test nout > nin
(D, [200, 220], 3*D, False), # test nout > nin
]
for nin, hiddens, nout, natural_ordering in configs:
print("checking nin %d, hiddens %s, nout %d, natural %s" %
(nin, hiddens, nout, natural_ordering))
model = MADE(nin, hiddens, nout, natural_ordering=natural_ordering)
        z = torch.randn(1, nin)
        model.invert(z)
        # NOTE: this 'continue' skips the autoregressive dependency check below,
        # so only the inversion path is exercised.
        continue
# run backpropagation for each dimension to compute what other
# dimensions it depends on.
res = []
for k in range(nout):
xtr = Variable(torch.from_numpy(x), requires_grad=True)
xtrhat = model(xtr)
loss = xtrhat[0,k]
loss.backward()
depends = (xtr.grad[0].numpy() != 0).astype(np.uint8)
depends_ix = list(np.where(depends)[0])
isok = k % nin not in depends_ix
res.append((len(depends_ix), k, depends_ix, isok))
# pretty print the dependencies
res.sort()
for nl, k, ix, isok in res:
print("output %2d depends on inputs: %30s : %s" % (k, ix, "OK" if isok else "NOTOK"))
| 9,945 | 40.26971 | 151 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/ParallelNeuralIntegral.py | import torch
import numpy as np
import math
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def compute_cc_weights(nb_steps):
lam = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
lam = np.cos((lam @ lam.T) * math.pi / nb_steps)
lam[:, 0] = .5
lam[:, -1] = .5 * lam[:, -1]
lam = lam * 2 / nb_steps
W = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
W[np.arange(1, nb_steps + 1, 2)] = 0
W = 2 / (1 - W ** 2)
W[0] = 1
W[np.arange(1, nb_steps + 1, 2)] = 0
cc_weights = torch.tensor(lam.T @ W).float()
steps = torch.tensor(np.cos(np.arange(0, nb_steps + 1, 1).reshape(-1, 1) * math.pi / nb_steps)).float()
return cc_weights, steps
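# Background note (added for clarity): the weights and nodes above implement
# Clenshaw-Curtis quadrature on [-1, 1],
#     int_{-1}^{1} f(t) dt ~= sum_{i=0}^{N} w_i * f(cos(i * pi / N)),
# which integrate() below rescales to an arbitrary interval [x0, xT]. The
# Chebyshev nodes cluster near the endpoints of the interval.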
tensor_cache = {}
def integrate(x0, nb_steps, step_sizes, integrand, h, compute_grad=False, x_tot=None):
#Clenshaw-Curtis Quadrature Method
if tensor_cache.get(nb_steps) is None:
cc_weights, steps = compute_cc_weights(nb_steps)
device = x0.get_device() if x0.is_cuda else "cpu"
cc_weights, steps = cc_weights.to(device), steps.to(device)
tensor_cache[nb_steps] = (cc_weights, steps)
cc_weights, steps = tensor_cache[nb_steps]
xT = x0 + nb_steps*step_sizes
if not compute_grad:
x0_t = x0.unsqueeze(1).expand(-1, nb_steps + 1, -1)
xT_t = xT.unsqueeze(1).expand(-1, nb_steps + 1, -1)
h_steps = h.unsqueeze(1).expand(-1, nb_steps + 1, -1)
steps_t = steps.unsqueeze(0).expand(x0_t.shape[0], -1, x0_t.shape[2])
X_steps = x0_t + (xT_t-x0_t)*(steps_t + 1)/2
X_steps = X_steps.contiguous().view(-1, x0_t.shape[2])
h_steps = h_steps.contiguous().view(-1, h.shape[1])
dzs = integrand(X_steps, h_steps)
dzs = dzs.view(xT_t.shape[0], nb_steps+1, -1)
dzs = dzs*cc_weights.unsqueeze(0).expand(dzs.shape)
z_est = dzs.sum(1)
return z_est*(xT - x0)/2
else:
x0_t = x0.unsqueeze(1).expand(-1, nb_steps + 1, -1)
xT_t = xT.unsqueeze(1).expand(-1, nb_steps + 1, -1)
x_tot = x_tot * (xT - x0) / 2
x_tot_steps = x_tot.unsqueeze(1).expand(-1, nb_steps + 1, -1) * cc_weights.unsqueeze(0).expand(x_tot.shape[0], -1, x_tot.shape[1])
h_steps = h.unsqueeze(1).expand(-1, nb_steps + 1, -1)
steps_t = steps.unsqueeze(0).expand(x0_t.shape[0], -1, x0_t.shape[2])
X_steps = x0_t + (xT_t - x0_t) * (steps_t + 1) / 2
X_steps = X_steps.contiguous().view(-1, x0_t.shape[2])
h_steps = h_steps.contiguous().view(-1, h.shape[1])
x_tot_steps = x_tot_steps.contiguous().view(-1, x_tot.shape[1])
g_param, g_h = computeIntegrand(X_steps, h_steps, integrand, x_tot_steps, nb_steps+1)
return g_param, g_h
def computeIntegrand(x, h, integrand, x_tot, nb_steps):
h.requires_grad_(True)
with torch.enable_grad():
f = integrand.forward(x, h)
g_param = _flatten(torch.autograd.grad(f, integrand.parameters(), x_tot, create_graph=True, retain_graph=True))
g_h = _flatten(torch.autograd.grad(f, h, x_tot))
return g_param, g_h.view(int(x.shape[0]/nb_steps), nb_steps, -1).sum(1)
class ParallelNeuralIntegral(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x, integrand, flat_params, h, nb_steps=20):
with torch.no_grad():
x_tot = integrate(x0, nb_steps, (x - x0)/nb_steps, integrand, h, False)
# Save for backward
ctx.integrand = integrand
ctx.nb_steps = nb_steps
ctx.save_for_backward(x0.clone(), x.clone(), h)
return x_tot
@staticmethod
def backward(ctx, grad_output):
x0, x, h = ctx.saved_tensors
integrand = ctx.integrand
nb_steps = ctx.nb_steps
        integrand_grad, h_grad = integrate(x0, nb_steps, (x - x0)/nb_steps, integrand, h, True, grad_output)
x_grad = integrand(x, h)
x0_grad = integrand(x0, h)
# Leibniz formula
return -x0_grad*grad_output, x_grad*grad_output, None, integrand_grad, h_grad.view(h.shape), None
| 4,099 | 38.423077 | 138 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/MonotonicNN.py | import torch
import torch.nn as nn
from .NeuralIntegral import NeuralIntegral
from .ParallelNeuralIntegral import ParallelNeuralIntegral
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
class IntegrandNN(nn.Module):
def __init__(self, in_d, hidden_layers):
super(IntegrandNN, self).__init__()
self.net = []
hs = [in_d] + hidden_layers + [1]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net.append(nn.ELU())
self.net = nn.Sequential(*self.net)
def forward(self, x, h):
return self.net(torch.cat((x, h), 1)) + 1.
class MonotonicNN(nn.Module):
def __init__(self, in_d, hidden_layers, nb_steps=50, dev="cpu"):
super(MonotonicNN, self).__init__()
self.integrand = IntegrandNN(in_d, hidden_layers)
self.net = []
hs = [in_d-1] + hidden_layers + [2]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
# It will output the scaling and offset factors.
self.net = nn.Sequential(*self.net)
self.device = dev
self.nb_steps = nb_steps
    '''
    The forward procedure takes as input x, the variable over which the integral is computed,
    and h, the remaining (conditioning) variables.
    '''
def forward(self, x, h):
x0 = torch.zeros(x.shape).to(self.device)
out = self.net(h)
offset = out[:, [0]]
scaling = torch.exp(out[:, [1]])
return scaling*ParallelNeuralIntegral.apply(x0, x, self.integrand, _flatten(self.integrand.parameters()), h, self.nb_steps) + offset
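# Usage sketch (illustrative, not part of the original file). in_d counts x
# plus the conditioning variables, so in_d=2 means a single column for h.
def _example_monotonic_nn():
    model = MonotonicNN(2, [64, 64], nb_steps=50, dev="cpu")
    x = torch.linspace(-2., 2., 100).unsqueeze(1)
    h = torch.zeros(100, 1)
    y = model(x, h)  # non-decreasing in x, since the integrand is positive
    assert (y[1:] >= y[:-1] - 1e-5).all()
    return y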
| 1,957 | 34.6 | 145 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/NeuralIntegral.py | import torch
import numpy as np
import math
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def compute_cc_weights(nb_steps):
lam = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
lam = np.cos((lam @ lam.T) * math.pi / nb_steps)
lam[:, 0] = .5
lam[:, -1] = .5 * lam[:, -1]
lam = lam * 2 / nb_steps
W = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
W[np.arange(1, nb_steps + 1, 2)] = 0
W = 2 / (1 - W ** 2)
W[0] = 1
W[np.arange(1, nb_steps + 1, 2)] = 0
cc_weights = torch.tensor(lam.T @ W).float()
steps = torch.tensor(np.cos(np.arange(0, nb_steps + 1, 1).reshape(-1, 1) * math.pi / nb_steps)).float()
return cc_weights, steps
def integrate(x0, nb_steps, step_sizes, integrand, h, compute_grad=False, x_tot=None):
#Clenshaw-Curtis Quadrature Method
cc_weights, steps = compute_cc_weights(nb_steps)
device = x0.get_device() if x0.is_cuda else "cpu"
cc_weights, steps = cc_weights.to(device), steps.to(device)
if compute_grad:
g_param = 0.
g_h = 0.
else:
z = 0.
xT = x0 + nb_steps*step_sizes
for i in range(nb_steps + 1):
x = (x0 + (xT - x0)*(steps[i] + 1)/2)
if compute_grad:
dg_param, dg_h = computeIntegrand(x, h, integrand, x_tot*(xT - x0)/2)
g_param += cc_weights[i]*dg_param
g_h += cc_weights[i]*dg_h
else:
dz = integrand(x, h)
z = z + cc_weights[i]*dz
if compute_grad:
return g_param, g_h
return z*(xT - x0)/2
def computeIntegrand(x, h, integrand, x_tot):
with torch.enable_grad():
f = integrand.forward(x, h)
g_param = _flatten(torch.autograd.grad(f, integrand.parameters(), x_tot, create_graph=True, retain_graph=True))
g_h = _flatten(torch.autograd.grad(f, h, x_tot))
return g_param, g_h
class NeuralIntegral(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x, integrand, flat_params, h, nb_steps=20):
with torch.no_grad():
x_tot = integrate(x0, nb_steps, (x - x0)/nb_steps, integrand, h, False)
# Save for backward
ctx.integrand = integrand
ctx.nb_steps = nb_steps
ctx.save_for_backward(x0.clone(), x.clone(), h)
return x_tot
@staticmethod
def backward(ctx, grad_output):
x0, x, h = ctx.saved_tensors
integrand = ctx.integrand
nb_steps = ctx.nb_steps
        integrand_grad, h_grad = integrate(x0, nb_steps, (x - x0)/nb_steps, integrand, h, True, grad_output)
x_grad = integrand(x, h)
x0_grad = integrand(x0, h)
# Leibniz formula
return -x0_grad*grad_output, x_grad*grad_output, None, integrand_grad, h_grad.view(h.shape), None
| 2,840 | 31.284091 | 119 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/UMNNMAF.py | import torch
import torch.nn as nn
from .NeuralIntegral import NeuralIntegral
from .ParallelNeuralIntegral import ParallelNeuralIntegral
import numpy as np
import math
from .made import MADE, ConditionnalMADE
class ELUPlus(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, x):
return self.elu(x) + 1.
dict_act_func = {"Sigmoid": nn.Sigmoid(), "ELU": ELUPlus()}
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def compute_lipschitz_linear(W, nb_iter=10):
    # Power iteration on W^T W: estimates the spectral norm (largest singular
    # value) of W, i.e. the Lipschitz constant of the map x -> W x.
    x = torch.randn(W.shape[1], 1).to(W.device)
for i in range(nb_iter):
x_prev = x
x = W.transpose(0, 1) @ (W @ x_prev)
x = x/torch.norm(x)
lam = (torch.norm(W.transpose(0, 1) @ (W @ x))/torch.norm(x))**.5
return lam
class UMNNMAF(nn.Module):
def __init__(self, net, input_size, nb_steps=100, device="cpu", solver="CC"):
super().__init__()
self.net = net.to(device)
self.device = device
self.input_size = input_size
self.nb_steps = nb_steps
self.cc_weights = None
self.steps = None
self.solver = solver
self.register_buffer("pi", torch.tensor(math.pi))
# Scaling could be changed to be an autoregressive network output
self.scaling = nn.Parameter(torch.zeros(input_size, device=self.device), requires_grad=False)
def to(self, device):
self.device = device
super().to(device)
return self
def forward(self, x, method=None, x0=None, context=None):
x0 = x0.to(x.device) if x0 is not None else torch.zeros(x.shape).to(x.device)
xT = x
h = self.net.make_embeding(xT, context)
z0 = h.view(h.shape[0], -1, x.shape[1])[:, 0, :]
# s is a scaling factor.
s = torch.exp(self.scaling.unsqueeze(0).expand(x.shape[0], -1))
if self.solver == "CC":
z = NeuralIntegral.apply(x0, x, self.net.parallel_nets, _flatten(self.net.parallel_nets.parameters()),
h, self.nb_steps) + z0
elif self.solver == "CCParallel":
z = ParallelNeuralIntegral.apply(x0, x, self.net.parallel_nets, _flatten(self.net.parallel_nets.parameters()),
h, self.nb_steps) + z0
        else:
            raise ValueError("Unknown solver '%s' (expected 'CC' or 'CCParallel')" % self.solver)
return s*z
def compute_cc_weights(self, nb_steps):
lam = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
lam = np.cos((lam @ lam.T)*math.pi/nb_steps)
lam[:, 0] = .5
lam[:, -1] = .5*lam[:, -1]
lam = lam*2/nb_steps
W = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
W[np.arange(1, nb_steps + 1, 2)] = 0
W = 2/(1 - W**2)
W[0] = 1
W[np.arange(1, nb_steps + 1, 2)] = 0
self.cc_weights = torch.tensor(lam.T @ W).float().to(self.device)
self.steps = torch.tensor(np.cos(np.arange(0, nb_steps+1, 1).reshape(-1, 1) * math.pi/nb_steps)).float().to(self.device)
def compute_log_jac(self, x, context=None):
self.net.make_embeding(x, context)
jac = self.net.forward(x)
return torch.log(jac + 1e-10) + self.scaling.unsqueeze(0).expand(x.shape[0], -1)
def compute_log_jac_bis(self, x, context=None):
z = self.forward(x, context=context)
jac = self.net.forward(x)
return z, torch.log(jac + 1e-10) + self.scaling.unsqueeze(0).expand(x.shape[0], -1)
def compute_ll(self, x, context=None):
z = self.forward(x, context=context)
jac = self.net.forward(x)
z.clamp_(-10., 10.)
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = log_prob_gauss + torch.log(jac + 1e-10).sum(1) + self.scaling.unsqueeze(0).expand(x.shape[0], -1).sum(1)
return ll, z
def compute_ll_bis(self, x, context=None):
z = self.forward(x, context=context)
jac = self.net.forward(x)
ll = torch.log(jac + 1e-10) + self.scaling.unsqueeze(0).expand(x.shape[0], -1)
z.clamp_(-10., 10.)
return ll, z
def compute_bpp(self, x, alpha=1e-6, context=None):
d = x.shape[1]
        ll, z = self.compute_ll(x, context=context)
bpp = -ll/(d*np.log(2)) - np.log2(1 - 2*alpha) + 8 \
+ 1/d * (torch.log2(torch.sigmoid(x)) + torch.log2(1 - torch.sigmoid(x))).sum(1)
z.clamp_(-10., 10.)
return bpp, ll, z
def set_steps_nb(self, nb_steps):
self.nb_steps = nb_steps
    def compute_lipschitz(self, nb_iter=10):
        return self.net.parallel_nets.compute_lipschitz(nb_iter)
def force_lipschitz(self, L=1.5):
self.net.parallel_nets.force_lipschitz(L)
    # Inversion by a bracketed grid search: each of the `iter` refinements
    # narrows the search bracket (roughly a factor 100 in accuracy per iteration).
def invert(self, z, iter=10, context=None):
nb_step = 10
step = 1/(nb_step - 1)
x_range = (torch.ones(z.shape[0], nb_step) * torch.arange(0, 1 + step/2, step)).permute(1, 0).to(self.device)
z = z.unsqueeze(0).expand(nb_step, -1, -1)
x = z.clone()
x_inv = torch.zeros(z.shape[1], z.shape[2]).to(self.device)
left, right = -50*torch.ones(z.shape[1], z.shape[2]).to(self.device), torch.ones(z.shape[1], z.shape[2])\
.to(self.device)*50
s = torch.exp(self.scaling.unsqueeze(0).unsqueeze(1).expand(x.shape[0], x.shape[1], -1))
with torch.no_grad():
for j in range(self.input_size):
if j % 100 == 0:
print(j)
# Compute embedding and keep only the one related to x_j
h = self.net.make_embeding(x_inv, context)
offset = h.view(x_inv.shape[0], -1, x_inv.shape[1])[:, 0, [j]]
h_idx = torch.arange(j, h.shape[1], z.shape[2]).to(self.device)
h = h[:, h_idx]
h, offset = h.squeeze(1).unsqueeze(0).expand(nb_step, -1, -1), offset.unsqueeze(0).expand(nb_step, -1, -1)
x0 = torch.zeros(offset.shape).view(-1, 1).to(self.device)
derivative = lambda x, h: self.net.parallel_nets.independant_forward(torch.cat((x, h), 1))
for i in range(iter):
x[:, :, j] = x_range * (right[:, j] - left[:, j]) + left[:, j]
# if i == 0:
# print(right[:, j], left[:, j])
z_est = s[:, :, [j]]*(offset + ParallelNeuralIntegral.apply(x0, x[:, :, j].contiguous().view(-1, 1),
derivative, None,
h.contiguous().view(x0.shape[0], -1),
self.nb_steps).contiguous().view(nb_step, -1, 1))
_, z_pos = torch.abs(z_est[:, :, 0] - z[:, :, j]).min(0)
pos_midle = z_pos + torch.arange(0, z.shape[1]).to(self.device)*nb_step
z_val = z_est[:, :, 0].t().contiguous().view(-1)[pos_midle]
x_flat = x[:, :, j].t().contiguous().view(-1)
mask = (z_val < z[0, :, j]).float()
pos_left = pos_midle - 1
pos_right = (pos_midle + 1) % x_flat.shape[0]
left[:, j] = (mask * x_flat[pos_midle] + (1 - mask) * x_flat[pos_left])
right[:, j] = (mask * x_flat[pos_right] + (1 - mask) * x_flat[pos_midle])
x_inv[:, j] = x_flat[pos_midle]
return x_inv
class IntegrandNetwork(nn.Module):
def __init__(self, nnets, nin, hidden_sizes, nout, act_func='ELU', device="cpu"):
super().__init__()
self.nin = nin
self.nnets = nnets
self.nout = nout
self.hidden_sizes = hidden_sizes
self.device = device
# define a simple MLP neural net
self.net = []
hs = [nin] + hidden_sizes + [nout]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.LeakyReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net.append(dict_act_func[act_func])
self.net = nn.Sequential(*self.net)
self.masks = torch.eye(nnets).to(device)
def to(self, device):
self.device = device
self.net.to(device)
self.masks.to(device)
return self
def forward(self, x, h):
x = torch.cat((x, h), 1)
nb_batch, size_x = x.shape
x_he = x.view(nb_batch, -1, self.nnets).transpose(1, 2).contiguous().view(nb_batch*self.nnets, -1)
y = self.net(x_he).view(nb_batch, -1)
return y
def independant_forward(self, x):
return self.net(x)
def compute_lipschitz(self, nb_iter=10):
with torch.no_grad():
L = 1
for layer in self.net.modules():
if isinstance(layer, nn.Linear):
L *= compute_lipschitz_linear(layer.weight, nb_iter)
return L
def force_lipschitz(self, L=1.5):
with torch.no_grad():
for layer in self.net.modules():
if isinstance(layer, nn.Linear):
layer.weight /= max(compute_lipschitz_linear(layer.weight, 10)/L, 1)
class EmbeddingNetwork(nn.Module):
def __init__(self, in_d, hiddens_embedding=[50, 50, 50, 50], hiddens_integrand=[50, 50, 50, 50], out_made=1,
cond_in=0, act_func='ELU', device="cpu"):
super().__init__()
self.m_embeding = None
self.device = device
self.in_d = in_d
if cond_in > 0:
self.made = ConditionnalMADE(in_d, cond_in, hiddens_embedding, (in_d + cond_in) * (out_made), num_masks=1,
natural_ordering=True).to(device)
else:
self.made = MADE(in_d, hiddens_embedding, in_d * (out_made), num_masks=1, natural_ordering=True).to(device)
self.parallel_nets = IntegrandNetwork(in_d, 1 + out_made, hiddens_integrand, 1, act_func=act_func, device=device)
def to(self, device):
self.device = device
self.made.to(device)
self.parallel_nets.to(device)
return self
def make_embeding(self, x_made, context=None):
self.m_embeding = self.made.forward(x_made, context)
return self.m_embeding
def forward(self, x_t):
return self.parallel_nets.forward(x_t, self.m_embeding)
| 10,524 | 38.716981 | 129 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/UMNNMAFFlow.py | import torch
import torch.nn as nn
from .UMNNMAF import EmbeddingNetwork, UMNNMAF
import numpy as np
import math
class ListModule(object):
def __init__(self, module, prefix, *args):
"""
The ListModule class is a container for multiple nn.Module.
:nn.Module module: A module to add in the list
:string prefix:
:list of nn.module args: Other modules to add in the list
"""
self.module = module
self.prefix = prefix
self.num_module = 0
for new_module in args:
self.append(new_module)
def append(self, new_module):
if not isinstance(new_module, nn.Module):
raise ValueError('Not a Module')
else:
self.module.add_module(self.prefix + str(self.num_module), new_module)
self.num_module += 1
def __len__(self):
return self.num_module
def __getitem__(self, i):
if i < 0 or i >= self.num_module:
raise IndexError('Out of bound')
return getattr(self.module, self.prefix + str(i))
class UMNNMAFFlow(nn.Module):
def __init__(self, nb_flow=1, nb_in=1, hidden_derivative=[50, 50, 50, 50], hidden_embedding=[50, 50, 50, 50],
embedding_s=20, nb_steps=50, act_func='ELU', solver="CC", cond_in=0, device="cpu"):
"""
UMNNMAFFlow class is a normalizing flow made of UMNNMAF blocks.
:int nb_flow: The number of components in the flow
:int nb_in: The size of the input dimension (data)
:list(int) hidden_derivative: The size of hidden layers in the integrand networks
:list(int) hidden_embedding: The size of hidden layers in the embedding networks
:int embedding_s: The size of the embedding
:int nb_steps: The number of integration steps (0 for random)
:string solver: The solver (CC or CCParallel)
        :int cond_in: The size of the conditioning variable
:string device: The device (cpu or gpu)
"""
super().__init__()
self.device = device
self.register_buffer("pi", torch.tensor(math.pi))
self.nets = ListModule(self, "Flow")
for i in range(nb_flow):
auto_net = EmbeddingNetwork(nb_in, hidden_embedding, hidden_derivative, embedding_s, act_func=act_func,
device=device, cond_in=cond_in).to(device)
model = UMNNMAF(auto_net, nb_in, nb_steps, device, solver=solver).to(device)
self.nets.append(model)
def to(self, device):
for net in self.nets:
net.to(device)
self.device = device
super().to(device)
return self
def forward(self, x, context=None):
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
x = net.forward(x, context=context)[:, inv_idx]
return x[:, inv_idx]
def invert(self, z, iter=10, context=None):
"""
From image to domain.
:param z: A tensor of noise.
:param iter: The number of iteration (accuracy should be around 25/100**iter
:param context: Conditioning variable
:return: Domain value
"""
inv_idx = torch.arange(z.size(1) - 1, -1, -1).long()
z = z[:, inv_idx]
for net_i in range(len(self.nets)-1, -1, -1):
z = self.nets[net_i].invert(z[:, inv_idx], iter, context=context)
return z
def compute_log_jac(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
log_jac += net.compute_log_jac(x, context=context)
x = net.forward(x, context=context)[:, inv_idx]
return log_jac
def compute_log_jac_bis(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
x, l = net.compute_log_jac_bis(x, context=context)
x = x[:, inv_idx]
log_jac += l
return x[:, inv_idx], log_jac
def compute_ll(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
z = net.forward(x, context=context)[:, inv_idx]
log_jac += net.compute_log_jac(x, context=context)
x = z
z = z[:, inv_idx]
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = log_jac.sum(1) + log_prob_gauss
return ll, z
def compute_ll_bis(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
log_jac += net.compute_log_jac(x, context=context)
x = net.forward(x, context=context)[:, inv_idx]
z = x[:, inv_idx]
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2)
ll = log_jac + log_prob_gauss
return ll, z
def compute_bpp(self, x, alpha=1e-6, context=None):
d = x.shape[1]
ll, z = self.compute_ll(x, context=context)
bpp = -ll / (d * np.log(2)) - np.log2(1 - 2 * alpha) + 8 \
+ 1 / d * (torch.log2(torch.sigmoid(x)) + torch.log2(1 - torch.sigmoid(x))).sum(1)
return bpp, ll, z
def set_steps_nb(self, nb_steps):
for net in self.nets:
net.set_steps_nb(nb_steps)
def compute_lipschitz(self, nb_iter=10):
L = 1.
for net in self.nets:
L *= net.compute_lipschitz(nb_iter)
return L
def force_lipschitz(self, L=1.5):
for net in self.nets:
net.force_lipschitz(L)
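# Usage sketch (illustrative, not part of the original file): density
# estimation with a small flow on CPU.
def _example_umnn_maf_flow():
    flow = UMNNMAFFlow(nb_flow=2, nb_in=2, nb_steps=20, device="cpu")
    x = torch.randn(16, 2)
    ll, z = flow.compute_ll(x)  # per-sample log-likelihood and latent codes
    loss = -ll.mean()  # the usual maximum-likelihood training objective
    return loss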
| 5,656 | 36.217105 | 115 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/UMNN/datasets/download_datasets.py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 19 15:58:53 2017
@author: Chin-Wei
# some code adapted from https://github.com/yburda/iwae/blob/master/download_mnist.py
LSUN
https://github.com/fyu/lsun
"""
import urllib
import pickle
import os
import struct
import numpy as np
import gzip
import time
import urllib.request
savedir = 'datasets/data'
mnist = True
cifar10 = False
omniglot = False
maf = False
class Progbar(object):
def __init__(self, target, width=30, verbose=1):
'''
@param target: total number of steps expected
'''
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=[]):
'''
@param current: index of current step
@param values: list of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
'''
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if type(self.sum_values[k]) is list:
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
sys.stdout.write(info + "\n")
def add(self, n, values=[]):
self.update(self.seen_so_far + n, values)
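# Usage sketch (illustrative): report progress over 100 steps with a running
# average of a named metric.
def _example_progbar():
    bar = Progbar(100)
    for step in range(100):
        bar.update(step + 1, values=[('loss', 1. / (step + 1))])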
# mnist
def load_mnist_images_np(imgs_filename):
with open(imgs_filename, 'rb') as f:
f.seek(4)
nimages, rows, cols = struct.unpack('>iii', f.read(12))
dim = rows * cols
images = np.fromfile(f, dtype=np.dtype(np.ubyte))
images = (images / 255.0).astype('float32').reshape((nimages, dim))
return images
# cifar10
from six.moves.urllib.request import FancyURLopener
import tarfile
import sys
class ParanoidURLopener(FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
raise Exception('URL fetch failure on {}: {} -- {}'.format(url, errcode, errmsg))
def get_file(fname, origin, untar=False):
datadir_base = os.path.expanduser(os.path.join('~', '.keras'))
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, 'datasets')
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
if not os.path.exists(fpath):
print('Downloading data from', origin)
global progbar
progbar = None
def dl_progress(count, block_size, total_size):
global progbar
if progbar is None:
progbar = Progbar(total_size)
else:
progbar.update(count * block_size)
ParanoidURLopener().retrieve(origin, fpath, dl_progress)
progbar = None
if untar:
if not os.path.exists(untar_fpath):
print('Untaring file...')
tfile = tarfile.open(fpath, 'r:gz')
tfile.extractall(path=datadir)
tfile.close()
return untar_fpath
return fpath
def load_batch(fpath, label_key='labels'):
    f = open(fpath, 'rb')
    if sys.version_info < (3,):
        d = pickle.load(f)
    else:
        d = pickle.load(f, encoding="bytes")
        # decode utf8: pickles written by python2 yield bytes keys under python3
        d = {k.decode('utf8') if isinstance(k, bytes) else k: v for k, v in d.items()}
    f.close()
    data = d["data"]
    labels = d[label_key]
    data = data.reshape(data.shape[0], 3, 32, 32)
    return data, labels
def load_cifar10():
dirname = "cifar-10-batches-py"
origin = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
path = get_file(dirname, origin=origin, untar=True)
print(path)
nb_train_samples = 50000
X_train = np.zeros((nb_train_samples, 3, 32, 32), dtype="uint8")
y_train = np.zeros((nb_train_samples,), dtype="uint8")
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
print(fpath)
data, labels = load_batch(fpath)
X_train[(i - 1) * 10000:i * 10000, :, :, :] = data
y_train[(i - 1) * 10000:i * 10000] = labels
fpath = os.path.join(path, 'test_batch')
X_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
return (X_train, y_train), (X_test, y_test)
if __name__ == '__main__':
if not os.path.exists(savedir):
os.makedirs(savedir)
if mnist:
print('dynamically binarized mnist')
mnist_filenames = ['train-images-idx3-ubyte', 't10k-images-idx3-ubyte']
for filename in mnist_filenames:
local_filename = os.path.join(savedir, filename)
urllib.request.urlretrieve("http://yann.lecun.com/exdb/mnist/{}.gz".format(filename),
local_filename + '.gz')
with gzip.open(local_filename + '.gz', 'rb') as f:
file_content = f.read()
with open(local_filename, 'wb') as f:
f.write(file_content)
np.savetxt(local_filename, load_mnist_images_np(local_filename))
os.remove(local_filename + '.gz')
print('statically binarized mnist')
subdatasets = ['train', 'valid', 'test']
for subdataset in subdatasets:
filename = 'binarized_mnist_{}.amat'.format(subdataset)
url = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_{}.amat'.format(
subdataset)
local_filename = os.path.join(savedir, filename)
urllib.request.urlretrieve(url, local_filename)
if cifar10:
(X_train, y_train), (X_test, y_test) = load_cifar10()
pickle.dump((X_train, y_train, X_test, y_test),
open('{}/cifar10.pkl'.format(savedir), 'w'))
if omniglot:
url = 'https://github.com/yburda/iwae/raw/master/datasets/OMNIGLOT/chardata.mat'
filename = 'omniglot.amat'
local_filename = os.path.join(savedir, filename)
urllib.request.urlretrieve(url, local_filename)
if maf:
savedir = 'datasets'
url = 'https://zenodo.org/record/1161203/files/data.tar.gz'
local_filename = os.path.join(savedir, 'data.tar.gz')
urllib.request.urlretrieve(url, local_filename)
tar = tarfile.open(local_filename, "r:gz")
tar.extractall(savedir)
tar.close()
os.remove(local_filename)
| 9,226 | 31.60424 | 119 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/adaptive_stft_utils/operators.py | import torch.autograd
import torch
import torch.nn.functional as F
def dithering_int(n):
    # Stochastic rounding: round n up with probability equal to its fractional part.
    if n == int(n):
        return int(n)
    frac = torch.as_tensor(n - int(n), dtype=torch.float32)
    return int(torch.bernoulli(frac) + int(n))
class SignPassGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output
class Sign(torch.autograd.Function):
@staticmethod
def forward(ctx, n):
return torch.sign(n)
@staticmethod
def backward(ctx, grad_output):
return grad_output * 1e-3
class InvSign(torch.autograd.Function):
@staticmethod
def forward(ctx, n):
return torch.sign(n)
@staticmethod
def backward(ctx, grad_output):
return -grad_output * 1e-3
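# Usage sketch (illustrative): these are straight-through style estimators.
# The forward pass is sign(x); the backward pass substitutes a surrogate
# gradient (identity for SignPassGrad, a scaled identity for Sign, and its
# negation for InvSign).
def _example_sign_ste():
    x = torch.randn(4, requires_grad=True)
    SignPassGrad.apply(x).sum().backward()
    assert torch.allclose(x.grad, torch.ones_like(x))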
def clip_tensor_norm(parameters, max_norm, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
max_norm = float(max_norm)
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].device
total_norm = torch.norm(torch.stack([torch.norm(p.detach(), norm_type).to(
device) for p in parameters]), norm_type).detach()
    def clamp(p):
        # Hard-clip each tensor to +/- (total_norm * max_norm); the small linear
        # term keeps a nonzero gradient flowing through clipped entries.
        clamped = torch.clamp(p, min=-total_norm * max_norm, max=total_norm * max_norm)
        return clamped + 1e-4 * (p - clamped)
return [
clamp(p)
for p in parameters
]
class LimitGradient(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
grad_output = clip_tensor_norm(grad_output, max_norm=1.0, norm_type=2)[0]
return grad_output
| 1,729 | 23.027778 | 87 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/adaptive_stft_utils/losses.py | import torch.autograd
import torch
import torch.nn.functional as F
from .operators import clip_tensor_norm
def kurtosis(rfft_magnitudes_sq):
epsilon = 1e-7
max_norm = 0.1
kur_part = [
torch.sum(torch.pow(a, 2)) /
(torch.pow(torch.sum(a), 2).unsqueeze(-1) + epsilon)
for a in rfft_magnitudes_sq
]
n_wnd = len(rfft_magnitudes_sq)
assert n_wnd > 0
catted = torch.cat(clip_tensor_norm(kur_part, max_norm=max_norm, norm_type=2)) / max_norm
kur = torch.sum(catted) / n_wnd
return kur
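# Usage sketch (illustrative): scoring per-window power spectra. A spectrum
# concentrated in few bins yields a larger sum(a^2) / (sum a)^2 ratio than a
# flat one, so the score rewards spectral sparsity.
def _example_kurtosis():
    flat = [torch.ones(257) for _ in range(4)]  # energy spread over all bins
    peaky = [torch.eye(257)[0] * 257. for _ in range(4)]  # energy in one bin
    assert kurtosis(peaky) > kurtosis(flat)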
| 541 | 24.809524 | 93 | py |
STFTgrad | STFTgrad-main/adaptiveSTFT/adaptive_stft_utils/mappings.py | import math
import sys
import pathlib
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.insert(0, str(pathlib.Path(__file__).parent.parent.parent.absolute()))
from UMNN.models.UMNN import MonotonicNN
# Monotonically increasing mapping
class IdxToWindow(nn.Module):
def __init__(self, signal_len, num_windows=80, baseline_mapping_trick=True):
super(IdxToWindow, self).__init__()
self.signal_len = signal_len
self.num_windows = num_windows
self.baseline_mapping_trick = baseline_mapping_trick
self.slope = nn.Parameter(torch.tensor(
signal_len / num_windows, dtype=torch.float32, requires_grad=True, device="cuda"))
self.overlap_net = nn.Sequential(
nn.Linear(1, 64),
nn.LeakyReLU(),
nn.Linear(64, 64),
nn.LeakyReLU(),
nn.Linear(64, 1),
nn.Sigmoid()
)
self.bias = nn.Parameter(torch.tensor(
1e-2, dtype=torch.float32, requires_grad=True, device="cuda"))
self.model_monotonic = MonotonicNN(
2, [128, 128, 128], nb_steps=768, dev="cuda").cuda()
self.signal_mid_point = nn.Parameter(torch.tensor(
signal_len / 2.0, dtype=torch.float32, requires_grad=True, device="cuda"))
def forward(self, idx):
assert len(idx.shape) == 1
# transform window idx
# scale down by 10
rescale = .1
in_var = (idx * rescale + self.bias).view(idx.size(0), 1)
stem = (self.model_monotonic(in_var, in_var).flatten() / rescale)
        # advance by at least 32 samples per window
if self.baseline_mapping_trick:
baseline_mapping = 32 * idx
else:
baseline_mapping = 0
# convert window idx to sample idx
x_i = (stem * self.slope + baseline_mapping) + self.signal_mid_point
perc = self.overlap_net(idx.unsqueeze(-1) / self.signal_len * 2 - 1).flatten()
return (x_i, perc)
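# Shape sketch (hypothetical sizes): for idx = torch.arange(4.), forward()
# returns x_i of shape (4,) -- monotonically increasing sample positions,
# via the MonotonicNN stem plus the optional 32-samples-per-window baseline
# -- and perc of shape (4,) with per-window overlap fractions in (0, 1)
# produced by overlap_net.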
def make_find_window_configs(idx_to_window: IdxToWindow, last_sample: int):
"""
Creates a function which scans the mapping function to generate window
positions and overlaps ranging from just before the first sample to
just after the last sample.
"""
prev_cached_i = 0
def find_window_configs():
nonlocal prev_cached_i
# evaluate the window generator until we hit boundary on both sides,
# but keep one extra element on both sides past boundary
eval_cache = {}
def fast_idx_to_window(i):
batch_size = 16
idx = i // batch_size
arr = eval_cache.get(idx)
if arr is not None:
return (arr[0][i - idx * batch_size], arr[1][i - idx * batch_size])
arr = idx_to_window(torch.arange(start=idx * batch_size, end=idx * batch_size + batch_size,
step=1, dtype=torch.float32, device="cuda", requires_grad=False))
eval_cache[idx] = arr
return (arr[0][i - idx * batch_size], arr[1][i - idx * batch_size])
window_configs = []
i = prev_cached_i
w, p = fast_idx_to_window(i)
if w >= last_sample:
path = 0
# move left if too big
while w >= last_sample:
i = i - 1
w, p = fast_idx_to_window(i)
prev_cached_i = i
window_configs.append(fast_idx_to_window(i + 1))
# collect all items between last_sample and 0
while w >= 0:
window_configs.append((w, p))
i = i - 1
w, p = fast_idx_to_window(i)
window_configs.append((w, p))
window_configs.reverse()
elif w <= 0:
path = 1
# move right if too small
while w <= 0:
i = i + 1
w, p = fast_idx_to_window(i)
window_configs.append(fast_idx_to_window(i - 1))
prev_cached_i = i
# collect all items between 0 and last_sample
while w <= last_sample:
window_configs.append((w, p))
i = i + 1
w, p = fast_idx_to_window(i)
window_configs.append((w, p))
else:
path = 2
# w was in range
right_list = []
while w <= last_sample:
right_list.append((w, p))
i = i + 1
w, p = fast_idx_to_window(i)
right_list.append((w, p))
# move left from zero, to cover the starting regions
i = prev_cached_i - 1
w, p = fast_idx_to_window(i)
left_list = []
while w >= 0:
left_list.append((w, p))
i = i - 1
w, p = fast_idx_to_window(i)
left_list.append((w, p))
left_list.reverse()
window_configs = left_list + right_list
# filter out windows that are too small
filt_window_configs = []
prev_window_sample = -math.inf
assert window_configs[0][0] < 0, f"{i} {path} {window_configs}"
assert window_configs[-1][0] > last_sample, f"{i} {path} {window_configs}"
# min window size is 1
for i in range(len(window_configs)):
if i > 0:
if not (window_configs[i][0] > window_configs[i - 1][0]):
if path == 2:
path_desc = f"({(len(left_list), len(right_list))})" # type: ignore
else:
path_desc = None
assert False, f"path: {path} {path_desc}, i: {i}, {window_configs}, {window_configs[i][0]} > {window_configs[i - 1][0]}"
if window_configs[i][0] - prev_window_sample > 1:
filt_window_configs.append(window_configs[i])
prev_window_sample = window_configs[i][0]
assert len(filt_window_configs) > 0, f"{len(window_configs)}, path: {path}, prev_cached_i: {prev_cached_i}, {fast_idx_to_window(prev_cached_i)}, {fast_idx_to_window(prev_cached_i - 1)}, {idx_to_window.slope}"
assert filt_window_configs[0][0] < 0, f"{i} {path} {filt_window_configs}"
assert filt_window_configs[-1][0] > last_sample, f"{i} {path} {filt_window_configs}"
assert len(filt_window_configs) > 1, f"{filt_window_configs}, slope: {idx_to_window.slope.item()}"
return filt_window_configs
return find_window_configs
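# Hypothetical usage sketch (requires CUDA and the bundled UMNN package):
#   i2w = IdxToWindow(signal_len=16000)
#   find_configs = make_find_window_configs(i2w, last_sample=16000 - 1)
#   configs = find_configs()  # list of (sample_position, overlap) pairs whose
#                             # first entry lies below 0 and last entry beyond
#                             # last_sample, as the assertions above enforce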
| 6,507 | 40.452229 | 216 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/test_tasnet.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from model.model import TSENet,TSENet_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
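# The scp files consumed below are assumed to hold one
# "<utterance-key> <wav-path>" pair per line -- the format produced by the
# scp-generation scripts and parsed by utils.handle_scp.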
class Separation():
def __init__(self, mix_path, s1_path, ref_path, inf_path, yaml_path, model, gpuid):
super(Separation, self).__init__()
self.mix = handle_scp(mix_path)
self.s1 = handle_scp(s1_path)
self.ref = handle_scp(ref_path)
self.clss, self.onsets, self.offsets = handle_scp_inf(inf_path)
self.key = list(self.mix.keys())
opt = parse(yaml_path)
net = TSENet(N=opt['TSENet']['N'],
B=opt['TSENet']['B'],
H=opt['TSENet']['H'],
P=opt['TSENet']['P'],
X=opt['TSENet']['X'],
R=opt['TSENet']['R'],
norm=opt['TSENet']['norm'],
num_spks=opt['TSENet']['num_spks'],
causal=opt['TSENet']['causal'],
cls_num=opt['TSENet']['class_num'],
nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
nFFT=opt['datasets']['audio_setting']['nFFT'],
fusion=opt['TSENet']['fusion'],
usingEmb=opt['TSENet']['usingEmb'],
usingTsd=opt['TSENet']['usingTsd'],
CNN10_settings=opt['TSENet']['CNN10_settings'],
fixCNN10=opt['TSENet']['fixCNN10'],
fixTSDNet=opt['TSENet']['fixTSDNet'],
pretrainedCNN10=opt['TSENet']['pretrainedCNN10'],
pretrainedTSDNet=opt['TSENet']['pretrainedTSDNet'],
threshold=opt['TSENet']['threshold'])
dicts = torch.load(model, map_location='cpu')
net.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.net=net.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.gpuid=tuple(gpuid)
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.audio_length = opt['datasets']['audio_setting']['audio_length']
self.cls_num = opt['TSENet']['class_num']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
def inference(self, file_path, max_num=8000):
with torch.no_grad():
for i in range(min(len(self.key), max_num)):
index = self.key[i]
s1_index = index.replace('.wav', '_lab.wav')
ref_index = index.replace('.wav', '_re.wav')
mix = read_wav(self.mix[index])
ref = read_wav(self.ref[ref_index])
s1 = read_wav(self.s1[s1_index])
cls = torch.zeros(self.cls_num)
cls[self.clss[index]] = 1.
cls_index = cls.argmax(0)
cls_index = cls_index.to(self.device)
onset = self.onsets[index]
offset = self.offsets[index]
max_frame = self.sr * self.audio_length // self.nFrameShift - 2
onset_frame = round(onset * (self.sr // self.nFrameShift - 1)) if round(
onset * (self.sr // self.nFrameShift - 1)) >= 0 else 0
offset_frame = round(offset * (self.sr // self.nFrameShift - 1)) if round(
offset * (self.sr // self.nFrameShift - 1)) < max_frame else max_frame
framelab = torch.zeros(max_frame + 1)
                for j in range(onset_frame, offset_frame + 1):
                    framelab[j] = 1.
framelab = framelab[None,:]
self.logger.info("Compute on utterance {}...".format(index))
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = s1.to(self.device)
framelab = framelab.to(self.device)
if mix.dim() == 1:
mix = torch.unsqueeze(mix, 0)
if ref.dim() == 1:
ref = torch.unsqueeze(ref, 0)
if s1.dim() == 1:
s1 = torch.unsqueeze(s1, 0)
#out, lps, lab, est_cls
#ests, lps, lab, est_cls = self.net(mix, ref,cls_index.long(), s1)
ests, lps, lab, est_cls = self.net(mix, ref, s1)
spks=[torch.squeeze(s.detach().cpu()) for s in ests]
a = 0
for s in spks:
s = s[:mix.shape[1]]
s = s.unsqueeze(0)
a += 1
os.makedirs(file_path+'/sound'+str(a), exist_ok=True)
filename=file_path+'/sound'+str(a)+'/'+index
write_wav(filename, s, 16000)
self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-mix_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix.scp', help='Path to mix scp file.')
parser.add_argument(
'-s1_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_s1.scp', help='Path to s1 scp file.')
parser.add_argument(
'-ref_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re.scp', help='Path to ref scp file.')
parser.add_argument(
'-inf_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf.scp', help='Path to inf file.')
parser.add_argument(
'-yaml', type=str, default='./config/TSENet/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSE_exp/checkpoint_fsd2018_audio/TSENet_loss_one_hot_loss_7/best.pt', help="Path to model file.")
    parser.add_argument(
        '-max_num', type=int, default=20, help="Maximum number of test utterances.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSENet/result/TSENet/baseline', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation(args.mix_scp, args.s1_scp, args.ref_scp, args.inf_scp, args.yaml, args.model, gpuid)
separation.inference(args.save_path, args.max_num)
if __name__ == "__main__":
main()
| 7,559 | 48.090909 | 176 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/train_Tasnet.py | import sys
sys.path.append('./')
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset import Datasets
from model.model import TSENet,TSENet_one_hot
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Tasnet,trainer_Tasnet_one_hot
import torch.optim.lr_scheduler as lr_scheduler
def make_dataloader(opt):
# make training dataloader
train_dataset = Datasets(
opt['datasets']['train']['dataroot_mix'],
opt['datasets']['train']['dataroot_targets'][0], # s1
opt['datasets']['train']['dataroot_targets'][1], # ref
opt['datasets']['train']['dataroot_targets'][2], # time information
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'],
opt['datasets']['audio_setting']['nFrameShift'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets(
opt['datasets']['val']['dataroot_mix'],
opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1],
opt['datasets']['val']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'],
opt['datasets']['audio_setting']['nFrameShift'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make test dataloader
test_dataset = Datasets(
opt['datasets']['test']['dataroot_mix'],
opt['datasets']['test']['dataroot_targets'][0],
opt['datasets']['test']['dataroot_targets'][1],
opt['datasets']['test']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'],
opt['datasets']['audio_setting']['nFrameShift'])
test_dataloader = Loader(test_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
return train_dataloader, val_dataloader, test_dataloader
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
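# Hypothetical YAML fragment matching the keys read above (illustrative):
#   optim:
#     name: Adam          # any optimizer class name from torch.optim
#     lr: 1.0e-3
#     weight_decay: 1.0e-5
#     momentum: 0.9       # only read for non-Adam optimizers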
def train():
parser = argparse.ArgumentParser(description='Parameters for training TSENet')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of TSENet")
logger.info(opt['logger']['experimental_description'])
if opt['one_hot'] == 1:
net = TSENet_one_hot(N=opt['TSENet']['N'],
B=opt['TSENet']['B'],
H=opt['TSENet']['H'],
P=opt['TSENet']['P'],
X=opt['TSENet']['X'],
R=opt['TSENet']['R'],
norm=opt['TSENet']['norm'],
num_spks=opt['TSENet']['num_spks'],
causal=opt['TSENet']['causal'],
cls_num=opt['TSENet']['class_num'],
nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
nFFT=opt['datasets']['audio_setting']['nFFT'],
fusion=opt['TSENet']['fusion'],
usingEmb=opt['TSENet']['usingEmb'],
usingTsd=opt['TSENet']['usingTsd'],
CNN10_settings=opt['TSENet']['CNN10_settings'],
fixCNN10=opt['TSENet']['fixCNN10'],
fixTSDNet=opt['TSENet']['fixTSDNet'],
pretrainedCNN10=opt['TSENet']['pretrainedCNN10'],
pretrainedTSDNet=opt['TSENet']['pretrainedTSDNet'],
threshold=opt['TSENet']['threshold'])
else:
net = TSENet(N=opt['TSENet']['N'],
B=opt['TSENet']['B'],
H=opt['TSENet']['H'],
P=opt['TSENet']['P'],
X=opt['TSENet']['X'],
R=opt['TSENet']['R'],
norm=opt['TSENet']['norm'],
num_spks=opt['TSENet']['num_spks'],
causal=opt['TSENet']['causal'],
cls_num=opt['TSENet']['class_num'],
nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
nFFT=opt['datasets']['audio_setting']['nFFT'],
fusion=opt['TSENet']['fusion'],
usingEmb=opt['TSENet']['usingEmb'],
usingTsd=opt['TSENet']['usingTsd'],
CNN10_settings=opt['TSENet']['CNN10_settings'],
fixCNN10=opt['TSENet']['fixCNN10'],
fixTSDNet=opt['TSENet']['fixTSDNet'],
pretrainedCNN10=opt['TSENet']['pretrainedCNN10'],
pretrainedTSDNet=opt['TSENet']['pretrainedTSDNet'],
threshold=opt['TSENet']['threshold'])
# build optimizer
logger.info("Building the optimizer of TSENet")
optimizer = make_optimizer(net.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of TSENet')
train_dataloader, val_dataloader, test_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}, Test Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader), len(test_dataloader)))
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, opt['train']['epoch'])
# build trainer
logger.info('Building the Trainer of TSENet')
if opt['one_hot'] == 1:
trainer = trainer_Tasnet_one_hot.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
else:
trainer = trainer_Tasnet.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
trainer.run()
#trainer.only_test()
if __name__ == "__main__":
train()
| 7,435 | 48.245033 | 131 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/create_scp_debug.py | import os
train_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_mix.scp'
train_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_s1.scp'
train_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_s2.scp'
train_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_re.scp'
test_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_mix.scp'
test_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_s1.scp'
test_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_s2.scp'
test_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_re.scp'
val_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_mix.scp'
val_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_s1.scp'
val_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_s2.scp'
val_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_re.scp'
test_offset_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_mix.scp'
test_offset_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_s1.scp'
test_offset_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_s2.scp'
test_offset_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_re.scp'
train_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/train'
test_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test'
vl_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/val'
test_offset_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test_offset'
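# Each scp line written below has the form "<file-name> <absolute-path>",
# which is the layout the loaders' handle_scp utility expects.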
tr_mix = open(train_mix_scp,'w')
tr_s1 = open(train_s1_scp,'w')
tr_s2 = open(train_s2_scp,'w')
tr_re = open(train_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(train_mix):
files.sort()
for file in files:
if 'a.wav' in file and num1 < 30:
tr_s1.write(file+" "+root+'/'+file)
tr_s1.write('\n')
num1 += 1
tr_re.write(file + " " + root + '/' + file)
tr_re.write('\n')
num2 += 1
elif 'b.wav' in file and num3 < 30:
tr_s2.write(file + " " + root + '/' + file)
tr_s2.write('\n')
num3 += 1
elif num4 < 30:
tr_mix.write(file + " " + root + '/' + file)
tr_mix.write('\n')
num4 += 1
tr_mix.close()
tr_s1.close()
tr_s2.close()
tr_re.close()
tt_mix = open(test_mix_scp,'w')
tt_s1 = open(test_s1_scp,'w')
tt_s2 = open(test_s2_scp,'w')
tt_re = open(test_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(test_mix):
files.sort()
for file in files:
if 'a.wav' in file and num1 < 30:
tt_s1.write(file+" "+root+'/'+file)
tt_s1.write('\n')
num1 += 1
tt_re.write(file + " " + root + '/' + file)
tt_re.write('\n')
num2 += 1
elif 'b.wav' in file and num3 < 30:
tt_s2.write(file + " " + root + '/' + file)
tt_s2.write('\n')
num3 += 1
elif num4 < 30:
tt_mix.write(file + " " + root + '/' + file)
tt_mix.write('\n')
num4 += 1
tt_mix.close()
tt_s1.close()
tt_s2.close()
tt_re.close()
val_mix = open(val_mix_scp,'w')
val_s1 = open(val_s1_scp,'w')
val_s2 = open(val_s2_scp,'w')
val_re = open(val_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(vl_mix):
files.sort()
for file in files:
if 'a.wav' in file and num1 < 30:
val_s1.write(file+" "+root+'/'+file)
val_s1.write('\n')
            num1 += 1
val_re.write(file + " " + root + '/' + file)
val_re.write('\n')
num2 += 1
elif 'b.wav' in file and num3 < 30:
val_s2.write(file + " " + root + '/' + file)
val_s2.write('\n')
num3 += 1
elif num4 < 30:
val_mix.write(file + " " + root + '/' + file)
val_mix.write('\n')
num4 += 1
val_mix.close()
val_s1.close()
val_s2.close()
val_re.close()
tto_mix = open(test_offset_mix_scp,'w')
tto_s1 = open(test_offset_s1_scp,'w')
tto_s2 = open(test_offset_s2_scp,'w')
tto_re = open(test_offset_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(test_offset_mix):
files.sort()
for file in files:
if 'a.wav' in file and num1 < 30:
tto_s1.write(file+" "+root+'/'+file)
tto_s1.write('\n')
num1 += 1
elif 'b.wav' in file and num2 < 30:
tto_s2.write(file + " " + root + '/' + file)
tto_s2.write('\n')
num2 += 1
elif 're.wav' in file and num3 < 30:
tto_re.write(file + " " + root + '/' + file)
tto_re.write('\n')
num3 += 1
elif num4 < 30:
tto_mix.write(file + " " + root + '/' + file)
tto_mix.write('\n')
num4 += 1
tto_mix.close()
tto_s1.close()
tto_s2.close()
tto_re.close()
| 5,313 | 33.506494 | 102 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/draw.py | import torchaudio
import matplotlib
import matplotlib.pyplot as plt
[width, height] = matplotlib.rcParams['figure.figsize']
if width < 10:
matplotlib.rcParams['figure.figsize'] = [width * 2.5, height]
if __name__ == "__main__":
# filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/train/train_1.wav"
filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/test_offset/test_offset_10_re.wav"
waveform, sample_rate = torchaudio.load(filename)
print("Shape of waveform: {}".format(waveform.size()))
print("Sample rate of waveform: {}".format(sample_rate))
plt.figure()
plt.plot(waveform.t().numpy())
# plt.title('test_offset_100_mix')
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.savefig('test_offset_10_re.png')
| 775 | 32.73913 | 95 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/trainer/trainer_Tasnet.py | import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from model.loss import get_loss
import torch
import os
import matplotlib.pyplot as plt
import numpy as np
import math
def time_to_frame(tm, st=True):
    # Map a time in seconds to an STFT frame index; 624 frames cover the
    # 10 s clip, so each frame spans radio = 10/624 s.
    radio = 10.0 / 624
    tm = float(tm)
    if st:
        n_fame = int(tm // radio)
    else:
        n_fame = math.ceil(tm / radio)
    if n_fame >= 624:
        n_fame = 623
    if n_fame < 0:
        n_fame = 0
    return n_fame

def get_mask(onset, offset):
    # Binary time-frequency mask: ones on the frames inside [onset, offset],
    # zeros elsewhere; shape (batch, 257 freq bins, 624 frames).
    out_mask = np.zeros((onset.shape[0], 257, 624))
    for i in range(onset.shape[0]):
        st_frame = time_to_frame(onset[i])
        ed_frame = time_to_frame(offset[i])
        out_mask[i, :, st_frame:ed_frame + 1] = 1
    return torch.from_numpy(out_mask)
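# Worked example: radio = 10/624 ~= 0.016 s per frame, so an event starting
# at 1.0 s maps to frame int(1.0 // radio) = 62, and get_mask() sets the
# corresponding columns of the (batch, 257, 624) mask to 1.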
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, TSENet, optimizer, scheduler, opt):
        super(Trainer, self).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.print_freq = opt['logger']['print_freq']
self.logger = logging.getLogger(opt['logger']['name'])
        self.checkpoint = opt['train']['path']  # directory for saving checkpoints
        self.name = opt['name']
        self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']  # STFT hop length in samples
        self.audio_length = opt['datasets']['audio_setting']['audio_length']  # clip length in seconds
        self.sr = opt['datasets']['audio_setting']['sample_rate']
        self.weighting_ratio = opt['train']['weighting_ratio']  # weight of the event-region ("_w") loss terms
        self.metric_ratio = opt['train']['metirc_ratio']  # mix of sisnrI_w vs. sisnrI in the selection metric
        self.loss_type = opt['train']['loss']  # loss configuration identifier
if opt['train']['gpuid']:
            self.logger.info('Load Nvidia GPU .....')
self.device = torch.device('cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.net = TSENet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.net = TSENet.to(self.device)
self.logger.info('Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
        if opt['resume']['state']:  # resume training from a saved checkpoint
            ckp = torch.load(opt['resume']['path'] + '/' + 'best.pt', map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
self.net = TSENet.to(self.device)
self.net.load_state_dict(ckp['model_state_dict'])
self.optimizer = optimizer
self.optimizer.load_state_dict(ckp['optim_state_dict'])
else:
self.net = TSENet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.train()
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_loss_spec_all = 0.0
total_loss_mse_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_loss_spec_w = 0.0
total_loss_mse_w = 0.0
total_sisnrI_w = 0.0
total_loss_cls = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, framelab in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
self.optimizer.zero_grad()
out, lps, lab, est_cls = self.net(mix, ref, s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_loss_mse_all += loss_mse_all.item()
total_loss_spec_all += loss_spec_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_loss_mse_w += loss_mse_w.item()
total_loss_spec_w += loss_spec_w.item()
total_sisnrI_w += sisnrI_w.item()
total_loss_cls += loss_cls.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.net.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_sisnr_all / num_index,
total_loss_mse_all / num_index,
total_loss_spec_all / num_index,
total_sisnrI_all / num_index,
total_loss_sisnr_w / num_index,
total_loss_mse_w / num_index,
total_loss_spec_w / num_index,
total_sisnrI_w / num_index,
total_loss_cls / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_loss_mse_all = total_loss_mse_all / num_index
total_loss_spec_all = total_loss_spec_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_loss_mse_w = total_loss_mse_w / num_index
total_loss_spec_w = total_loss_spec_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
total_loss_cls = total_loss_cls / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.6e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}, Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_loss_mse_all,
total_loss_spec_all, total_sisnrI_all, total_loss_sisnr_w, total_loss_mse_w,
total_loss_spec_w, total_sisnrI_w, total_loss_cls, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def validation(self, epoch):
self.logger.info(
'Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
out, lps, lab, est_cls = self.net(mix, ref, s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, ' \
'loss_sisnr:{:.3f}, sisnrI:{:.3f}, loss_sisnr_w:{:.3f}, sisnrI_w:{:.3f}' \
', Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
out_mask = get_mask(onset, offset)
out_mask = out_mask.to(self.device).float()
onset = onset.to(self.device)
offset = offset.to(self.device)
# print('onset ',onset)
# print('offset ',offset)
out, lps, lab, est_cls = self.net(mix, ref, s1[0],out_mask) # add true mask
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, ' \
'loss_sisnr:{:.3f}, sisnrI:{:.3f}, loss_sisnr_w:{:.3f}, sisnrI_w:{:.3f}, ' \
'Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def run(self):
train_loss = []
val_loss = []
test_loss = []
train_sisnrI = [] # sisnrI ?
val_sisnrI = []
test_sisnrI = []
train_sisnr = []
val_sisnr = []
test_sisnr = []
train_sisnrI_w = []
val_sisnrI_w= []
test_sisnrI_w = []
train_sisnr_w = []
val_sisnr_w = []
test_sisnr_w = []
train_metric = []
val_metric = []
test_metric = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,v_sisnr,v_sisnr_w,v_sisnrI,v_sisnrI_w= self.validation(self.cur_epoch)
best_loss = v_loss
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
best_metric = self.metric_ratio * best_sisnrI_w + (1. - self.metric_ratio) * best_sisnrI
self.logger.info("Starting epoch from {:d}, metric = {:.4f}, loss = {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}".format(self.cur_epoch, best_metric, best_loss, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss, t_sisnr, t_sisnr_w, t_sisnrI, t_sisnrI_w = self.train(self.cur_epoch)
v_loss, v_sisnr, v_sisnr_w, v_sisnrI, v_sisnrI_w = self.validation(self.cur_epoch)
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.test(self.cur_epoch)
t_metric = self.metric_ratio * t_sisnrI_w + (1. - self.metric_ratio) * t_sisnrI
v_metric = self.metric_ratio * v_sisnrI_w + (1. - self.metric_ratio) * v_sisnrI
tt_metric = self.metric_ratio * tt_sisnrI_w + (1. - self.metric_ratio) * tt_sisnrI
train_metric.append(t_metric)
val_metric.append(v_metric)
test_metric.append(tt_metric)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
train_sisnrI.append(t_sisnrI)
val_sisnrI.append(v_sisnrI)
test_sisnrI.append(tt_sisnrI)
train_sisnr.append(t_sisnr)
val_sisnr.append(v_sisnr)
test_sisnr.append(tt_sisnr)
train_sisnrI_w.append(t_sisnrI_w)
val_sisnrI_w.append(v_sisnrI_w)
test_sisnrI_w.append(tt_sisnrI_w)
train_sisnr_w.append(t_sisnr_w)
val_sisnr_w.append(v_sisnr_w)
test_sisnr_w.append(tt_sisnr_w)
# schedule here
self.scheduler.step()
if v_metric <= best_metric:
no_improve += 1
self.logger.info(
'No improvement, Best metric: {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
else:
best_loss = v_loss
best_metric = v_metric
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Metric Change: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
                self.logger.info('Epoch: {:d}, Best Metric Test: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w: {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, tt_metric, tt_sisnrI, tt_sisnr, tt_sisnrI_w, tt_sisnr_w))
if no_improve == self.early_stop:
self.logger.info(
"Stop training cause no impr for {:d} epochs".format(no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
        # draw training curves; clear the figure between charts so each saved
        # image only contains its own curves, and size x by the number of
        # recorded epochs (robust when training resumed from a checkpoint)
        x = list(range(len(train_loss)))
        plt.figure()
        plt.title("Loss of train, val and test")
        plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
        plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
        plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
        plt.legend()
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.savefig('loss.png')
        plt.clf()
        plt.plot(x, train_sisnrI, 'b-', label=u'train_sisnrI', linewidth=0.8)
        plt.plot(x, val_sisnrI, 'c-', label=u'val_sisnrI', linewidth=0.8)
        plt.plot(x, test_sisnrI, 'g', label=u'test_sisnrI', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnrI')
        plt.xlabel('epoch')
        plt.savefig('sisnrI.png')
        plt.clf()
        plt.plot(x, train_sisnr, 'b-', label=u'train_sisnr', linewidth=0.8)
        plt.plot(x, val_sisnr, 'c-', label=u'val_sisnr', linewidth=0.8)
        plt.plot(x, test_sisnr, 'g', label=u'test_sisnr', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnr')
        plt.xlabel('epoch')
        plt.savefig('sisnr.png')
        plt.clf()
        plt.plot(x, train_sisnrI_w, 'b-', label=u'train_sisnrI_w', linewidth=0.8)
        plt.plot(x, val_sisnrI_w, 'c-', label=u'val_sisnrI_w', linewidth=0.8)
        plt.plot(x, test_sisnrI_w, 'g', label=u'test_sisnrI_w', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnrI_w')
        plt.xlabel('epoch')
        plt.savefig('sisnrI_w.png')
        plt.clf()
        plt.plot(x, train_sisnr_w, 'b-', label=u'train_sisnr_w', linewidth=0.8)
        plt.plot(x, val_sisnr_w, 'c-', label=u'val_sisnr_w', linewidth=0.8)
        plt.plot(x, test_sisnr_w, 'g', label=u'test_sisnr_w', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnr_w')
        plt.xlabel('epoch')
        plt.savefig('sisnr_w.png')
        plt.clf()
        plt.plot(x, train_metric, 'b-', label=u'train_metric', linewidth=0.8)
        plt.plot(x, val_metric, 'c-', label=u'val_metric', linewidth=0.8)
        plt.plot(x, test_metric, 'g', label=u'test_metric', linewidth=0.8)
        plt.legend()
        plt.ylabel('metric')
        plt.xlabel('epoch')
        plt.savefig('metric.png')
def only_test(self):
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.validation(0)
def save_checkpoint(self, epoch, best=True):
'''
        save a checkpoint of the model and optimizer state
        best: if True, write best.pt, otherwise last.pt
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.net.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 21,031 | 46.58371 | 259 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/trainer/trainer_Tasnet_one_hot.py | import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from model.loss import get_loss,get_loss_one_hot
import torch
import os
import matplotlib.pyplot as plt
import numpy as np
import math
def time_to_frame(tm, st=True):
    # Map a time in seconds to an STFT frame index; 624 frames cover the
    # 10 s clip, so each frame spans radio = 10/624 s.
    radio = 10.0 / 624
    tm = float(tm)
    if st:
        n_fame = int(tm // radio)
    else:
        n_fame = math.ceil(tm / radio)
    if n_fame >= 624:
        n_fame = 623
    if n_fame < 0:
        n_fame = 0
    return n_fame

def get_mask(onset, offset):
    # Binary time-frequency mask: ones on the frames inside [onset, offset],
    # zeros elsewhere; shape (batch, 257 freq bins, 624 frames).
    out_mask = np.zeros((onset.shape[0], 257, 624))
    for i in range(onset.shape[0]):
        st_frame = time_to_frame(onset[i])
        ed_frame = time_to_frame(offset[i])
        out_mask[i, :, st_frame:ed_frame + 1] = 1
    return torch.from_numpy(out_mask)
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, TSENet, optimizer, scheduler, opt):
        super(Trainer, self).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.print_freq = opt['logger']['print_freq']
self.logger = logging.getLogger(opt['logger']['name'])
        self.checkpoint = opt['train']['path']  # directory for saving checkpoints
        self.name = opt['name']
        self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']  # STFT hop length in samples
        self.audio_length = opt['datasets']['audio_setting']['audio_length']  # clip length in seconds
        self.sr = opt['datasets']['audio_setting']['sample_rate']
        self.weighting_ratio = opt['train']['weighting_ratio']  # weight of the event-region ("_w") loss terms
        self.metric_ratio = opt['train']['metirc_ratio']  # mix of sisnrI_w vs. sisnrI in the selection metric
        self.loss_type = opt['train']['loss']  # loss configuration identifier
if opt['train']['gpuid']:
            self.logger.info('Load Nvidia GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.net = TSENet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.net = TSENet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
if opt['resume']['state']: # load pre-train?
ckp = torch.load(opt['resume']['path']+'/'+'best.pt', map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
self.net = TSENet.to(self.device)
self.net.load_state_dict(ckp['model_state_dict'])
self.optimizer = optimizer
self.optimizer.load_state_dict(ckp['optim_state_dict'])
else:
self.net = TSENet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info(
'Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.train()
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_loss_spec_all = 0.0
total_loss_mse_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_loss_spec_w = 0.0
total_loss_mse_w = 0.0
total_sisnrI_w = 0.0
total_loss_cls = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, framelab in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
self.optimizer.zero_grad()
out, lps, lab, est_cls = self.net(mix, ref, cls_index.long(), s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss_one_hot(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_loss_mse_all += loss_mse_all.item()
total_loss_spec_all += loss_spec_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_loss_mse_w += loss_mse_w.item()
total_loss_spec_w += loss_spec_w.item()
total_sisnrI_w += sisnrI_w.item()
total_loss_cls += loss_cls.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.net.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_sisnr_all / num_index,
total_loss_mse_all / num_index,
total_loss_spec_all / num_index,
total_sisnrI_all / num_index,
total_loss_sisnr_w / num_index,
total_loss_mse_w / num_index,
total_loss_spec_w / num_index,
total_sisnrI_w / num_index,
total_loss_cls / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_loss_mse_all = total_loss_mse_all / num_index
total_loss_spec_all = total_loss_spec_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_loss_mse_w = total_loss_mse_w / num_index
total_loss_spec_w = total_loss_spec_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
total_loss_cls = total_loss_cls / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}, Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_loss_mse_all,
total_loss_spec_all, total_sisnrI_all, total_loss_sisnr_w, total_loss_mse_w,
total_loss_spec_w, total_sisnrI_w, total_loss_cls, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def validation(self, epoch):
self.logger.info(
'Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
out, lps, lab, est_cls = self.net(mix, ref, cls_index.long(), s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss_one_hot(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, sisnrI_w:{:.6f}' \
', Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
out_mask = get_mask(onset, offset)
out_mask = out_mask.to(self.device).float()
onset = onset.to(self.device)
offset = offset.to(self.device)
framelab = framelab.to(self.device)
out, lps, lab, est_cls = self.net(mix, ref, cls_index.long(), s1[0], out_mask)
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss_one_hot(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def only_test(self):
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.validation(0)
def run(self):
train_loss = []
val_loss = []
test_loss = []
train_sisnrI = [] # sisnrI ?
val_sisnrI = []
test_sisnrI = []
train_sisnr = []
val_sisnr = []
test_sisnr = []
train_sisnrI_w = []
val_sisnrI_w= []
test_sisnrI_w = []
train_sisnr_w = []
val_sisnr_w = []
test_sisnr_w = []
train_metric = []
val_metric = []
test_metric = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,v_sisnr,v_sisnr_w,v_sisnrI,v_sisnrI_w= self.validation(self.cur_epoch)
best_loss = v_loss
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
best_metric = self.metric_ratio * best_sisnrI_w + (1. - self.metric_ratio) * best_sisnrI
self.logger.info("Starting epoch from {:d}, metric = {:.4f}, loss = {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}".format(self.cur_epoch, best_metric, best_loss, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss, t_sisnr, t_sisnr_w, t_sisnrI, t_sisnrI_w = self.train(self.cur_epoch)
v_loss, v_sisnr, v_sisnr_w, v_sisnrI, v_sisnrI_w = self.validation(self.cur_epoch)
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.test(self.cur_epoch)
t_metric = self.metric_ratio * t_sisnrI_w + (1. - self.metric_ratio) * t_sisnrI
v_metric = self.metric_ratio * v_sisnrI_w + (1. - self.metric_ratio) * v_sisnrI
tt_metric = self.metric_ratio * tt_sisnrI_w + (1. - self.metric_ratio) * tt_sisnrI
train_metric.append(t_metric)
val_metric.append(v_metric)
test_metric.append(tt_metric)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
train_sisnrI.append(t_sisnrI)
val_sisnrI.append(v_sisnrI)
test_sisnrI.append(tt_sisnrI)
train_sisnr.append(t_sisnr)
val_sisnr.append(v_sisnr)
test_sisnr.append(tt_sisnr)
train_sisnrI_w.append(t_sisnrI_w)
val_sisnrI_w.append(v_sisnrI_w)
test_sisnrI_w.append(tt_sisnrI_w)
train_sisnr_w.append(t_sisnr_w)
val_sisnr_w.append(v_sisnr_w)
test_sisnr_w.append(tt_sisnr_w)
# schedule here
self.scheduler.step()
if v_metric <= best_metric:
no_improve += 1
self.logger.info(
'No improvement, Best metric: {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
else:
best_loss = v_loss
best_metric = v_metric
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Metric Change: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
                self.logger.info('Epoch: {:d}, Best Metric Test: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w: {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, tt_metric, tt_sisnrI, tt_sisnr, tt_sisnrI_w, tt_sisnr_w))
if no_improve == self.early_stop:
self.logger.info(
"Stop training cause no impr for {:d} epochs".format(no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
        # draw training curves; clear the figure between charts so each saved
        # image only contains its own curves, and size x by the number of
        # recorded epochs (robust when training resumed from a checkpoint)
        x = list(range(len(train_loss)))
        plt.figure()
        plt.title("Loss of train, val and test")
        plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
        plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
        plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
        plt.legend()
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.savefig('loss.png')
        plt.clf()
        plt.plot(x, train_sisnrI, 'b-', label=u'train_sisnrI', linewidth=0.8)
        plt.plot(x, val_sisnrI, 'c-', label=u'val_sisnrI', linewidth=0.8)
        plt.plot(x, test_sisnrI, 'g', label=u'test_sisnrI', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnrI')
        plt.xlabel('epoch')
        plt.savefig('sisnrI.png')
        plt.clf()
        plt.plot(x, train_sisnr, 'b-', label=u'train_sisnr', linewidth=0.8)
        plt.plot(x, val_sisnr, 'c-', label=u'val_sisnr', linewidth=0.8)
        plt.plot(x, test_sisnr, 'g', label=u'test_sisnr', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnr')
        plt.xlabel('epoch')
        plt.savefig('sisnr.png')
        plt.clf()
        plt.plot(x, train_sisnrI_w, 'b-', label=u'train_sisnrI_w', linewidth=0.8)
        plt.plot(x, val_sisnrI_w, 'c-', label=u'val_sisnrI_w', linewidth=0.8)
        plt.plot(x, test_sisnrI_w, 'g', label=u'test_sisnrI_w', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnrI_w')
        plt.xlabel('epoch')
        plt.savefig('sisnrI_w.png')
        plt.clf()
        plt.plot(x, train_sisnr_w, 'b-', label=u'train_sisnr_w', linewidth=0.8)
        plt.plot(x, val_sisnr_w, 'c-', label=u'val_sisnr_w', linewidth=0.8)
        plt.plot(x, test_sisnr_w, 'g', label=u'test_sisnr_w', linewidth=0.8)
        plt.legend()
        plt.ylabel('sisnr_w')
        plt.xlabel('epoch')
        plt.savefig('sisnr_w.png')
        plt.clf()
        plt.plot(x, train_metric, 'b-', label=u'train_metric', linewidth=0.8)
        plt.plot(x, val_metric, 'c-', label=u'val_metric', linewidth=0.8)
        plt.plot(x, test_metric, 'g', label=u'test_metric', linewidth=0.8)
        plt.legend()
        plt.ylabel('metric')
        plt.xlabel('epoch')
        plt.savefig('metric.png')
def save_checkpoint(self, epoch, best=True):
'''
        save a checkpoint of the model and optimizer state
        best: if True, write best.pt, otherwise last.pt
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.net.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 21,117 | 47.104784 | 259 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/data_loader/AudioData.py | import torch.nn.functional as F
from utils import util
import torch
import torchaudio
import sys
sys.path.append('../')
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
def write_wav(fname, src, sample_rate):
'''
Write wav file
input:
fname: wav file path
src: frames of audio
sample_rate: An integer which is the sample rate of the audio
output:
None
'''
torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
'''
Class that reads Wav format files
Input:
        scp_path (str): path to an scp file
sample_rate (int, optional): sample rate (default: 8000)
chunk_size (int, optional): split audio size (default: 32000(4 s))
least_size (int, optional): Minimum split size (default: 16000(2 s))
Output:
split audio (list)
'''
def __init__(self, scp_path, sample_rate=8000, chunk_size=32000, least_size=16000):
super(AudioReader, self).__init__()
self.sample_rate = sample_rate
self.index_dict = util.handle_scp(scp_path)
self.keys = list(self.index_dict.keys())
self.audio = []
self.chunk_size = chunk_size
self.least_size = least_size
self.split()
def split(self):
'''
split audio with chunk_size and least_size
'''
for key in self.keys:
utt = read_wav(self.index_dict[key])
            if utt.shape[0] < self.least_size:
                continue
            if utt.shape[0] < self.chunk_size:
                gap = self.chunk_size - utt.shape[0]
                self.audio.append(F.pad(utt, (0, gap), mode='constant'))
            if utt.shape[0] >= self.chunk_size:
start = 0
while True:
if start + self.chunk_size > utt.shape[0]:
break
self.audio.append(utt[start:start+self.chunk_size])
start += self.least_size
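# Worked example: with chunk_size=32000 and least_size=16000, a 50000-sample
# utterance yields the chunks [0:32000] and [16000:48000]; the last 2000
# samples are dropped because another full chunk no longer fits.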
if __name__ == "__main__":
a = AudioReader("/home/likai/data1/create_scp/cv_mix.scp")
audio = a.audio
print(len(audio))
| 2,751 | 30.272727 | 87 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/data_loader/Dataset.py | import sys
sys.path.append('../')
import torch
from torch.utils.data import DataLoader, Dataset
import torchaudio
from utils.util import handle_scp, handle_scp_inf
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Datasets(Dataset):
'''
    Load audio data
    mix_scp: scp file of the mixture audio (type: str)
    s1_scp: scp file of the ground-truth target audio
    ref_scp: scp file of the reference (query) audio
    inf_scp: scp file with the class label, onset and offset of each mixture
'''
def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, inf_scp=None, sr=16000, cls_num=50, audio_length=10, nFrameShift=256):
super(Datasets, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.s1_audio = handle_scp(s1_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss, self.onsets, self.offsets = handle_scp_inf(inf_scp)
self.sr = sr
self.cls_num = cls_num # class num
self.audio_length = audio_length # s
        self.nFrameShift = nFrameShift  # frame shift (hop size) in samples
self.key = list(self.mix_audio.keys()) # mixture audio name
def __len__(self):
return len(self.key)
def __getitem__(self, index):
        index = self.key[index]  # map the integer index to the utterance key
        s1_index = index.replace('.wav', '_lab.wav')  # clean (target) wav name
        ref_index = index.replace('.wav', '_re.wav')  # reference wav name
mix = read_wav(self.mix_audio[index])
s1 = read_wav(self.s1_audio[s1_index])
ref = read_wav(self.ref_audio[ref_index])
cls = torch.zeros(self.cls_num) # ready for one-hot
cls[self.clss[index]] = 1. #
        onset = self.onsets[index]  # get onset time
offset = self.offsets[index] # get offset time
        max_frame = self.sr * self.audio_length // self.nFrameShift - 2  # index of the last usable frame
onset_frame = round(onset * (self.sr // self.nFrameShift - 1)) if round(onset * (self.sr // self.nFrameShift - 1)) >= 0 else 0
        # convert onset/offset times (s) to frame indices
offset_frame = round(offset * (self.sr // self.nFrameShift - 1)) if round(
offset * (self.sr // self.nFrameShift - 1)) < max_frame else max_frame
framelab = torch.zeros(max_frame + 1) # frame-level label
for i in range(onset_frame, offset_frame + 1):
framelab[i] = 1.
return mix, s1, ref, cls, onset, offset, framelab
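        # Worked example (illustrative): with sr=16000, nFrameShift=256 and
        # audio_length=10, max_frame = 16000*10//256 - 2 = 623, and an onset
        # of 1.0 s maps to frame round(1.0 * (16000//256 - 1)) = 61.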
if __name__ == "__main__":
datasets = Datasets("/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_mix.scp",
"/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_s1.scp",
"/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_re.scp",
"/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_inf.scp",
16000,
50,
8,
256)
print(datasets.key)
| 3,439 | 39.952381 | 134 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/data_loader/AudioReader.py | import sys
sys.path.append('../')
import torchaudio
import torch
from utils.util import handle_scp
def read_wav(fname, return_rate=False):
'''
    Read a wav file using torchaudio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
def write_wav(fname, src, sample_rate):
'''
Write wav file
input:
fname: wav file path
src: frames of audio
sample_rate: An integer which is the sample rate of the audio
output:
None
'''
torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
'''
Class that reads Wav format files
    Input: path to an scp file
    Output: the waveform of each utterance listed in that scp file.
'''
def __init__(self, scp_path, sample_rate=8000):
super(AudioReader, self).__init__()
self.sample_rate = sample_rate
self.index_dict = handle_scp(scp_path)
self.keys = list(self.index_dict.keys())
def _load(self, key):
src, sr = read_wav(self.index_dict[key], return_rate=True)
if self.sample_rate is not None and sr != self.sample_rate:
raise RuntimeError('SampleRate mismatch: {:d} vs {:d}'.format(
sr, self.sample_rate))
return src
def __len__(self):
return len(self.keys)
def __iter__(self):
for key in self.keys:
yield key, self._load(key)
def __getitem__(self, index):
if type(index) not in [int, str]:
raise IndexError('Unsupported index type: {}'.format(type(index)))
if type(index) == int:
num_uttrs = len(self.keys)
            if index >= num_uttrs or index < 0:
                raise KeyError('Integer index out of range, {:d} vs {:d}'.format(
                    index, num_uttrs))
index = self.keys[index]
if index not in self.index_dict:
raise KeyError("Missing utterance {}!".format(index))
return self._load(index)
if __name__ == "__main__":
r = AudioReader('/home/likai/data1/create_scp/cv_s2.scp')
index = 0
print(r[1])
| 2,556 | 28.732558 | 82 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/utils/util.py | import torch
import torch.nn as nn
def handle_scp(scp_path):
'''
Read scp file script
input:
scp_path: .scp file's file path
output:
scp_dict: {'key':'wave file path'}
'''
scp_dict = dict()
line = 0
    with open(scp_path, 'r') as f:
        lines = f.readlines()
for l in lines:
scp_parts = l.strip().split()
line += 1
        if len(scp_parts) != 2:
            raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
                scp_path, line, scp_parts))
        key, value = scp_parts
        if key in scp_dict:
            raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
                key, scp_path))
        scp_dict[key] = value
return scp_dict
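# Expected scp line format (illustrative key and path, one utterance per line):
#   utt_0001.wav /path/to/utt_0001.wav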
def handle_scp_inf(scp_path):
'''
Read information scp file script
input:
scp_path: .scp file's file path
output:
        scp_dict_cls, scp_dict_onset, scp_dict_offset:
            {'key': class index}, {'key': onset (s)}, {'key': offset (s)}
'''
scp_dict_cls = dict()
scp_dict_onset = dict()
scp_dict_offset = dict()
line = 0
    with open(scp_path, 'r') as f:
        lines = f.readlines()
for l in lines:
scp_parts = l.strip().split()
line += 1
        if len(scp_parts) != 4:
            raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
                scp_path, line, scp_parts))
        key, cls, onset, offset = scp_parts
        if key in scp_dict_cls:
            raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
                key, scp_path))
        scp_dict_cls[key] = int(cls)
        scp_dict_onset[key] = float(onset)
        scp_dict_offset[key] = float(offset)
return scp_dict_cls, scp_dict_onset, scp_dict_offset
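# Expected information scp line format (illustrative values):
#   utt_0001.wav 12 1.25 4.80
# i.e. key, class index, onset (s), offset (s).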
def check_parameters(net):
'''
    Return the number of module parameters, in millions.
'''
parameters = sum(param.numel() for param in net.parameters())
return parameters / 10**6
| 1,932 | 26.225352 | 79 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/model/model_t.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class GlobalLayerNorm(nn.Module):
'''
Calculate Global Layer Normalization
dim: (int or list or torch.Size) –
        the shape of the expected input
eps: a value added to the denominator for numerical stability.
elementwise_affine: a boolean value that when set to True,
this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
'''
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalLayerNorm, self).__init__()
self.dim = dim
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.ones(self.dim, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
# x = N x C x L
# N x 1 x 1
# cln: mean,var N x 1 x L
# gln: mean,var N x 1 x 1
if x.dim() != 3:
raise RuntimeError("{} accept 3D tensor as input".format(
self.__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x-mean)**2, (1, 2), keepdim=True)
# N x C x L
if self.elementwise_affine:
x = self.weight*(x-mean)/torch.sqrt(var+self.eps)+self.bias
else:
x = (x-mean)/torch.sqrt(var+self.eps)
return x
class CumulativeLayerNorm(nn.LayerNorm):
'''
Calculate Cumulative Layer Normalization
dim: you want to norm dim
elementwise_affine: learnable per-element affine parameters
'''
def __init__(self, dim, elementwise_affine=True):
super(CumulativeLayerNorm, self).__init__(
dim, elementwise_affine=elementwise_affine)
def forward(self, x):
# x: N x C x L
# N x L x C
x = torch.transpose(x, 1, 2)
# N x L x C == only channel norm
x = super().forward(x)
# N x C x L
x = torch.transpose(x, 1, 2)
return x
def select_norm(norm, dim):
    if norm not in ['gln', 'cln', 'bn']:
        raise RuntimeError("Unsupported normalization type: {}".format(norm))
if norm == 'gln':
return GlobalLayerNorm(dim, elementwise_affine=True)
if norm == 'cln':
return CumulativeLayerNorm(dim, elementwise_affine=True)
else:
return nn.BatchNorm1d(dim)
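# Minimal usage sketch (illustrative shapes): every norm returned here expects
# B x C x T input and preserves the shape.
#   norm = select_norm('gln', 128)
#   y = norm(torch.randn(4, 128, 100))  # -> (4, 128, 100)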
class Conv1D_e(nn.Module):
'''
Build the Conv1D structure
    causal: if True, use the causal (left-padded) setting
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D_e, self).__init__()
self.causal = causal
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x):
"""
Input:
x: [B x C x T], B is batch size, T is times
Returns:
x: [B, C, T]
"""
# B x C x T -> B x C_o x T_o
x_conv = self.conv1x1(x)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class Conv1D(nn.Conv1d):
'''
Applies a 1D convolution over an input signal composed of several input planes.
'''
def __init__(self, *args, **kwargs):
super(Conv1D, self).__init__(*args, **kwargs)
def forward(self, x, squeeze=False):
# x: N x C x L
if x.dim() not in [2, 3]:
raise RuntimeError("{} accept 2/3D tensor as input".format(
self.__name__))
x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
if squeeze:
x = torch.squeeze(x)
return x
class ConvTrans1D(nn.ConvTranspose1d):
'''
This module can be seen as the gradient of Conv1d with respect to its input.
It is also known as a fractionally-strided convolution
or a deconvolution (although it is not an actual deconvolution operation).
'''
def __init__(self, *args, **kwargs):
super(ConvTrans1D, self).__init__(*args, **kwargs)
def forward(self, x, squeeze=False):
"""
x: N x L or N x C x L
"""
if x.dim() not in [2, 3]:
raise RuntimeError("{} accept 2/3D tensor as input".format(
self.__name__))
x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
if squeeze:
x = torch.squeeze(x)
return x
class Conv1D_Block(nn.Module):
'''
Consider only residual links
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D_Block, self).__init__()
# conv 1 x 1
self.conv1x1 = Conv1D(in_channels, out_channels, 1)
self.PReLU_1 = nn.PReLU()
self.norm_1 = select_norm(norm, out_channels)
# not causal don't need to padding, causal need to pad+1 = kernel_size
self.pad = (dilation * (kernel_size - 1)) // 2 if not causal else (
dilation * (kernel_size - 1))
# depthwise convolution
self.dwconv = Conv1D(out_channels, out_channels, kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLU_2 = nn.PReLU()
self.norm_2 = select_norm(norm, out_channels)
self.Sc_conv = nn.Conv1d(out_channels, in_channels, 1, bias=True)
self.causal = causal
def forward(self, x):
# x: N x C x L
# N x O_C x L
c = self.conv1x1(x)
# N x O_C x L
c = self.PReLU_1(c)
c = self.norm_1(c)
# causal: N x O_C x (L+pad)
# noncausal: N x O_C x L
c = self.dwconv(c)
# N x O_C x L
if self.causal:
c = c[:, :, :-self.pad]
c = self.PReLU_2(c)
c = self.norm_2(c)
c = self.Sc_conv(c)
return x+c
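# Receptive-field sketch (derived from the dilation pattern, not stated in the
# repo): stacking X=8 of these blocks with kernel_size=3 and dilations
# 1, 2, ..., 128 covers 1 + 2*(2**8 - 1) = 511 frames per repeat.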
class Conv1D_emb(nn.Module):
'''
Build the Conv1D structure with embedding
    causal: if True, use the causal (left-padded) setting
'''
def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False, fusion='concat', usingEmb=True, usingTsd=False):
super(Conv1D_emb, self).__init__()
self.causal = causal
self.usingTsd = usingTsd
self.usingEmb = usingEmb
self.fusion = fusion # concat, add, multiply
if usingEmb:
if fusion == 'concat':
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels + 1, out_channels, kernel_size=1)
elif fusion == 'add':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
elif fusion == 'multiply':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C']
tsd: [B x 1 x T]
Returns:
x: [B, C, T]
"""
T = x.shape[-1]
emb = torch.unsqueeze(emb, -1)
# B x C' X T
emb = emb.repeat(1, 1, T)
# B x (C + C') X T
if self.usingEmb:
if self.fusion == 'concat':
if not self.usingTsd:
x_ = torch.cat([x, emb], 1)
else:
x_ = torch.cat([x, emb, tsd], 1)
elif self.fusion == 'add':
x_ = self.PReLu1(self.preCNN(emb)) + x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
elif self.fusion == 'multiply':
x_ = self.PReLu1(self.preCNN(emb)) * x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
else:
x_ = x
# B x (C + C') X T -> B x C_o x T_o
x_conv = self.conv1x1(x_)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class ExtractionNet(nn.Module):
'''
TasNet Separation part
LayerNorm -> 1x1Conv -> 1-D Conv .... -> output
'''
def __init__(self, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=1, fusion='concat', usingEmb=[True,True,True], usingTsd=[False,False,False]):
super(ExtractionNet, self).__init__()
self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[0], usingTsd=usingTsd[0])
self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[1], usingTsd=usingTsd[1])
self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[2], usingTsd=usingTsd[2])
self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.PReLu = nn.PReLU()
self.norm = select_norm('cln', in_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
self.activation = nn.Sigmoid()
self.num_spks = num_spks
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_block: how many blocks in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_lists = [Conv1D_e(
**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_lists)
def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of block in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C x T], B is batch size, T is times
Returns:
x: [num_spks, B, N, T]
"""
# B x C x T
x = self.norm(x)
x = self.conv1x1(x)
x = self.PReLu(x)
# B x C x T
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_1_front(x, emb, tsd)
x = self.conv_block_1_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_2_front(x, emb, tsd)
x = self.conv_block_2_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_3_front(x, emb, tsd)
x = self.conv_block_3_back(x)
x = F.dropout(x, p=0.2, training=self.training)
# B x N x T
# print('x ',x.shape)
x = self.PReLu(x)
x = self.end_conv1x1(x)
# print('x ', x.shape)
# assert 1==2
x = self.activation(x)
return x
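# Shape sketch (illustrative, assuming the defaults of ConvTasNet below):
#   x: B x N x T latent, emb: B x 128 conditioning vector
#   -> mask: B x (num_spks * final_channels) x T, squashed to [0, 1] by the sigmoid.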
class ConvTasNet(nn.Module):
'''
ConvTasNet module
N Number of filters in autoencoder
L Length of the filters (in samples)
B Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
Sc Number of channels in skip-connection paths’ 1 × 1-conv blocks
H Number of channels in convolutional blocks
P Kernel size in convolutional blocks
X Number of convolutional blocks in each repeat
R Number of repeats
'''
def __init__(self,
N=128,
L=20,
B=128,
H=256,
P=3,
X=8,
R=3,
norm="gln",
num_spks=1,
activate="relu",
causal=False,
cls_num=41,
nFrameLen=512,
                 nFrameShift=256,
nFFT=512,
fusion='concat',
usingEmb=[True,True,True],
usingTsd=[False,False,False],
CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
fixCNN10=False,
fixTSDNet=True,
pretrainedCNN10=None,
pretrainedTSDNet=None,
threshold=0.5,
):
super(ConvTasNet, self).__init__()
# n x 1 x T => n x N x T
self.encoder = Conv1D(1, N, L, stride=L // 2, padding=0)
# n x N x T Layer Normalization of Separation
self.LayerN_S = select_norm('cln', N)
# n x B x T Conv 1 x 1 of Separation
self.BottleN_S = Conv1D(N, B, 1)
# Separation block
# n x B x T => n x B x T
# self.separation = self._Sequential_repeat(
# R, X, in_channels=B, out_channels=H, kernel_size=P, norm=norm, causal=causal)
self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
out_channels=B, final_channels=N, out_sp_channels=H, kernel_size=P,
norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
usingEmb=usingEmb, usingTsd=usingTsd)
self.conditioner_one_hot = nn.Embedding(cls_num, 128)
self.fixCNN10 = fixCNN10
self.fixTSDNet = fixTSDNet
self.pretrainedCNN10 = pretrainedCNN10
self.pretrainedTSDNet = pretrainedTSDNet
self.usingEmb = usingEmb
self.usingTsd = usingTsd
self.threshold = threshold
# self.init_conditioner() # init conditioner modual
self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8]) # produce embedding
# if usingTsd[0] or usingTsd[1] or usingTsd[2]: # if we decide to use tsdNet
# self.tsdnet = TSDNet(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
# self.init_TSDNet() # init it
self.epsilon = 1e-20
# n x B x T => n x 2*N x T
self.gen_masks = Conv1D(B, num_spks*N, 1)
# n x N x T => n x 1 x L
self.decoder = ConvTrans1D(N, 1, L, stride=L//2)
# activation function
active_f = {
'relu': nn.ReLU(),
'sigmoid': nn.Sigmoid(),
'softmax': nn.Softmax(dim=0)
}
self.activation_type = activate
self.activation = active_f[activate]
self.num_spks = num_spks
def init_conditioner(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
if self.fixCNN10: # if fix it
for p in self.conditioner.parameters():
p.requires_grad = False
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_block: how many blocks in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_Block_lists = [Conv1D_Block(
**block_kwargs, dilation=(2**i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_Block_lists)
def _Sequential_repeat(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of block in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, one_hot):
if x.dim() >= 3:
            raise RuntimeError(
                "{} accepts 1/2D tensor as input, but got {:d}".format(
                    type(self).__name__, x.dim()))
if x.dim() == 1:
x = torch.unsqueeze(x, 0)
# x: n x 1 x L => n x N x T
tsdMask = None
# print('x ',x.shape)
w = self.encoder(x)
# print('w ',w.shape)
# n x N x L => n x B x L
e = self.LayerN_S(w)
# print('e ', e.shape)
e = self.BottleN_S(e)
# print('e1 ', e.shape)
# conditional part
x_cls = None
# print('one_hot ',one_hot.shape)
emb_one_hot = self.conditioner_one_hot(one_hot)
# print('emb_one_hot ',emb_one_hot.shape)
# n x B x L => n x B x L
m = self.extractor(e, emb_one_hot, tsdMask)
# print('m ',m.shape)
# assert 1==2
# n x B x L => n x num_spk*N x L
# m = self.gen_masks(e)
# n x N x L x num_spks
m = torch.chunk(m, chunks=self.num_spks, dim=1)
# num_spks x n x N x L
# m = self.activation(torch.stack(m, dim=0))
gt = None
d = [w*m[i] for i in range(self.num_spks)]
# print('d ',d[0].shape)
#d = w*m
# decoder part num_spks x n x L
# audio_encoder = self.istft(x_ex, x_phase) # reconstruct predict audio
# audio = [audio_encoder[:, 0]]
s = [self.decoder(d[i], squeeze=True) for i in range(self.num_spks)]
# print('s ',s[0].shape)
return s, m, gt, x_cls
def check_parameters(net):
'''
    Return the number of module parameters, in millions.
'''
parameters = sum(param.numel() for param in net.parameters())
return parameters / 10**6
def test_convtasnet():
x = torch.randn(2, 160000)
one_hot = torch.zeros(2)
nnet = ConvTasNet()
    s, m, gt, x_cls = nnet(x, one_hot.long())
    print(str(check_parameters(nnet)) + ' M parameters')
    print(s[0].shape)
if __name__ == "__main__":
test_convtasnet()
| 21,609 | 37.451957 | 165 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/model/PANNS.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class CNN10(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(CNN10, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = torch.mean(x, dim=3)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.relu_(self.fc1(x))
embedding = x
return embedding
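# Illustrative call (assumed 10 s of 16 kHz audio, default settings):
#   net = CNN10(16000, 1024, 320, 64, 50, 8000, 527)
#   emb = net(torch.randn(2, 160000))  # -> torch.Size([2, 512])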
| 4,628 | 38.905172 | 107 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/model/loss.py | import torch
import numpy as np
def nll_loss(output, target):
    '''Negative log-likelihood loss. The output should be obtained using F.log_softmax(x).
Args:
output: (N, classes_num)
target: (N, classes_num)
'''
loss = - torch.mean(target * output)
return loss
def sisnr_loss(x, s, eps=1e-8):
"""
calculate training loss
input:
        x: separated signal, N x S tensor, estimated value
        s: reference signal, N x S tensor, true value
Return:
sisnr: N tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :s.shape[-1]]
else:
s = s[:, :x.shape[-1]]
def l2norm(mat, keepdim=False):
return torch.norm(mat, dim=-1, keepdim=keepdim)
if x.shape != s.shape:
raise RuntimeError(
"Dimention mismatch when calculate si-snr, {} vs {}".format(
x.shape, s.shape))
x_zm = x - torch.mean(x, dim=-1, keepdim=True)
s_zm = s - torch.mean(s, dim=-1, keepdim=True)
t = torch.sum(
x_zm * s_zm, dim=-1,
keepdim=True) * s_zm / (l2norm(s_zm, keepdim=True)**2 + eps)
loss = -20. * torch.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps))
return torch.sum(loss) / x.shape[0]
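# Sanity check (illustrative): sisnr_loss returns the *negative* SI-SNR, so a
# perfect estimate scores lower than a noisy one:
#   s = torch.randn(4, 16000)
#   sisnr_loss(s, s) < sisnr_loss(s + 0.1 * torch.randn_like(s), s)  # True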
def sisnri(x, s, m): # sisnr improvement
"""
Arguments:
x: separated signal, BS x S predicted sound
s: reference signal, BS x S target sound
m: mixture signal, BS x S mixture sound
Return:
sisnri: N tensor
"""
sisnr = sisnr_loss(x, s)
sisnr_ori = sisnr_loss(m, s)
    return sisnr_ori - sisnr  # positive when the estimate improves over the mixture
def lfb_mse_loss(x, s):
"""
est_spec, ref_spec: BS x F x T
return: log fbank MSE: BS tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :, :s.shape[-1]]
else:
s = s[:, :, :x.shape[-1]]
t = torch.sum((x - s) ** 2)/(x.shape[0]*x.shape[1]*x.shape[2])
return t
def mse_loss(x, s):
"""
calculate training loss
input:
x: separated signal, N x S tensor
s: reference signal, N x S tensor
Return:
return: N tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :s.shape[-1]]
else:
s = s[:, :x.shape[-1]]
t = torch.sum((x - s) ** 2)/(x.shape[0]*x.shape[1])
return t
def get_loss(loss_type, est_wav, lab_wav, mix_wav, est_mask, lab_mask, est_cls, lab_cls, onset, offset, nFrameShift, sr, audio_length, ratio):
"""
loss type:
1: enrollment: spec mse loss
2: enrollment: wave mse loss
3: enrollment: wave sisnrI loss
4: enrollment: spec mse loss + wave mse loss
5: enrollment: spec mse loss + wave sisnrI loss
6: enrollment: wave mse loss + wave sisnrI loss
7: enrollment: spec mse loss + wave mse loss + wave sisnrI loss
8: enrollment: spec mse loss (w)
9: enrollment: wave mse loss (w)
10: enrollment: wave sisnrI loss (w)
11: enrollment: spec mse loss (w) + wave mse loss (w)
12: enrollment: spec mse loss (w) + wave sisnrI loss (w)
13: enrollment: wave mse loss (w) + wave sisnrI loss (w)
14: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w)
15: enrollment: spec mse loss + wave mse loss + wave sisnrI loss + cls1 loss
16: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w) + cls1 loss
"""
loss_sisnr_w = 0.0
loss_mse_w = 0.0
loss_spec_w = 0.0
sisnrI_w = 0.0
onset = onset.cpu().numpy()
offset = offset.cpu().numpy()
sample_num = onset.shape[0] # batch_size
for i in range(sample_num):
assert onset[i] < offset[i]
max_wav = audio_length * sr - 1
# print('max_wav ',max_wav)
max_frame = sr * audio_length // nFrameShift - 2
# print('max_frame ',max_frame)
onset_wav = round(sr * onset[i]) if round(sr * onset[i]) >= 0 else 0 # target sound begin sample
# print('onset[i], onset_wav ',onset[i],onset_wav)
offset_wav = round(sr * offset[i]) if round(sr * offset[i]) < max_wav else max_wav # end
# print('offset[i], offset_wav ',offset[i],offset_wav)
onset_frame = round(onset[i] * (sr // nFrameShift - 1)) if round(onset[i] * (sr // nFrameShift - 1)) >= 0 else 0
# print('onset_frame ',onset_frame)
offset_frame = round(offset[i] * (sr // nFrameShift - 1)) if round(offset[i] * (sr // nFrameShift - 1)) < max_frame else max_frame
# print('offset_frame ',offset_frame)
est_wav_w = est_wav[i, onset_wav:offset_wav] # est_wav
est_wav_w = est_wav_w[None, :] # (1,N)
lab_wav_w = lab_wav[i, onset_wav:offset_wav] # lab_wav
lab_wav_w = lab_wav_w[None, :]
est_mask_w = est_mask[i, :, onset_frame:offset_frame]
est_mask_w = est_mask_w[None, :]
lab_mask_w = lab_mask[i, :, onset_frame:offset_frame]
lab_mask_w = lab_mask_w[None, :]
loss_sisnr_w += sisnr_loss(est_wav_w, lab_wav_w) # weighted sisnr
# print('loss_sisnr_w ',loss_sisnr_w)
loss_mse_w += mse_loss(est_wav_w, lab_wav_w) # weighted mse
loss_spec_w += lfb_mse_loss(est_mask_w, lab_mask_w) # mask loss
# assert loss_mse_w is nan
# print('loss_mse_w ',loss_mse_w)
# print('loss_spec_w ',loss_spec_w)
# assert 1==2
mix_wav_w = mix_wav[i, onset_wav:offset_wav] # mix wav
mix_wav_w = mix_wav_w[None, :]
        sisnrI_w += sisnri(est_wav_w, lab_wav_w, mix_wav_w)  # improvement
loss_sisnr_w = loss_sisnr_w / sample_num
loss_mse_w = loss_mse_w / sample_num
loss_spec_w = loss_spec_w / sample_num
sisnrI_w = sisnrI_w / sample_num
    loss_sisnr_all = sisnr_loss(est_wav, lab_wav)  # loss over the whole audio
loss_mse_all = mse_loss(est_wav, lab_wav)
# print(est_wav[0])
# print(lab_wav[0])
# assert 1==2
# print('loss_mse_all ',loss_mse_all)
# assert 1==2
loss_spec_all = lfb_mse_loss(est_mask, lab_mask)
sisnrI_all = sisnri(est_wav, lab_wav, mix_wav)
    loss_cls = nll_loss(est_cls, lab_cls)  # classification loss
# loss_emb = torch.cosine_similarity(emb, emb2, dim=-1)
# loss_emb = 1.-torch.mean(loss_emb)
if loss_type == 1:
loss = loss_spec_all * 100.
elif loss_type == 2:
loss = loss_mse_all * 1000.
elif loss_type == 3:
loss = - sisnrI_all
elif loss_type == 4:
loss = loss_spec_all * 100. + loss_mse_all * 1000.
elif loss_type == 5:
loss = loss_spec_all * 100. - sisnrI_all
elif loss_type == 6:
loss = loss_mse_all * 1000. - sisnrI_all
elif loss_type == 7:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all
elif loss_type == 8:
loss = (loss_spec_all + ratio * loss_spec_w) * 100.
elif loss_type == 9:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 10:
loss = - sisnrI_all - ratio * sisnrI_w
elif loss_type == 11:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 12:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 13:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 14:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 15:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all + loss_cls * 1000.
elif loss_type == 16:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w + loss_cls * 1000.
elif loss_type == 17:
loss = (loss_spec_all + ratio * loss_spec_w) * 10. + (loss_mse_all + ratio * loss_mse_w) * 100000. - sisnrI_all - ratio * sisnrI_w + loss_cls * 100.
# print('loss_spec_all ',loss_spec_all)
# print('loss_spec_w ',loss_spec_w)
# print('loss_mse_all ',loss_mse_all)
# print('loss_mse_w ',loss_mse_w)
# print('sisnrI_all ',sisnrI_all)
# print('sisnrI_w ',sisnrI_w)
# print('loss_cls ',loss_cls)
# assert 1==2
return loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, loss_cls
def get_loss_one_hot(loss_type, est_wav, lab_wav, mix_wav, est_mask, lab_mask, est_cls, lab_cls, onset, offset, nFrameShift, sr, audio_length, ratio):
"""
loss type:
1: enrollment: spec mse loss
2: enrollment: wave mse loss
3: enrollment: wave sisnrI loss
4: enrollment: spec mse loss + wave mse loss
5: enrollment: spec mse loss + wave sisnrI loss
6: enrollment: wave mse loss + wave sisnrI loss
7: enrollment: spec mse loss + wave mse loss + wave sisnrI loss
8: enrollment: spec mse loss (w)
9: enrollment: wave mse loss (w)
10: enrollment: wave sisnrI loss (w)
11: enrollment: spec mse loss (w) + wave mse loss (w)
12: enrollment: spec mse loss (w) + wave sisnrI loss (w)
13: enrollment: wave mse loss (w) + wave sisnrI loss (w)
14: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w)
    15: enrollment: spec mse loss + wave mse loss + wave sisnrI loss
    16: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w)
"""
loss_sisnr_w = 0.0
loss_mse_w = 0.0
loss_spec_w = 0.0
sisnrI_w = 0.0
onset = onset.cpu().numpy()
offset = offset.cpu().numpy()
sample_num = onset.shape[0] # batch_size
for i in range(sample_num):
assert onset[i] < offset[i]
max_wav = audio_length * sr - 1
# print('max_wav ',max_wav)
max_frame = sr * audio_length // nFrameShift - 2
# print('max_frame ',max_frame)
onset_wav = round(sr * onset[i]) if round(sr * onset[i]) >= 0 else 0 # target sound begin sample
# print('onset[i], onset_wav ',onset[i],onset_wav)
offset_wav = round(sr * offset[i]) if round(sr * offset[i]) < max_wav else max_wav # end
# print('offset[i], offset_wav ',offset[i],offset_wav)
onset_frame = round(onset[i] * (sr // nFrameShift - 1)) if round(onset[i] * (sr // nFrameShift - 1)) >= 0 else 0
# print('onset_frame ',onset_frame)
offset_frame = round(offset[i] * (sr // nFrameShift - 1)) if round(offset[i] * (sr // nFrameShift - 1)) < max_frame else max_frame
# print('offset_frame ',offset_frame)
est_wav_w = est_wav[i, onset_wav:offset_wav] # est_wav
est_wav_w = est_wav_w[None, :] # (1,N)
lab_wav_w = lab_wav[i, onset_wav:offset_wav] # lab_wav
lab_wav_w = lab_wav_w[None, :]
est_mask_w = est_mask[i, :, onset_frame:offset_frame]
est_mask_w = est_mask_w[None, :]
lab_mask_w = lab_mask[i, :, onset_frame:offset_frame]
lab_mask_w = lab_mask_w[None, :]
loss_sisnr_w += sisnr_loss(est_wav_w, lab_wav_w) # weighted sisnr
# print('loss_sisnr_w ',loss_sisnr_w)
loss_mse_w += mse_loss(est_wav_w, lab_wav_w) # weighted mse
loss_spec_w += lfb_mse_loss(est_mask_w, lab_mask_w) # mask loss
# assert loss_mse_w is nan
# print('loss_mse_w ',loss_mse_w)
# print('loss_spec_w ',loss_spec_w)
# assert 1==2
mix_wav_w = mix_wav[i, onset_wav:offset_wav] # mix wav
mix_wav_w = mix_wav_w[None, :]
        sisnrI_w += sisnri(est_wav_w, lab_wav_w, mix_wav_w)  # improvement
loss_sisnr_w = loss_sisnr_w / sample_num
loss_mse_w = loss_mse_w / sample_num
loss_spec_w = loss_spec_w / sample_num
sisnrI_w = sisnrI_w / sample_num
    loss_sisnr_all = sisnr_loss(est_wav, lab_wav)  # loss over the whole audio
loss_mse_all = mse_loss(est_wav, lab_wav)
# print(est_wav[0])
# print(lab_wav[0])
# assert 1==2
# print('loss_mse_all ',loss_mse_all)
# assert 1==2
loss_spec_all = lfb_mse_loss(est_mask, lab_mask)
sisnrI_all = sisnri(est_wav, lab_wav, mix_wav)
    # loss_cls = nll_loss(est_cls, lab_cls)  # classification loss (not used with one-hot conditioning)
    loss_cls = loss_spec_all  # placeholder so the return signature matches get_loss
# loss_emb = torch.cosine_similarity(emb, emb2, dim=-1)
# loss_emb = 1.-torch.mean(loss_emb)
if loss_type == 1:
loss = loss_spec_all * 100.
elif loss_type == 2:
loss = loss_mse_all * 1000.
elif loss_type == 3:
loss = - sisnrI_all
elif loss_type == 4:
loss = loss_spec_all * 100. + loss_mse_all * 1000.
elif loss_type == 5:
loss = loss_spec_all * 100. - sisnrI_all
elif loss_type == 6:
loss = loss_mse_all * 1000. - sisnrI_all
elif loss_type == 7:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all
elif loss_type == 8:
loss = (loss_spec_all + ratio * loss_spec_w) * 100.
elif loss_type == 9:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 10:
loss = - sisnrI_all - ratio * sisnrI_w
elif loss_type == 11:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 12:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 13:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 14:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 15:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all
elif loss_type == 16:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 17:
loss = (loss_spec_all + ratio * loss_spec_w) * 10. + (loss_mse_all + ratio * loss_mse_w) * 100000. - sisnrI_all - ratio * sisnrI_w
# print('loss_spec_all ',loss_spec_all)
# print('loss_spec_w ',loss_spec_w)
# print('loss_mse_all ',loss_mse_all)
# print('loss_mse_w ',loss_mse_w)
# print('sisnrI_all ',sisnrI_all)
# print('sisnrI_w ',sisnrI_w)
# print('loss_cls ',loss_cls)
# assert 1==2
return loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, loss_cls
| 14,345 | 42.34139 | 156 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/model/model.py | import torch
from torch import nn
import torch.nn.functional as F
import sys
sys.path.append('../')
from utils.util import check_parameters
from model.PANNS import CNN10
from model.tsd import TSD
import math
def init_kernel(frame_len,
frame_hop,
num_fft=None,
window="sqrt_hann"):
if window != "sqrt_hann":
raise RuntimeError("Now only support sqrt hanning window in order "
"to make signal perfectly reconstructed")
if not num_fft:
# FFT points
fft_size = 2 ** math.ceil(math.log2(frame_len))
else:
fft_size = num_fft
# window [window_length]
window = torch.hann_window(frame_len) ** 0.5
S_ = 0.5 * (fft_size * fft_size / frame_hop) ** 0.5
# window_length, F, 2 (real+imag)
kernel = torch.rfft(torch.eye(fft_size) / S_, 1)[:frame_len]
# 2, F, window_length
kernel = torch.transpose(kernel, 0, 2) * window
# 2F, 1, window_length
kernel = torch.reshape(kernel, (fft_size + 2, 1, frame_len))
return kernel
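# Note: torch.rfft was removed in PyTorch 1.8+. A drop-in equivalent for the
# call above (an assumption, not part of the original repo) would be:
#   kernel = torch.view_as_real(
#       torch.fft.rfft(torch.eye(fft_size) / S_, dim=1))[:frame_len]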
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
NOTE:
1) Recommend sqrt_hann window with 2**N frame length, because it
could achieve perfect reconstruction after overlap-add
2) Now haven't consider padding problems yet
"""
def __init__(self,
frame_len,
frame_hop,
window="sqrt_hann",
num_fft=None):
super(STFTBase, self).__init__()
K = init_kernel(
frame_len,
frame_hop,
num_fft=num_fft,
window=window)
self.K = nn.Parameter(K, requires_grad=False)
self.stride = frame_hop
self.window = window
def freeze(self):
self.K.requires_grad = False
def unfreeze(self):
self.K.requires_grad = True
def check_nan(self):
num_nan = torch.sum(torch.isnan(self.K))
if num_nan:
raise RuntimeError(
"detect nan in STFT kernels: {:d}".format(num_nan))
def extra_repr(self):
return "window={0}, stride={1}, requires_grad={2}, kernel_size={3[0]}x{3[2]}".format(
self.window, self.stride, self.K.requires_grad, self.K.shape)
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, **kwargs)
def forward(self, x):
"""
Accept raw waveform and output magnitude and phase
x: input signal, N x 1 x S or N x S
m: magnitude, N x F x T
p: phase, N x F x T
"""
if x.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
x.dim()))
self.check_nan()
# if N x S, reshape N x 1 x S
if x.dim() == 2:
x = torch.unsqueeze(x, 1)
# N x 2F x T
c = F.conv1d(x, self.K, stride=self.stride, padding=0)
# N x F x T
r, i = torch.chunk(c, 2, dim=1)
m = (r ** 2 + i ** 2) ** 0.5
p = torch.atan2(i, r)
return m, p
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, **kwargs)
def forward(self, m, p, squeeze=False):
"""
Accept phase & magnitude and output raw waveform
m, p: N x F x T
s: N x C x S
"""
if p.dim() != m.dim() or p.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
p.dim()))
self.check_nan()
# if F x T, reshape 1 x F x T
if p.dim() == 2:
p = torch.unsqueeze(p, 0)
m = torch.unsqueeze(m, 0)
r = m * torch.cos(p)
i = m * torch.sin(p)
# N x 2F x T
c = torch.cat([r, i], dim=1)
# N x 2F x T
s = F.conv_transpose1d(c, self.K, stride=self.stride, padding=0)
if squeeze:
s = torch.squeeze(s)
return s
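# Round-trip sketch (illustrative, requires a PyTorch that still has torch.rfft):
#   stft, istft = STFT(512, 256), iSTFT(512, 256)
#   m, p = stft(torch.randn(1, 16000))
#   y = istft(m, p, squeeze=True)  # approximately reconstructs the waveform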
class GlobalLayerNorm(nn.Module):
'''
Calculate Global Layer Normalization
dim: (int or list or torch.Size) –
        the shape of the expected input
eps: a value added to the denominator for numerical stability.
elementwise_affine: a boolean value that when set to True,
this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
'''
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalLayerNorm, self).__init__()
self.dim = dim
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.ones(self.dim, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
# x = N x C x L
# N x 1 x 1
# cln: mean,var N x 1 x L
# gln: mean,var N x 1 x 1
if x.dim() != 3:
raise RuntimeError("{} accept 3D tensor as input".format(
self.__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
# N x C x L
if self.elementwise_affine:
x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias
else:
x = (x - mean) / torch.sqrt(var + self.eps)
return x
class CumulativeLayerNorm(nn.LayerNorm):
'''
Calculate Cumulative Layer Normalization
dim: you want to norm dim
elementwise_affine: learnable per-element affine parameters
'''
def __init__(self, dim, elementwise_affine=True):
super(CumulativeLayerNorm, self).__init__(
dim, elementwise_affine=elementwise_affine)
def forward(self, x):
# x: N x C x L
# N x L x C
x = torch.transpose(x, 1, 2)
# N x L x C == only channel norm
x = super().forward(x)
# N x C x L
x = torch.transpose(x, 1, 2)
return x
def select_norm(norm, dim):
if norm == 'gln':
return GlobalLayerNorm(dim, elementwise_affine=True)
if norm == 'cln':
return CumulativeLayerNorm(dim, elementwise_affine=True)
if norm == 'ln':
return nn.GroupNorm(1, dim)
else:
return nn.BatchNorm1d(dim)
class Conv1D(nn.Module):
'''
Build the Conv1D structure
    causal: if True, use the causal (left-padded) setting
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D, self).__init__()
self.causal = causal
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x):
"""
Input:
x: [B x C x T], B is batch size, T is times
Returns:
x: [B, C, T]
"""
# B x C x T -> B x C_o x T_o
x_conv = self.conv1x1(x)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class Conv1D_emb(nn.Module):
'''
Build the Conv1D structure with embedding
    causal: if True, use the causal (left-padded) setting
'''
def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False, fusion='concat', usingEmb=True, usingTsd=False):
super(Conv1D_emb, self).__init__()
self.causal = causal
self.usingTsd = usingTsd
self.usingEmb = usingEmb
self.fusion = fusion # concat, add, multiply
if usingEmb:
if fusion == 'concat':
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels + 1, out_channels, kernel_size=1)
elif fusion == 'add':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
elif fusion == 'multiply':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C']
tsd: [B x 1 x T]
Returns:
x: [B, C, T]
"""
T = x.shape[-1]
emb = torch.unsqueeze(emb, -1)
# B x C' X T
emb = emb.repeat(1, 1, T)
# B x (C + C') X T
if self.usingEmb:
if self.fusion == 'concat':
if not self.usingTsd:
x_ = torch.cat([x, emb], 1)
else:
x_ = torch.cat([x, emb, tsd], 1)
elif self.fusion == 'add':
x_ = self.PReLu1(self.preCNN(emb)) + x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
elif self.fusion == 'multiply':
x_ = self.PReLu1(self.preCNN(emb)) * x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
else:
x_ = x
# B x (C + C') X T -> B x C_o x T_o
x_conv = self.conv1x1(x_)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class ExtractionNet(nn.Module):
'''
TasNet Separation part
LayerNorm -> 1x1Conv -> 1-D Conv .... -> output
'''
def __init__(self, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=1, fusion='concat', usingEmb=[True,True,True], usingTsd=[False,False,False]):
super(ExtractionNet, self).__init__()
self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[0], usingTsd=usingTsd[0])
self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[1], usingTsd=usingTsd[1])
self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[2], usingTsd=usingTsd[2])
self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.PReLu = nn.PReLU()
self.norm = select_norm('cln', in_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
self.activation = nn.Sigmoid()
self.num_spks = num_spks
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_block: how many blocks in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_lists = [Conv1D(
**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_lists)
def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of block in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C x T], B is batch size, T is times
Returns:
x: [num_spks, B, N, T]
"""
# B x C x T
x = self.norm(x)
x = self.conv1x1(x)
x = self.PReLu(x)
# B x C x T
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_1_front(x, emb, tsd)
x = self.conv_block_1_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_2_front(x, emb, tsd)
x = self.conv_block_2_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_3_front(x, emb, tsd)
x = self.conv_block_3_back(x)
x = F.dropout(x, p=0.2, training=self.training)
# B x N x T
# print('x ',x.shape)
x = self.PReLu(x)
x = self.end_conv1x1(x)
# print('x ', x.shape)
# assert 1==2
x = self.activation(x)
return x
class TSENet(nn.Module):
'''
TSENet module
N Number of filters in autoencoder
B Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
H Number of channels in convolutional blocks
P Kernel size in convolutional blocks
X Number of convolutional blocks in each repeat
R Number of repeats
'''
def __init__(self,
N=512,
B=128,
H=512,
P=3,
X=8,
R=3,
norm="gln",
num_spks=1,
causal=False,
cls_num=50,
nFrameLen=512,
                 nFrameShift=256,
nFFT=512,
fusion='concat',
usingEmb=[True,True,True],
usingTsd=[False,False,False],
CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
fixCNN10=False,
fixTSDNet=True,
pretrainedCNN10=None,
pretrainedTSDNet=None,
threshold=0.5,
):
super(TSENet, self).__init__()
self.device = torch.device('cuda')
self.stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.istft = iSTFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
        self.front_CNN = nn.Conv1d(nFrameShift+1, N, 1)  # (in_channels, out_channels, kernel_size); with the defaults, nFrameShift+1 == nFFT//2 + 1, the number of STFT frequency bins
self.PReLu = nn.PReLU()
self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
out_channels=B, final_channels=nFrameShift + 1, out_sp_channels=H, kernel_size=P,
norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
usingEmb=usingEmb, usingTsd=usingTsd)
self.num_spks = num_spks
self.conditioner = CNN10(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5],
classes_num=CNN10_settings[6])
self.cls1 = nn.Linear(CNN10_settings[7], CNN10_settings[8])
self.cls2 = nn.Linear(CNN10_settings[8], cls_num) # classifier head
self.fixCNN10 = fixCNN10
self.fixTSDNet = fixTSDNet
self.pretrainedCNN10 = pretrainedCNN10
self.pretrainedTSDNet = pretrainedTSDNet
self.usingEmb = usingEmb
self.usingTsd = usingTsd
self.threshold = threshold
self.init_conditioner() # init conditioner modual
self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8]) # produce embedding
if usingTsd[0] or usingTsd[1] or usingTsd[2]: # if we decide to use tsdNet
self.tsdnet = TSDNet(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
self.init_TSDNet() # init it
self.epsilon = 1e-20
def init_conditioner(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
if self.fixCNN10: # if fix it
for p in self.conditioner.parameters():
p.requires_grad = False
def init_TSDNet(self):
if self.pretrainedTSDNet:
device = torch.device('cuda')
dicts = torch.load(self.pretrainedTSDNet, map_location=device)
self.tsdnet.load_state_dict(dicts["model_state_dict"])
if self.fixTSDNet:
for p in self.tsdnet.parameters():
p.requires_grad = False
def forward(self, x, ref, label=None, true_mask=None, inf=False):
"""
Input:
x: [B, T], B is batch size, T is times
ref: [B, T], B is batch size, T is times
Returns:
audio: [B, T]
"""
# B x T -> B x C x T
x_magnitude, x_phase = self.stft(x) # get mag spectrum
x_encoder = torch.log(x_magnitude ** 2 + self.epsilon) # bs, 257, 249
if not inf:
            label_magnitude, label_phase = self.stft(label)  # targets only exist during training (inf=False)
# print('label_magnitude ',label_magnitude.shape)
if self.usingTsd[0] or self.usingTsd[1] or self.usingTsd[2]: # if use tsd
_, _, out_tsd_up = self.tsdnet(x, ref) # produce tsd results
tsdMask = torch.zeros(x_magnitude.shape[0], x_magnitude.shape[2]).cuda() # generate mask
            tsdMask[out_tsd_up > self.threshold] = 1.  # set to 1 where the detection score exceeds the threshold
tsdMask = tsdMask[:, None, :]
else:
tsdMask = None
# B x T -> B x C -> B x C x T
out_enc = self.conditioner(ref) # encode ref audio
emb = self.emb_fc(out_enc) # get embedding
emb = self.PReLu(emb)
x_cls = self.PReLu(self.cls1(out_enc))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1) # produce classification results
x_encoder = self.PReLu(self.front_CNN(x_encoder))
mask = self.extractor(x_encoder, emb, tsdMask) # generate mask
# print('mask ',mask.shape)
# assert 1==2
        if true_mask is not None:
            mask = true_mask * mask
        x_ex = x_magnitude * mask
        if not inf:
            # phase-sensitive mask (PSM) target, truncated to [0, 1]
            gt = label_magnitude / (x_magnitude + self.epsilon) * torch.cos(label_phase - x_phase)
            gt = torch.clamp(gt, min=0., max=1.)
        else:
            gt = None
audio_encoder = self.istft(x_ex, x_phase) # reconstruct predict audio
audio = [audio_encoder[:, 0]]
return audio, mask, gt, x_cls
class TSENet_one_hot(nn.Module):
'''
TSENet module
N Number of filters in autoencoder
B Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
H Number of channels in convolutional blocks
P Kernel size in convolutional blocks
X Number of convolutional blocks in each repeat
R Number of repeats
'''
def __init__(self,
N=512,
B=128,
H=512,
P=3,
X=8,
R=3,
norm="gln",
num_spks=1,
causal=False,
cls_num=50,
nFrameLen=512,
                 nFrameShift=256,
nFFT=512,
fusion='concat',
usingEmb=[True,True,True],
usingTsd=[False,False,False],
CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
fixCNN10=False,
fixTSDNet=True,
pretrainedCNN10=None,
pretrainedTSDNet=None,
threshold=0.5,
):
super(TSENet_one_hot, self).__init__()
self.device = torch.device('cuda')
self.stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.istft = iSTFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
        self.front_CNN = nn.Conv1d(nFrameShift+1, N, 1)  # (in_channels, out_channels, kernel_size); with the defaults, nFrameShift+1 == nFFT//2 + 1, the number of STFT frequency bins
self.PReLu = nn.PReLU()
self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
out_channels=B, final_channels=nFrameShift + 1, out_sp_channels=H, kernel_size=P,
norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
usingEmb=usingEmb, usingTsd=usingTsd)
self.num_spks = num_spks
# self.conditioner = CNN10(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
# hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5],
# classes_num=CNN10_settings[6])
self.conditioner_one_hot = nn.Embedding(cls_num,128)
# self.cls1 = nn.Linear(CNN10_settings[7], CNN10_settings[8])
# self.cls2 = nn.Linear(CNN10_settings[8], cls_num) # classifier head
self.fixCNN10 = fixCNN10
self.fixTSDNet = fixTSDNet
self.pretrainedCNN10 = pretrainedCNN10
self.pretrainedTSDNet = pretrainedTSDNet
self.usingEmb = usingEmb
self.usingTsd = usingTsd
self.threshold = threshold
        # self.init_conditioner()  # init conditioner module (disabled in the one-hot variant)
self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8]) # produce embedding
        if usingTsd[0] or usingTsd[1] or usingTsd[2]:  # any stage uses TSD conditioning
            self.tsdnet = TSDNet_one_hot(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
            self.init_TSDNet()  # load (and optionally freeze) the pretrained TSD network
self.epsilon = 1e-20
# def init_conditioner(self):
# if self.pretrainedCNN10:
# device = torch.device('cuda')
# checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
# self.conditioner.load_state_dict(checkpoint['model'])
# if self.fixCNN10: # if fix it
# for p in self.conditioner.parameters():
# p.requires_grad = False
def init_TSDNet(self):
if self.pretrainedTSDNet:
device = torch.device('cuda')
dicts = torch.load(self.pretrainedTSDNet, map_location=device)
self.tsdnet.load_state_dict(dicts["model_state_dict"])
if self.fixTSDNet:
for p in self.tsdnet.parameters():
p.requires_grad = False
    def forward(self, x, ref, one_hot, label=None, true_mask=None, inf=False):
        """
        Input:
            x: [B, T] mixture waveform; B is the batch size, T the number of samples
            ref: [B, T] reference waveform (kept for API compatibility; conditioning comes from one_hot)
            one_hot: [B] integer class indices for the embedding lookup
        Returns:
            audio: [B, T]
        """
# B x T -> B x C x T
x_magnitude, x_phase = self.stft(x) # get mag spectrum
x_encoder = torch.log(x_magnitude ** 2 + self.epsilon) # bs, 257, 249
if not inf:
            label_magnitude, label_phase = self.stft(label)  # the label spectrum is needed only for the training target
        if self.usingTsd[0] or self.usingTsd[1] or self.usingTsd[2]:  # any stage uses TSD conditioning
            # TSDNet_one_hot needs the class indices and returns (x_cls, out_tsd_time, out_tsd_up, sim_cos)
            _, _, out_tsd_up, _ = self.tsdnet(x, ref, one_hot)
            tsdMask = torch.zeros(x_magnitude.shape[0], x_magnitude.shape[2]).cuda()  # [B, T_frames]
            tsdMask[out_tsd_up > self.threshold] = 1.  # frames above the detection threshold are kept
            tsdMask = tsdMask[:, None, :]  # [B, 1, T_frames] so the mask broadcasts over frequency
        else:
            tsdMask = None
# B x T -> B x C -> B x C x T
# out_enc = self.conditioner(ref) # encode ref audio
# emb = self.emb_fc(out_enc) # get embedding
# emb = self.PReLu(emb)
# # print('emb ',emb.shape)
# x_cls = self.PReLu(self.cls1(out_enc))
# x_cls = F.dropout(x_cls, p=0.5, training=self.training)
# x_cls = self.cls2(x_cls)
# x_cls = F.log_softmax(x_cls, dim=-1) # produce classification results
        x_cls = None  # the one-hot variant has no auxiliary classifier
        emb_one_hot = self.conditioner_one_hot(one_hot)  # [B] class indices -> [B, 128] embeddings
x_encoder = self.PReLu(self.front_CNN(x_encoder))
mask = self.extractor(x_encoder, emb_one_hot, tsdMask) # generate mask
        x_ex = x_magnitude * mask
        if not inf:
            # Phase-sensitive mask (PSM) target: |S| / |Y| * cos(theta_S - theta_Y)
            gt = label_magnitude / (x_magnitude + self.epsilon) * torch.cos(label_phase - x_phase)
            gt = torch.clamp(gt, min=0., max=1.)  # truncate to [0, 1]
        else:
            gt = None  # no training target at inference time (label is not given)
        audio_encoder = self.istft(x_ex, x_phase)  # reconstruct the estimated waveform
        audio = [audio_encoder[:, 0]]
        return audio, mask, gt, x_cls
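
# A minimal sketch of the one-hot conditioning path above (shapes are assumptions,
# for illustration only):
#
#     cond = nn.Embedding(50, 128)   # cls_num classes -> 128-d embeddings
#     ids = torch.tensor([3, 17])    # [B] integer class indices
#     emb = cond(ids)                # [B, 128], fed to the extractor in place of a CNN10 embedding
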
class TSDNet_one_hot(nn.Module):
'''
    TSDNet variant conditioned on a one-hot class embedding
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
super(TSDNet_one_hot, self).__init__()
self.PReLu = nn.PReLU()
# self.conditioner = CNN10(sample_rate=16000, window_size=1024,
# hop_size=320, mel_bins=64, fmin=50, fmax=8000,
# classes_num=527)
# self.cls1 = nn.Linear(128, 128)
# self.cls2 = nn.Linear(128, cls_num)
# self.pretrainedCNN10 = pretrainedCNN10
# self.init_ref()
# self.emb_fc = nn.Linear(512, 128)
        self.conditioner_one_hot = nn.Embedding(cls_num, 128)
        self.tsd = TSD(sample_rate=CNN10_settings[0], window_size=nFrameLen,
            hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
    def init_ref(self):
        # Note: unused in the one-hot variant; self.pretrainedCNN10 and
        # self.conditioner are commented out in __init__, so calling this would fail.
        if self.pretrainedCNN10:
            device = torch.device('cuda')
            checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
            self.conditioner.load_state_dict(checkpoint['model'])
    def forward(self, x, ref, onehot=None):
        """
        Input:
            x: [B, T] mixture waveform; B is the batch size, T the number of samples
            ref: [B, T] reference waveform (unused here; conditioning comes from onehot)
            onehot: [B] integer class indices (required in practice, despite the default)
        Returns:
            x_cls, out_tsd_time, out_tsd_up, sim_cos
        """
# out_enc = self.conditioner(ref)
# emb = self.emb_fc(out_enc)
# emb = self.PReLu(emb)
        emb_onehot = self.conditioner_one_hot(onehot)  # [B] class indices -> [B, 128] embeddings
        out_tsd_up, out_tsd_time, sim_cos = self.tsd(x, emb_onehot)
# x_cls = self.PReLu(self.cls1(emb))
# x_cls = F.dropout(x_cls, p=0.5, training=self.training)
# x_cls = self.cls2(x_cls)
# x_cls = F.log_softmax(x_cls, dim=-1)
        x_cls = torch.zeros(1).cuda()  # placeholder: the one-hot variant has no classifier head
        return x_cls, out_tsd_time, out_tsd_up, sim_cos
class TSDNet(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=50, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
super(TSDNet, self).__init__()
self.PReLu = nn.PReLU()
self.conditioner = CNN10(sample_rate=16000, window_size=1024,
hop_size=320, mel_bins=64, fmin=50, fmax=8000,
classes_num=527)
self.cls1 = nn.Linear(128, 128)
self.cls2 = nn.Linear(128, cls_num)
self.pretrainedCNN10 = pretrainedCNN10
self.init_ref()
self.emb_fc = nn.Linear(512, 128)
self.tsd = TSD(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
def init_ref(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
    def forward(self, x, ref):
        """
        Input:
            x: [B, T] mixture waveform; B is the batch size, T the number of samples
            ref: [B, T] reference waveform of the target class
        Returns:
            x_cls, out_tsd_time, out_tsd_up
        """
out_enc = self.conditioner(ref)
emb = self.emb_fc(out_enc)
emb = self.PReLu(emb)
out_tsd_up, out_tsd_time = self.tsd(x, emb)
x_cls = self.PReLu(self.cls1(emb))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1)
return x_cls, out_tsd_time, out_tsd_up
if __name__ == "__main__":
conv = Conv_TasNet().cuda()
# encoder = Encoder(16, 512)
x = torch.randn(4, 64000).cuda()
label = torch.randn(4, 64000).cuda()
ref = torch.randn(4, 64000).cuda()
audio, lps, lab = conv(x, ref, label)
print(audio[0].shape)
# print("{:.3f}".format(check_parameters(conv)))
| 32,316 | 39.497494 | 165 | py |
Tim-TSENet | Tim-TSENet-main/TSENET/model/tsd.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
def init_weights(m):
if isinstance(m, (nn.Conv2d, nn.Conv1d)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class Block2D(nn.Module):
def __init__(self, cin, cout, kernel_size=3, padding=1):
super().__init__()
self.block = nn.Sequential(
nn.BatchNorm2d(cin),
nn.Conv2d(cin,
cout,
kernel_size=kernel_size,
padding=padding,
bias=False),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
def forward(self, x):
return self.block(x)
class TSD(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
self.fc = nn.Linear(512, 256)
self.outputlayer = nn.Linear(256, 2)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2) # (b,156,128)
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
            self._flattened = True  # set the flag so parameters are only flattened once (the original never set it)
        x, _ = self.gru(x)  # [B, T', 512] (bidirectional, 2 * 256)
x = self.fc(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # [B, T', 2]
        decision_up = torch.nn.functional.interpolate(
            decision_time.transpose(1, 2),  # [B, 2, T']
            time,  # upsample back to the spectrogram frame count
            mode='linear',
            align_corners=False).transpose(1, 2)  # [B, time, 2]
        return decision_up[:, :, 0], decision_time[:, :, 0]
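
# Usage sketch (illustrative only; the embedding size 128 is inferred from the
# GRU input width of 256 = 128 pooled spectral features + 128 conditioning dims):
#
#     tsd = TSD(sample_rate=16000, window_size=512, hop_size=256,
#               mel_bins=64, fmin=50, fmax=8000)
#     wav = torch.randn(2, 160000)      # [B, T] 10 s waveforms
#     emb = torch.randn(2, 128)         # [B, 128] class/reference embedding
#     up, frame = tsd(wav, emb)         # up: [B, n_frames], frame: [B, n_frames // 2]
#     # "up" holds the per-frame probability that the target sound is active.
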
| 5,798 | 33.517857 | 109 | py |
Tim-TSENet | Tim-TSENet-main/generate_dataset/generate_data_fsd_kaggle2.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
import random
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
import soundfile as sf
import math
import pandas as pd
from pathlib import Path
event_ls = ["Acoustic_guitar", "Applause", "Bark", "Bass_drum",
"Burping_or_eructation", "Bus", "Cello", "Chime",
"Clarinet", "Computer_keyboard", "Cough", "Cowbell",
"Double_bass", "Drawer_open_or_close", "Electric_piano",
"Fart", "Finger_snapping", "Fireworks", "Flute", "Glockenspiel",
"Gong", "Gunshot_or_gunfire", "Harmonica", "Hi-hat", "Keys_jangling",
"Knock", "Laughter", "Meow", "Microwave_oven", "Oboe", "Saxophone",
"Scissors", "Shatter", "Snare_drum", "Squeak", "Tambourine", "Tearing",
"Telephone", "Trumpet", "Violin_or_fiddle", "Writing"]
event_to_id = {label : i for i, label in enumerate(event_ls)}
print(event_to_id)
def get_file_label_dict(csv_path):
#strong_csv = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSD50K.ground_truth/dev.csv'
print('strong_csv ',csv_path)
DF_strong = pd.read_csv(csv_path,sep=',',usecols=[0,1])
file_id = DF_strong['fname']
labels = DF_strong['label']
filename_ls = []
label_ls = []
for fname in file_id:
filename_ls.append(fname)
for label in labels:
label_ls.append(label)
dict_ls = {}
for i in range(len(filename_ls)):
dict_ls[filename_ls[i]] = str(event_to_id[label_ls[i]])
return dict_ls
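
# Example (hypothetical CSV rows, for illustration): given a meta file containing
#     fname,label
#     a1b2.wav,Bark
# get_file_label_dict returns {'a1b2.wav': '2'}, since "Bark" maps to id 2 in
# event_to_id above.
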
def region_selection(top_result_mat):
region_index = np.zeros((top_result_mat.shape[-1], 2), dtype=np.int32)
max_value = np.zeros(top_result_mat.shape[-1])
for i in range(top_result_mat.shape[-1]):
max_index = np.argmax(top_result_mat[:, i])
max_value[i] = np.max(top_result_mat[:, i])
if max_index < 100:
max_index = 100
elif max_index > 900:
max_index = 900
l_index = max_index - 100
r_index = max_index + 100
region_index[i, 0] = l_index
region_index[i, 1] = r_index
return region_index, max_value
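
# Sketch (assumed layout: rows are 1000 frames, columns are classes):
#
#     scores = np.random.rand(1000, 3)
#     windows, peaks = region_selection(scores)
#     # windows: (3, 2) int32 [left, right] frame bounds, each of width 200,
#     # centred on the per-class peak and clamped to [0, 1000); peaks: (3,)
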
def check_files():
train_pth = '/apdcephfs/private_helinwang/tsss/ft_local/balanced_train_segments'
eval_pth = '/apdcephfs/private_helinwang/tsss/ft_local/eval_segments'
train_lst = []
eval_lst = []
for root, dirs, files in os.walk(train_pth):
for name in files:
train_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(eval_pth):
for name in files:
eval_lst.append(os.path.join(root, name))
for file in train_lst + eval_lst:
try:
(waveform, sr) = librosa.core.load(file, mono=True)
except:
print('{} Read Error'.format(file))
os.system('rm -rf '+ file)
else:
if waveform.shape[0] != int(sr * 10):
print('{} Wave Length Error: {} samples. Fix it.'.format(file, waveform.shape[0]))
if waveform.shape[0] > int(sr * 10):
waveform = waveform[:int(sr * 10)]
else:
waveform = np.concatenate((waveform, [0.] * (int(sr * 10) - waveform.shape[0])),0)
sf.write(file, waveform, sr, subtype='PCM_24')
else:
print('{} No Error.'.format(file))
print('Finished Checkout!')
def generate_mixed_data(args):
sample_rate = args.sample_rate
duration = args.duration
sample_num = int(sample_rate*duration)
num1 = int(sample_rate*2)
train_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/train'
test_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test'
val_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/val'
train_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/train.txt'
test_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test.txt'
val_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/val.txt'
train_data_pth = '/apdcephfs/private_helinwang/tsss/tsss_data/train'
test_data_pth = '/apdcephfs/private_helinwang/tsss/tsss_data/test'
train_lst = []
test_lst = []
for root, dirs, files in os.walk(train_data_pth):
for name in files:
train_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(test_data_pth):
for name in files:
test_lst.append(os.path.join(root, name))
train_data_num = len(train_lst)
test_data_num = len(test_lst)
# generate train mixed data
rs1 = random.sample(train_lst, train_data_num)
rs1 = rs1 * 2
rs2 = random.sample(train_lst, train_data_num) + random.sample(train_lst, train_data_num)
flag_num = 1
for i in range(len(rs1)):
# resample
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[i], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# check and fix length
if waveform1.shape[0] > num1:
waveform1 = waveform1[:num1]
elif waveform1.shape[0] < num1:
waveform1 = np.concatenate((waveform1, [0.] * (num1 - waveform1.shape[0])), 0)
if waveform2.shape[0] > num1:
waveform2 = waveform2[:num1]
elif waveform2.shape[0] < num1:
waveform2 = np.concatenate((waveform2, [0.] * (num1 - waveform2.shape[0])), 0)
        # energy-based rescaling: wave2 is scaled by the energy ratio
        # E(wave1) / E(wave2) (note: the ratio itself, not its square root)
        wave1[index1:(index1 + num1)] = waveform1
        wave2[index2:(index2 + num1)] = waveform2
        wave2 = wave2 * np.sum(wave1 ** 2) / np.sum(wave2 ** 2)
        wave = wave1 + wave2
# waveform = waveform1 + waveform2
file_name = 'train_' + str(flag_num) + '.wav'
file_name_a = 'train_' + str(flag_num) + '_a.wav'
file_name_b = 'train_' + str(flag_num) + '_b.wav'
file_name_re = 'train_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_a = train_dir + '/' + file_name_a
file_pth_b = train_dir + '/' + file_name_b
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform1, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(train_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[i].split('/')[-1] + '\t' + rs1[i].split('/')[-1] + '\n')
flag_num += 1
# generate val mixed data
rs1 = random.sample(train_lst, int(0.25 * train_data_num))
rs2 = random.sample(train_lst, int(0.25 * train_data_num))
flag_num = 1
for i in range(len(rs1)):
# resample
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[i], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# check and fix length
if waveform1.shape[0] > num1:
waveform1 = waveform1[:num1]
elif waveform1.shape[0] < num1:
waveform1 = np.concatenate((waveform1, [0.] * (num1 - waveform1.shape[0])), 0)
if waveform2.shape[0] > num1:
waveform2 = waveform2[:num1]
elif waveform2.shape[0] < num1:
waveform2 = np.concatenate((waveform2, [0.] * (num1 - waveform2.shape[0])), 0)
# energy normalization
wave1[index1:(index1 + num1)] = waveform1
wave2[index2:(index2 + num1)] = waveform2
wave2 = wave2 * np.sum(wave1 ** 2) / np.sum(wave2 ** 2)
wave = wave1 + wave2
# waveform = waveform1 + waveform2
file_name = 'val_' + str(flag_num) + '.wav'
file_name_a = 'val_' + str(flag_num) + '_a.wav'
file_name_b = 'val_' + str(flag_num) + '_b.wav'
file_name_re = 'val_' + str(flag_num) + '_re.wav'
file_pth = val_dir + '/' + file_name
file_pth_a = val_dir + '/' + file_name_a
file_pth_b = val_dir + '/' + file_name_b
file_pth_re = val_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform1, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(val_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[i].split('/')[-1] + '\t' + rs1[i].split('/')[-1] + '\n')
flag_num += 1
# generate test mixed data
rs1 = random.sample(test_lst, int(0.25 * test_data_num))
rs2 = random.sample(test_lst, int(0.25 * test_data_num))
flag_num = 1
for i in range(len(rs1)):
# resample
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[i], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# check and fix length
if waveform1.shape[0] > num1:
waveform1 = waveform1[:num1]
elif waveform1.shape[0] < num1:
waveform1 = np.concatenate((waveform1, [0.] * (num1 - waveform1.shape[0])), 0)
if waveform2.shape[0] > num1:
waveform2 = waveform2[:num1]
elif waveform2.shape[0] < num1:
waveform2 = np.concatenate((waveform2, [0.] * (num1 - waveform2.shape[0])), 0)
# energy normalization
wave1[index1:(index1 + num1)] = waveform1
wave2[index2:(index2 + num1)] = waveform2
wave2 = wave2 * np.sum(wave1 ** 2) / np.sum(wave2 ** 2)
wave = wave1 + wave2
# waveform = waveform1 + waveform2
file_name = 'test_' + str(flag_num) + '.wav'
file_name_a = 'test_' + str(flag_num) + '_a.wav'
file_name_b = 'test_' + str(flag_num) + '_b.wav'
file_name_re = 'test_' + str(flag_num) + '_re.wav'
file_pth = test_dir + '/' + file_name
file_pth_a = test_dir + '/' + file_name_a
file_pth_b = test_dir + '/' + file_name_b
file_pth_re = test_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform1, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(test_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[i].split('/')[-1] + '\t' + rs1[i].split('/')[-1] + '\n')
flag_num += 1
print('Finished Mixed Data!')
def generate_mixed_offset_data(args):
sample_rate = args.sample_rate
sample_num = int(sample_rate * 5)
num1 = int(sample_rate * 3)
test_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data'
test_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/test.txt'
test_data_pth = '/apdcephfs/private_helinwang/tsss/ft_local/ESC-50-master/audio'
test_lst = []
for root, dirs, files in os.walk(test_data_pth):
for name in files:
test_lst.append(os.path.join(root, name))
flag_num = 1
test_data_num = len(test_lst)
rs1 = random.sample(test_lst, test_data_num) + random.sample(test_lst, test_data_num) + random.sample(test_lst, test_data_num) +random.sample(test_lst, test_data_num)
for i in range(len(rs1)):
cls1 = rs1[i].split('-')[-1]
while True:
rs2 = random.sample(test_lst, 1)
cls2 = rs2[0].split('-')[-1]
if cls2 == cls1:
break
while True:
rs3 = random.sample(test_lst, 1)
cls3 = rs3[0].split('-')[-1]
if cls3 != cls1:
break
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# energy normalization
wave1[index1:(index1 + num1)] = waveform1[index1:(index1 + num1)]
wave2[index2:(index2 + num1)] = waveform3[index2:(index2 + num1)]
wave2 = wave2 * np.sum(wave1 ** 2) / np.sum(wave2 ** 2)
wave = wave1 + wave2
# waveform = waveform1 + waveform3
file_name = 'test_offset_' + str(flag_num) + '.wav'
file_name_a = 'test_offset_' + str(flag_num) + '_a.wav'
file_name_b = 'test_offset_' + str(flag_num) + '_b.wav'
file_name_re = 'test_offset_' + str(flag_num) + '_re.wav'
file_pth = test_dir + '/' + file_name
file_pth_a = test_dir + '/' + file_name_a
file_pth_b = test_dir + '/' + file_name_b
file_pth_re = test_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform2, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(test_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs3[0].split('/')[-1]+ '\t' + rs2[0].split('/')[-1] + '\n')
flag_num += 1
print('Finished Mixed Offset Data!')
def data_pre(audio, audio_length, fs, audio_skip):
    # Trim near-silent regions: scan overlapping windows of 2 * stride samples and,
    # whenever a window's peak amplitude falls below 0.0005, shift the remaining
    # audio left over it. Note that `audio` is modified in place.
    stride = round(audio_skip * fs / 2)
    loop = round((audio_length * fs) // stride - 1)
    i = 0
    out = audio
    while i < loop:
        win_data = out[i * stride: (i + 2) * stride]
        maxamp = np.max(np.abs(win_data))
        if maxamp < 0.0005:
            loop = loop - 2
            out[i * stride: (loop + 1) * stride] = out[(i + 2) * stride: (loop + 3) * stride]
        else:
            i = i + 1
    length = (audio_length * fs) // stride - loop - 1
    if length == 0:  # nothing was removed
        return out
    else:
        return out[:(loop + 1) * stride]
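
# Sketch (assumed numbers, for illustration): with fs=16000 and audio_skip=0.2 the
# stride is 1600 samples (100 ms), so data_pre scans 200 ms windows and drops the
# near-silent ones. The .copy() matters because data_pre mutates its input:
#
#     wav = np.concatenate([np.zeros(16000), np.random.randn(32000) * 0.1])
#     trimmed = data_pre(wav.copy(), audio_length=3, fs=16000, audio_skip=0.2)
#     # trimmed is shorter than wav wherever near-silent 200 ms windows were removed
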
# out of domain
def generate_data_train(args):
sample_rate = args.sample_rate
sample_num = round(sample_rate * 10)
csv_path = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.meta/train_post_competition.csv'
train_dir = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/train'
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/train.txt'
data_pth = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.audio_train'
back_pth = '/apdcephfs/private_donchaoyang/tsss/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
data_lst = []
back_lst = []
name_dict = get_file_label_dict(csv_path)# get dict {filename: class_name}
    for root, dirs, files in os.walk(data_pth):  # collect every audio file under the directory
for name in files:
data_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(back_pth):
for name in files:
back_lst.append(os.path.join(root, name))
flag_num = 1
data_num = len(data_lst)
print(data_num)
mix_lst = [1,2,3]
    rs1 = random.sample(data_lst, data_num)+\
        random.sample(data_lst, data_num)+\
        random.sample(data_lst, data_num)+\
        random.sample(data_lst, data_num)+\
        random.sample(data_lst, data_num)  # each clip gets five chances to be the target sound
for i in range(len(rs1)):
real_name = Path(rs1[i]).name
cls1 = name_dict[real_name] # get class label
        while True:
            rs2 = random.sample(data_lst, 1)  # randomly pick one candidate clip
            rs2_real = Path(rs2[0]).name
            cls2 = name_dict[rs2_real]
            if cls2 == cls1 and rs2_real != real_name:  # same class, but never the same file
                break
while True:
rs3 = random.sample(data_lst, 1)
rs3_real = Path(rs3[0]).name
cls3 = name_dict[rs3_real]
if cls3 != cls1:
break
while True:
rs4 = random.sample(data_lst, 1)
rs4_real = Path(rs4[0]).name
cls4 = name_dict[rs4_real]
if cls4 != cls1:
break
while True:
rs5 = random.sample(data_lst, 1)
rs5_real = Path(rs5[0]).name
cls5 = name_dict[rs5_real]
if cls5 != cls1:
break
rs6 = random.sample(back_lst, 1)
rs7 = random.sample(mix_lst, 1)
mix_num = rs7[0]
(waveform1_, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True) # target sound
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3_, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
(waveform4_, _) = librosa.core.load(rs4[0], sr=sample_rate, mono=True)
(waveform5_, _) = librosa.core.load(rs5[0], sr=sample_rate, mono=True)
(background, _) = librosa.core.load(rs6[0], sr=sample_rate, mono=True)
if waveform1_.shape[0] >= sample_num:
waveform1_ = waveform1_[:sample_num-100]
if waveform2.shape[0] > sample_num:
waveform2 = waveform2[:sample_num]
if waveform3_.shape[0] >= sample_num:
waveform3_ = waveform3_[:sample_num-100]
if waveform4_.shape[0] >= sample_num:
waveform4_ = waveform4_[:sample_num-100]
if waveform5_.shape[0] >= sample_num:
waveform5_ = waveform5_[:sample_num-100]
if background.shape[0] > sample_num:
background = background[:sample_num]
elif background.shape[0] < sample_num:
background = np.concatenate((background, [0.] * (sample_num - background.shape[0])), 0)
waveform1 = data_pre(waveform1_, audio_length=int(waveform1_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform3 = data_pre(waveform3_, audio_length=int(waveform3_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform4 = data_pre(waveform4_, audio_length=int(waveform4_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform5 = data_pre(waveform5_, audio_length=int(waveform5_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
num1 = waveform1.shape[0]
num3 = waveform3.shape[0]
num4 = waveform4.shape[0]
num5 = waveform5.shape[0]
        index1 = random.randint(0, sample_num - num1 - 1)  # random onset position inside the 10 s clip
index3 = random.randint(0, sample_num - num3 - 1)
index4 = random.randint(0, sample_num - num4 - 1)
index5 = random.randint(0, sample_num - num5 - 1)
onset = 10.0 * index1 / sample_num
offset = 10.0 * (index1 + num1) / sample_num
onset3 = 10.0 * index3 / sample_num
offset3 = 10.0 * (index3 + num3) / sample_num
onset4 = 10.0 * index4 / sample_num
offset4 = 10.0 * (index4 + num4) / sample_num
onset5 = 10.0 * index5 / sample_num
offset5 = 10.0 * (index5 + num5) / sample_num
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
wave3 = np.zeros(sample_num)
wave4 = np.zeros(sample_num)
wave5 = np.zeros(sample_num)
        snr = 10. ** (float(random.uniform(-5, 10)) / 20.)  # random target-to-interference SNR in [-5, 10] dB, as a linear amplitude factor
        waveform3 = waveform3 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
                math.sqrt(np.mean(np.abs(waveform3) ** 2)) * snr)  # scale interferer RMS to target RMS / snr
        waveform4 = waveform4 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
                math.sqrt(np.mean(np.abs(waveform4) ** 2)) * snr)
        waveform5 = waveform5 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
                math.sqrt(np.mean(np.abs(waveform5) ** 2)) * snr)
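        # Why this scaling works (explanatory note, not original commentary): with
        # the linear factor snr = 10 ** (dB / 20), setting
        # rms(interferer) = rms(target) / snr yields
        # 20 * log10(rms(target) / rms(interferer)) = dB, i.e. the sampled SNR.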
        if num1 == 0:  # guard: data_pre removed all audible content, skip this sample
            continue
        wave1[index1:index1 + num1] = waveform1
wave3[index3:index3 + num3] = waveform3
wave4[index4:index4 + num4] = waveform4
wave5[index5:index5 + num5] = waveform5
wave2[:waveform2.shape[0]] = waveform2
        snr2 = 10. ** (float(random.uniform(5, 20)) / 20.)
        background = background * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
                math.sqrt(np.mean(np.abs(background) ** 2)) * snr2)  # background is likewise placed 5-20 dB below the target
        if mix_num == 1:  # one interfering event
            wave = background + wave1 + wave3
        elif mix_num == 2:  # two interfering events
            wave = background + wave1 + wave3 + wave4
        else:  # three interfering events
            wave = background + wave1 + wave3 + wave4 + wave5
file_name = 'train_' + str(flag_num) + '.wav'
file_name_lab = 'train_' + str(flag_num) + '_lab.wav'
file_name_re = 'train_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_lab = train_dir + '/' + file_name_lab
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_16') # save mixture
sf.write(file_pth_lab, wave1, sample_rate, subtype='PCM_16') # save clean audio
sf.write(file_pth_re, wave2, sample_rate, subtype='PCM_16') # save reference audio
print('Save to: {}'.format(file_pth))
        if mix_num == 1:  # write the metadata line for this mixture
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\n')
elif mix_num == 2:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\n')
else:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\t' +
str(onset5) + '\t' + str(offset5) + '\t' + cls5 + '\n')
flag_num += 1
print('Finished Training Data Generation!')
def generate_data_val(args):
sample_rate = args.sample_rate
sample_num = round(sample_rate * 10)
csv_path = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.meta/test_post_competition_scoring_clips.csv'
train_dir = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/val'
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/val.txt'
data_pth = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.audio_test'
back_pth = '/apdcephfs/private_donchaoyang/tsss/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
data_lst = []
back_lst = []
name_dict = get_file_label_dict(csv_path)
for root, dirs, files in os.walk(data_pth):
for name in files:
data_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(back_pth):
for name in files:
back_lst.append(os.path.join(root, name))
flag_num = 1
data_num = len(data_lst)
mix_lst = [1,2,3]
rs1 = random.sample(data_lst, data_num)
for i in range(len(rs1)):
real_name = Path(rs1[i]).name
cls1 = name_dict[real_name] # get class label
while True:
rs2 = random.sample(data_lst, 1)
real_name2 = Path(rs2[0]).name
cls2 = name_dict[real_name2] # get class label
if cls2 == cls1 and real_name2 != real_name:
break
while True:
rs3 = random.sample(data_lst, 1)
real_name3 = Path(rs3[0]).name
cls3 = name_dict[real_name3] # get class label
if cls3 != cls1:
break
while True:
rs4 = random.sample(data_lst, 1)
real_name4 = Path(rs4[0]).name
cls4 = name_dict[real_name4] # get class label
if cls4 != cls1:
break
while True:
rs5 = random.sample(data_lst, 1)
real_name5 = Path(rs5[0]).name
cls5 = name_dict[real_name5] # get class label
if cls5 != cls1:
break
rs6 = random.sample(back_lst, 1)
rs7 = random.sample(mix_lst, 1)
mix_num = rs7[0]
(waveform1_, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3_, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
(waveform4_, _) = librosa.core.load(rs4[0], sr=sample_rate, mono=True)
(waveform5_, _) = librosa.core.load(rs5[0], sr=sample_rate, mono=True)
(background, _) = librosa.core.load(rs6[0], sr=sample_rate, mono=True)
if waveform1_.shape[0] >= sample_num:
waveform1_ = waveform1_[:sample_num-100]
if waveform2.shape[0] > sample_num:
waveform2 = waveform2[:sample_num]
if waveform3_.shape[0] >= sample_num:
waveform3_ = waveform3_[:sample_num-100]
if waveform4_.shape[0] >= sample_num:
waveform4_ = waveform4_[:sample_num-100]
if waveform5_.shape[0] >= sample_num:
waveform5_ = waveform5_[:sample_num-100]
if background.shape[0] > sample_num:
background = background[:sample_num]
elif background.shape[0] < sample_num:
background = np.concatenate((background, [0.] * (sample_num - background.shape[0])), 0)
waveform1 = data_pre(waveform1_, audio_length=int(waveform1_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform3 = data_pre(waveform3_, audio_length=int(waveform3_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform4 = data_pre(waveform4_, audio_length=int(waveform4_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform5 = data_pre(waveform5_, audio_length=int(waveform5_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
num1 = waveform1.shape[0]
num3 = waveform3.shape[0]
num4 = waveform4.shape[0]
num5 = waveform5.shape[0]
index1 = random.randint(0, sample_num - num1 - 1)
index3 = random.randint(0, sample_num - num3 - 1)
index4 = random.randint(0, sample_num - num4 - 1)
index5 = random.randint(0, sample_num - num5 - 1)
onset = 10.0 * index1 / sample_num
offset = 10.0 * (index1 + num1) / sample_num
onset3 = 10.0 * index3 / sample_num
offset3 = 10.0 * (index3 + num3) / sample_num
onset4 = 10.0 * index4 / sample_num
offset4 = 10.0 * (index4 + num4) / sample_num
onset5 = 10.0 * index5 / sample_num
offset5 = 10.0 * (index5 + num5) / sample_num
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
wave3 = np.zeros(sample_num)
wave4 = np.zeros(sample_num)
wave5 = np.zeros(sample_num)
snr = 10. ** (float(random.uniform(-5, 10)) / 20.)
waveform3 = waveform3 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform3) ** 2)) * snr)
waveform4 = waveform4 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform4) ** 2)) * snr)
waveform5 = waveform5 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform5) ** 2)) * snr)
        if num1 == 0:  # guard: data_pre removed all audible content, skip this sample
            continue
        wave1[index1:index1 + num1] = waveform1
wave3[index3:index3 + num3] = waveform3
wave4[index4:index4 + num4] = waveform4
wave5[index5:index5 + num5] = waveform5
wave2[:waveform2.shape[0]] = waveform2
snr2 = 10. ** (float(random.uniform(5, 20)) / 20.)
background = background * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(background) ** 2)) * snr2)
if mix_num == 1:
wave = background + wave1 + wave3
elif mix_num == 2:
wave = background + wave1 + wave3 + wave4
else:
wave = background + wave1 + wave3 + wave4 + wave5
file_name = 'val_' + str(flag_num) + '.wav'
file_name_lab = 'val_' + str(flag_num) + '_lab.wav'
file_name_re = 'val_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_lab = train_dir + '/' + file_name_lab
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_16')
sf.write(file_pth_lab, wave1, sample_rate, subtype='PCM_16')
sf.write(file_pth_re, wave2, sample_rate, subtype='PCM_16')
print('Save to: {}'.format(file_pth))
if mix_num == 1:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\n')
elif mix_num == 2:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\n')
else:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\t' +
str(onset5) + '\t' + str(offset5) + '\t' + cls5 + '\n')
flag_num += 1
print('Finished Val Data Generation!')
def generate_data_test(args):
sample_rate = args.sample_rate
sample_num = round(sample_rate * 10)
csv_path = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.meta/test_post_competition_scoring_clips.csv'
train_dir = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/test'
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/test.txt'
data_pth = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.audio_test'
back_pth = '/apdcephfs/private_donchaoyang/tsss/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
data_lst = []
back_lst = []
name_dict = get_file_label_dict(csv_path)
for root, dirs, files in os.walk(data_pth):
for name in files:
data_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(back_pth):
for name in files:
back_lst.append(os.path.join(root, name))
flag_num = 1
data_num = len(data_lst)
mix_lst = [1,2,3]
rs1 = random.sample(data_lst, data_num)
for i in range(len(rs1)):
real_name = Path(rs1[i]).name
cls1 = name_dict[real_name] # get class label
while True:
rs2 = random.sample(data_lst, 1)
real_name2 = Path(rs2[0]).name
cls2 = name_dict[real_name2] # get class label
if cls2 == cls1 and real_name2 != real_name:
break
while True:
rs3 = random.sample(data_lst, 1)
real_name3 = Path(rs3[0]).name
cls3 = name_dict[real_name3] # get class label
if cls3 != cls1:
break
while True:
rs4 = random.sample(data_lst, 1)
real_name4 = Path(rs4[0]).name
cls4 = name_dict[real_name4] # get class label
if cls4 != cls1:
break
while True:
rs5 = random.sample(data_lst, 1)
real_name5 = Path(rs5[0]).name
cls5 = name_dict[real_name5] # get class label
if cls5 != cls1:
break
rs6 = random.sample(back_lst, 1)
rs7 = random.sample(mix_lst, 1)
mix_num = rs7[0]
(waveform1_, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3_, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
(waveform4_, _) = librosa.core.load(rs4[0], sr=sample_rate, mono=True)
(waveform5_, _) = librosa.core.load(rs5[0], sr=sample_rate, mono=True)
(background, _) = librosa.core.load(rs6[0], sr=sample_rate, mono=True)
if waveform1_.shape[0] >= sample_num:
waveform1_ = waveform1_[:sample_num-100]
if waveform2.shape[0] > sample_num:
waveform2 = waveform2[:sample_num]
if waveform3_.shape[0] >= sample_num:
waveform3_ = waveform3_[:sample_num-100]
if waveform4_.shape[0] >= sample_num:
waveform4_ = waveform4_[:sample_num-100]
if waveform5_.shape[0] >= sample_num:
waveform5_ = waveform5_[:sample_num-100]
if background.shape[0] > sample_num:
background = background[:sample_num]
elif background.shape[0] < sample_num:
background = np.concatenate((background, [0.] * (sample_num - background.shape[0])), 0)
waveform1 = data_pre(waveform1_, audio_length=int(waveform1_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform3 = data_pre(waveform3_, audio_length=int(waveform3_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform4 = data_pre(waveform4_, audio_length=int(waveform4_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform5 = data_pre(waveform5_, audio_length=int(waveform5_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
num1 = waveform1.shape[0]
num3 = waveform3.shape[0]
num4 = waveform4.shape[0]
num5 = waveform5.shape[0]
index1 = random.randint(0, sample_num - num1 - 1)
index3 = random.randint(0, sample_num - num3 - 1)
index4 = random.randint(0, sample_num - num4 - 1)
index5 = random.randint(0, sample_num - num5 - 1)
onset = 10.0 * index1 / sample_num
offset = 10.0 * (index1 + num1) / sample_num
onset3 = 10.0 * index3 / sample_num
offset3 = 10.0 * (index3 + num3) / sample_num
onset4 = 10.0 * index4 / sample_num
offset4 = 10.0 * (index4 + num4) / sample_num
onset5 = 10.0 * index5 / sample_num
offset5 = 10.0 * (index5 + num5) / sample_num
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
wave3 = np.zeros(sample_num)
wave4 = np.zeros(sample_num)
wave5 = np.zeros(sample_num)
snr = 10. ** (float(random.uniform(-5, 10)) / 20.)
waveform3 = waveform3 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform3) ** 2)) * snr)
waveform4 = waveform4 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform4) ** 2)) * snr)
waveform5 = waveform5 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform5) ** 2)) * snr)
        if num1 == 0:  # guard: data_pre removed all audible content, skip this sample
            continue
        wave1[index1:index1 + num1] = waveform1
wave3[index3:index3 + num3] = waveform3
wave4[index4:index4 + num4] = waveform4
wave5[index5:index5 + num5] = waveform5
wave2[:waveform2.shape[0]] = waveform2
snr2 = 10. ** (float(random.uniform(5, 20)) / 20.)
background = background * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(background) ** 2)) * snr2)
if mix_num == 1:
wave = background + wave1 + wave3
elif mix_num == 2:
wave = background + wave1 + wave3 + wave4
else:
wave = background + wave1 + wave3 + wave4 + wave5
file_name = 'test_' + str(flag_num) + '.wav'
file_name_lab = 'test_' + str(flag_num) + '_lab.wav'
file_name_re = 'test_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_lab = train_dir + '/' + file_name_lab
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_16')
sf.write(file_pth_lab, wave1, sample_rate, subtype='PCM_16')
sf.write(file_pth_re, wave2, sample_rate, subtype='PCM_16')
print('Save to: {}'.format(file_pth))
if mix_num == 1:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\n')
elif mix_num == 2:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\n')
else:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\t' +
str(onset5) + '\t' + str(offset5) + '\t' + cls5 + '\n')
flag_num += 1
print('Finished Test Data Generation!')
def save_data(args):
train_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/data/train'
test_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/data/test'
noise_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/data/noise'
train_pth = '/apdcephfs/private_helinwang/tsss/ft_local/ESC-50-master/train'
test_pth = '/apdcephfs/private_helinwang/tsss/ft_local/ESC-50-master/test/audio'
noise_pth = '/apdcephfs/share_1316500/helinwang/data/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
sample_rate = args.sample_rate
train_dict = {}
test_dict = {}
noise_dict = {}
for root, dirs, files in os.walk(train_pth):
for name in files:
path_ = os.path.join(root, name)
path_2 = os.path.join(train_dir, name)
train_dict[path_] = path_2
for root, dirs, files in os.walk(test_pth):
for name in files:
path_ = os.path.join(root, name)
path_2 = os.path.join(test_dir, name)
test_dict[path_] = path_2
for root, dirs, files in os.walk(noise_pth):
for name in files:
path_ = os.path.join(root, name)
path_2 = os.path.join(noise_dir, name)
noise_dict[path_] = path_2
for i in train_dict.keys():
(wave, _) = librosa.core.load(i, sr=sample_rate, mono=True)
wave = data_pre(wave, audio_length=5, fs=sample_rate, audio_skip=0.2)
sf.write(train_dict[i], wave, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(train_dict[i]))
for i in test_dict.keys():
(wave, _) = librosa.core.load(i, sr=sample_rate, mono=True)
wave = data_pre(wave, audio_length=5, fs=sample_rate, audio_skip=0.2)
sf.write(test_dict[i], wave, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(test_dict[i]))
for i in noise_dict.keys():
(wave, _) = librosa.core.load(i, sr=sample_rate, mono=True)
if wave.shape[0] > round(sample_rate*10):
wave = wave[:round(sample_rate*10)]
elif wave.shape[0] < round(sample_rate*10):
wave = np.concatenate((wave, [0.] * (round(sample_rate*10) - wave.shape[0])), 0)
sf.write(noise_dict[i], wave, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(noise_dict[i]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--sample_rate', type=int, default=16000)
parser_at.add_argument('--duration', type=int, default=4)
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
# parser_at.add_argument('--model_type', type=str, required=True)
# parser_at.add_argument('--checkpoint_path', type=str, required=True)
# parser_at.add_argument('--audio_path', type=str, required=True)
# parser_at.add_argument('--cuda', action='store_true', default=False)
parser_sed = subparsers.add_parser('sound_event_detection')
parser_sed.add_argument('--sample_rate', type=int, default=16000)
parser_sed.add_argument('--duration', type=int, default=4)
parser_sed.add_argument('--window_size', type=int, default=1024)
parser_sed.add_argument('--hop_size', type=int, default=320)
parser_sed.add_argument('--mel_bins', type=int, default=64)
parser_sed.add_argument('--fmin', type=int, default=50)
parser_sed.add_argument('--fmax', type=int, default=14000)
parser_sed.add_argument('--model_type', type=str, required=True)
parser_sed.add_argument('--checkpoint_path', type=str, required=True)
parser_sed.add_argument('--audio_path', type=str, required=True)
parser_sed.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
# check_files()
# if args.mode == 'audio_tagging':
# audio_tagging(args)
#
# elif args.mode == 'sound_event_detection':
# sound_event_detection(args)
#
# else:
# raise Exception('Error argument!')
# generate_mixed_data(args)
# generate_mixed_offset_data(args)
generate_data_train(args)
generate_data_val(args)
generate_data_test(args)
#save_data(args)
| 46,868 | 46.874362 | 170 | py |
Tim-TSENet | Tim-TSENet-main/generate_dataset/create_scp.py | import os
train_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_mix.scp'
train_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_s1.scp'
train_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_re.scp'
test_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tt_mix.scp'
test_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tt_s1.scp'
test_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tt_re.scp'
val_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/val_mix.scp'
val_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/val_s1.scp'
val_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/val_re.scp'
train_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/train'
test_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/test'
vl_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/val'
tr_mix = open(train_mix_scp,'w')
tr_s1 = open(train_s1_scp,'w')
tr_re = open(train_re_scp,'w')
for root, dirs, files in os.walk(train_mix):
files.sort()
for file in files:
if 'lab.wav' in file:
tr_s1.write(file+" "+root+'/'+file)
tr_s1.write('\n')
elif 're.wav' in file:
tr_re.write(file + " " + root + '/' + file)
tr_re.write('\n')
else:
tr_mix.write(file + " " + root + '/' + file)
tr_mix.write('\n')
tr_mix.close()
tr_s1.close()
tr_re.close()
tt_mix = open(test_mix_scp,'w')
tt_s1 = open(test_s1_scp,'w')
tt_re = open(test_re_scp,'w')
for root, dirs, files in os.walk(test_mix):
files.sort()
for file in files:
if 'lab.wav' in file:
tt_s1.write(file+" "+root+'/'+file)
tt_s1.write('\n')
elif 're.wav' in file:
tt_re.write(file + " " + root + '/' + file)
tt_re.write('\n')
else:
tt_mix.write(file + " " + root + '/' + file)
tt_mix.write('\n')
tt_mix.close()
tt_s1.close()
tt_re.close()
val_mix = open(val_mix_scp,'w')
val_s1 = open(val_s1_scp,'w')
val_re = open(val_re_scp,'w')
for root, dirs, files in os.walk(vl_mix):
files.sort()
for file in files:
if 'lab.wav' in file:
val_s1.write(file+" "+root+'/'+file)
val_s1.write('\n')
elif 're.wav' in file:
val_re.write(file + " " + root + '/' + file)
val_re.write('\n')
else:
val_mix.write(file + " " + root + '/' + file)
val_mix.write('\n')
val_mix.close()
val_s1.close()
val_re.close()
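
# The .scp files written above follow the Kaldi-style "key path" convention, one
# entry per line (inferred from the writes above; paths are illustrative):
#
#     train_1.wav /apdcephfs/.../train/train_1.wav
#     train_1_lab.wav /apdcephfs/.../train/train_1_lab.wav
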
| 2,673 | 34.653333 | 89 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/test_tasnet_one_hot.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot,TSDNet_plus_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import argparse
import torch
import time
import soundfile as sf
import metrics  # project-local metrics.py
import tsd_utils as utils  # project-local tsd_utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
def time_to_frame(tm):
    # Convert seconds to a frame index, assuming 312 frames per 10 s clip
    # (roughly a 512-sample hop at 16 kHz, since 160000 / 512 = 312.5).
    return int(tm / (10.0 / 312))
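
# Sanity check (not from the original repo): time_to_frame(1.0) == 31 and
# time_to_frame(10.0) == 312.
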
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Separation():
def __init__(self, mix_scp, ref_scp, inf_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
super(Separation, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss, self.onset, self.offset = handle_scp_inf(inf_scp)
self.key = list(self.mix_audio.keys())
opt = parse(yaml_path)
tsdnet = TSDNet_one_hot()
dicts = torch.load(model, map_location='cpu')
tsdnet.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.tsdnet=tsdnet.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.pred_file = pred_file
self.label_path = opt['label_path']
self.save_tsv_path = opt['save_tsv_path']
def test(self):
self.tsdnet.eval()
time_predictions = []
class_result_file = 'class_result_{}.txt'
event_file = 'event_{}.txt'
segment_file = 'segment_{}.txt'
with torch.no_grad():
for i in range(len(self.key)):
index = self.key[i]
ref_index = index.replace('.wav', '_re.wav')
cls = str(self.clss[index])
onset = self.onset[index]
offset = self.offset[index]
onset_frame = time_to_frame(onset)
offset_frame = time_to_frame(offset)
cls_vec = torch.zeros(41)
cls_vec[self.clss[index]] = 1.
cls_vec = cls_vec.unsqueeze(0)
cls_index = cls_vec.argmax(1)
# cls_index = torch.from_numpy(cls_index)
print('i ',i)
cls_index = cls_index.to(self.device)
cls = 'class_' + cls
mix = read_wav(self.mix_audio[index])
ref = read_wav(self.ref_audio[ref_index])
mix = mix.to(self.device)
ref = ref.to(self.device)
mix = mix[None,:]
ref = ref[None,:]
# cls_index = cls_index[None,:]
# print('cls_index ',cls_index.shape)
est_cls, est_tsd_time,est_tsd_time_up, sim_cos = self.tsdnet(mix, ref,cls_index)
# x_cls: <bs,50>
# out_tsd_time: <bs,t/2>
# out_tsd_up: <bs,t>
                pred = est_tsd_time_up.detach().cpu().numpy()  # move the frame-level scores to a numpy array
thres = 0.5
window_size = 1
filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
                decoded_pred = []
                decoded_pred_ = utils.decode_with_timestamps(str(cls), filtered_pred[0, :])
                if len(decoded_pred_) == 0:  # no detection: append a dummy (0, 0) event so scoring still has a row
                    decoded_pred_.append((str(cls), 0, 0))
decoded_pred.append(decoded_pred_)
                for num_batch in range(len(decoded_pred)):  # batch size is 1 at test time
filename = index.split('/')[-1]
                    label_prediction = decoded_pred[num_batch]  # decoded (label, onset, offset) tuples for this clip
for event_label, onset, offset in label_prediction:
time_predictions.append({
'filename': filename,
'onset': onset,
'offset': offset,
                            'event_label': str(event_label)})  # final prediction: event label plus onset/offset
assert len(time_predictions) > 0, "No outputs, lower threshold?"
            pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset', 'event_label'])  # all predicted events with frame-level times
            time_ratio = 10.0 / pred.shape[1]  # seconds per frame
            pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio)  # convert frame indices to seconds
label_path = self.label_path
test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
print('test_data_filename ',test_data_filename)
pred_file = 'hard_predictions_{}.txt'
if pred_file: # dump the hard (binarized) predictions as a tsv
pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
index=False, sep="\t")
strong_labels_df = pd.read_csv(self.label_path, sep='\t') # ground-truth strong labels
if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
sed_eval = True
if sed_eval:
event_result, segment_result = metrics.compute_metrics(
strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
print("Event Based Results:\n{}".format(event_result))
event_results_dict = event_result.results_class_wise_metrics()
class_wise_results_df = pd.DataFrame().from_dict({
f: event_results_dict[f]['f_measure']
for f in event_results_dict.keys()}).T
class_wise_results_df.to_csv(os.path.join(
self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
print("Class wise F1-Macro:\n{}".format(
tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
if event_file:
with open(os.path.join(self.save_tsv_path,
event_file.format(test_data_filename)), 'w') as wp:
wp.write(str(event_result))
print("=" * 100)
print(segment_result)
if segment_file:
with open(os.path.join(self.save_tsv_path, segment_file.format(test_data_filename)), 'w') as wp:
wp.write(str(segment_result))
event_based_results = pd.DataFrame(
event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
segment_based_results = pd.DataFrame(
segment_result.results_class_wise_average_metrics()
['f_measure'], index=['segment_based'])
result_quick_report = pd.concat((event_based_results, segment_based_results))
# write a compact quick report (event- and segment-based F1)
with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-yaml', type=str, default='./config/Conv_Tasnet/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_new/TSDNet_one_hot_scale156/best.pt', help="Path to model file.")
parser.add_argument(
'-max_num', type=int, default=10000, help="Max number of testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf.scp',
args.yaml, args.model, gpuid)
separation.test()
if __name__ == "__main__":
main()
| 9,953 | 48.034483 | 171 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/test_tasnet_wav.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav, read_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import Conv_TasNet
from logger.set_logger import setup_logger
import logging
from config.option import parse
import tqdm
class Separation():
def __init__(self, mix_path, yaml_path, model, gpuid):
super(Separation, self).__init__()
self.mix_path = mix_path
self.mix = read_wav(mix_path)
opt = parse(yaml_path, is_tain=False) # 'is_tain' matches the (misspelled) keyword in config.option.parse
net = Conv_TasNet(**opt['Conv_Tasnet'])
dicts = torch.load(model, map_location='cpu')
net.load_state_dict(dicts["model_state_dict"])
self.logger = logging.getLogger(__name__)
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.net=net.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.gpuid=tuple(gpuid)
def inference(self, file_path):
with torch.no_grad():
egs=self.mix.to(self.device)
norm = torch.norm(egs,float('inf'))
if len(self.gpuid) != 0:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach().cpu()) for s in ests]
else:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach()) for s in ests]
index=0
for s in spks:
s = s[:egs.shape[-1]] # trim to the input length
# restore the input's peak level
s = s*norm/torch.max(torch.abs(s))
s = s.unsqueeze(0)
index += 1
os.makedirs(file_path+'/spk'+str(index), exist_ok=True)
filename = file_path+'/spk'+str(index)+'/'+os.path.basename(self.mix_path)
write_wav(filename, s, 8000)
self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-mix_scp', type=str, default='../create_scp/tt_mix.scp', help='Path to mix scp file.')
parser.add_argument(
'-yaml', type=str, default='./config/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='./checkpoint/Conv_Tasnet_skip/best.pt', help="Path to model file.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/conv_tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation(args.mix_scp, args.yaml, args.model, gpuid)
separation.inference(args.save_path)
if __name__ == "__main__":
main()
| 2,864 | 37.2 | 104 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/test_tasnet.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import time
import soundfile as sf
import metrics # import metrics.py file
import tsd_utils as utils # import utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
import datetime
import uuid
from pathlib import Path
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
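# Usage sketch (the filename here is hypothetical): for a mono file,
#   wav, sr = read_wav('mix.wav', return_rate=True)
# returns a 1-D tensor of samples (the channel dim is squeezed away) plus the sample rate.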
class Separation():
def __init__(self, mix_scp, ref_scp, inf_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
super(Separation, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss,_,_ = handle_scp_inf(inf_scp)
self.key = list(self.mix_audio.keys())
opt = parse(yaml_path)
tsdnet = TSDNet()
dicts = torch.load(model, map_location='cpu')
tsdnet.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.tsdnet=tsdnet.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.pred_file = pred_file
self.label_path = opt['label_path']
self.save_tsv_path = os.path.join(opt['save_tsv_path'], opt['name'],
"{}_{}".format(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), uuid.uuid1().hex))
Path(self.save_tsv_path).mkdir(exist_ok=True, parents=True) # one unique result dir per run
def test(self):
self.tsdnet.eval()
time_predictions = []
class_result_file = 'class_result_{}.txt'
event_file = 'event_{}.txt'
segment_file = 'segment_{}.txt'
with torch.no_grad():
for i in range(len(self.key)):
index = self.key[i]
ref_index = index.replace('.wav', '_re.wav')
cls = str(self.clss[index])
cls = 'class_' + cls
mix = read_wav(self.mix_audio[index])
ref = read_wav(self.ref_audio[ref_index])
mix = mix.to(self.device)
ref = ref.to(self.device)
mix = mix[None,:]
ref = ref[None,:]
x_cls, out_tsd_time, out_tsd_up = self.tsdnet(mix, ref)
# x_cls: <bs,50>
# out_tsd_time: <bs,t/2>
# out_tsd_up: <bs,t>
pred = out_tsd_up.detach().cpu().numpy() # convert to numpy for post-processing
thres = 0.5
window_size = 1
filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
decoded_pred = []
decoded_pred_ = utils.decode_with_timestamps(str(cls), filtered_pred[0,:])
if len(decoded_pred_) == 0: # no event detected: emit a dummy (cls, 0, 0) entry
decoded_pred_.append((str(cls),0,0))
decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
#print('len(decoded_pred) ',len(decoded_pred))
filename = index.split('/')[-1]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
for event_label, onset, offset in label_prediction:
time_predictions.append({
'filename': filename,
'onset': onset,
'offset': offset,
'event_label': str(event_label)}) # get real predict results,including event_label,onset,offset
assert len(time_predictions) > 0, "No outputs, lower threshold?"
pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset','event_label']) # one row per detected event with its time span
time_ratio = 10.0/pred.shape[1] # seconds per frame, assuming 10 s clips
pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio) # convert frame indices to seconds
label_path = self.label_path
test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
print('test_data_filename ',test_data_filename)
pred_file = 'hard_predictions_{}.txt'
if pred_file: # dump the hard (binarized) predictions as a tsv
pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
index=False, sep="\t")
strong_labels_df = pd.read_csv(self.label_path, sep='\t') # ground-truth strong labels
if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
sed_eval = True
if sed_eval:
event_result, segment_result = metrics.compute_metrics(
strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
print("Event Based Results:\n{}".format(event_result))
event_results_dict = event_result.results_class_wise_metrics()
class_wise_results_df = pd.DataFrame().from_dict({
f: event_results_dict[f]['f_measure']
for f in event_results_dict.keys()}).T
class_wise_results_df.to_csv(os.path.join(self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
print("Class wise F1-Macro:\n{}".format(
tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
if event_file:
with open(os.path.join(self.save_tsv_path, event_file.format(test_data_filename)), 'w') as wp:
wp.write(event_result.__str__())
print("=" * 100)
print(segment_result)
if segment_file:
with open(os.path.join(self.save_tsv_path,
segment_file.format(test_data_filename)), 'w') as wp:
wp.write(segment_result.__str__())
event_based_results = pd.DataFrame(
event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
segment_based_results = pd.DataFrame(
segment_result.results_class_wise_average_metrics()
['f_measure'], index=['segment_based'])
result_quick_report = pd.concat((event_based_results, segment_based_results))
# Add two columns
with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-yaml', type=str, default='./config/Conv_Tasnet/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_audio/TSDNet_audio_2gru/best.pt', help="Path to model file.")
parser.add_argument(
'-max_num', type=int, default=10000, help="Max number of testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix_new.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re_new.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf_new.scp',
args.yaml, args.model, gpuid)
separation.test()
if __name__ == "__main__":
main()
| 9,282 | 48.116402 | 167 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/train_Tasnet.py | import sys
sys.path.append('./')
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model.model import TSDNet,TSDNet_one_hot, TSDNet_plus_one_hot
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Tasnet,trainer_Tasnet_one_hot,trainer_Tasnet_one_hot_regresion
import torch.optim.lr_scheduler as lr_scheduler
import random
import torch.backends.cudnn as cudnn
DEVICE = 'cpu'
if torch.cuda.is_available():
DEVICE = 'cuda'
torch.backends.cudnn.deterministic = True
DEVICE = torch.device(DEVICE)
seed = 19980228
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
cudnn.deterministic = True
def make_dataloader(opt):
# make train's dataloader
train_dataset = Datasets(
opt['datasets']['train']['dataroot_mix'],
opt['datasets']['train']['dataroot_targets'][0],
opt['datasets']['train']['dataroot_targets'][1],
opt['datasets']['train']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets(
opt['datasets']['val']['dataroot_mix'],
opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1],
opt['datasets']['val']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make test dataloader
test_dataset = Datasets(
opt['datasets']['test']['dataroot_mix'],
opt['datasets']['test']['dataroot_targets'][0],
opt['datasets']['test']['dataroot_targets'][1],
opt['datasets']['test']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
test_dataloader = Loader(test_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
return train_dataloader, val_dataloader, test_dataloader
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
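# Sketch of the expected config (keys assumed from the calls above):
#   opt['optim'] = {'name': 'Adam', 'lr': 1e-3, 'weight_decay': 1e-5}
# selects torch.optim.Adam; any other name (e.g. 'SGD') is looked up on
# torch.optim the same way and additionally requires a 'momentum' entry.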
def train():
parser = argparse.ArgumentParser(
description='Parameters for training Conv-TasNet')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of Conv-Tasnet")
logger.info(opt['logger']['experimental_description'])
print(opt['model_name'])
if opt['model_name'] == 'TSDNet_one_hot':
net = TSDNet_one_hot(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] == 'TSDNet_plus_one_hot':
net = TSDNet_plus_one_hot(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] =='TSDNet':
net = TSDNet(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
else:
raise ValueError('Unknown model_name: {}'.format(opt['model_name']))
# build optimizer
logger.info("Building the optimizer of Conv-Tasnet")
optimizer = make_optimizer(net.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of Conv-Tasnet')
train_dataloader, val_dataloader, test_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}, Test Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader), len(test_dataloader)))
# build scheduler
# scheduler = ReduceLROnPlateau(
# optimizer, mode='min',
# factor=opt['scheduler']['factor'],
# patience=opt['scheduler']['patience'],
# verbose=True, min_lr=opt['scheduler']['min_lr'])
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, opt['train']['epoch'])
# build trainer
logger.info('Building the Trainer of Conv-Tasnet')
if opt['one_hot']:
if opt['reg']:
trainer = trainer_Tasnet_one_hot_regresion.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
else:
trainer = trainer_Tasnet_one_hot.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
else:
trainer = trainer_Tasnet.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
trainer.run()
if __name__ == "__main__":
train()
| 7,358 | 46.477419 | 145 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/tsd_utils.py | import collections.abc
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
import augment
#import dataset
from scipy.interpolate import interp1d
def parse_config_or_kwargs(config_file, **kwargs):
"""parse_config_or_kwargs
:param config_file: Config file that has parameters, yaml format
:param **kwargs: Other alternative parameters or overwrites for config
"""
with open(config_file) as con_read:
yaml_config = yaml.load(con_read, Loader=yaml.FullLoader)
arguments = dict(yaml_config, **kwargs)
return arguments
def find_contiguous_regions(activity_array): # vectorized; an equivalent O(n) loop over frames would also work
"""Find contiguous regions from bool valued numpy.array.
Copy of https://dcase-repo.github.io/dcase_util/_modules/dcase_util/data/decisions.html#DecisionEncoder
Reason is:
1. This does not belong to a class necessarily
2. Import DecisionEncoder requires sndfile over some other imports..which causes some problems on clusters
"""
change_indices = np.logical_xor(activity_array[1:], activity_array[:-1]).nonzero()[0]
change_indices += 1
if activity_array[0]:
# If the first element of activity_array is True add 0 at the beginning
change_indices = np.r_[0, change_indices]
if activity_array[-1]:
# If the last element of activity_array is True, add the length of the array
change_indices = np.r_[change_indices, activity_array.size]
# Reshape the result into two columns: each row is an [onset, offset) pair
return change_indices.reshape((-1, 2))
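# Worked example (illustration only): for
#   activity = np.array([0, 1, 1, 0, 1], dtype=bool)
# the XOR of adjacent frames flags changes at indices 1, 3 and 4, and after the
# boundary handling find_contiguous_regions(activity) returns
#   array([[1, 3],
#          [4, 5]])
# i.e. half-open [onset, offset) frame regions.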
def split_train_cv(
data_frame: pd.DataFrame,
frac: float = 0.9,
y=None, # Only for stratified, computes necessary split
**kwargs):
"""split_train_cv
:param data_frame:
:type data_frame: pd.DataFrame
:param frac:
:type frac: float
"""
if kwargs.get('mode',
None) == 'urbansed': # Filenames are DATA_-1 DATA_-2 etc
data_frame.loc[:, 'id'] = data_frame.groupby(
data_frame['filename'].str.split('_').apply(
lambda x: '_'.join(x[:-1]))).ngroup()
sampler = np.random.permutation(data_frame['id'].nunique())
num_train = int(frac * len(sampler))
train_indexes = sampler[:num_train]
cv_indexes = sampler[num_train:]
train_data = data_frame[data_frame['id'].isin(train_indexes)]
cv_data = data_frame[data_frame['id'].isin(cv_indexes)]
del train_data['id']
del cv_data['id']
elif kwargs.get('mode', None) == 'stratified': # stratified sampling
# Use statified sampling
from skmultilearn.model_selection import iterative_train_test_split
index_train, _, index_cv, _ = iterative_train_test_split(
data_frame.index.values.reshape(-1, 1), y, test_size=1. - frac)
train_data = data_frame[data_frame.index.isin(index_train.squeeze())]
cv_data = data_frame[data_frame.index.isin(index_cv.squeeze())] # cv --> cross validation
else:
# Simply split train_test
train_data = data_frame.sample(frac=frac, random_state=10)
cv_data = data_frame[~data_frame.index.isin(train_data.index)]
return train_data, cv_data
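# Sketch of the 'urbansed' grouping (file names here are hypothetical):
# 'soundscape_12_0.wav' and 'soundscape_12_1.wav' both map to the group key
# 'soundscape_12' (everything before the last '_'), so all crops of one
# soundscape land on the same side of the train/cv split.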
def parse_transforms(transform_list):
"""parse_transforms
parses the config files transformation strings to corresponding methods
:param transform_list: String list
"""
transforms = []
for trans in transform_list:
if trans == 'shift':
transforms.append(augment.TimeShift(0, 50))
elif trans == 'freqmask':
transforms.append(augment.FreqMask(2, 8))
elif trans == 'timemask':
transforms.append(augment.TimeMask(2, 60))
return torch.nn.Sequential(*transforms)
def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'): # print yaml file
"""pprint_dict
:param outputfun: function to use, defaults to sys.stdout
:param in_dict: dict to print
"""
if formatter == 'yaml':
format_fun = yaml.dump
elif formatter == 'pretty':
format_fun = pformat
for line in format_fun(in_dict).split('\n'):
outputfun(line)
def getfile_outlogger(outputfile):
log_format = "[<green>{time:YYYY-MM-DD HH:mm:ss}</green>] {message}"
logger.configure(handlers=[{"sink": sys.stderr, "format": log_format}])
if outputfile:
logger.add(outputfile, enqueue=True, format=log_format)
return logger
# according label, get encoder
def train_labelencoder(labels: pd.Series, sparse=True):
"""encode_labels
Encodes labels
:param labels: pd.Series representing the raw labels e.g., Speech, Water
:param encoder (optional): Encoder already fitted
returns encoded labels (many hot) and the encoder
"""
assert isinstance(labels, pd.Series), "Labels need to be series"
if isinstance(labels[0], six.string_types):
# In case of using non processed strings, e.g., Vaccum, Speech
label_array = labels.str.split(',').values.tolist() # split label according to ','
elif isinstance(labels[0], np.ndarray):
# Encoder does not like to see numpy array
label_array = [lab.tolist() for lab in labels]
elif isinstance(labels[0], collections.abc.Iterable):
label_array = labels
encoder = pre.MultiLabelBinarizer(sparse_output=sparse)
encoder.fit(label_array)
return encoder
def encode_labels(labels: pd.Series, encoder=None, sparse=True):
"""encode_labels
Encodes labels
:param labels: pd.Series representing the raw labels e.g., Speech, Water
:param encoder (optional): Encoder already fitted
returns encoded labels (many hot) and the encoder
"""
assert isinstance(labels, pd.Series), "Labels need to be series"
instance = labels.iloc[0]
if isinstance(instance, six.string_types):
# In case of using non processed strings, e.g., Vaccum, Speech
label_array = labels.str.split(',').values.tolist()
elif isinstance(instance, np.ndarray):
# Encoder does not like to see numpy array
label_array = [lab.tolist() for lab in labels]
elif isinstance(instance, collections.abc.Iterable):
label_array = labels
# get label_array, it is a list ,contain a lot of label, this label are string type
if not encoder:
encoder = pre.MultiLabelBinarizer(sparse_output=sparse) # if we encoder is None, we should init a encoder firstly.
encoder.fit(label_array)
labels_encoded = encoder.transform(label_array) # transform string to digit
return labels_encoded, encoder
# return pd.arrays.SparseArray(
# [row.toarray().ravel() for row in labels_encoded]), encoder
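# Usage sketch (labels are made up): with sparse=False,
#   encoded, enc = encode_labels(pd.Series(['Speech,Dog', 'Dog']), sparse=False)
# fits a MultiLabelBinarizer over classes ['Dog', 'Speech'] and returns the
# many-hot matrix [[1, 1], [1, 0]]; with the default sparse=True the matrix
# comes back as a scipy sparse array instead.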
def decode_with_timestamps(events, labels: np.array):
"""decode_with_timestamps
Decodes the predicted frame-activation array into a list of
[(Labelname, onset, offset), ...]
:param events: event label (string) attached to each decoded region
:param labels: n-dim binary array of frame activations
:type labels: np.array
"""
if labels.ndim == 3:
return [_decode_with_timestamps(events,lab) for lab in labels]
else:
return _decode_with_timestamps(events,labels)
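# Worked example (illustration only):
#   decode_with_timestamps('class_3', np.array([0, 1, 1, 0]))
# yields [('class_3', 1, 3)], i.e. (label, onset_frame, offset_frame) with a
# half-open [onset, offset) interval.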
def median_filter(x, window_size, threshold=0.5):
"""median_filter
:param x: input prediction array of shape (B, T, C) or (B, T).
Input is a sequence of probabilities 0 <= x <= 1
:param window_size: An integer to use
:param threshold: Binary thresholding threshold
"""
x = binarize(x, threshold=threshold) # transfer to 0 or 1
if x.ndim == 3:
size = (1, window_size, 1)
elif x.ndim == 2 and x.shape[0] == 1:
# Assume input is class-specific median filtering
# E.g, Batch x Time [1, 501]
size = (1, window_size)
elif x.ndim == 2 and x.shape[0] > 1:
# Assume input is standard median pooling, class-independent
# E.g., Time x Class [501, 10]
size = (window_size, 1)
return scipy.ndimage.median_filter(x, size=size)
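# Usage sketch (values are made up): for a (batch=1, time) probability track
#   probs = np.array([[0.1, 0.8, 0.2, 0.9, 0.85, 0.1]])
#   median_filter(probs, window_size=3, threshold=0.5)
# first binarizes to [[0, 1, 0, 1, 1, 0]] and then median-filters along time,
# suppressing the isolated positive at index 1 and filling the one-frame gap:
# the result is [[0, 0, 1, 1, 1, 0]].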
def _decode_with_timestamps(events, labels):
result_labels = []
change_indices = find_contiguous_regions(labels)
for row in change_indices:
result_labels.append((events,row[0], row[1]))
return result_labels
def inverse_transform_labels(encoder, pred):
if pred.ndim == 3:
return [encoder.inverse_transform(x) for x in pred]
else:
return encoder.inverse_transform(pred)
def binarize(pred, threshold=0.5):
# Batch_wise
if pred.ndim == 3:
return np.array(
[pre.binarize(sub, threshold=threshold) for sub in pred])
else:
return pre.binarize(pred, threshold=threshold)
def double_threshold(x, high_thres, low_thres, n_connect=1):
"""double_threshold
Helper function to calculate double threshold for n-dim arrays
:param x: input array
:param high_thres: high threshold value
:param low_thres: Low threshold value
:param n_connect: Distance of <= n clusters will be merged
"""
assert x.ndim <= 3, "Whoops something went wrong with the input ({}), check if its <= 3 dims".format(
x.shape)
if x.ndim == 3:
apply_dim = 1
elif x.ndim < 3:
apply_dim = 0
# x is assumed to be 3d: (batch, time, dim)
# Assumed to be 2d : (time, dim)
# Assumed to be 1d : (time)
# the time axis is therefore 1 for 3d input and 0 for 1d/2d input
return np.apply_along_axis(lambda x: _double_threshold(
x, high_thres, low_thres, n_connect=n_connect),
axis=apply_dim,
arr=x)
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True): # double thresholding is essentially a boundary-refinement step
"""_double_threshold
Computes a double threshold over the input array
:param x: input array, needs to be 1d
:param high_thres: High threshold over the array
:param low_thres: Low threshold over the array
:param n_connect: Postprocessing, maximal distance between clusters to connect
:param return_arr: If True (the default) return a zero/one array of the same size as x; otherwise return the filtered index pairs.
"""
assert x.ndim == 1, "Input needs to be 1d"
high_locations = np.where(x > high_thres)[0] # indices where the value exceeds high_thres
locations = x > low_thres # boolean activity mask
encoded_pairs = find_contiguous_regions(locations)
filtered_list = list(
filter(
lambda pair:
((pair[0] <= high_locations) & (high_locations <= pair[1])).any(),
encoded_pairs)) # keep only regions that contain at least one high-threshold frame
filtered_list = connect_(filtered_list, n_connect) # merge pairs closer than n_connect
if return_arr:
zero_one_arr = np.zeros_like(x, dtype=int)
for sl in filtered_list:
zero_one_arr[sl[0]:sl[1]] = 1
return zero_one_arr
return filtered_list
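# Worked example (illustration only): for
#   x = np.array([0.1, 0.3, 0.9, 0.3, 0.1, 0.3, 0.3])
# with high_thres=0.7 and low_thres=0.2, the low threshold yields the regions
# [1, 4) and [5, 7), but only [1, 4) contains a frame above the high
# threshold, so _double_threshold returns [0, 1, 1, 1, 0, 0, 0].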
def connect_clusters(x, n=1):
if x.ndim == 1:
return connect_clusters_(x, n)
if x.ndim >= 2:
return np.apply_along_axis(lambda a: connect_clusters_(a, n=n), -2, x)
def connect_clusters_(x, n=1):
"""connect_clusters_
Connects clustered predictions (0,1) in x with range n
:param x: Input array. zero-one format
:param n: Number of frames to skip until connection can be made
"""
assert x.ndim == 1, "input needs to be 1d"
reg = find_contiguous_regions(x)
start_end = connect_(reg, n=n)
zero_one_arr = np.zeros_like(x, dtype=int)
for sl in start_end:
zero_one_arr[sl[0]:sl[1]] = 1
return zero_one_arr
def connect_(pairs, n=1):
"""connect_
Connects two adjacent clusters if their distance is <= n
:param pairs: Clusters of iterateables e.g., [(1,5),(7,10)]
:param n: distance between two clusters
"""
if len(pairs) == 0:
return []
start_, end_ = pairs[0]
new_pairs = []
for i, (next_item, cur_item) in enumerate(zip(pairs[1:], pairs[0:])):
end_ = next_item[1]
if next_item[0] - cur_item[1] <= n:
pass
else:
new_pairs.append((start_, cur_item[1]))
start_ = next_item[0]
new_pairs.append((start_, end_))
return new_pairs
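# Worked example (illustration only):
#   connect_([(1, 5), (7, 10), (20, 25)], n=2)
# merges the first two clusters because 7 - 5 <= 2 and returns
# [(1, 10), (20, 25)].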
def predictions_to_time(df, ratio):
df.onset = df.onset * ratio
df.offset = df.offset * ratio
return df
def upgrade_resolution(arr, scale):
x = np.arange(0, arr.shape[0])
f = interp1d(x, arr, kind='linear', axis=0, fill_value='extrapolate')
scale_x = np.arange(0, arr.shape[0], 1 / scale)
up_scale = f(scale_x)
return up_scale
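# Usage sketch (values are made up):
#   upgrade_resolution(np.array([0., 1., 0.]), scale=2)
# interpolates onto the grid [0, 0.5, 1, 1.5, 2, 2.5] and returns
# [0., 0.5, 1., 0.5, 0., -0.5] (the last value is linearly extrapolated).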
if __name__ == "__main__":
# quick sanity check for the double-threshold post-processing
a = np.array([0.1, 0.2, 0.3, 0.8, 0.4, 0.1, 0.3, 0.9, 0.4])
print(_double_threshold(a, 0.7, 0.2))
| 13,313 | 35.377049 | 169 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/train_rnn.py | import sys
sys.path.append('./')
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset import Datasets
from model import model_rnn
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Dual_RNN
def make_dataloader(opt):
# make train's dataloader
train_dataset = Datasets(
opt['datasets']['train']['dataroot_mix'],
[opt['datasets']['train']['dataroot_targets'][0],
opt['datasets']['train']['dataroot_targets'][1]],
**opt['datasets']['audio_setting'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets(
opt['datasets']['val']['dataroot_mix'],
[opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1]],
**opt['datasets']['audio_setting'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=False)
return train_dataloader, val_dataloader
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
def train():
parser = argparse.ArgumentParser(
description='Parameters for training Dual-Path-RNN')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of Dual-Path-RNN")
Dual_Path_RNN = model_rnn.Dual_RNN_model(**opt['Dual_Path_RNN'])
# build optimizer
logger.info("Building the optimizer of Dual-Path-RNN")
optimizer = make_optimizer(Dual_Path_RNN.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of Dual-Path-RNN')
train_dataloader, val_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader)))
# build scheduler
scheduler = ReduceLROnPlateau(
optimizer, mode='min',
factor=opt['scheduler']['factor'],
patience=opt['scheduler']['patience'],
verbose=True, min_lr=opt['scheduler']['min_lr'])
# build trainer
logger.info('Building the Trainer of Dual-Path-RNN')
trainer = trainer_Dual_RNN.Trainer(train_dataloader, val_dataloader, Dual_Path_RNN, optimizer, scheduler, opt)
trainer.run()
if __name__ == "__main__":
train()
| 3,546 | 37.554348 | 114 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/train_Tasnet_tse.py | import sys
sys.path.append('./')
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets_tse # using add tse results
from model.model import TSDNet, TSDNet_tse, TSDNet_one_hot, TSDNet_plus_one_hot
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Tasnet,trainer_Tasnet_one_hot,trainer_Tasnet_one_hot_regresion, trainer_Tasnet_tse
import torch.optim.lr_scheduler as lr_scheduler
import random
import torch.backends.cudnn as cudnn
DEVICE = 'cpu'
if torch.cuda.is_available():
DEVICE = 'cuda'
torch.backends.cudnn.deterministic = True
DEVICE = torch.device(DEVICE)
seed = 1508758
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
cudnn.deterministic = True
def make_dataloader(opt):
# make train's dataloader
train_dataset = Datasets_tse(
opt['datasets']['train']['dataroot_mix'],
opt['datasets']['train']['dataroot_targets'][0],
opt['datasets']['train']['dataroot_targets'][1],
opt['datasets']['train']['dataroot_targets'][2],
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tr_tse.scp',
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets_tse(
opt['datasets']['val']['dataroot_mix'],
opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1],
opt['datasets']['val']['dataroot_targets'][2],
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_tse.scp',
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make test dataloader
test_dataset = Datasets_tse(
opt['datasets']['test']['dataroot_mix'],
opt['datasets']['test']['dataroot_targets'][0],
opt['datasets']['test']['dataroot_targets'][1],
opt['datasets']['test']['dataroot_targets'][2],
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_tse.scp',
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
test_dataloader = Loader(test_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
return train_dataloader, val_dataloader, test_dataloader
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
def train():
parser = argparse.ArgumentParser(
description='Parameters for training Conv-TasNet')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of Conv-Tasnet")
logger.info(opt['logger']['experimental_description'])
print(opt['model_name'])
if opt['model_name'] == 'TSDNet_one_hot':
net = TSDNet_one_hot(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] =='TSDNet':
net = TSDNet(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] =='TSDNet_tse':
net = TSDNet_tse(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth',
use_frame = opt['use_frame'],
only_ref=opt['only_ref']
)
else:
raise ValueError('Unknown model_name: {}'.format(opt['model_name']))
# build optimizer
logger.info("Building the optimizer of Conv-Tasnet")
optimizer = make_optimizer(net.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of Conv-Tasnet')
train_dataloader, val_dataloader, test_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}, Test Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader), len(test_dataloader)))
# build scheduler
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, opt['train']['epoch'])
# build trainer
logger.info('Building the Trainer of Conv-Tasnet')
if opt['one_hot']:
raise NotImplementedError('one-hot training is not wired up for the tse variant')
else:
trainer = trainer_Tasnet_tse.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
trainer.run()
if __name__ == "__main__":
train()
| 7,255 | 47.373333 | 131 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/dualrnn_test_wav.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav, read_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model_rnn import Dual_RNN_model
from logger.set_logger import setup_logger
import logging
from config.option import parse
import tqdm
class Separation():
def __init__(self, mix_path, yaml_path, model, gpuid):
super(Separation, self).__init__()
self.mix = read_wav(mix_path)
opt = parse(yaml_path)
net = Dual_RNN_model(**opt['Dual_Path_RNN'])
dicts = torch.load(model, map_location='cpu')
net.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.net=net
self.gpuid = gpuid
def inference(self, file_path):
self.net.eval()
with torch.no_grad():
egs=self.mix
norm = torch.norm(egs,float('inf'))
if len(self.gpuid) != 0:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach().cpu()) for s in ests]
else:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
print(ests[0].shape)
spks=[torch.squeeze(s.detach()) for s in ests]
index=0
for s in spks:
# remove DC offset and restore the input's peak level
s = s - torch.mean(s)
s = s*norm/torch.max(torch.abs(s))
index += 1
os.makedirs(file_path+'/spk'+str(index), exist_ok=True)
filename=file_path+'/spk'+str(index)+'/'+'test.wav'
write_wav(filename, s, 16000)
self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-mix_scp', type=str, default='1_mix.wav', help='Path to mix scp file.')
parser.add_argument(
'-yaml', type=str, default='./config/train_rnn_opt.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='./checkpoint/Dual_Path_RNN_opt/best.pt', help="Path to model file.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./test', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation(args.mix_scp, args.yaml, args.model, []) # note: [] forces the CPU branch; pass gpuid to use the GPU path
separation.inference(args.save_path)
if __name__ == "__main__":
main() | 2,918 | 38.986301 | 105 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/augment.py | import torch
import logging
import torch.nn as nn
import numpy as np
class TimeShift(nn.Module):
def __init__(self, mean, std):
super().__init__()
self.mean = mean
self.std = std
def forward(self, x):
if self.training:
shift = torch.empty(1).normal_(self.mean, self.std).int().item()
x = torch.roll(x, shift, dims=0)
return x
class TimeMask(nn.Module):
def __init__(self, n=1, p=50):
super().__init__()
self.p = p
self.n = n
def forward(self, x):
time, freq = x.shape
if self.training:
for i in range(self.n):
t = torch.empty(1, dtype=int).random_(self.p).item()
to_sample = max(time - t, 1)
t0 = torch.empty(1, dtype=int).random_(to_sample).item()
x[t0:t0 + t, :] = 0
return x
class FreqMask(nn.Module):
def __init__(self, n=1, p=12):
super().__init__()
self.p = p
self.n = n
def forward(self, x):
time, freq = x.shape
if self.training:
for i in range(self.n):
f = torch.empty(1, dtype=int).random_(self.p).item()
f0 = torch.empty(1, dtype=int).random_(freq - f).item()
x[:, f0:f0 + f] = 0.
return x
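# Usage sketch (spectrogram shape assumed): the masks only fire in train mode.
#   aug = nn.Sequential(TimeShift(0, 50), FreqMask(2, 8), TimeMask(2, 60))
#   aug.train()
#   out = aug(torch.randn(501, 64)) # (time, freq) log-mel patch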
| 1,333 | 23.703704 | 76 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/test_tasnet_tse.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot, TSDNet_tse
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import time
import soundfile as sf
import metrics # import metrics.py file
import tsd_utils as utils # import utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
import datetime
import uuid
from pathlib import Path
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Separation():
def __init__(self, mix_scp, ref_scp, inf_scp, tse_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
super(Separation, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.ref_audio = handle_scp(ref_scp)
self.tse_audio = handle_scp(tse_scp)
self.clss,_,_ = handle_scp_inf(inf_scp)
self.key = list(self.mix_audio.keys())
opt = parse(yaml_path)
tsdnet = TSDNet_tse(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth',
use_frame = opt['use_frame'],
only_ref = opt['only_ref']
)
dicts = torch.load(model, map_location='cpu')
tsdnet.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.tsdnet=tsdnet.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.pred_file = pred_file
self.label_path = opt['label_path']
self.save_tsv_path = os.path.join(opt['save_tsv_path'], opt['name'],
"{}_{}".format(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), uuid.uuid1().hex))
Path(self.save_tsv_path).mkdir(exist_ok=True, parents=True) # one unique result dir per run
def test(self):
self.tsdnet.eval()
time_predictions = []
class_result_file = 'class_result_{}.txt'
event_file = 'event_{}.txt'
segment_file = 'segment_{}.txt'
with torch.no_grad():
for i in range(len(self.key)):
index = self.key[i]
ref_index = index.replace('.wav', '_re.wav')
tse_index = index.replace('.wav','_tse.wav')
cls = str(self.clss[index])
cls = 'class_' + cls
mix = read_wav(self.mix_audio[index])
ref = read_wav(self.ref_audio[ref_index])
tse_audio = read_wav(self.tse_audio[tse_index])
mix = mix.to(self.device)
ref = ref.to(self.device)
tse_audio = tse_audio.to(self.device)
mix = mix[None,:]
ref = ref[None,:]
tse_audio = tse_audio[None,:]
x_cls, out_tsd_time, out_tsd_up = self.tsdnet(mix, ref, tse_audio)
# x_cls: <bs,50>
# out_tsd_time: <bs,t/2>
# out_tsd_up: <bs,t>
pred = out_tsd_up.detach().cpu().numpy() # convert to numpy for post-processing
thres = 0.5
window_size = 1
filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
decoded_pred = []
decoded_pred_ = utils.decode_with_timestamps(str(cls), filtered_pred[0,:])
if len(decoded_pred_) == 0: # no event detected: emit a dummy (cls, 0, 0) entry
decoded_pred_.append((str(cls),0,0))
decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
#print('len(decoded_pred) ',len(decoded_pred))
filename = index.split('/')[-1]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
for event_label, onset, offset in label_prediction:
time_predictions.append({
'filename': filename,
'onset': onset,
'offset': offset,
'event_label': str(event_label)}) # get real predict results,including event_label,onset,offset
assert len(time_predictions) > 0, "No outputs, lower threshold?"
pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset','event_label']) # one row per detected event with its time span
time_ratio = 10.0/pred.shape[1] # seconds per frame, assuming 10 s clips
pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio) # convert frame indices to seconds
label_path = self.label_path
test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
print('test_data_filename ',test_data_filename)
pred_file = 'hard_predictions_{}.txt'
if pred_file: # dump the hard (binarized) predictions as a tsv
pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
index=False, sep="\t")
strong_labels_df = pd.read_csv(self.label_path, sep='\t') # ground-truth strong labels
if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
sed_eval = True
if sed_eval:
event_result, segment_result = metrics.compute_metrics(
strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
print("Event Based Results:\n{}".format(event_result))
event_results_dict = event_result.results_class_wise_metrics()
class_wise_results_df = pd.DataFrame().from_dict({
f: event_results_dict[f]['f_measure']
for f in event_results_dict.keys()}).T
class_wise_results_df.to_csv(os.path.join(self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
print("Class wise F1-Macro:\n{}".format(
tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
if event_file:
with open(os.path.join(self.save_tsv_path, event_file.format(test_data_filename)), 'w') as wp:
wp.write(event_result.__str__())
print("=" * 100)
print(segment_result)
if segment_file:
with open(os.path.join(self.save_tsv_path,
segment_file.format(test_data_filename)), 'w') as wp:
wp.write(segment_result.__str__())
event_based_results = pd.DataFrame(
event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
segment_based_results = pd.DataFrame(
segment_result.results_class_wise_average_metrics()
['f_measure'], index=['segment_based'])
result_quick_report = pd.concat((event_based_results, segment_based_results))
# Add two columns
with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-yaml', type=str, default='./config/Conv_Tasnet/train_tse.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_audio/TSDNet_audio_2gru_tse_ML2_fix_random_kaiming_norm_w_clip_w_frame/best.pt', help="Path to model file.")
parser.add_argument(
'-max_num', type=int, default=10000, help="Max number of testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_tse_7_1.scp',
args.yaml, args.model, gpuid)
separation.test()
if __name__ == "__main__":
main()
| 10,323 | 50.108911 | 214 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/draw.py | import torchaudio
import matplotlib
import matplotlib.pyplot as plt
[width, height] = matplotlib.rcParams['figure.figsize']
if width < 10:
matplotlib.rcParams['figure.figsize'] = [width * 2.5, height]
if __name__ == "__main__":
# filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/train/train_1.wav"
filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/test_offset/test_offset_10_re.wav"
waveform, sample_rate = torchaudio.load(filename)
print("Shape of waveform: {}".format(waveform.size()))
print("Sample rate of waveform: {}".format(sample_rate))
plt.figure()
plt.plot(waveform.t().numpy())
# plt.title('test_offset_100_mix')
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.savefig('test_offset_10_re.png')
| 775 | 32.73913 | 95 | py |