repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
crpn | crpn-master/lib/rpn/proposal_target_layer.py | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import caffe
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from quad.quad_transform import quad_transform, clip_quads
from quad.quad_convert import quad_2_aabb, whctrs, mkanchors, dual_roi
from quad.quad_overlaps import quad_overlaps
from quad.quad_2_obb import quad_2_obb
DEBUG = False
class ProposalTargetLayer(caffe.Layer):
    """
    Assign object detection proposals to ground-truth targets. Produces proposal
    classification labels and bounding-box regression targets.

    Bottoms: [0] proposal quads (0, x1..y4), [1] GT quads (x1..y4, label).
    Tops: sampled rois, labels, bbox_targets, bbox_inside_weights,
    bbox_outside_weights.
    """
    def setup(self, bottom, top):
        # text/background only; 8 regression weights per quad (4 corner pairs)
        self._num_classes = 2
        self._num_weights = 8
        # sampled rois (0, x1, y1, x2, y2); two RoIs per sample when the
        # dual-RoI pooling module is enabled
        num_rois = 2 if cfg.DUAL_ROI else 1
        top[0].reshape(num_rois, 5)
        # labels
        top[1].reshape(1, 1)
        # bbox_targets
        top[2].reshape(1, self._num_classes * self._num_weights)
        # bbox_inside_weights
        top[3].reshape(1, self._num_classes * self._num_weights)
        # bbox_outside_weights
        top[4].reshape(1, self._num_classes * self._num_weights)
    def forward(self, bottom, top):
        """Sample fg/bg proposals, compute regression targets and fill the tops."""
        # RoIs: (0, x1, y1, x2, y2, x3, y3, x4, y4)
        all_rois = bottom[0].data
        # GT boxes: (x1, y1, x2, y2, x3, y3, x4, y4, label)
        gt_boxes = bottom[1].data
        # Include ground-truth boxes in the set of candidate rois
        zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
        all_rois = np.vstack((all_rois, np.hstack((zeros, gt_boxes[:, :-1]))))
        # Sanity check: single batch only
        assert np.all(all_rois[:, 0] == 0), 'Only single item batches are supported'
        num_images = 1
        # NOTE(review): under Python 3 this is true division (float); the
        # py2-style prints below suggest this file targets Python 2 -- confirm.
        rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
        fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
        # Sample rois with classification labels and bounding box regression targets
        labels, quads, bbox_targets, bbox_inside_weights = _sample_rois(
            all_rois, gt_boxes, fg_rois_per_image,
            rois_per_image, self._num_classes, self._num_weights)
        # Dual-RoI pooling module: convert quads to oriented boxes, optionally
        # producing the second (dual) RoI per sample
        if cfg.DUAL_ROI:
            rois = quad_2_obb(np.array(quads[:, 1:9], dtype=np.float32))
            rois = dual_roi(rois)
        else:
            rois = quad_2_obb(quads[:, 1:9])
        # prepend the (all-zero) image batch index expected downstream
        batch_inds = np.zeros((rois.shape[0], 1), dtype=np.float32)
        rois = np.hstack((batch_inds, rois.astype(np.float32, copy=False)))
        if DEBUG:
            print '#num fg: {}'.format((labels == 1).sum())
            print '%num bg: {}'.format((labels == 0).sum())
        # sampled rois
        top[0].reshape(*rois.shape)
        top[0].data[...] = rois
        # classification labels
        top[1].reshape(*labels.shape)
        top[1].data[...] = labels
        # bbox_targets
        top[2].reshape(*bbox_targets.shape)
        top[2].data[...] = bbox_targets
        # bbox_inside_weights
        top[3].reshape(*bbox_inside_weights.shape)
        top[3].data[...] = bbox_inside_weights
        # bbox_outside_weights: 1 wherever an inside weight is set
        top[4].reshape(*bbox_inside_weights.shape)
        top[4].data[...] = np.array(bbox_inside_weights > 0).astype(np.float32)
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
def _get_bbox_regression_labels(bbox_target_data, num_classes, num_weights=4):
    """Expand compact N x (class, t1..t_num_weights) regression targets into the
    per-class N x (num_weights * K) representation used by the network
    (i.e. only the ground-truth class of each row gets non-zero targets).

    Args:
        bbox_target_data: N x (1 + num_weights) array; column 0 is the class.
        num_classes: number of classes K.
        num_weights: regression targets per class (4 for boxes, 8 for quads).

    Returns:
        bbox_targets (ndarray): N x (num_weights*K) blob of regression targets
        bbox_inside_weights (ndarray): N x (num_weights*K) blob of loss weights
    """
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, num_weights * num_classes), dtype=np.float32)
    bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    inds = np.where(clss > 0)[0]
    for ind in inds:
        # clss is a float array; cast to int so the slice bounds are valid
        # indices (non-integer slice indices raise TypeError on modern numpy).
        cls = int(clss[ind])
        start = num_weights * cls
        end = start + num_weights
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
    return bbox_targets, bbox_inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
    """Compute quadrilateral regression targets for an image.

    Returns an N x 9 float32 array: the class label followed by the
    (optionally normalized) quad_transform deltas.
    """
    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 8
    assert gt_rois.shape[1] == 8
    deltas = quad_transform(ex_rois, gt_rois)
    if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
        # Optionally normalize targets by a precomputed mean and stdev
        means = np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
        stds = np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS)
        deltas = (deltas - means) / stds
    stacked = np.hstack((labels[:, np.newaxis], deltas))
    return stacked.astype(np.float32, copy=False)
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes, num_weights):
    """Generate a random sample of RoIs comprising foreground and background
    examples.

    Foreground rows are placed first in the returned arrays, which is what
    allows the background labels to be zeroed by position below.
    """
    # overlaps: (rois x gt_boxes)
    # all_rois: (0, x1, y1, x2, y2, x3, y3, x4, y4)
    # gt_boxes: (x1, y1, x2, y2, x3, y3, x4, y4, label)
    overlaps = quad_overlaps(
        np.ascontiguousarray(all_rois[:, 1:9], dtype=np.float32),
        np.ascontiguousarray(gt_boxes[:, :8], dtype=np.float32))
    # best-matching GT for every RoI, and that match's overlap value
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    labels = gt_boxes[gt_assignment, 8]
    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # Guard against the case when an image has fewer than fg_rois_per_image
    # foreground RoIs
    fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
    # Sample foreground regions without replacement
    if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
    # The indices that we're selecting (both fg and bg); fg first
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0 (they follow the fg rows)
    labels[fg_rois_per_this_image:] = 0
    rois = all_rois[keep_inds]
    bbox_target_data = _compute_targets(
        rois[:, 1:9], gt_boxes[gt_assignment[keep_inds], :8], labels)
    bbox_targets, bbox_inside_weights = \
        _get_bbox_regression_labels(bbox_target_data, num_classes, num_weights)
    return labels, rois, bbox_targets, bbox_inside_weights
| 7,625 | 37.321608 | 98 | py |
crpn | crpn-master/lib/rpn/labelmap_layer.py | # --------------------------------------------------------
# CRPN
# Written by Linjie Deng
# --------------------------------------------------------
import caffe
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
DEBUG = False
class LabelMapLayer(caffe.Layer):
    """Build four (1, 1, H, W) corner label maps (top-left, top-right,
    bottom-right, bottom-left) from ground-truth quads.

    Each in-image GT corner marks one feature-map cell with a direction class
    (floor(theta / LD_INTERVAL) + 1); 0 is background and -1 marks entries
    disabled by the subsampling step.
    """
    def setup(self, bottom, top):
        # no extra param
        height, width = bottom[0].data.shape[-2:]
        top[0].reshape(1, 1, height, width)
        top[1].reshape(1, 1, height, width)
        top[2].reshape(1, 1, height, width)
        top[3].reshape(1, 1, height, width)
    def forward(self, bottom, top):
        # params
        batch_size = 32
        fg_fraction = 1.0
        num_fg = int(fg_fraction * batch_size)
        theta_interval = cfg.LD_INTERVAL
        feat_map = bottom[0].data
        im_info = bottom[1].data[0, :]
        gt_boxes = bottom[2].data
        img_h, img_w = im_info[:2]
        map_h, map_w = feat_map.shape[-2:]
        # image -> feature-map scale; must be isotropic
        spatial_scale_x = map_w / img_w
        spatial_scale_y = map_h / img_h
        assert spatial_scale_x == spatial_scale_y, 'scale_x is not equal to scale_y'
        spatial_scale = spatial_scale_x
        labelmap_tl = np.zeros((map_h, map_w), dtype=np.float32)
        labelmap_tr = np.zeros((map_h, map_w), dtype=np.float32)
        labelmap_br = np.zeros((map_h, map_w), dtype=np.float32)
        labelmap_bl = np.zeros((map_h, map_w), dtype=np.float32)
        for bbox in gt_boxes:
            # compute theta in raw image space: each corner's angle towards
            # the diagonally-opposite corner of the quad
            theta1 = _compute_theta(bbox[0], bbox[1], bbox[4], bbox[5])
            theta2 = _compute_theta(bbox[2], bbox[3], bbox[6], bbox[7])
            theta3 = _compute_theta(bbox[4], bbox[5], bbox[0], bbox[1])
            theta4 = _compute_theta(bbox[6], bbox[7], bbox[2], bbox[3])
            # filter corner which is outside of boundary
            x1_valid = (0 <= bbox[0] < img_w)
            y1_valid = (0 <= bbox[1] < img_h)
            x2_valid = (0 <= bbox[2] < img_w)
            y2_valid = (0 <= bbox[3] < img_h)
            x3_valid = (0 <= bbox[4] < img_w)
            y3_valid = (0 <= bbox[5] < img_h)
            x4_valid = (0 <= bbox[6] < img_w)
            y4_valid = (0 <= bbox[7] < img_h)
            # map into feature map space
            x1 = int(round(bbox[0] * spatial_scale))
            y1 = int(round(bbox[1] * spatial_scale))
            x2 = int(round(bbox[2] * spatial_scale))
            y2 = int(round(bbox[3] * spatial_scale))
            x3 = int(round(bbox[4] * spatial_scale))
            y3 = int(round(bbox[5] * spatial_scale))
            x4 = int(round(bbox[6] * spatial_scale))
            y4 = int(round(bbox[7] * spatial_scale))
            # clamp coordinates onto the feature map
            x1 = np.maximum(np.minimum(x1, map_w - 1), 0)
            y1 = np.maximum(np.minimum(y1, map_h - 1), 0)
            x2 = np.maximum(np.minimum(x2, map_w - 1), 0)
            y2 = np.maximum(np.minimum(y2, map_h - 1), 0)
            x3 = np.maximum(np.minimum(x3, map_w - 1), 0)
            y3 = np.maximum(np.minimum(y3, map_h - 1), 0)
            x4 = np.maximum(np.minimum(x4, map_w - 1), 0)
            y4 = np.maximum(np.minimum(y4, map_h - 1), 0)
            # compute link direction, +1 for background and other types
            if x1_valid and y1_valid:
                labelmap_tl[y1, x1] = np.floor(theta1 / theta_interval) + 1
            if x2_valid and y2_valid:
                labelmap_tr[y2, x2] = np.floor(theta2 / theta_interval) + 1
            if x3_valid and y3_valid:
                labelmap_br[y3, x3] = np.floor(theta3 / theta_interval) + 1
            if x4_valid and y4_valid:
                labelmap_bl[y4, x4] = np.floor(theta4 / theta_interval) + 1
        # subsample positive or negative labels if we have too many
        labelmap_tl = _subsample(labelmap_tl, batch_size, num_fg)
        labelmap_tr = _subsample(labelmap_tr, batch_size, num_fg)
        labelmap_br = _subsample(labelmap_br, batch_size, num_fg)
        labelmap_bl = _subsample(labelmap_bl, batch_size, num_fg)
        # reshape+transpose round trip kept as-is; the net effect is the
        # (1, 1, map_h, map_w) layout of the second reshape
        labelmap_tl = labelmap_tl.reshape((1, map_h, map_w, 1)).transpose(0, 3, 1, 2)
        labelmap_tl = labelmap_tl.reshape((1, 1, map_h, map_w))
        top[0].reshape(*labelmap_tl.shape)
        top[0].data[...] = labelmap_tl
        labelmap_tr = labelmap_tr.reshape((1, map_h, map_w, 1)).transpose(0, 3, 1, 2)
        labelmap_tr = labelmap_tr.reshape((1, 1, map_h, map_w))
        top[1].reshape(*labelmap_tr.shape)
        top[1].data[...] = labelmap_tr
        labelmap_br = labelmap_br.reshape((1, map_h, map_w, 1)).transpose(0, 3, 1, 2)
        labelmap_br = labelmap_br.reshape((1, 1, map_h, map_w))
        top[2].reshape(*labelmap_br.shape)
        top[2].data[...] = labelmap_br
        labelmap_bl = labelmap_bl.reshape((1, map_h, map_w, 1)).transpose(0, 3, 1, 2)
        labelmap_bl = labelmap_bl.reshape((1, 1, map_h, map_w))
        top[3].reshape(*labelmap_bl.shape)
        top[3].data[...] = labelmap_bl
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
def _subsample(labelmap, batch_size, num_fg):
# positive
labelmap = labelmap.reshape(-1, 1)
fg_inds = np.where(labelmap > 0)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labelmap[disable_inds] = -1
# negative
num_bg = batch_size - np.sum(labelmap > 0)
bg_inds = np.where(labelmap == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labelmap[disable_inds] = -1
return labelmap
def _compute_theta(x1, y1, x2, y2):
dx = x2 - x1
dy = y2 - y1
val = dx / np.sqrt(dx * dx + dy * dy)
val = np.maximum(np.minimum(val, 1), -1)
theta = np.arccos(val) / np.pi * 180
if dy > 0:
theta = 360 - theta
return theta | 6,016 | 39.38255 | 87 | py |
crpn | crpn-master/lib/transform/torch_image_transform_layer.py | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
""" Transform images for compatibility with models trained with
https://github.com/facebook/fb.resnet.torch.
Usage in model prototxt:
layer {
name: 'data_xform'
type: 'Python'
bottom: 'data_caffe'
top: 'data'
python_param {
module: 'transform.torch_image_transform_layer'
layer: 'TorchImageTransformLayer'
}
}
"""
import caffe
from fast_rcnn.config import cfg
import numpy as np
class TorchImageTransformLayer(caffe.Layer):
    """Re-normalize Caffe-preprocessed images to the fb.resnet.torch
    convention: RGB order, [0, 1] range, per-channel mean/std."""
    def setup(self, bottom, top):
        # (1, 3, 1, 1) shaped arrays: fb.resnet.torch RGB channel statistics
        self.PIXEL_MEANS = \
            np.array([[[[0.48462227599918]],
                       [[0.45624044862054]],
                       [[0.40588363755159]]]])
        self.PIXEL_STDS = \
            np.array([[[[0.22889466674951]],
                       [[0.22446679341259]],
                       [[0.22495548344775]]]])
        # The default ("old") pixel means that were already subtracted,
        # reshaped from HWC to (1, C, 1, 1)
        channel_swap = (0, 3, 1, 2)
        self.OLD_PIXEL_MEANS = \
            cfg.PIXEL_MEANS[np.newaxis, :, :, :].transpose(channel_swap)
        top[0].reshape(*(bottom[0].shape))
    def forward(self, bottom, top):
        ims = bottom[0].data
        # Invert the channel means that were already subtracted.
        # NOTE(review): `+=` mutates bottom[0].data in place -- the input blob
        # is modified, not just the output; kept as-is deliberately here.
        ims += self.OLD_PIXEL_MEANS
        # 1. Permute BGR to RGB and normalize to [0, 1]
        ims = ims[:, [2, 1, 0], :, :] / 255.0
        # 2. Remove channel means
        ims -= self.PIXEL_MEANS
        # 3. Standardize channels
        ims /= self.PIXEL_STDS
        top[0].reshape(*(ims.shape))
        top[0].data[...] = ims
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
| 2,000 | 29.784615 | 72 | py |
AdaptSky | AdaptSky-main/Sub-6.py | import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import math
from collections import namedtuple
from itertools import count
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
import cv2
import time
import os
import pickle
import warnings
import IPython
# NOTE(review): bare `display` is only defined when this runs inside a
# Jupyter/IPython session (where it is injected as a builtin); `out` is the
# live display handle that calc() later updates with progress text.
out = display(IPython.display.Pretty('Starting'), display_id=True)
warnings.filterwarnings("ignore")
# fix the torch and NumPy RNG seeds so runs are reproducible
torch.manual_seed(0)
np.random.seed(0)
class DQN(nn.Module):
    """Dueling MLP Q-network with a configurable depth.

    NUM_OF_LAYERS selects the architecture:
      0 -- a single linear map straight to 32 outputs (no dueling split);
      1 -- one hidden layer feeding separate value/advantage heads
           (NOTE(review): the advantage head is hard-coded to 32 outputs
           here, ignoring NUM_OF_ACTIONS -- kept as in the original);
      2 -- two hidden layers feeding the dueling heads sized NUM_OF_ACTIONS.
    """

    def __init__(self, NUMBER_OF_ARGUMENTS_PER_STATE, NUM_OF_LAYERS, NUM_OF_NEURONS_PER_LAYER, NUM_OF_ACTIONS):
        super().__init__()
        self.NUM_OF_LAYERS = NUM_OF_LAYERS
        if NUM_OF_LAYERS == 0:
            self.fc1 = nn.Linear(NUMBER_OF_ARGUMENTS_PER_STATE, 32)
        elif NUM_OF_LAYERS == 1:
            self.fc1 = nn.Linear(NUMBER_OF_ARGUMENTS_PER_STATE, NUM_OF_NEURONS_PER_LAYER)
            self.out_v = nn.Linear(NUM_OF_NEURONS_PER_LAYER, 1)
            self.out_a = nn.Linear(NUM_OF_NEURONS_PER_LAYER, 32)
        elif NUM_OF_LAYERS == 2:
            self.fc1 = nn.Linear(NUMBER_OF_ARGUMENTS_PER_STATE, NUM_OF_NEURONS_PER_LAYER)
            self.fc2 = nn.Linear(NUM_OF_NEURONS_PER_LAYER, NUM_OF_NEURONS_PER_LAYER)
            self.out_v = nn.Linear(NUM_OF_NEURONS_PER_LAYER, 1)
            self.out_a = nn.Linear(NUM_OF_NEURONS_PER_LAYER, NUM_OF_ACTIONS)

    def forward(self, t):
        """Return Q-values for a batch of flattened states."""
        t = t.flatten(start_dim=1)
        if self.NUM_OF_LAYERS == 0:
            # plain linear head
            return self.fc1(t)
        if self.NUM_OF_LAYERS in (1, 2):
            t = F.relu(self.fc1(t))
            if self.NUM_OF_LAYERS == 2:
                t = F.relu(self.fc2(t))
            v = self.out_v(t)        # state-value stream
            a = self.out_a(t)        # advantage stream
            # dueling aggregation: Q = V + A - mean(A)
            return v + a - a.mean()
        # any other depth falls through and (as before) returns None
# One transition (state, action, next_state, reward); batches of these are
# re-zipped into per-field tensors by extract_tensors before optimization.
Experience = namedtuple(
    'Experience',
    ('state', 'action', 'next_state', 'reward')
)
class ReplayMemory():
    """Fixed-capacity experience buffer with ring-style overwrite."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.push_count = 0

    def push(self, experience):
        """Append until full, then overwrite the oldest slot in ring order."""
        if len(self.memory) < self.capacity:
            self.memory.append(experience)
        else:
            slot = self.push_count % self.capacity
            self.memory[slot] = experience
        self.push_count += 1

    def sample(self, batch_size):
        """Uniform random sample (without replacement) of stored experiences."""
        return random.sample(self.memory, batch_size)

    def can_provide_sample(self, batch_size):
        """True once enough experiences are stored to fill one batch."""
        return len(self.memory) >= batch_size
class EpsilonGreedyStrategy():
    """Exponentially decaying exploration schedule:
    rate(step) = end + (start - end) * exp(-step / decay)."""

    def __init__(self, start, end, decay):
        self.start = start
        self.end = end
        self.decay = decay

    def get_exploration_rate(self, current_step):
        """Exploration probability at the given global step."""
        spread = self.start - self.end
        return self.end + spread * math.exp(-1. * current_step / self.decay)
class Agent():
    """Epsilon-greedy action selector over a policy network."""

    def __init__(self, strategy, num_actions, device):
        self.current_step = 0
        self.strategy = strategy
        self.num_actions = num_actions
        self.device = device

    def select_action(self, state, policy_net):
        """Return an action tensor: random with probability `rate`, else greedy."""
        rate = self.strategy.get_exploration_rate(self.current_step)
        self.current_step += 1
        if rate > random.random():
            # explore: uniform random action index
            chosen = random.randrange(self.num_actions)
            return torch.tensor([chosen]).to(self.device)
        # exploit: argmax over the policy network's Q-values
        with torch.no_grad():
            return policy_net(state).argmax(dim=1).to(self.device)
class QValues():
    """Static helpers extracting current and target Q-values for a batch."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    @staticmethod
    def get_current(policy_net, states, actions):
        # Q(s, a) of each sampled (state, action) pair, shape (N, 1)
        q_all = policy_net(states)
        return q_all.gather(dim=1, index=actions.unsqueeze(-1))

    @staticmethod
    def get_next(target_net, next_states):
        # max_a' Q_target(s', a'), detached from the autograd graph
        return target_net(next_states).max(dim=1)[0].detach()
def calc(SUM1,SUM2,SUM3,SUM4,Fairness, moving_avg_period, iteration):
    """On the final episode, pickle the smoothed sum-rate and fairness curves;
    otherwise just refresh the notebook progress line.

    NOTE(review): relies on module globals -- `episode` (loop variable of the
    training script) and `out` (the IPython display handle); `episode == 999`
    hard-codes num_episodes = 1000.  Confirm before reuse.
    """
    if episode == 999:
        # convert Jain fairness to percent before smoothing
        Fairness = [element * 100 for element in Fairness]
        moving_avg_fairness = get_moving_average(moving_avg_period, Fairness)
        moving_avg_SUM1 = get_moving_average(moving_avg_period, SUM1)
        moving_avg_SUM2 = get_moving_average(moving_avg_period, SUM2)
        # scale per-user rates by the bandwidth factor used in the plots
        moving_avg_SUM1 = [element * 50 for element in moving_avg_SUM1]
        moving_avg_SUM2 = [element * 50 for element in moving_avg_SUM2]
        moving_avg_SUM3 = get_moving_average(moving_avg_period, SUM3)
        moving_avg_SUM4 = get_moving_average(moving_avg_period, SUM4)
        moving_avg_SUM3 = [element * 50 for element in moving_avg_SUM3]
        moving_avg_SUM4 = [element * 50 for element in moving_avg_SUM4]
        # total sum-rate = elementwise sum of the four smoothed curves
        SUM = np.add(moving_avg_SUM1,moving_avg_SUM2)
        SUM = np.add(SUM,moving_avg_SUM3)
        SUM = np.add(SUM,moving_avg_SUM4)
        with open(f"Sub-6-NLoS-SUM-Rate-{iteration+1}.pickle", "wb") as f:
            pickle.dump(SUM, f)
        with open(f"Sub-6-NLoS-FAIR-{iteration+1}.pickle", "wb") as f:
            pickle.dump(moving_avg_fairness, f)
    else:
        out.update(IPython.display.Pretty(f'Episode: {len(SUM1)}. Iteration: {iteration+1}.'))
def get_moving_average(period, values):
    """Simple moving average of `values` with window `period`.

    Returns a numpy array the same length as `values`; the first
    `period - 1` entries are zero-padded.  If there are fewer values than
    one full window, the result is all zeros.
    """
    vals = torch.tensor(values, dtype=torch.float)
    if len(vals) < period:
        return torch.zeros(len(vals)).numpy()
    windows = vals.unfold(dimension=0, size=period, step=1)
    averaged = windows.mean(dim=1).flatten(start_dim=0)
    padded = torch.cat((torch.zeros(period - 1), averaged))
    return padded.numpy()
def LineOfSight_Check(D,H):
    """Probabilistic line-of-sight test for a UAV link (urban, 2 GHz model).

    Args:
        D: horizontal ground distance to the user (metres).
        H: UAV altitude (metres).

    Returns:
        1 for line-of-sight, 2 for non-line-of-sight.  Elevation angles
        below 15 degrees are always NLoS.
    """
    c = 0.6  # urban 2GHz
    d = 0.11  # urban 2GHz
    RAND = random.uniform(0,1)
    # atan2 handles D == 0 (UAV directly overhead -> 90 deg) without the
    # ZeroDivisionError that math.atan(H / D) raised; for D > 0 the value
    # is identical to atan(H / D).
    teta = math.atan2(H, D) * 180/math.pi
    if teta < 15:
        return 2
    # LoS probability p1 grows with elevation; draw against it
    p1 = c * ((teta - 15) ** d)
    p2 = 1 - p1
    if p1 >= p2:
        if RAND >= p2:
            L = 1
        else:
            L = 2
    else:
        if RAND >= p1:
            L = 2
        else:
            L = 1
    return L
def Average(lst):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = sum(lst)
    count = len(lst)
    return total / count
def extract_tensors(experiences):
    """Convert a batch of Experience tuples into per-field batched tensors.

    Returns (states, actions, rewards, next_states) -- this order matches the
    unpacking at the training-loop call site
    (`states, actions, rewards, next_states = extract_tensors(...)`).
    The previous version returned next_states before rewards, so the caller's
    `rewards` received the next-state batch and vice versa, corrupting the
    TD target.
    """
    # Convert batch of Experiences to Experience of batches
    batch = Experience(*zip(*experiences))
    t1 = torch.cat(batch.state)
    t2 = torch.cat(batch.action)
    t3 = torch.cat(batch.reward)
    t4 = torch.cat(batch.next_state)
    return (t1,t2,t3,t4)
class Blob():
    """A point agent on a `size` x `size` grid.

    Serves both as the UAV (BlobEnv.reset additionally attaches the power
    coefficients a1..a4 and altitude H before action() is used) and as the
    four ground users, which spawn at fixed coordinates.
    """

    def __init__(self, size, USER1=False, USER2=False,
                 USER3=False, USER4=False):
        self.size = size
        # fixed spawn points; the default (no flag) is the grid centre
        if USER1:
            self.x, self.y = 35, 54
        elif USER2:
            self.x, self.y = 94, 1
        elif USER3:
            self.x, self.y = 29, 45
        elif USER4:
            self.x, self.y = 1, 97
        else:
            self.x, self.y = 50, 50

    def __str__(self):
        return f"Blob({self.x}, {self.y})"

    def __sub__(self, other):
        # relative displacement divided by 10 (observation scaling)
        return [(self.x - other.x) / 10, (self.y - other.y) / 10]

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def action(self, choice):
        """Decode one of 32 discrete actions.

        Each choice in [0, 31] bundles (diagonal move, a1 delta, a3 delta,
        H delta), exactly reproducing the original 32-branch ladder:
          * choice % 4 picks the diagonal step (1,1)/(-1,-1)/(-1,1)/(1,-1),
            except choice % 16 == 10 (choices 10 and 26), which hover --
            no planar move at all;
          * a1 changes by +0.01 when choice % 16 < 8, else -0.01;
          * a3's sign alternates every 4 choices (+ for quarters 0 and 2);
          * H climbs (+1) for choice < 16 and descends (-1) otherwise.
        Out-of-range choices apply no deltas but still run the clamping,
        matching the original if/elif structure.
        """
        if 0 <= choice <= 31:
            base = choice % 16
            if base != 10:
                steps = ((1, 1), (-1, -1), (-1, 1), (1, -1))
                dx, dy = steps[base % 4]
                self.move(x=dx, y=dy)
            self.a1 += 0.01 if base < 8 else -0.01
            self.a3 += 0.01 if (base // 4) % 2 == 0 else -0.01
            self.H += 1 if choice < 16 else -1
        # keep the power-allocation coefficients inside [0, 1]
        if self.a1 > 1:
            self.a1 = 1
        elif self.a1 < 0:
            self.a1 = 0
        if self.a3 > 1:
            self.a3 = 1
        elif self.a3 < 0:
            self.a3 = 0
        # enforce the minimum flight altitude
        if self.H <= 10:
            self.H = 10

    def move(self, x=False, y=False):
        """Translate by (x, y), then clamp onto the grid.

        NOTE(review): any axis passed as a falsy value (including 0) gets a
        random jitter in {-1, 0, 1}; callers only ever pass +/-1, so the
        random branch never fires in practice.
        """
        self.x += x if x else np.random.randint(-1, 2)
        self.y += y if y else np.random.randint(-1, 2)
        # clamp both coordinates into [0, size - 1]
        self.x = min(max(self.x, 0), self.size - 1)
        self.y = min(max(self.y, 0), self.size - 1)
class BlobEnv():
    """NOMA downlink environment: one UAV serves four fixed ground users,
    split into two NOMA pairs (1,2) and (3,4).

    Observations are 17-dim: the four scaled UAV-user offsets, the four power
    coefficients, the four linear channel gains and the altitude H.
    """
    SIZE = 100
    MOVE_PENALTY = 1
    OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3)  # 4
    UAV_N = 1  # UAV key in dict
    USER_N = 2  # USER key in dict
    UAV2_N = 4  # UAV2 key in dict
    # the dict! (colors, BGR, used only if the env is rendered)
    d = {1: (255, 175, 0),
         2: (0, 255, 0),
         3: (0, 0, 255),
         4: (175, 0, 255)}
    def reset(self):
        """Re-create the UAV and users and return the initial observation."""
        P = 1  # Transmitted power 30dbm (i.e. 1w)
        W = 5e7  # Bandwidth 50MHz
        fc = 2e9  # Carrier frequency = 2GHz
        N0 = 10e-17  # W/Hz  (NOTE(review): 10e-17 is 1e-16 -- confirm intended)
        LOS_PL = 1  # extra LoS path-loss term (dB)
        NLOS_PL = 20  # extra NLoS path-loss term (dB)
        N = N0 * W
        c = 3e8
        lamda = c/fc
        self.UAV = Blob(self.SIZE)
        self.UAV2 = Blob(self.SIZE)  # NOTE(review): UAV2 is never used below
        # per-episode traces of the four user rates and the fairness index
        self.SUM1 = []
        self.SUM2 = []
        self.SUM3 = []
        self.SUM4 = []
        self.Fairness = []
        # initial power split (a2 = 1 - a1, a4 = 1 - a3) and altitude
        self.UAV.a1 = 0.5
        self.UAV.a2 = 0.5
        self.UAV.a3 = 0.5
        self.UAV.a4 = 0.5
        self.UAV.H = 50
        H = self.UAV.H
        self.USER1 = Blob(self.SIZE, True, False, False, False)
        self.USER2 = Blob(self.SIZE, False, True, False, False)
        self.USER3 = Blob(self.SIZE, False, False, True, False)
        self.USER4 = Blob(self.SIZE, False, False, False, True)
        # scaled planar offsets UAV -> user
        ob1 = self.UAV-self.USER1
        ob2 = self.UAV-self.USER2
        ob3 = self.UAV-self.USER3
        ob4 = self.UAV-self.USER4
        # NOTE(review): sqrt is applied per component before summing, so D is
        # |dx| + |dy| (Manhattan), not the Euclidean distance -- confirm.
        D1 = np.sum(np.sqrt([(10*ob1[0])**2, (10*ob1[1])**2]))
        D2 = np.sum(np.sqrt([(10*ob2[0])**2, (10*ob2[1])**2]))
        D3 = np.sum(np.sqrt([(10*ob3[0])**2, (10*ob3[1])**2]))
        D4 = np.sum(np.sqrt([(10*ob4[0])**2, (10*ob4[1])**2]))
        # probabilistic LoS (1) / NLoS (2) decision per link
        self.L1 = LineOfSight_Check(D1,H)
        self.L2 = LineOfSight_Check(D2,H)
        self.L3 = LineOfSight_Check(D3,H)
        self.L4 = LineOfSight_Check(D4,H)
        # 3D distances built the same per-component way, including altitude
        Dt1 = np.sum(np.sqrt([ (10*ob1[0])**2, (10*ob1[1])**2, H**2 ]))
        Dt2 = np.sum(np.sqrt([ (10*ob2[0])**2, (10*ob2[1])**2, H**2 ]))
        Dt3 = np.sum(np.sqrt([ (10*ob3[0])**2, (10*ob3[1])**2, H**2 ]))
        Dt4 = np.sum(np.sqrt([ (10*ob4[0])**2, (10*ob4[1])**2, H**2 ]))
        # free-space path loss in dB plus the LoS/NLoS penalty
        if self.L1 == 1:
            h1 = 20*math.log10(Dt1) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h1 = 20*math.log10(Dt1) + 20*math.log10(fc) - 147.56 + NLOS_PL
        if self.L2 == 1:
            h2 = 20*math.log10(Dt2) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h2 = 20*math.log10(Dt2) + 20*math.log10(fc) - 147.56 + NLOS_PL
        if self.L3 == 1:
            h3 = 20*math.log10(Dt3) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h3 = 20*math.log10(Dt3) + 20*math.log10(fc) - 147.56 + NLOS_PL
        if self.L4 == 1:
            h4 = 20*math.log10(Dt4) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h4 = 20*math.log10(Dt4) + 20*math.log10(fc) - 147.56 + NLOS_PL
        # convert the path loss (dB) into linear channel gains
        h1 = -h1
        h2 = -h2
        h3 = -h3
        h4 = -h4
        h1 = 10 ** (h1/10)
        h2 = 10 ** (h2/10)
        h3 = 10 ** (h3/10)
        h4 = 10 ** (h4/10)
        a1 = self.UAV.a1
        a2 = 1 - a1
        a3 = self.UAV.a3
        a4 = 1 - a3
        observation = [ob1[0]] + [ob1[1]] + [ob2[0]] + [ob2[1]]+ [ob3[0]] + [ob3[1]]+ [ob4[0]] + [ob4[1]] + [a1] + [a2] + [a3] + [a4] + [h1] + [h2] + [h3] + [h4] + [H]
        self.episode_step = 0
        return observation
    def step(self, action):
        """Apply one UAV action, compute NOMA rates and reward.

        Returns (new_observation, new_observation_m, reward, done); the two
        observations are identical -- the second is the copy pushed into
        replay memory by the training loop.
        """
        done= False
        P = 1  # Transmitted power 30dbm (i.e. 1w)
        W = 5e7  # Bandwidth 50MHz
        fc = 2e9  # Carrier frequency = 2GHz
        N0 = 10e-17  # W/Hz
        LOS_PL = 1
        NLOS_PL = 20
        N = N0 * W
        H = self.UAV.H  # antenna Height
        c = 3e8
        lamda = c/fc
        H = self.UAV.H  # antenna Height (repeated assignment kept as-is)
        self.episode_step += 1
        # channel gains are computed from the PRE-action UAV position
        ob1 = self.UAV-self.USER1
        ob2 = self.UAV-self.USER2
        ob3 = self.UAV-self.USER3
        ob4 = self.UAV-self.USER4
        D1 = np.sum(np.sqrt([(10*ob1[0])**2, (10*ob1[1])**2]))
        D2 = np.sum(np.sqrt([(10*ob2[0])**2, (10*ob2[1])**2]))
        D3 = np.sum(np.sqrt([(10*ob3[0])**2, (10*ob3[1])**2]))
        D4 = np.sum(np.sqrt([(10*ob4[0])**2, (10*ob4[1])**2]))
        self.L1 = LineOfSight_Check(D1,H)
        self.L2 = LineOfSight_Check(D2,H)
        self.L3 = LineOfSight_Check(D3,H)
        self.L4 = LineOfSight_Check(D4,H)
        Dt1 = np.sum(np.sqrt([ (10*ob1[0])**2, (10*ob1[1])**2, H**2 ]))
        Dt2 = np.sum(np.sqrt([ (10*ob2[0])**2, (10*ob2[1])**2, H**2 ]))
        Dt3 = np.sum(np.sqrt([ (10*ob3[0])**2, (10*ob3[1])**2, H**2 ]))
        Dt4 = np.sum(np.sqrt([ (10*ob4[0])**2, (10*ob4[1])**2, H**2 ]))
        if self.L1 == 1:
            h1 = 20*math.log10(Dt1) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h1 = 20*math.log10(Dt1) + 20*math.log10(fc) - 147.56 + NLOS_PL
        if self.L2 == 1:
            h2 = 20*math.log10(Dt2) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h2 = 20*math.log10(Dt2) + 20*math.log10(fc) - 147.56 + NLOS_PL
        if self.L3 == 1:
            h3 = 20*math.log10(Dt3) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h3 = 20*math.log10(Dt3) + 20*math.log10(fc) - 147.56 + NLOS_PL
        if self.L4 == 1:
            h4 = 20*math.log10(Dt4) + 20*math.log10(fc) - 147.56 + LOS_PL
        else:
            h4 = 20*math.log10(Dt4) + 20*math.log10(fc) - 147.56 + NLOS_PL
        h1 = -h1
        h2 = -h2
        h3 = -h3
        h4 = -h4
        h1 = 10 ** (h1/10)
        h2 = 10 ** (h2/10)
        h3 = 10 ** (h3/10)
        h4 = 10 ** (h4/10)
        # apply the chosen action (moves the UAV and updates a1, a3, H)
        self.UAV.action(action)
        a1 = self.UAV.a1
        a2 = 1 - a1
        a3 = self.UAV.a3
        a4 = 1 - a3
        reward = 0
        # NOMA pair (1, 2): the stronger user decodes after SIC, the weaker
        # treats the stronger user's signal as interference; bonus reward
        # when the weaker user gets the larger power share
        if h1 >= h2:
            SUM1 = math.log2(1 + h1 * a1 * P/N)
            SUM2 = math.log2(1 + a2 * h2 * P / (a1 * h2 * P + N) )
            if a2 > a1:
                reward += 10
        else:
            SUM1 = math.log2(1 + a1 * h1 * P / (a2 * h1 * P + N) )
            SUM2 = math.log2(1 + h2 * a2 * P/N)
            if a1 > a2:
                reward += 10
        # NOMA pair (3, 4): same structure
        if h3 >= h4:
            SUM3 = math.log2(1 + h3 * a3 * P/N)
            SUM4 = math.log2(1 + a4 * h4 * P / (a3 * h4 * P + N) )
            if a4 > a3:
                reward += 10
        else:
            SUM3 = math.log2(1 + a3 * h3 * P / (a4 * h3 * P + N) )
            SUM4 = math.log2(1 + h4 * a4 * P/N)
            if a3 > a4:
                reward += 10
        # Jain's fairness index over the four rates
        reward_3 = (SUM1 + SUM2 + SUM3 + SUM4)**2 / (4 * (SUM1**2 + SUM2**2 + SUM3**2 + SUM4**2))
        self.Fairness.append(reward_3)
        if reward_3 >= 0.6 and reward_3 <= 0.65:
            reward += 10
        # channel-quality shaping term (gains are tiny; 1e7 rescales them)
        reward_6 = 1e7 * (h1+h2+h3+h4)
        reward += (SUM1 + SUM2 + SUM3 + SUM4) + reward_3 + reward_6
        self.SUM1.append(SUM1)
        self.SUM2.append(SUM2)
        self.SUM3.append(SUM3)
        self.SUM4.append(SUM4)
        new_observation_m = ([ob1[0]] + [ob1[1]] + [ob2[0]] + [ob2[1]]+ [ob3[0]] + [ob3[1]] + [ob4[0]] + [ob4[1]] + [a1] + [a2] + [a3] + [a4] + [h1] + [h2] + [h3] + [h4] + [H] )
        new_observation = new_observation_m
        # fixed-length episodes of 300 steps
        if self.episode_step >= 300:
            # NOTE(review): SUM11..SUM44, Fairnessl and iteration are module
            # globals owned by the training script; iteration is never
            # assigned in the visible code -- confirm the outer loop exists.
            SUM11.append(Average(self.SUM1))
            SUM22.append(Average(self.SUM2))
            SUM33.append(Average(self.SUM3))
            SUM44.append(Average(self.SUM4))
            Fairnessl.append(Average(self.Fairness))
            calc(SUM11,SUM22,SUM33,SUM44,Fairnessl, 100, iteration)
            done = True
        return new_observation,new_observation_m, reward, done
# ---- hyper-parameters ----
batch_size = 128
gamma = 0.999          # TD discount factor
eps_start = 0.9        # epsilon-greedy schedule
eps_end = 0.05
eps_decay = 200
target_update = 10     # episodes between target-network syncs
memory_size = 15000
lr = 0.001
num_episodes = 1000
num_of_actions = 32
num_of_arg_per_state = 17
ITERATIONS = 10
NUM_OF_LAYERS = [1]
NUM_OF_NEURONS_PER_LAYER = [128]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): BlobEnv.step / calc reference a global `iteration` that is
# never assigned in this file (ITERATIONS is unused) -- presumably an outer
# `for iteration in range(ITERATIONS):` loop was lost; confirm.
for num_of_layers in NUM_OF_LAYERS:
    for num_of_neurons_per_layer in NUM_OF_NEURONS_PER_LAYER:
        em = BlobEnv()
        strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay)
        agent = Agent(strategy, num_of_actions, device)
        memory = ReplayMemory(memory_size)
        policy_net = DQN(num_of_arg_per_state, num_of_layers,
                         num_of_neurons_per_layer, num_of_actions).to(device)
        target_net = DQN(num_of_arg_per_state, num_of_layers,
                         num_of_neurons_per_layer, num_of_actions).to(device)
        target_net.load_state_dict(policy_net.state_dict())
        target_net.eval()
        optimizer = optim.Adam(params=policy_net.parameters(), lr=lr)
        # per-run traces appended by BlobEnv.step at each episode end
        SUM11 = []
        SUM22 = []
        SUM33 = []
        SUM44 = []
        Fairnessl = []
        for episode in range(num_episodes):
            state = torch.tensor([em.reset()], dtype=torch.float32).to(device)
            for timestep in count():
                action = agent.select_action(state, policy_net)
                next_state, next_state_m, reward, done = em.step(action.item())
                reward = torch.tensor([reward], dtype=torch.int64).to(device)
                # (fixed) the two statements below were split across lines
                # with a dangling `.to(device)` -- a SyntaxError as written
                next_state = torch.tensor([next_state], dtype=torch.float32).to(device)
                next_state_m = torch.tensor([next_state_m], dtype=torch.float32).to(device)
                memory.push(Experience(state, action, next_state_m, reward))
                state = next_state
                if memory.can_provide_sample(batch_size):
                    experiences = memory.sample(batch_size)
                    # (fixed) this assignment had an empty right-hand side on
                    # its own line -- also a SyntaxError as written.
                    # NOTE(review): the unpack order must match the tuple
                    # order returned by extract_tensors.
                    states, actions, rewards, next_states = extract_tensors(experiences)
                    current_q_values = QValues.get_current(policy_net, states, actions)
                    next_q_values = QValues.get_next(target_net, next_states)
                    target_q_values = (next_q_values * gamma) + rewards
                    loss = F.mse_loss(current_q_values,
                                      target_q_values.unsqueeze(1))
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                if done:
                    break
            # periodically sync the target network with the policy network
            if episode % target_update == 0:
                target_net.load_state_dict(policy_net.state_dict())
| 23,348 | 29.402344 | 178 | py |
AdaptSky | AdaptSky-main/AdaptSky [Commented Version] .py | #Import used libraries
%matplotlib inline
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import math
from collections import namedtuple
from itertools import count
from PIL import Image
import cv2
import time
from ipywidgets.widgets.interaction import show_inline_matplotlib_plots
from IPython.display import clear_output, display
import os
import pickle
#Import torch libraries
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
#Print "Starting" in the screen to know that everything has been imported correctly
# Print "Starting" as a live display handle (updated later with progress).
# NOTE(review): `IPython` itself is never imported in this file (only
# `from IPython.display import clear_output, display`), so the qualified
# IPython.display.Pretty below raises NameError outside a notebook session
# where the name happens to be available -- confirm.
out = display(IPython.display.Pretty('Starting'), display_id=True)
# Seed torch and NumPy RNGs to ensure the same behaviour each run
torch.manual_seed(0)
np.random.seed(0)
#Neural Network class
class DQN(nn.Module):
    """Dueling Deep Q-Network.

    Two shared 128-unit fully-connected layers feed two independent heads:
    a scalar state-value stream and a 32-way advantage stream, combined as
    Q = V + A - mean(A).
    """

    def __init__(self, NUMBER_OF_ARGUMENTS_PER_STATE):
        super().__init__()
        # shared trunk
        self.fc1 = nn.Linear(NUMBER_OF_ARGUMENTS_PER_STATE, 128)
        self.fc2 = nn.Linear(128, 128)
        # dueling heads: state value (1) and per-action advantage (32)
        self.out_v = nn.Linear(128, 1)
        self.out_a = nn.Linear(128, 32)

    def forward(self, t):
        """Return 32 Q-values per sample in the batch."""
        t = t.flatten(start_dim=1)
        t = F.relu(self.fc1(t))
        t = F.relu(self.fc2(t))
        v = self.out_v(t)        # value stream
        a = self.out_a(t)        # advantage stream
        # dueling aggregation
        return v + a - a.mean()
#Initiating a tuple of experiences
Experience = namedtuple(
'Experience',
('state', 'action', 'next_state', 'reward')
)
#Initiating a ReplyMemory with a size of "capacity"
class ReplayMemory():
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.push_count = 0
def push(self, experience):
#Test the memory length to see whether we should append an experience at the end of the list or at the beginning
if len(self.memory) < self.capacity:
self.memory.append(experience)
else:
self.memory[self.push_count % self.capacity] = experience
self.push_count += 1
#Sample from reply memory to train the network
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
#Test if we can get a sample from the reply memory
def can_provide_sample(self, batch_size):
return len(self.memory) >= batch_size
#Exploitation and Exploration strategy
class EpsilonGreedyStrategy():
def __init__(self, start, end, decay):
self.start = start
self.end = end
self.decay = decay
def get_exploration_rate(self, current_step):
return self.end + (self.start - self.end) * \
math.exp(-1. * current_step / self.decay)
#Build an agent class (UAV class)
class Agent():
def __init__(self, strategy, num_actions, device):
self.current_step = 0
self.strategy = strategy
self.num_actions = num_actions
self.device = device
#Select an action from the state given by the environment
def select_action(self, state, policy_net):
rate = self.strategy.get_exploration_rate(self.current_step)
self.current_step += 1
#Check if the exploration rate is larger than a random number from 0 to 1, if so, choose a random action
if rate > random.random():
action = random.randrange(self.num_actions)
return torch.tensor([action]).to(self.device) #Explore
#Else choose the action via the neural network
else:
with torch.no_grad(): #This means do not update the weights and biases of the neural network after this forward process
return policy_net(state).argmax(dim=1).to(self.device) # exploit
class QValues():
#Check if CUDA is installed to train the network via the GPU, otherwise train via the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#Return the current Q values from the neural network output after inputting the states
@staticmethod
def get_current(policy_net, states, actions):
return policy_net(states).gather(dim=1, index=actions.unsqueeze(-1))
#Return the target Q values from the neural network output after inputting the next states
@staticmethod
def get_next(target_net, next_states):
return target_net(next_states).max(dim=1)[0].detach()
#Plot the moving averages of the values we saved through training
def plot(values,h1,h2,h3,h4,a1,a2,a3,a4,SUM1,SUM2,SUM3,SUM4,Fairness,H,AVG2, Fairness2, moving_avg_period):
#Get the moving average of the rewards
moving_avg_rewards = get_moving_average(moving_avg_period, values)
#Plot at the end of each training session
if episode == 999:
#AdaptSky fairness
Fairness = [element * 100 for element in Fairness]
moving_avg_fairness = get_moving_average(moving_avg_period, Fairness)
#SoA fairness
Fairness2 = [element * 100 for element in Fairness2]
moving_avg_fairness2 = get_moving_average(moving_avg_period, Fairness2)
#Channel conditions of cluster 1
moving_avg_h1 = get_moving_average(moving_avg_period, h1)
moving_avg_h2 = get_moving_average(moving_avg_period, h2)
#Channel conditions of cluster 2
moving_avg_h3 = get_moving_average(moving_avg_period, h3)
moving_avg_h4 = get_moving_average(moving_avg_period, h4)
#Power percentages of cluster 1
moving_avg_a1 = get_moving_average(moving_avg_period, a1)
moving_avg_a2 = get_moving_average(moving_avg_period, a2)
moving_avg_a1 = [element * 100 for element in moving_avg_a1]
moving_avg_a2 = [element * 100 for element in moving_avg_a2]
#Power percentages of cluster 2
moving_avg_a3 = get_moving_average(moving_avg_period, a3)
moving_avg_a4 = get_moving_average(moving_avg_period, a4)
moving_avg_a3 = [element * 100 for element in moving_avg_a3]
moving_avg_a4 = [element * 100 for element in moving_avg_a4]
#Calculate the moving average of cluster 1 spectral efficiencies and multiply it by 2000MHz to get the achievable sum-rate
moving_avg_SUM1 = get_moving_average(moving_avg_period, SUM1)
moving_avg_SUM2 = get_moving_average(moving_avg_period, SUM2)
moving_avg_SUM1 = [element * 2000 for element in moving_avg_SUM1]
moving_avg_SUM2 = [element * 2000 for element in moving_avg_SUM2]
#Calculate the moving average of cluster 2 spectral efficiencies and multiply it by 2000MHz to get the achievable sum-rate
moving_avg_SUM3 = get_moving_average(moving_avg_period, SUM3)
moving_avg_SUM4 = get_moving_average(moving_avg_period, SUM4)
moving_avg_SUM3 = [element * 2000 for element in moving_avg_SUM3]
moving_avg_SUM4 = [element * 2000 for element in moving_avg_SUM4]
#Calculate the average sum rate of all clusters
SUM = np.add(moving_avg_SUM1,moving_avg_SUM2)
SUM = np.add(SUM,moving_avg_SUM3)
SUM = np.add(SUM,moving_avg_SUM4)
#Calculate SoA UAV sum rate
avg2 = get_moving_average(moving_avg_period, AVG2)
avg2 = [element * 2000 for element in avg2]
#Calculate the UAV height
moving_avg_Height = get_moving_average(moving_avg_period, H)
#Print values
print(f"r = {r}, iteration = {i}")
print("Sum Rate Moving Average:",round(SUM[-1],2)/1000, " Gbps, Total SE = ", round(SUM[-1]/(2000),2), " bps/Hz")
print("SE1 = ", round(moving_avg_SUM1[-1]/2000, 2) , "SE2 = ", round(moving_avg_SUM2[-1]/2000, 2), "SE3 = ",round(moving_avg_SUM3[-1]/2000, 2), "SE4 = ", round(moving_avg_SUM4[-1]/2000, 2), "\n")
else:
#Print the current rewards to troubleshot any inefficient training process
out.update(IPython.display.Pretty(f'r = {r}, iteration = {i} \nMoving Average Rewards: {round(moving_avg_rewards[-1], 2)}, Episode: {len(SUM1)}.'))
#Calculate the moving average of any values given the period
def get_moving_average(period, values):
values = torch.tensor(values, dtype=torch.float)
if len(values) >= period:
moving_avg = values.unfold(dimension=0, size=period, step=1) \
.mean(dim=1).flatten(start_dim=0)
moving_avg = torch.cat((torch.zeros(period-1), moving_avg))
return moving_avg.numpy()
else:
moving_avg = torch.zeros(len(values))
return moving_avg.numpy()
#Check the mmWave line-of-sight probability
def mmLineOfSight_Check(D,H):
L = 1
return L
C = 9.6117 #Urban LOS probability parameter
Y = 0.1581 #Urban LOS probability parameter
RAND = random.uniform(0,1)
teta = math.asin(H/D) * 180/math.pi
p1 = 1 / ( 1 + (C * math.exp( -Y * (teta - C ) ) ) )
p2 = 1 - p1
if p1 >= p2:
if RAND >= p2:
L = 1
else:
L = 2
else:
if RAND >= p1:
L = 2
else:
L = 1
return L
#Calculate the average of any list
def Average(lst):
return sum(lst) / len(lst)
#Extract values from the experiences created above
def extract_tensors(experiences):
#Convert a batch of Experiences to Experience of batches
batch = Experience(*zip(*experiences))
t1 = torch.cat(batch.state)
t2 = torch.cat(batch.action)
t3 = torch.cat(batch.reward)
t4 = torch.cat(batch.next_state)
return (t1,t2,t3,t4)
class UAV():
def __init__(self, size, USER1=False, USER2=False, USER3=False, USER4=False):
#Locating the initial locations of each user
self.size = size
if USER1:
self.x = 35
self.y = 54
elif USER2:
self.x = 94
self.y = 1
elif USER3:
self.x = 29
self.y = 45
elif USER4:
self.x = 1
self.y = 97
else:
self.x = 50
self.y = 50
def __str__(self):
return f"UAV({self.x}, {self.y})"
#Get the location differences between two objects
def __sub__(self, other):
return [(self.x-other.x), (self.y-other.y)]
#Choose an action from the 32 choices
def action(self, choice):
if choice == 0:
self.move(x=1, y=1)
self.a1 += 0.01
self.a3 +=0.01
self.H += 1
elif choice == 1:
self.move(x=-1, y=-1)
self.a1 += 0.01
self.a3 +=0.01
self.H += 1
elif choice == 2:
self.move(x=-1, y=1)
self.a1 += 0.01
self.a3 +=0.01
self.H += 1
elif choice == 3:
self.move(x=1, y=-1)
self.a1 += 0.01
self.a3 +=0.01
self.H += 1
elif choice == 4:
self.move(x=1, y=1)
self.a1 += 0.01
self.a3 -=0.01
self.H += 1
elif choice == 5:
self.move(x=-1, y=-1)
self.a1 += 0.01
self.a3 -= 0.01
self.H += 1
elif choice == 6:
self.move(x=-1, y=1)
self.a1 += 0.01
self.a3 -= 0.01
self.H += 1
elif choice == 7:
self.move(x=1, y=-1)
self.a1 += 0.01
self.a3 -= 0.01
self.H += 1
elif choice == 8:
self.move(x=1, y=1)
self.a1 -= 0.01
self.a3 += 0.01
self.H += 1
elif choice == 9:
self.move(x=-1, y=-1)
self.a1 -= 0.01
self.a3 += 0.01
self.H += 1
elif choice == 10:
self.a1 -= 0.01
self.a3 += 0.01
self.H += 1
elif choice == 11:
self.move(x=1, y=-1)
self.a1 -= 0.01
self.a3 += 0.01
self.H += 1
elif choice == 12:
self.move(x=1, y=1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H += 1
elif choice == 13:
self.move(x=-1, y=-1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H += 1
elif choice == 14:
self.move(x=-1, y=1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H += 1
elif choice == 15:
self.move(x=1, y=-1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H += 1
if choice == 16:
self.move(x=1, y=1)
self.a1 += 0.01
self.a3 +=0.01
self.H -= 1
elif choice == 17:
self.move(x=-1, y=-1)
self.a1 += 0.01
self.a3 +=0.01
self.H -= 1
elif choice == 18:
self.move(x=-1, y=1)
self.a1 += 0.01
self.a3 +=0.01
self.H -= 1
elif choice == 19:
self.move(x=1, y=-1)
self.a1 += 0.01
self.a3 +=0.01
self.H -= 1
elif choice == 20:
self.move(x=1, y=1)
self.a1 += 0.01
self.a3 -=0.01
self.H -= 1
elif choice == 21:
self.move(x=-1, y=-1)
self.a1 += 0.01
self.a3 -= 0.01
self.H -= 1
elif choice == 22:
self.move(x=-1, y=1)
self.a1 += 0.01
self.a3 -= 0.01
self.H -= 1
elif choice == 23:
self.move(x=1, y=-1)
self.a1 += 0.01
self.a3 -= 0.01
self.H -= 1
elif choice == 24:
self.move(x=1, y=1)
self.a1 -= 0.01
self.a3 += 0.01
self.H -= 1
elif choice == 25:
self.move(x=-1, y=-1)
self.a1 -= 0.01
self.a3 += 0.01
self.H -= 1
elif choice == 26:
self.a1 -= 0.01
self.a3 += 0.01
self.H -= 1
elif choice == 27:
self.move(x=1, y=-1)
self.a1 -= 0.01
self.a3 += 0.01
self.H -= 1
elif choice == 28:
self.move(x=1, y=1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H -= 1
elif choice == 29:
self.move(x=-1, y=-1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H -= 1
elif choice == 30:
self.move(x=-1, y=1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H -= 1
elif choice == 31:
self.move(x=1, y=-1)
self.a1 -= 0.01
self.a3 -= 0.01
self.H -= 1
if self.a1 > 1:
self.a1 = 1
elif self.a1 < 0:
self.a1 = 0
if self.a3 > 1:
self.a3 = 1
elif self.a3 < 0:
self.a3 = 0
if self.H <= 10:
self.H =10
#Move the UAV and/or the users
def move(self, x=False, y=False):
if not x:
self.x += np.random.randint(-1, 2)
else:
self.x += x
if not y:
self.y += np.random.randint(-1, 2)
else:
self.y += y
if self.x < 0:
self.x = 0
elif self.x > self.size-1:
self.x = self.size-1
if self.y < 0:
self.y = 0
elif self.y > self.size-1:
self.y = self.size-1
class mmWave_Env():
SIZE = 100 #The environment size
MOVE_PENALTY = 1 #Didn't use it but it was meant to make the UAV loss reward whenever it moves
OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3) #RGB troubleshoot screen
UAV_N = 1 #UAV key in the colors dictionary
USER_N = 2 #USERs key in the colors dictionary
UAV2_N = 4 # SoA UAV key in the colors dictionary
#The colors dictionary
d = {1: (255, 175, 0),
2: (0, 255, 0),
3: (0, 0, 255),
4: (175, 0, 255)}
#Reset the environment
def reset(self):
p = 20
P = 10**((p-30)/10) #Transmitted power 20dbm (i.e. .1w)
N_uav = 8 #Number of Tx antennas
N_ue = 8 #Number of Rx antennas
G = N_uav * N_ue #MIMO Gain
P *= G
W = 2e9 #Bandwidth of 2 GHz
fc = 28e9 #Carrier frequency of 28 GHz
NF = 10**(5/10) #5dB noise figure
TN = 10**(-114/10) #-84dBm thermal noise
N = NF * TN
C_LOS = 10**(-6.4) #mmWave LoS Urban parameter
a_LOS = 2 #mmWave LoS Urban parameter
C_NLOS = 10**(-7.2) #mmWave NLoS Urban parameter
a_NLOS = 2.92 #mmWave NLoS Urban parameter
#Initiating environment objects
self.UAV = UAV(self.SIZE)
self.UAV2 = UAV(self.SIZE)
self.USER1 = UAV(self.SIZE, True, False, False, False)
self.USER2 = UAV(self.SIZE, False, True, False, False)
self.USER3 = UAV(self.SIZE, False, False, True, False)
self.USER4 = UAV(self.SIZE, False, False, False, True)
self.UAV2.x = int((self.USER1.x +self.USER2.x + self.USER3.x + self.USER4.x)/4)
self.UAV2.y = int((self.USER1.y +self.USER2.y + self.USER3.y + self.USER4.y)/4)
#Initiating lists to store eact time step values
self.h1 = []
self.h2 = []
self.h3 = []
self.h4 = []
self.a1 = []
self.a2 = []
self.a3 = []
self.a4 = []
self.SUM1 = []
self.SUM2 = []
self.SUM3 = []
self.SUM4 = []
self.Fairness = []
self.Hl = []
self.NLOS = []
self.NOMA = []
self.reward1 = []
self.reward2 = []
self.reward3 = []
self.reward4 = []
self.reward5 = []
self.reward6 = []
#Initiate power percentages
self.UAV.a1 = 0.5
self.UAV.a2 = 0.5
self.UAV.a3 = 0.5
self.UAV.a4 = 0.5
#Initiate UAVs height
self.UAV.H = 50
H2 = 50
#Get the difference between the UAV and each user
ob1 = self.UAV-self.USER1
ob2 = self.UAV-self.USER2
ob3 = self.UAV-self.USER3
ob4 = self.UAV-self.USER4
#Get the perpendicular distance between the UAV and each user
Dt1 = np.sum(np.sqrt([ (ob1[0])**2, (ob1[1])**2, H**2 ]))
Dt2 = np.sum(np.sqrt([ (ob2[0])**2, (ob2[1])**2, H**2 ]))
Dt3 = np.sum(np.sqrt([ (ob3[0])**2, (ob3[1])**2, H**2 ]))
Dt4 = np.sum(np.sqrt([ (ob4[0])**2, (ob4[1])**2, H**2 ]))
#Line-of-sight check
self.L1 = mmLineOfSight_Check(Dt1,H)
self.L2 = mmLineOfSight_Check(Dt2,H)
self.L3 = mmLineOfSight_Check(Dt3,H)
self.L4 = mmLineOfSight_Check(Dt4,H)
#Calculate the path loss for each user
if self.L1 == 1:
h1 = C_LOS * Dt1**(-a_LOS)
else:
h1 = C_NLOS * Dt1**(-a_NLOS)
if self.L2 == 1:
h2 = C_LOS * Dt2**(-a_LOS)
else:
h2 = C_NLOS * Dt2**(-a_NLOS)
if self.L3 == 1:
h3 = C_LOS * Dt3**(-a_LOS)
else:
h3 = C_NLOS * Dt3**(-a_NLOS)
if self.L4 == 1:
h4 = C_LOS * Dt4**(-a_LOS)
else:
h4 = C_NLOS * Dt4**(-a_NLOS)
#put each state in an observations list
observation = [ob1[0]] + [ob1[1]] + [ob2[0]] + [ob2[1]]+ [ob3[0]] + [ob3[1]]+ [ob4[0]] + [ob4[1]] + [a1] + [a2] + [a3] + [a4] + [h1] + [h2] + [h3] + [h4] + [H]
#Initiate episodes count
self.episode_step = 0
return observation
#Calculate the states' values and take an action
def step(self, action):
done= False
p = 20
P = 10**((p-30)/10) #Transmitted power 20dbm (i.e. .1w)
N_uav = 8
N_ue = 8
G = N_uav * N_ue
P *= G
W = 2e9 #Bandwidth = 2 GHz
fc = 28e9 # Carrier frequency = 28 GHz
NF = 10**(5/10) #5dB noise figure
TN = 10**(-114/10) #-84dBm thermal noise
N = NF * TN
C_LOS = 10**(-6.4)
a_LOS = 2
C_NLOS = 10**(-7.2)
a_NLOS = 2.92
H = self.UAV.H #Current UAV's antenna height
#There are some redundancies in this part that can easily be managed by a function
self.episode_step += 1
ob1 = self.UAV-self.USER1
ob2 = self.UAV-self.USER2
ob3 = self.UAV-self.USER3
ob4 = self.UAV-self.USER4
H = self.UAV.H
Dt1 = np.sum(np.sqrt([ (ob1[0])**2, (ob1[1])**2, H**2 ]))
Dt2 = np.sum(np.sqrt([ (ob2[0])**2, (ob2[1])**2, H**2 ]))
Dt3 = np.sum(np.sqrt([ (ob3[0])**2, (ob3[1])**2, H**2 ]))
Dt4 = np.sum(np.sqrt([ (ob4[0])**2, (ob4[1])**2, H**2 ]))
self.L1 = mmLineOfSight_Check(Dt1,H)
self.L2 = mmLineOfSight_Check(Dt2,H)
self.L3 = mmLineOfSight_Check(Dt3,H)
self.L4 = mmLineOfSight_Check(Dt4,H)
if self.L1 == 1:
h1 = C_LOS * Dt1**(-a_LOS)
self.NLOS.append(0)
else:
h1 = C_NLOS * Dt1**(-a_NLOS)
self.NLOS.append(1)
if self.L2 == 1:
h2 = C_LOS * Dt2**(-a_LOS)
self.NLOS.append(0)
else:
h2 = C_NLOS * Dt2**(-a_NLOS)
self.NLOS.append(1)
if self.L3 == 1:
h3 = C_LOS * Dt3**(-a_LOS)
self.NLOS.append(0)
else:
h3 = C_NLOS * Dt3**(-a_NLOS)
self.NLOS.append(1)
if self.L4 == 1:
h4 = C_LOS * Dt4**(-a_LOS)
self.NLOS.append(0)
else:
h4 = C_NLOS * Dt4**(-a_NLOS)
self.NLOS.append(1)
#Take an action
self.UAV.action(action)
#Receive the new power percentages
a1 = self.UAV.a1
a2 = 1 - a1
a3 = self.UAV.a3
a4 = 1 - a3
#Append the new states
self.h1.append(h1)
self.h2.append(h2)
self.h3.append(h3)
self.h4.append(h4)
self.a1.append(a1)
self.a2.append(a2)
self.a3.append(a3)
self.a4.append(a4)
self.Hl.append(H)
#Reset reward values
reward = 0
reward_1 = 0
reward_2 = 0
reward_4 = 0
reward_5 = 0
reward_6 = 0
#SIC check and spectral efficiency calculations of cluster 1
if h1 >= h2:
SUM1 = math.log2(1 + h1 * a1 * P/N)
SUM2 = math.log2(1 + a2 * h2 * P / (a1 * h2 * P + N) )
reward_1 += SUM1
reward_2 += SUM2
else:
SUM1 = math.log2(1 + a1 * h1 * P / (a2 * h1 * P + N) )
SUM2 = math.log2(1 + h2 * a2 * P/N)
reward_1 += SUM2
reward_2 += SUM1
#SIC check and spectral efficiency calculations of cluster 2
if h3 >= h4:
SUM3 = math.log2(1 + h3 * a3 * P/N)
SUM4 = math.log2(1 + a4 * h4 * P / (a3 * h4 * P + N) )
reward_4 += SUM3
reward_5 += SUM4
else:
SUM3 = math.log2(1 + a3 * h3 * P / (a4 * h3 * P + N) )
SUM4 = math.log2(1 + h4 * a4 * P/N)
reward_4 += SUM4
reward_5 += SUM3
#Fairness calculations
reward_3 = (SUM1 + SUM2 + SUM3 + SUM4)**2 / (4 * (SUM1**2 + SUM2**2 + SUM3**2 + SUM4**2))
self.Fairness.append(reward_3)
self.SUM1.append(SUM1)
self.SUM2.append(SUM2)
self.SUM3.append(SUM3)
self.SUM4.append(SUM4)
#Check if each user spectral efficiency is above the threshold
if SUM1 >= r:
reward += 100
if SUM2 >= r:
reward += 100
if SUM3 >= r:
reward += 100
if SUM4 >= r:
reward += 100
#A motivation to force the UAV to achieve our objective
if reward >= 400:
SUM1*=10
SUM2*=10
SUM3*=10
SUM4*=10
#Reward function weights
w1 = 1
w2 = 0
w3 = 0
reward_3 *= w3
reward_6 += 2e10 * (h1+h2+h3+h4) * w2
reward += w1* (SUM1 + SUM2 + SUM3 + SUM4) + reward_3 + reward_6
self.reward1.append(reward_1)
self.reward2.append(reward_2)
self.reward3.append(reward_3)
self.reward4.append(reward_4)
self.reward5.append(reward_5)
self.reward6.append(reward_6)
new_observation_m = ([ob1[0]] + [ob1[1]] + [ob2[0]] + [ob2[1]]+ [ob3[0]] + [ob3[1]] + [ob4[0]] + [ob4[1]] + [a1] + [a2] + [a3] + [a4] + [h1] + [h2] + [h3] + [h4] + [H])
#new_obervation is used in moving users only, otherwise new_observation = new_observation_m
new_observation = new_observation_m
#At the end of each episode calculate SoA values
if self.episode_step >= 300:
ob21 = self.UAV2-self.USER1
ob22 = self.UAV2-self.USER2
ob23 = self.UAV2-self.USER3
ob24 = self.UAV2-self.USER4
H2 = 50
Dt21 = np.sum(np.sqrt([ (ob21[0])**2, (ob21[1])**2, H2**2 ]))
Dt22 = np.sum(np.sqrt([ (ob22[0])**2, (ob22[1])**2, H2**2 ]))
Dt23 = np.sum(np.sqrt([ (ob23[0])**2, (ob23[1])**2, H2**2 ]))
Dt24 = np.sum(np.sqrt([ (ob24[0])**2, (ob24[1])**2, H2**2 ]))
h221 = C_LOS * Dt21**(-a_LOS)
h222 = C_LOS * Dt22**(-a_LOS)
h223 = C_LOS * Dt23**(-a_LOS)
h224 = C_LOS * Dt24**(-a_LOS)
if h221 >= h222:
a222 = ((2**r - 1)/2**r) * (1 + N/(P*h222))
if a222 >= 1:
a222 = 1
a221 = 1 - a222
SUM221 = math.log2(1 + h221 * a221 * P/N)
SUM222 = math.log2(1 + a222 * h222 * P / (a221 * h222 * P + N) )
else:
a221 = ((2**r - 1)/2**r) * (1 + N/(P*h221))
if a221 >= 1:
a221 = 1
a222 = 1-a221
SUM221 = math.log2(1 + a221 * h221 * P / (a222 * h221 * P + N) )
SUM222 = math.log2(1 + h222 * a222 * P/N)
if h223 >= h224:
a224 = ((2**r - 1)/2**r) * (1 + N/(P*h224))
if a224 >= 1:
a224 = 1
a223 = 1 - a224
SUM223 = math.log2(1 + h223 * a223 * P/N)
SUM224 = math.log2(1 + a224 * h224 * P / (a223 * h224 * P + N) )
else:
a223 = ((2**r - 1)/2**r) * (1 + N/(P*h223))
if a223 >= 1:
a223 = 1
a224 = 1 - a223
SUM223 = math.log2(1 + a223 * h223 * P / (a224 * h223 * P + N) )
SUM224 = math.log2(1 + h224 * a224 * P/N)
#Calculate SoA's sum rate and fairness
average_sum_rate2 = SUM221 + SUM222 + SUM223 + SUM224
Fairness222 = (SUM221 + SUM222 + SUM223 + SUM224)**2 / (4 * (SUM221**2 + SUM222**2 + SUM223**2 + SUM224**2))
#Take the average of the episode generated values
h11.append(Average(self.h1))
h22.append(Average(self.h2))
h33.append(Average(self.h3))
h44.append(Average(self.h4))
a11.append(Average(self.a1))
a22.append(Average(self.a2))
a33.append(Average(self.a3))
a44.append(Average(self.a4))
SUM11.append(Average(self.SUM1))
SUM22.append(Average(self.SUM2))
SUM33.append(Average(self.SUM3))
SUM44.append(Average(self.SUM4))
reward1.append(Average(self.reward1))
reward2.append(Average(self.reward2))
reward3.append(Average(self.reward3))
reward4.append(Average(self.reward4))
reward5.append(Average(self.reward5))
reward6.append(Average(self.reward6))
average_episode_reward = episode_reward/self.episode_step
Fairnessl.append(Average(self.Fairness))
episode_rewards.append(average_episode_reward)
episode_durations.append(timestep)
Height.append(Average(self.Hl))
AVG2.append(average_sum_rate2)
Fairnessl_2.append(Fairness222)
#End the episode
done = True
#Call the plot function
plot(episode_rewards,reward1,reward2,reward3,reward4,reward5,reward6,h11,h22,h33,h44,a11,a22,a33,a44,SUM11,SUM22,SUM33,SUM44,Fairnessl,Height,AVG2,Fairnessl_2, 100)
#At the end of the training session, calculate and plot the last moving average value
if episode >=999:
average_h1 = 10 * math.log10(h11[-1])
average_h2 = 10 * math.log10(h22[-1])
average_h3 = 10 * math.log10(h33[-1])
average_h4 = 10 * math.log10(h44[-1])
average_h21 = 10* math.log10(h221)
average_h22 = 10* math.log10(h222)
average_h23 = 10* math.log10(h223)
average_h24 = 10* math.log10(h224)
average_sum_rate = SUM11[-1] + SUM22[-1] + SUM33[-1] + SUM44[-1]
print("\n UAV2 ")
print("Sum Rate:", round(2*average_sum_rate2, 2), "Gbps, Total SE = ", round(average_sum_rate2, 2), " EE = ", round(average_sum_rate2/(P),2))
print("SE1: ",round(SUM221, 2),"Bits/s/Hz, SE2: ",round(SUM222, 2),"Bits/s/Hz, SE3: ",round(SUM223, 2),"Bits/s/Hz, SE4: ",round(SUM224, 2),"Bits/s/Hz")
return new_observation,new_observation_m, reward, done
#Render the troubleshoot function
def render(self):
img = self.get_image()
img = img.resize((500, 500)) # Resizing
cv2.imshow("UAV Beta 1.0", np.array(img))
cv2.waitKey(1)
#Get the environment image
def get_image(self):
env = np.full((self.SIZE, self.SIZE, 3), 255, dtype=np.uint8) #Start an RGB image
env[self.USER1.x][self.USER1.y] = self.d[(self.L1+1)] #Set the pixel value to d[(self.L1+1)] color from the colors dictionary
env[self.USER2.x][self.USER2.y] = self.d[(self.L2+1)] #Set the pixel value to d[(self.L2+1)] color from the colors dictionary
env[self.USER3.x][self.USER3.y] = self.d[(self.L3+1)] #Set the pixel value to d[(self.L3+1)] color from the colors dictionary
env[self.USER4.x][self.USER4.y] = self.d[(self.L4+1)] #Set the pixel value to d[(self.L4+1)] color from the colors dictionary
env[self.UAV.x][self.UAV.y] = self.d[self.UAV_N] #Set the pixel value to d[self.UAV_N] color from the colors dictionary
img = Image.fromarray(env, 'RGB') #Transform the array to an RGB image
return img
#Set DRL parameters
batch_size = 128
gamma = 0.999
eps_start = 0.9
eps_end = 0.05
eps_decay = 200
target_update = 10
memory_size = 15000
lr = 0.001
num_episodes = 1000
num_of_actions = 32
num_of_arg_per_state = 17
SHOW_PREVIEW = False
AGGREGATE_STATS_EVERY = 10
ITERATIONS = 5
#For threshold spectral efficiency 0 to 3.5bps/Hz
for r in np.arange(0, 3.5, 0.5):
#Repeat training sessions "ITERATIONS" number of times to take the confidence interval afterward
for i in range(ITERATIONS):
#Check if CUDA is installed to train the network via the GPU, otherwise train via the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#Initialize the environment
em = mmWave_Env()
#Initialize Epsilon Greedy Strategy with the parameters we set above
strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay)
#Initialize the agent with the parameters we set above
agent = Agent(strategy, num_of_actions, device)
#Initialize the replay memory
memory = ReplayMemory(memory_size)
#Initialize the policy network
policy_net = DQN(num_of_arg_per_state).to(device)
#Initialize the target network
target_net = DQN(num_of_arg_per_state).to(device)
#Copy policy net' parameters to target net
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
#Initialize Adam's optimizer and the learning rate to lr
optimizer = optim.Adam(params=policy_net.parameters(), lr=lr)
#Initialize the evaluation lists
episode_durations = []
episode_rewards = []
episode_wins = []
h11 = []
h22 = []
h33 = []
h44 = []
a11 = []
a22 = []
a33 = []
a44 = []
SUM11 = []
SUM22 = []
SUM33 = []
SUM44 = []
TOTAL_SUM = []
Fairnessl = []
Height = []
reward1 = []
reward2 = []
reward3 = []
reward4 = []
reward5 = []
reward6 = []
AVG2 = []
Fairnessl_2 = []
#Start training session
for episode in range(num_episodes):
#Reset the environment
state = torch.tensor([em.reset()], dtype=torch.float32).to(device)
episode_reward = 0
for timestep in count():
#Choose an action and store it in the action variable
action = agent.select_action(state, policy_net)
#Execute the action and receive the next_state and reward
next_state, next_state_m, reward, done = em.step(action.item())
#Add the rewards to the episode_reward variable
episode_reward += reward
#Change the reward, next_state, next_state_m to a torch tensor variable so the torch library can handle it
reward = torch.tensor([reward], dtype=torch.int64).to(device)
next_state = torch.tensor([next_state], dtype=torch.float32).to(device)
next_state_m = torch.tensor([next_state_m], dtype=torch.float32).to(device)
#Store state, action, next_state_m, reward in the reply memory after converting it to an experience tuble
memory.push(Experience(state, action, next_state_m, reward))
state = next_state
#Test if memory can provide a sample of size "batch_size"
if memory.can_provide_sample(batch_size):
#Sample a random batch with the size of "batch_size"
experiences = memory.sample(batch_size)
#Extract values from Experiences tuble
states, actions, rewards, next_states = extract_tensors(experiences)
#Get the current and next Q value
current_q_values = QValues.get_current(policy_net, states, actions)
next_q_values = QValues.get_next(target_net, next_states)
#Calculate the target Q values
target_q_values = (next_q_values * gamma) + rewards
#Calculate the loss between current_q_values and target_q_values, and update the neural network parameters accordingly
loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1))
optimizer.zero_grad()
loss.backward()
optimizer.step()
#Show the troubleshooting screen if SHOW_PREVIEW is true and the episode is "AGGREGATE_STATS_EVERY" multiples
if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
#Call the render function
em.render()
#Break, if the episode is finished
if done:
break
#Clone the policy network parameters to the target network parameters if episode number is "target_update" multiples
if episode % target_update == 0:
target_net.load_state_dict(policy_net.state_dict()) | 36,953 | 32.965074 | 203 | py |
greedy | greedy-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# greedy documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 2 14:36:59 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo',
'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'greedy'
copyright = u'2019, Paul Yushkevich'
author = u'Paul Yushkevich'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.1'
# The full version, including alpha/beta/rc tags.
release = u'1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'greedydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'greedy.tex', u'greedy Documentation',
u'Paul Yushkevich', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'greedy', u'greedy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'greedy', u'greedy Documentation',
author, 'greedy', 'One line description of project.',
'Miscellaneous'),
]
def setup(app):
app.add_stylesheet('css/custom.css')
| 5,243 | 29.312139 | 79 | py |
CADP | CADP-main/CADP-PG/onpolicy/config.py | import argparse
def get_config():
    """
    The configuration parser for common hyperparameters of all environment.
    Please reach each `scripts/train/<env>_runner.py` file to find private hyperparameters
    only used in <env>.
    Prepare parameters:
        --algorithm_name <algorithm_name>
            specify the algorithm, including `["rmappo", "mappo", "rmappg", "mappg", "trpo"]`
        --experiment_name <str>
            an identifier to distinguish different experiment.
        --seed <int>
            set seed for numpy and torch
        --cuda
            by default True, will use GPU to train; or else will use CPU;
        --cuda_deterministic
            by default, make sure random seed effective. if set, bypass such function.
        --n_training_threads <int>
            number of training threads working in parallel. by default 1
        --n_rollout_threads <int>
            number of parallel envs for training rollout. by default 32
        --n_eval_rollout_threads <int>
            number of parallel envs for evaluating rollout. by default 1
        --n_render_rollout_threads <int>
            number of parallel envs for rendering, could only be set as 1 for some environments.
        --num_env_steps <int>
            number of env steps to train (default: 20e6)
        --user_name <str>
            [for wandb usage], to specify user's name for simply collecting training data.
        --use_wandb
            [for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data.
    Env parameters:
        --env_name <str>
            specify the name of environment
        --use_obs_instead_of_state
            [only for some env] by default False, will use global state; or else will use concatenated local obs.
    Replay Buffer parameters:
        --episode_length <int>
            the max length of episode in the buffer.
    Network parameters:
        --share_policy
            by default True, all agents will share the same network; set to make training agents use different policies.
        --use_centralized_V
            by default True, use centralized training mode; or else will decentralized training mode.
        --stacked_frames <int>
            Number of input frames which should be stack together.
        --hidden_size <int>
            Dimension of hidden layers for actor/critic networks
        --layer_N <int>
            Number of layers for actor/critic networks
        --use_ReLU
            by default True, will use ReLU. or else will use Tanh.
        --use_popart
            by default True, use PopArt to normalize rewards.
        --use_valuenorm
            by default True, use running mean and std to normalize rewards.
        --use_feature_normalization
            by default True, apply layernorm to normalize inputs.
        --use_orthogonal
            by default True, use Orthogonal initialization for weights and 0 initialization for biases. or else, will use xavier uniform initialization.
        --gain
            by default 0.01, use the gain # of last action layer
        --use_naive_recurrent_policy
            by default False, use the whole trajectory to calculate hidden states.
        --use_recurrent_policy
            by default, use Recurrent Policy. If set, do not use.
        --recurrent_N <int>
            The number of recurrent layers ( default 1).
        --data_chunk_length <int>
            Time length of chunks used to train a recurrent_policy, default 10.
    Optimizer parameters:
        --lr <float>
            learning rate parameter,  (default: 5e-4, fixed).
        --critic_lr <float>
            learning rate of critic  (default: 5e-4, fixed)
        --opti_eps <float>
            RMSprop optimizer epsilon (default: 1e-5)
        --weight_decay <float>
            coefficient of weight decay (default: 0)
    PPO parameters:
        --ppo_epoch <int>
            number of ppo epochs (default: 15)
        --use_clipped_value_loss
            by default, clip loss value. If set, do not clip loss value.
        --clip_param <float>
            ppo clip parameter (default: 0.2)
        --num_mini_batch <int>
            number of batches for ppo (default: 1)
        --entropy_coef <float>
            entropy term coefficient (default: 0.01)
        --use_max_grad_norm
            by default, use max norm of gradients. If set, do not use.
        --max_grad_norm <float>
            max norm of gradients (default: 0.5)
        --use_gae
            by default, use generalized advantage estimation. If set, do not use gae.
        --gamma <float>
            discount factor for rewards (default: 0.99)
        --gae_lambda <float>
            gae lambda parameter (default: 0.95)
        --use_proper_time_limits
            by default, the return value does consider limits of time. If set, compute returns with considering time limits factor.
        --use_huber_loss
            by default, use huber loss. If set, do not use huber loss.
        --use_value_active_masks
            by default True, whether to mask useless data in value loss.
        --huber_delta <float>
            coefficient of huber loss.
    PPG parameters:
        --aux_epoch <int>
            number of auxiliary epochs. (default: 4)
        --clone_coef <float>
            clone term coefficient (default: 0.01)
    Run parameters:
        --use_linear_lr_decay
            by default, do not apply linear decay to learning rate. If set, use a linear schedule on the learning rate
    Save & Log parameters:
        --save_interval <int>
            time duration between continuous twice models saving.
        --log_interval <int>
            time duration between continuous twice log printing.
    Eval parameters:
        --use_eval
            by default, do not start evaluation. If set, start evaluation alongside with training.
        --eval_interval <int>
            time duration between continuous twice evaluation progress.
        --eval_episodes <int>
            number of episodes of a single evaluation.
    Render parameters:
        --save_gifs
            by default, do not save render video. If set, save video.
        --use_render
            by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam.
        --render_episodes <int>
            the number of episodes to render a given env
        --ifi <float>
            the play interval of each rendered image in saved video.
    Pretrained parameters:
        --model_dir <str>
            by default None. set the path to pretrained model.
    """
    parser = argparse.ArgumentParser(
        description='onpolicy', formatter_class=argparse.RawDescriptionHelpFormatter)
    # prepare parameters
    parser.add_argument("--algorithm_name", type=str,
                        default='mappo', choices=["rmappo", "mappo"])
    parser.add_argument("--experiment_name", type=str, default="check", help="an identifier to distinguish different experiment.")
    parser.add_argument("--seed", type=int, default=1, help="Random seed for numpy/torch")
    parser.add_argument("--seed_specify", action="store_true",
                        default=False, help="Random or specify seed for numpy/torch")
    parser.add_argument("--cuda", action='store_false', default=True, help="by default True, will use GPU to train; or else will use CPU;")
    parser.add_argument("--cuda_deterministic",
                        action='store_false', default=True, help="by default, make sure random seed effective. if set, bypass such function.")
    parser.add_argument("--n_training_threads", type=int,
                        default=1, help="Number of torch threads for training")
    parser.add_argument("--n_rollout_threads", type=int, default=8,
                        help="Number of parallel envs for training rollouts")
    parser.add_argument("--n_eval_rollout_threads", type=int, default=1,
                        help="Number of parallel envs for evaluating rollouts")
    parser.add_argument("--n_render_rollout_threads", type=int, default=1,
                        help="Number of parallel envs for rendering rollouts")
    # BUGFIX: `20e6` is a float literal; argparse only applies `type=int` to
    # command-line strings, so the *default* used to be a float (2e7). Cast
    # once so code indexing/iterating with this value gets a true int.
    parser.add_argument("--num_env_steps", type=int, default=int(20e6),
                        help='Number of environment steps to train (default: 20e6)')
    parser.add_argument("--user_name", type=str, default='admin', help="[for wandb usage], to specify user's name for simply collecting training data.")
    parser.add_argument("--use_wandb", action='store_false', default=True, help="[for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data.")
    # env parameters
    parser.add_argument("--env_name", type=str, default='StarCraft2', help="specify the name of environment")
    parser.add_argument("--use_obs_instead_of_state", action='store_true',
                        default=False, help="Whether to use global state or concatenated obs")
    # replay buffer parameters
    parser.add_argument("--episode_length", type=int,
                        default=400, help="Max length for any episode")
    # network parameters
    parser.add_argument("--share_policy", action='store_false',
                        default=True, help='Whether agent share the same policy')
    parser.add_argument("--use_centralized_V", action='store_false',
                        default=True, help="Whether to use centralized V function")
    parser.add_argument("--stacked_frames", type=int, default=1,
                        help="Dimension of hidden layers for actor/critic networks")
    parser.add_argument("--use_stacked_frames", action='store_true',
                        default=False, help="Whether to use stacked_frames")
    parser.add_argument("--hidden_size", type=int, default=64,
                        help="Dimension of hidden layers for actor/critic networks")
    parser.add_argument("--layer_N", type=int, default=1,
                        help="Number of layers for actor/critic networks")
    parser.add_argument("--use_ReLU", action='store_false',
                        default=True, help="Whether to use ReLU")
    parser.add_argument("--use_popart", action='store_true', default=False, help="by default False, use PopArt to normalize rewards.")
    parser.add_argument("--use_valuenorm", action='store_false', default=True, help="by default True, use running mean and std to normalize rewards.")
    parser.add_argument("--use_feature_normalization", action='store_false',
                        default=True, help="Whether to apply layernorm to the inputs")
    parser.add_argument("--use_orthogonal", action='store_false', default=True,
                        help="Whether to use Orthogonal initialization for weights and 0 initialization for biases")
    parser.add_argument("--gain", type=float, default=0.01,
                        help="The gain # of last action layer")
    parser.add_argument("--use_CADP", action='store_true',
                        default=False, help="Whether to use CADP")
    parser.add_argument("--cadp_breakpoint", type=int, default=3000000,
                        help="cadp breakpoint")
    # recurrent parameters
    parser.add_argument("--use_naive_recurrent_policy", action='store_true',
                        default=False, help='Whether to use a naive recurrent policy')
    parser.add_argument("--use_recurrent_policy", action='store_false',
                        default=True, help='use a recurrent policy')
    parser.add_argument("--recurrent_N", type=int, default=1, help="The number of recurrent layers.")
    parser.add_argument("--data_chunk_length", type=int, default=10,
                        help="Time length of chunks used to train a recurrent_policy")
    # optimizer parameters
    parser.add_argument("--lr", type=float, default=5e-4,
                        help='learning rate (default: 5e-4)')
    parser.add_argument("--critic_lr", type=float, default=5e-4,
                        help='critic learning rate (default: 5e-4)')
    parser.add_argument("--opti_eps", type=float, default=1e-5,
                        help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument("--weight_decay", type=float, default=0)
    # ppo parameters
    parser.add_argument("--ppo_epoch", type=int, default=5,
                        help='number of ppo epochs (default: 5)')
    parser.add_argument("--use_clipped_value_loss",
                        action='store_false', default=True, help="by default, clip loss value. If set, do not clip loss value.")
    parser.add_argument("--clip_param", type=float, default=0.2,
                        help='ppo clip parameter (default: 0.2)')
    parser.add_argument("--num_mini_batch", type=int, default=1,
                        help='number of batches for ppo (default: 1)')
    parser.add_argument("--entropy_coef", type=float, default=0.01,
                        help='entropy term coefficient (default: 0.01)')
    # Help text corrected: the actual default below is 1, not 0.5.
    parser.add_argument("--value_loss_coef", type=float,
                        default=1, help='value loss coefficient (default: 1)')
    parser.add_argument("--use_max_grad_norm",
                        action='store_false', default=True, help="by default, use max norm of gradients. If set, do not use.")
    # Help text corrected: the actual default below is 10.0, not 0.5.
    parser.add_argument("--max_grad_norm", type=float, default=10.0,
                        help='max norm of gradients (default: 10.0)')
    parser.add_argument("--use_gae", action='store_false',
                        default=True, help='use generalized advantage estimation')
    parser.add_argument("--gamma", type=float, default=0.99,
                        help='discount factor for rewards (default: 0.99)')
    parser.add_argument("--gae_lambda", type=float, default=0.95,
                        help='gae lambda parameter (default: 0.95)')
    parser.add_argument("--use_proper_time_limits", action='store_true',
                        default=False, help='compute returns taking into account time limits')
    parser.add_argument("--use_huber_loss", action='store_false', default=True, help="by default, use huber loss. If set, do not use huber loss.")
    parser.add_argument("--use_value_active_masks",
                        action='store_false', default=True, help="by default True, whether to mask useless data in value loss.")
    parser.add_argument("--use_policy_active_masks",
                        action='store_false', default=True, help="by default True, whether to mask useless data in policy loss.")
    parser.add_argument("--huber_delta", type=float, default=10.0, help=" coefficience of huber loss.")
    # run parameters
    parser.add_argument("--use_linear_lr_decay", action='store_true',
                        default=False, help='use a linear schedule on the learning rate')
    # save parameters
    parser.add_argument("--save_interval", type=int, default=1, help="time duration between contiunous twice models saving.")
    # log parameters
    parser.add_argument("--log_interval", type=int, default=5, help="time duration between contiunous twice log printing.")
    # eval parameters
    parser.add_argument("--use_eval", action='store_true', default=False, help="by default, do not start evaluation. If set`, start evaluation alongside with training.")
    parser.add_argument("--eval_interval", type=int, default=25, help="time duration between contiunous twice evaluation progress.")
    parser.add_argument("--eval_episodes", type=int, default=32, help="number of episodes of a single evaluation.")
    # render parameters
    parser.add_argument("--save_gifs", action='store_true', default=False, help="by default, do not save render video. If set, save video.")
    parser.add_argument("--use_render", action='store_true', default=False, help="by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam.")
    parser.add_argument("--render_episodes", type=int, default=5, help="the number of episodes to render a given env")
    parser.add_argument("--ifi", type=float, default=0.1, help="the play interval of each rendered image in saved video.")
    # pretrained parameters
    parser.add_argument("--model_dir", type=str, default=None, help="by default None. set the path to pretrained model.")
    return parser
| 16,591 | 55.435374 | 261 | py |
CADP | CADP-main/CADP-PG/onpolicy/envs/env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
import torch
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from onpolicy.utils.util import tile_images
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """

    def __init__(self, x):
        # Payload is typically an env-constructor closure, which plain
        # pickle cannot serialize across a Process boundary.
        self.x = x

    def __getstate__(self):
        # Serialize with cloudpickle so closures/lambdas survive the trip.
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        # cloudpickle output is readable by the stock unpickler.
        import pickle
        self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
    """
    An abstract asynchronous, vectorized environment.
    Used to batch data from multiple copies of an environment, so that
    each observation becomes an batch of observations, and expected action is a batch of actions to
    be applied per-environment.

    Subclasses must implement reset(), step_async() and step_wait();
    step() composes the latter two synchronously.
    """
    closed = False   # flipped to True by close(); guards double-close
    viewer = None    # lazily created by get_viewer() for human rendering
    metadata = {
        'render.modes': ['human', 'rgb_array']
    }
    def __init__(self, num_envs, observation_space, share_observation_space, action_space):
        # Spaces are shared by all batched copies of the environment.
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.share_observation_space = share_observation_space
        self.action_space = action_space
    @abstractmethod
    def reset(self):
        """
        Reset all the environments and return an array of
        observations, or a dict of observation arrays.
        If step_async is still doing work, that work will
        be cancelled and step_wait() should not be called
        until step_async() is invoked again.
        """
        pass
    @abstractmethod
    def step_async(self, actions):
        """
        Tell all the environments to start taking a step
        with the given actions.
        Call step_wait() to get the results of the step.
        You should not call this if a step_async run is
        already pending.
        """
        pass
    @abstractmethod
    def step_wait(self):
        """
        Wait for the step taken with step_async().
        Returns (obs, rews, dones, infos):
         - obs: an array of observations, or a dict of
                arrays of observations.
         - rews: an array of rewards
         - dones: an array of "episode done" booleans
         - infos: a sequence of info objects
        """
        pass
    def close_extras(self):
        """
        Clean up the extra resources, beyond what's in this base class.
        Only runs when not self.closed.
        """
        pass
    def close(self):
        # Idempotent: safe to call multiple times.
        if self.closed:
            return
        if self.viewer is not None:
            self.viewer.close()
        self.close_extras()
        self.closed = True
    def step(self, actions):
        """
        Step the environments synchronously.
        This is available for backwards compatibility.
        """
        self.step_async(actions)
        return self.step_wait()
    def render(self, mode='human'):
        """Tile per-env frames into one image; show it ('human') or return it ('rgb_array')."""
        imgs = self.get_images()
        bigimg = tile_images(imgs)
        if mode == 'human':
            self.get_viewer().imshow(bigimg)
            return self.get_viewer().isopen
        elif mode == 'rgb_array':
            return bigimg
        else:
            raise NotImplementedError
    def get_images(self):
        """
        Return RGB images from each environment
        """
        raise NotImplementedError
    @property
    def unwrapped(self):
        # NOTE(review): VecEnvWrapper is not defined/imported in this module's
        # visible code — presumably provided elsewhere; confirm before relying
        # on this property.
        if isinstance(self, VecEnvWrapper):
            return self.venv.unwrapped
        else:
            return self
    def get_viewer(self):
        # Lazily import gym's viewer so headless training never touches it.
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.SimpleImageViewer()
        return self.viewer
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop for SubprocVecEnv: builds one env and serves commands.

    Protocol — receives (cmd, data) tuples over `remote`:
      'step'       -> replies (ob, reward, done, info); auto-resets on episode end
      'reset'      -> replies ob
      'render'     -> replies a frame for 'rgb_array'; renders in place for 'human'
      'reset_task' -> replies ob
      'close'      -> closes env and pipe, exits the loop
      'get_spaces' -> replies (observation_space, share_observation_space, action_space)
    """
    parent_remote.close()  # child keeps only its own end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            # `done` may be a scalar bool or a per-agent array; the episode is
            # over (and the env auto-reset) only when all flags are set.
            if 'bool' in done.__class__.__name__:
                if done:
                    ob = env.reset()
            else:
                if np.all(done):
                    ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            ob = env.reset()
            remote.send((ob))
        elif cmd == 'render':
            if data == "rgb_array":
                fr = env.render(mode=data)
                remote.send(fr)
            elif data == "human":
                env.render(mode=data)
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            env.close()
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.share_observation_space, env.action_space))
        else:
            raise NotImplementedError
class GuardSubprocVecEnv(ShareVecEnv):
    """SubprocVecEnv variant with non-daemonic workers (daemon=False), so the
    worker processes may spawn children of their own. If close() is never
    called these workers can outlive the parent (zombie processes)."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True between step_async() and step_wait()
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = False  # could cause zombie process
            p.start()
        for remote in self.work_remotes:
            remote.close()  # parent keeps only its ends of the pipes
        # Query one worker for the (shared) spaces.
        self.remotes[0].send(('get_spaces', None))
        observation_space, share_observation_space, action_space = self.remotes[0].recv()
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, action_space)
    def step_async(self, actions):
        # Dispatch one action per worker; non-blocking.
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        # Block until every worker replies, then batch per field.
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        obs = [remote.recv() for remote in self.remotes]
        return np.stack(obs)
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        # Drain any in-flight step replies first, then ask workers to exit.
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
class SubprocVecEnv(ShareVecEnv):
    """Vectorized env running each sub-environment in its own daemonic
    subprocess (see `worker`); commands are exchanged over Pipes."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True between step_async() and step_wait()
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()  # parent keeps only its ends of the pipes
        self.remotes[0].send(('get_spaces', None))
        observation_space, share_observation_space, action_space = self.remotes[0].recv()
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, action_space)
    def step_async(self, actions):
        # Dispatch one action per worker; non-blocking.
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        # Block until every worker replies, then batch per field.
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        obs = [remote.recv() for remote in self.remotes]
        return np.stack(obs)
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        # Drain any in-flight step replies first, then ask workers to exit.
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
    def render(self, mode="rgb_array"):
        """Ask every worker to render; stack and return frames for 'rgb_array'."""
        for remote in self.remotes:
            remote.send(('render', mode))
        if mode == "rgb_array":
            frame = [remote.recv() for remote in self.remotes]
            return np.stack(frame)
def shareworker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop for ShareSubprocVecEnv.

    Like `worker`, but the env's step()/reset() also return a shared (global)
    observation `s_ob` and an `available_actions` mask, and an extra
    'render_vulnerability' command is supported.
    """
    parent_remote.close()  # child keeps only its own end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, s_ob, reward, done, info, available_actions = env.step(data)
            # Auto-reset when the episode fully terminates (scalar bool or
            # all entries of a per-agent array).
            if 'bool' in done.__class__.__name__:
                if done:
                    ob, s_ob, available_actions = env.reset()
            else:
                if np.all(done):
                    ob, s_ob, available_actions = env.reset()
            remote.send((ob, s_ob, reward, done, info, available_actions))
        elif cmd == 'reset':
            ob, s_ob, available_actions = env.reset()
            remote.send((ob, s_ob, available_actions))
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'render':
            if data == "rgb_array":
                fr = env.render(mode=data)
                remote.send(fr)
            elif data == "human":
                env.render(mode=data)
        elif cmd == 'close':
            env.close()
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send(
                (env.observation_space, env.share_observation_space, env.action_space))
        elif cmd == 'render_vulnerability':
            fr = env.render_vulnerability(data)
            remote.send((fr))
        else:
            raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
    """Subprocess-vectorized env for envs with shared observations: step()
    and reset() additionally carry `share_obs` and `available_actions`
    (served by `shareworker`)."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True between step_async() and step_wait()
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()  # parent keeps only its ends of the pipes
        self.remotes[0].send(('get_spaces', None))
        observation_space, share_observation_space, action_space = self.remotes[0].recv(
        )
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, action_space)
    def step_async(self, actions):
        # Dispatch one action per worker; non-blocking.
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        # Block for all replies; each is a 6-tuple (see shareworker).
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, share_obs, rews, dones, infos, available_actions = zip(*results)
        return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        results = [remote.recv() for remote in self.remotes]
        obs, share_obs, available_actions = zip(*results)
        return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        # Drain any in-flight step replies first, then ask workers to exit.
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
def choosesimpleworker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop for ChooseSimpleSubprocVecEnv.

    Differences from `worker`: 'reset' takes a payload (`data`) forwarded to
    env.reset(data), and there is NO auto-reset on episode end — the caller
    decides which envs to reset.
    """
    parent_remote.close()  # child keeps only its own end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            # `data` is this env's entry of the caller's reset_choose array.
            ob = env.reset(data)
            remote.send((ob))
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            env.close()
            remote.close()
            break
        elif cmd == 'render':
            if data == "rgb_array":
                fr = env.render(mode=data)
                remote.send(fr)
            elif data == "human":
                env.render(mode=data)
        elif cmd == 'get_spaces':
            remote.send(
                (env.observation_space, env.share_observation_space, env.action_space))
        else:
            raise NotImplementedError
class ChooseSimpleSubprocVecEnv(ShareVecEnv):
    """Subprocess-vectorized env where reset() takes a per-env `reset_choose`
    selector and envs never auto-reset (see `choosesimpleworker`)."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True between step_async() and step_wait()
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()  # parent keeps only its ends of the pipes
        self.remotes[0].send(('get_spaces', None))
        observation_space, share_observation_space, action_space = self.remotes[0].recv()
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, action_space)
    def step_async(self, actions):
        # Dispatch one action per worker; non-blocking.
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self, reset_choose):
        # reset_choose: one selector per env, forwarded to env.reset(choose).
        for remote, choose in zip(self.remotes, reset_choose):
            remote.send(('reset', choose))
        obs = [remote.recv() for remote in self.remotes]
        return np.stack(obs)
    def render(self, mode="rgb_array"):
        for remote in self.remotes:
            remote.send(('render', mode))
        if mode == "rgb_array":
            frame = [remote.recv() for remote in self.remotes]
            return np.stack(frame)
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        # Drain any in-flight step replies first, then ask workers to exit.
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
def chooseworker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop for ChooseSubprocVecEnv: shared-obs protocol (like
    `shareworker`) but 'reset' takes a payload and there is no auto-reset."""
    parent_remote.close()  # child keeps only its own end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, s_ob, reward, done, info, available_actions = env.step(data)
            remote.send((ob, s_ob, reward, done, info, available_actions))
        elif cmd == 'reset':
            # `data` is this env's entry of the caller's reset_choose array.
            ob, s_ob, available_actions = env.reset(data)
            remote.send((ob, s_ob, available_actions))
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            env.close()
            remote.close()
            break
        elif cmd == 'render':
            remote.send(env.render(mode='rgb_array'))
        elif cmd == 'get_spaces':
            remote.send(
                (env.observation_space, env.share_observation_space, env.action_space))
        else:
            raise NotImplementedError
class ChooseSubprocVecEnv(ShareVecEnv):
    """Shared-obs subprocess-vectorized env whose reset() takes a per-env
    `reset_choose` selector; envs never auto-reset (see `chooseworker`)."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True between step_async() and step_wait()
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()  # parent keeps only its ends of the pipes
        self.remotes[0].send(('get_spaces', None))
        observation_space, share_observation_space, action_space = self.remotes[0].recv(
        )
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, action_space)
    def step_async(self, actions):
        # Dispatch one action per worker; non-blocking.
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        # Block for all replies; each is a 6-tuple (see chooseworker).
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, share_obs, rews, dones, infos, available_actions = zip(*results)
        return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
    def reset(self, reset_choose):
        # reset_choose: one selector per env, forwarded to env.reset(choose).
        for remote, choose in zip(self.remotes, reset_choose):
            remote.send(('reset', choose))
        results = [remote.recv() for remote in self.remotes]
        obs, share_obs, available_actions = zip(*results)
        return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        # Drain any in-flight step replies first, then ask workers to exit.
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
def chooseguardworker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop for ChooseGuardSubprocVecEnv: minimal protocol
    ('step'/'reset'/'reset_task'/'close'/'get_spaces'); 'reset' takes a
    payload, no auto-reset, no render support."""
    parent_remote.close()  # child keeps only its own end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            # `data` is this env's entry of the caller's reset_choose array.
            ob = env.reset(data)
            remote.send((ob))
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            env.close()
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send(
                (env.observation_space, env.share_observation_space, env.action_space))
        else:
            raise NotImplementedError
class ChooseGuardSubprocVecEnv(ShareVecEnv):
    """ChooseSubprocVecEnv variant with non-daemonic workers (daemon=False)
    and the minimal protocol of `chooseguardworker` (no auto-reset)."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True between step_async() and step_wait()
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = False  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()  # parent keeps only its ends of the pipes
        self.remotes[0].send(('get_spaces', None))
        observation_space, share_observation_space, action_space = self.remotes[0].recv(
        )
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, action_space)
    def step_async(self, actions):
        # Dispatch one action per worker; non-blocking.
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self, reset_choose):
        # reset_choose: one selector per env, forwarded to env.reset(choose).
        for remote, choose in zip(self.remotes, reset_choose):
            remote.send(('reset', choose))
        obs = [remote.recv() for remote in self.remotes]
        return np.stack(obs)
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        # Drain any in-flight step replies first, then ask workers to exit.
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
# single env
class DummyVecEnv(ShareVecEnv):
    """In-process vectorized env: steps every sub-environment serially.

    Mirrors the SubprocVecEnv interface without spawning worker processes,
    which makes it the cheapest backend for debugging or a single env.
    """

    def __init__(self, env_fns):
        self.envs = [make_env() for make_env in env_fns]
        prototype = self.envs[0]
        ShareVecEnv.__init__(self, len(env_fns),
                             prototype.observation_space,
                             prototype.share_observation_space,
                             prototype.action_space)
        self.actions = None

    def step_async(self, actions):
        # Serial backend: nothing actually runs asynchronously — just park
        # the actions until step_wait().
        self.actions = actions

    def step_wait(self):
        stepped = [env.step(act) for act, env in zip(self.actions, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*stepped))
        for idx, done in enumerate(dones):
            # Scalar bool => single episode flag; otherwise a per-agent array
            # whose entries must all be set before the env auto-resets.
            finished = done if 'bool' in done.__class__.__name__ else np.all(done)
            if finished:
                obs[idx] = self.envs[idx].reset()
        self.actions = None
        return obs, rews, dones, infos

    def reset(self):
        return np.array([env.reset() for env in self.envs])

    def close(self):
        for env in self.envs:
            env.close()

    def render(self, mode="human"):
        if mode == "rgb_array":
            return np.array([env.render(mode=mode) for env in self.envs])
        if mode == "human":
            for env in self.envs:
                env.render(mode=mode)
        else:
            raise NotImplementedError
class ShareDummyVecEnv(ShareVecEnv):
    """Serial vectorized wrapper whose envs also return shared observations
    and available-action masks."""
    def __init__(self, env_fns):
        self.envs = [make() for make in env_fns]
        first = self.envs[0]
        ShareVecEnv.__init__(self, len(env_fns), first.observation_space,
                             first.share_observation_space, first.action_space)
        self.actions = None
    def step_async(self, actions):
        # Defer execution to step_wait().
        self.actions = actions
    def step_wait(self):
        outcomes = [env.step(act) for act, env in zip(self.actions, self.envs)]
        obs, share_obs, rews, dones, infos, available_actions = map(
            np.array, zip(*outcomes))
        for idx, done in enumerate(dones):
            # Scalar bool (incl. np.bool_) means single termination flag;
            # otherwise require all per-agent flags before resetting.
            finished = done if 'bool' in type(done).__name__ else np.all(done)
            if finished:
                obs[idx], share_obs[idx], available_actions[idx] = self.envs[idx].reset()
        self.actions = None
        return obs, share_obs, rews, dones, infos, available_actions
    def reset(self):
        outcomes = [env.reset() for env in self.envs]
        obs, share_obs, available_actions = map(np.array, zip(*outcomes))
        return obs, share_obs, available_actions
    def close(self):
        for env in self.envs:
            env.close()
    def render(self, mode="human"):
        if mode == "rgb_array":
            return np.array([env.render(mode=mode) for env in self.envs])
        if mode == "human":
            for env in self.envs:
                env.render(mode=mode)
            return
        raise NotImplementedError
class ChooseDummyVecEnv(ShareVecEnv):
    """Serial vectorized wrapper with per-env `choose` reset flags and no
    automatic episode resetting."""
    def __init__(self, env_fns):
        self.envs = [make() for make in env_fns]
        first = self.envs[0]
        ShareVecEnv.__init__(self, len(env_fns), first.observation_space,
                             first.share_observation_space, first.action_space)
        self.actions = None
    def step_async(self, actions):
        # Defer execution to step_wait().
        self.actions = actions
    def step_wait(self):
        outcomes = [env.step(act) for act, env in zip(self.actions, self.envs)]
        obs, share_obs, rews, dones, infos, available_actions = map(
            np.array, zip(*outcomes))
        self.actions = None
        return obs, share_obs, rews, dones, infos, available_actions
    def reset(self, reset_choose):
        outcomes = [env.reset(flag)
                    for env, flag in zip(self.envs, reset_choose)]
        obs, share_obs, available_actions = map(np.array, zip(*outcomes))
        return obs, share_obs, available_actions
    def close(self):
        for env in self.envs:
            env.close()
    def render(self, mode="human"):
        if mode == "rgb_array":
            return np.array([env.render(mode=mode) for env in self.envs])
        if mode == "human":
            for env in self.envs:
                env.render(mode=mode)
            return
        raise NotImplementedError
class ChooseSimpleDummyVecEnv(ShareVecEnv):
    """Serial vectorized wrapper with per-env `choose` reset flags, returning
    only plain observations (no shared obs / action masks)."""
    def __init__(self, env_fns):
        self.envs = [make() for make in env_fns]
        first = self.envs[0]
        ShareVecEnv.__init__(self, len(env_fns), first.observation_space,
                             first.share_observation_space, first.action_space)
        self.actions = None
    def step_async(self, actions):
        # Defer execution to step_wait().
        self.actions = actions
    def step_wait(self):
        outcomes = [env.step(act) for act, env in zip(self.actions, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*outcomes))
        self.actions = None
        return obs, rews, dones, infos
    def reset(self, reset_choose):
        return np.array([env.reset(flag)
                         for env, flag in zip(self.envs, reset_choose)])
    def close(self):
        for env in self.envs:
            env.close()
    def render(self, mode="human"):
        if mode == "rgb_array":
            return np.array([env.render(mode=mode) for env in self.envs])
        if mode == "human":
            for env in self.envs:
                env.render(mode=mode)
            return
        raise NotImplementedError
| 28,209 | 33.277035 | 118 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/r_mappo/r_mappo.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from onpolicy.utils.util import get_gard_norm, huber_loss, mse_loss
from onpolicy.utils.valuenorm import ValueNorm
from onpolicy.algorithms.utils.util import check
class R_MAPPO():
    """
    Trainer class for MAPPO to update policies.
    :param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
    :param policy: (R_MAPPO_Policy) policy to update.
    :param device: (torch.device) specifies the device to run on (cpu/gpu).
    """
    def __init__(self,
                 args,
                 policy,
                 device=torch.device("cpu")):
        self.args = args
        self.device = device
        # Common tensor-conversion kwargs, used as check(x).to(**self.tpdv).
        self.tpdv = dict(dtype=torch.float32, device=device)
        self.policy = policy
        self.clip_param = args.clip_param
        self.ppo_epoch = args.ppo_epoch
        self.num_mini_batch = args.num_mini_batch
        self.data_chunk_length = args.data_chunk_length
        self.value_loss_coef = args.value_loss_coef
        self.entropy_coef = args.entropy_coef
        self.max_grad_norm = args.max_grad_norm
        self.huber_delta = args.huber_delta
        self._use_recurrent_policy = args.use_recurrent_policy
        self._use_naive_recurrent = args.use_naive_recurrent_policy
        self._use_max_grad_norm = args.use_max_grad_norm
        self._use_clipped_value_loss = args.use_clipped_value_loss
        self._use_huber_loss = args.use_huber_loss
        self._use_popart = args.use_popart
        self._use_valuenorm = args.use_valuenorm
        self._use_value_active_masks = args.use_value_active_masks
        self._use_policy_active_masks = args.use_policy_active_masks
        # PopArt (normalizing critic output layer) and ValueNorm (standalone
        # running normalizer) are alternative return-normalization schemes;
        # at most one may be enabled.
        assert (self._use_popart and self._use_valuenorm) == False, ("self._use_popart and self._use_valuenorm can not be set True simultaneously")
        if self._use_popart:
            self.value_normalizer = self.policy.critic.v_out
        elif self._use_valuenorm:
            self.value_normalizer = ValueNorm(1).to(self.device)
        else:
            self.value_normalizer = None
        if getattr(self.args, 'use_CADP', False):
            # Flag toggled externally; when True, ppo_update adds a KL term
            # pushing the actor's self-attention toward the identity matrix.
            self.use_cadp_loss = False
    def cal_value_loss(self, values, value_preds_batch, return_batch, active_masks_batch):
        """
        Calculate value function loss.
        :param values: (torch.Tensor) value function predictions.
        :param value_preds_batch: (torch.Tensor) "old" value predictions from data batch (used for value clip loss)
        :param return_batch: (torch.Tensor) reward to go returns.
        :param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timestep.
        :return value_loss: (torch.Tensor) value function loss.
        """
        # PPO-style value clipping: keep the new prediction within
        # +/- clip_param of the old prediction from the buffer.
        value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,
                                                                                    self.clip_param)
        if self._use_popart or self._use_valuenorm:
            # Update the running return statistics, then compute errors in
            # normalized-return space.
            self.value_normalizer.update(return_batch)
            error_clipped = self.value_normalizer.normalize(return_batch) - value_pred_clipped
            error_original = self.value_normalizer.normalize(return_batch) - values
        else:
            error_clipped = return_batch - value_pred_clipped
            error_original = return_batch - values
        if self._use_huber_loss:
            value_loss_clipped = huber_loss(error_clipped, self.huber_delta)
            value_loss_original = huber_loss(error_original, self.huber_delta)
        else:
            value_loss_clipped = mse_loss(error_clipped)
            value_loss_original = mse_loss(error_original)
        if self._use_clipped_value_loss:
            # Pessimistic elementwise max of clipped / unclipped losses.
            value_loss = torch.max(value_loss_original, value_loss_clipped)
        else:
            value_loss = value_loss_original
        if self._use_value_active_masks:
            # Average only over active (alive) agent timesteps.
            value_loss = (value_loss * active_masks_batch).sum() / active_masks_batch.sum()
        else:
            value_loss = value_loss.mean()
        return value_loss
    def ppo_update(self, sample, update_actor=True):
        """
        Update actor and critic networks.
        :param sample: (Tuple) contains data batch with which to update networks.
        :update_actor: (bool) whether to update actor network.
        :return value_loss: (torch.Tensor) value function loss.
        :return critic_grad_norm: (torch.Tensor) gradient norm from critic update.
        :return policy_loss: (torch.Tensor) actor(policy) loss value.
        :return dist_entropy: (torch.Tensor) action entropies.
        :return actor_grad_norm: (torch.Tensor) gradient norm from actor update.
        :return imp_weights: (torch.Tensor) importance sampling weights.
        """
        share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \
        value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \
        adv_targ, available_actions_batch = sample
        old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv)
        adv_targ = check(adv_targ).to(**self.tpdv)
        value_preds_batch = check(value_preds_batch).to(**self.tpdv)
        return_batch = check(return_batch).to(**self.tpdv)
        active_masks_batch = check(active_masks_batch).to(**self.tpdv)
        # Reshape to do in a single forward pass for all steps
        values, action_log_probs, dist_entropy = self.policy.evaluate_actions(share_obs_batch,
                                                                              obs_batch,
                                                                              rnn_states_batch,
                                                                              rnn_states_critic_batch,
                                                                              actions_batch,
                                                                              masks_batch,
                                                                              available_actions_batch,
                                                                              active_masks_batch)
        # actor update: clipped PPO surrogate objective
        imp_weights = torch.exp(action_log_probs - old_action_log_probs_batch)
        surr1 = imp_weights * adv_targ
        surr2 = torch.clamp(imp_weights, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ
        if self._use_policy_active_masks:
            # Average the per-step loss only over active (alive) agents.
            policy_action_loss = (-torch.sum(torch.min(surr1, surr2),
                                             dim=-1,
                                             keepdim=True) * active_masks_batch).sum() / active_masks_batch.sum()
        else:
            policy_action_loss = -torch.sum(torch.min(surr1, surr2), dim=-1, keepdim=True).mean()
        policy_loss = policy_action_loss
        if getattr(self.args, 'use_CADP', False):
            if self.use_cadp_loss:
                # CADP regularizer: KL between the actor's self-attention
                # rows and the identity, encouraging agent-wise independence.
                att = self.policy.actor.att.dot
                eps = 1e-8
                eye = torch.eye(self.args.num_agents).unsqueeze(0).repeat(att.shape[0], 1, 1)
                eye = eye.view(-1, self.args.num_agents).to(**self.tpdv)
                att = att.view(-1, self.args.num_agents)
                cadp_loss = F.kl_div((att + eps).log(), eye, reduction='mean')
                policy_loss = policy_loss + 0.5 * cadp_loss
        self.policy.actor_optimizer.zero_grad()
        if update_actor:
            # Entropy bonus is subtracted to encourage exploration.
            (policy_loss - dist_entropy * self.entropy_coef).backward()
        if self._use_max_grad_norm:
            actor_grad_norm = nn.utils.clip_grad_norm_(self.policy.actor.parameters(), self.max_grad_norm)
        else:
            actor_grad_norm = get_gard_norm(self.policy.actor.parameters())
        self.policy.actor_optimizer.step()
        # critic update
        value_loss = self.cal_value_loss(values, value_preds_batch, return_batch, active_masks_batch)
        self.policy.critic_optimizer.zero_grad()
        (value_loss * self.value_loss_coef).backward()
        if self._use_max_grad_norm:
            critic_grad_norm = nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.max_grad_norm)
        else:
            critic_grad_norm = get_gard_norm(self.policy.critic.parameters())
        self.policy.critic_optimizer.step()
        return value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights
    def train(self, buffer, update_actor=True):
        """
        Perform a training update using minibatch GD.
        :param buffer: (SharedReplayBuffer) buffer containing training data.
        :param update_actor: (bool) whether to update actor network.
        :return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc).
        """
        if self._use_popart or self._use_valuenorm:
            advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(buffer.value_preds[:-1])
        else:
            advantages = buffer.returns[:-1] - buffer.value_preds[:-1]
        # Standardize advantages over active steps only: dead-agent steps are
        # masked to NaN and excluded via nanmean/nanstd.
        advantages_copy = advantages.copy()
        advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan
        mean_advantages = np.nanmean(advantages_copy)
        std_advantages = np.nanstd(advantages_copy)
        advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)
        train_info = {}
        train_info['value_loss'] = 0
        train_info['policy_loss'] = 0
        train_info['dist_entropy'] = 0
        train_info['actor_grad_norm'] = 0
        train_info['critic_grad_norm'] = 0
        train_info['ratio'] = 0
        for _ in range(self.ppo_epoch):
            # Pick the minibatch generator matching the policy architecture.
            if self._use_recurrent_policy:
                data_generator = buffer.recurrent_generator(advantages, self.num_mini_batch, self.data_chunk_length)
            elif self._use_naive_recurrent:
                data_generator = buffer.naive_recurrent_generator(advantages, self.num_mini_batch)
            else:
                data_generator = buffer.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights \
                    = self.ppo_update(sample, update_actor)
                train_info['value_loss'] += value_loss.item()
                train_info['policy_loss'] += policy_loss.item()
                train_info['dist_entropy'] += dist_entropy.item()
                # NOTE(review): grad norms and ratio are accumulated without
                # .item(), so these entries may remain torch tensors.
                train_info['actor_grad_norm'] += actor_grad_norm
                train_info['critic_grad_norm'] += critic_grad_norm
                train_info['ratio'] += imp_weights.mean()
        num_updates = self.ppo_epoch * self.num_mini_batch
        for k in train_info.keys():
            train_info[k] /= num_updates
        return train_info
    def prep_training(self):
        # Switch networks to train mode (enables dropout/batch-norm updates).
        self.policy.actor.train()
        self.policy.critic.train()
    def prep_rollout(self):
        # Switch networks to eval mode for data collection.
        self.policy.actor.eval()
        self.policy.critic.eval()
| 11,116 | 44.938017 | 147 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/r_mappo/algorithm/r_actor_critic.py | import torch
import torch.nn as nn
from onpolicy.algorithms.utils.util import init, check
from onpolicy.algorithms.utils.cnn import CNNBase
from onpolicy.algorithms.utils.mlp import MLPBase
from onpolicy.algorithms.utils.rnn import RNNLayer
from onpolicy.algorithms.utils.act import ACTLayer
from onpolicy.algorithms.utils.popart import PopArt
from onpolicy.utils.util import get_shape_from_obs_space
class R_Actor(nn.Module):
    """
    Actor network class for MAPPO. Outputs actions given observations.
    :param args: (argparse.Namespace) arguments containing relevant model information.
    :param obs_space: (gym.Space) observation space.
    :param action_space: (gym.Space) action space.
    :param device: (torch.device) specifies the device to run on (cpu/gpu).
    """
    def __init__(self, args, obs_space, action_space, device=torch.device("cpu")):
        super(R_Actor, self).__init__()
        self.hidden_size = args.hidden_size
        self._gain = args.gain
        self._use_orthogonal = args.use_orthogonal
        self._use_policy_active_masks = args.use_policy_active_masks
        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
        self._use_recurrent_policy = args.use_recurrent_policy
        self._recurrent_N = args.recurrent_N
        # Common tensor-conversion kwargs, used as check(x).to(**self.tpdv).
        self.tpdv = dict(dtype=torch.float32, device=device)
        obs_shape = get_shape_from_obs_space(obs_space)
        # CNN trunk for image-like (3-dim) observations, MLP otherwise.
        base = CNNBase if len(obs_shape) == 3 else MLPBase
        self.base = base(args, obs_shape)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
        # Action head mapping features to an action distribution.
        self.act = ACTLayer(action_space, self.hidden_size, self._use_orthogonal, self._gain)
        self.to(device)
    def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
        """
        Compute actions from the given inputs.
        :param obs: (np.ndarray / torch.Tensor) observation inputs into network.
        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.
        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.
        :param available_actions: (np.ndarray / torch.Tensor) denotes which actions are available to agent
                                                              (if None, all actions available)
        :param deterministic: (bool) whether to sample from action distribution or return the mode.
        :return actions: (torch.Tensor) actions to take.
        :return action_log_probs: (torch.Tensor) log probabilities of taken actions.
        :return rnn_states: (torch.Tensor) updated RNN hidden states.
        """
        obs = check(obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        if available_actions is not None:
            available_actions = check(available_actions).to(**self.tpdv)
        actor_features = self.base(obs)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
        actions, action_log_probs = self.act(actor_features, available_actions, deterministic)
        return actions, action_log_probs, rnn_states
    def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):
        """
        Compute log probability and entropy of given actions.
        :param obs: (torch.Tensor) observation inputs into network.
        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.
        :param rnn_states: (torch.Tensor) if RNN network, hidden states for RNN.
        :param masks: (torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.
        :param available_actions: (torch.Tensor) denotes which actions are available to agent
                                                              (if None, all actions available)
        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.
        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
        """
        obs = check(obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        action = check(action).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        if available_actions is not None:
            available_actions = check(available_actions).to(**self.tpdv)
        if active_masks is not None:
            active_masks = check(active_masks).to(**self.tpdv)
        actor_features = self.base(obs)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
        # Entropy is averaged over active agents only when policy active
        # masking is enabled.
        action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features,
                                                                   action, available_actions,
                                                                   active_masks=
                                                                   active_masks if self._use_policy_active_masks
                                                                   else None)
        return action_log_probs, dist_entropy
class R_Critic(nn.Module):
    """
    Critic network class for MAPPO. Outputs value function predictions given centralized input (MAPPO) or
    local observations (IPPO).
    :param args: (argparse.Namespace) arguments containing relevant model information.
    :param cent_obs_space: (gym.Space) (centralized) observation space.
    :param device: (torch.device) specifies the device to run on (cpu/gpu).
    """
    def __init__(self, args, cent_obs_space, device=torch.device("cpu")):
        super(R_Critic, self).__init__()
        self.hidden_size = args.hidden_size
        self._use_orthogonal = args.use_orthogonal
        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
        self._use_recurrent_policy = args.use_recurrent_policy
        self._recurrent_N = args.recurrent_N
        self._use_popart = args.use_popart
        # Common tensor-conversion kwargs, used as check(x).to(**self.tpdv).
        self.tpdv = dict(dtype=torch.float32, device=device)
        # Select weight init by flag: index 0 = xavier, index 1 = orthogonal.
        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]
        cent_obs_shape = get_shape_from_obs_space(cent_obs_space)
        # CNN trunk for image-like (3-dim) observations, MLP otherwise.
        base = CNNBase if len(cent_obs_shape) == 3 else MLPBase
        self.base = base(args, cent_obs_shape)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
        def init_(m):
            return init(m, init_method, lambda x: nn.init.constant_(x, 0))
        # PopArt output layer keeps running return statistics; otherwise a
        # plain linear value head.
        if self._use_popart:
            self.v_out = init_(PopArt(self.hidden_size, 1, device=device))
        else:
            self.v_out = init_(nn.Linear(self.hidden_size, 1))
        self.to(device)
    def forward(self, cent_obs, rnn_states, masks):
        """
        Compute value function predictions from the given inputs.
        :param cent_obs: (np.ndarray / torch.Tensor) observation inputs into network.
        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.
        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if RNN states should be reinitialized to zeros.
        :return values: (torch.Tensor) value function predictions.
        :return rnn_states: (torch.Tensor) updated RNN hidden states.
        """
        cent_obs = check(cent_obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        critic_features = self.base(cent_obs)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            critic_features, rnn_states = self.rnn(critic_features, rnn_states, masks)
        values = self.v_out(critic_features)
        return values, rnn_states
| 8,201 | 48.409639 | 121 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/r_mappo/algorithm/r_actor_critic_cadp.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onpolicy.algorithms.utils.util import init, check
from onpolicy.algorithms.utils.cnn import CNNBase
from onpolicy.algorithms.utils.mlp import MLPBase
from onpolicy.algorithms.utils.rnn import RNNLayer
from onpolicy.algorithms.utils.act import ACTLayer
from onpolicy.algorithms.utils.popart import PopArt
from onpolicy.utils.util import get_shape_from_obs_space
class SelfAttention(nn.Module):
    """
    Multi-head scaled dot-product self-attention over the token (agent)
    dimension.

    Input is (batch, tokens, input_size); output is (batch, tokens,
    heads * embed_size).  The row-wise attention matrix is cached in
    ``self.dot`` and the head-concatenated value projections in
    ``self.values`` so callers can inspect or regularize them (used by
    the CADP loss and the ``use_att_v`` path of the actor).
    """
    def __init__(self, input_size, heads=4, embed_size=32):
        super().__init__()
        self.input_size = input_size
        self.heads = heads
        self.emb_size = embed_size
        # Per-head linear projections, fused across heads into one matmul.
        self.tokeys = nn.Linear(self.input_size, self.emb_size * heads, bias = False)
        self.toqueries = nn.Linear(self.input_size, self.emb_size * heads, bias = False)
        self.tovalues = nn.Linear(self.input_size, self.emb_size * heads, bias = False)
    def forward(self, x):
        """
        :param x: (torch.Tensor) of shape (batch, tokens, input_size).
        :return out: (torch.Tensor) of shape (batch, tokens, heads * embed_size).
        """
        b, t, hin = x.size()
        # Bug fix: the original message used doubled braces ({{hin}}), so the
        # f-string printed the literal text "{hin}" instead of the sizes.
        assert hin == self.input_size, f'Input size {hin} should match {self.input_size}'
        h = self.heads
        e = self.emb_size
        keys = self.tokeys(x).view(b, t, h, e)
        queries = self.toqueries(x).view(b, t, h, e)
        values = self.tovalues(x).view(b, t, h, e)
        # dot-product attention
        # folding heads to batch dimensions so a single bmm serves all heads
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
        values = values.transpose(1, 2).contiguous().view(b * h, t, e)
        # Scale q and k by e**(1/4) each (combined effect: 1/sqrt(e)).
        queries = queries / (e ** (1/4))
        keys = keys / (e ** (1/4))
        dot = torch.bmm(queries, keys.transpose(1, 2))
        assert dot.size() == (b*h, t, t)
        # row wise self attention probabilities; cached for the CADP regularizer
        dot = F.softmax(dot, dim=2)
        self.dot = dot
        out = torch.bmm(dot, values).view(b, h, t, e)
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        # Also cache the head-concatenated values (consumed when the actor's
        # use_att_v flag is on).
        values = values.view(b, h, t, e)
        values = values.transpose(1, 2).contiguous().view(b, t, h * e)
        self.values = values
        return out
class R_Actor(nn.Module):
    """
    Actor network class for MAPPO (CADP variant: adds a self-attention branch
    over agents whose features are fused with the per-agent features).
    Outputs actions given observations.
    :param args: (argparse.Namespace) arguments containing relevant model information.
    :param obs_space: (gym.Space) observation space.
    :param action_space: (gym.Space) action space.
    :param device: (torch.device) specifies the device to run on (cpu/gpu).
    """
    def __init__(self, args, obs_space, action_space, device=torch.device("cpu")):
        super(R_Actor, self).__init__()
        self.n_rollout_threads = args.n_rollout_threads
        self.num_agents = args.num_agents
        self.hidden_size = args.hidden_size
        self._gain = args.gain
        self._use_orthogonal = args.use_orthogonal
        self._use_policy_active_masks = args.use_policy_active_masks
        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
        self._use_recurrent_policy = args.use_recurrent_policy
        self._recurrent_N = args.recurrent_N
        # Common tensor-conversion kwargs, used as check(x).to(**self.tpdv).
        self.tpdv = dict(dtype=torch.float32, device=device)
        obs_shape = get_shape_from_obs_space(obs_space)
        # CNN trunk for image-like (3-dim) observations, MLP otherwise.
        base = CNNBase if len(obs_shape) == 3 else MLPBase
        self.base = base(args, obs_shape)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
        # Cross-agent self-attention branch (4 heads, 32-dim embeddings).
        self.obs_dim = obs_shape[0]
        self.att = SelfAttention(self.obs_dim, 4, 32)
        # fc1 projects concatenated head outputs (4*32) to hidden_size;
        # fc2 fuses [rnn features ; attention features] back to hidden_size.
        self.fc1 = nn.Linear(4 * 32, self.hidden_size)
        self.fc2 = nn.Linear(self.hidden_size * 2, self.hidden_size)
        # self.fc2 = nn.Linear(self.hidden_size, self.hidden_size)
        # When True, fc1 consumes the cached value projections (att.values)
        # instead of the attention output.
        self.use_att_v = False
        self.act = ACTLayer(action_space, self.hidden_size, self._use_orthogonal, self._gain)
        self.to(device)
    def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
        """
        Compute actions from the given inputs.
        :param obs: (np.ndarray / torch.Tensor) observation inputs into network.
        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.
        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.
        :param available_actions: (np.ndarray / torch.Tensor) denotes which actions are available to agent
                                                              (if None, all actions available)
        :param deterministic: (bool) whether to sample from action distribution or return the mode.
        :return actions: (torch.Tensor) actions to take.
        :return action_log_probs: (torch.Tensor) log probabilities of taken actions.
        :return rnn_states: (torch.Tensor) updated RNN hidden states.
        """
        obs = check(obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        if available_actions is not None:
            available_actions = check(available_actions).to(**self.tpdv)
        actor_features = self.base(obs)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
        # ATT: attend across agents.  NOTE(review): the view assumes rows of
        # `obs` are grouped so consecutive num_agents rows form one env's
        # agents — confirm against the rollout buffer layout.
        att_features = self.att(obs.view(-1, self.num_agents, self.obs_dim))
        if self.use_att_v:
            att_features = F.relu(self.fc1(self.att.values), inplace=True).view(-1, self.hidden_size)
        else:
            att_features = F.relu(self.fc1(att_features), inplace=True).view(-1, self.hidden_size)
        # Fuse the per-agent recurrent features with the attention features.
        actor_features = torch.cat((actor_features, att_features), dim=-1)
        # actor_features = att_features
        actor_features = self.fc2(actor_features)
        actions, action_log_probs = self.act(actor_features, available_actions, deterministic)
        return actions, action_log_probs, rnn_states
    def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):
        """
        Compute log probability and entropy of given actions.
        :param obs: (torch.Tensor) observation inputs into network.
        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.
        :param rnn_states: (torch.Tensor) if RNN network, hidden states for RNN.
        :param masks: (torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.
        :param available_actions: (torch.Tensor) denotes which actions are available to agent
                                                              (if None, all actions available)
        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.
        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
        """
        obs = check(obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        action = check(action).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        if available_actions is not None:
            available_actions = check(available_actions).to(**self.tpdv)
        if active_masks is not None:
            active_masks = check(active_masks).to(**self.tpdv)
        actor_features = self.base(obs)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
        # ATT: same attention fusion as in forward(), so the evaluated
        # log-probs match the distribution that produced the actions.
        att_features = self.att(obs.view(-1, self.num_agents, self.obs_dim))
        if self.use_att_v:
            att_features = F.relu(self.fc1(self.att.values), inplace=True).view(-1, self.hidden_size)
        else:
            att_features = F.relu(self.fc1(att_features), inplace=True).view(-1, self.hidden_size)
        actor_features = torch.cat((actor_features, att_features), dim=-1)
        # actor_features = att_features
        actor_features = self.fc2(actor_features)
        action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features,
                                                                   action, available_actions,
                                                                   active_masks=
                                                                   active_masks if self._use_policy_active_masks
                                                                   else None)
        return action_log_probs, dist_entropy
class R_Critic(nn.Module):
    """
    Critic network for MAPPO: maps (centralized) observations to scalar value
    predictions, optionally through a recurrent layer and a PopArt output head.
    :param args: (argparse.Namespace) arguments containing relevant model information.
    :param cent_obs_space: (gym.Space) (centralized) observation space.
    :param device: (torch.device) specifies the device to run on (cpu/gpu).
    """
    def __init__(self, args, cent_obs_space, device=torch.device("cpu")):
        super(R_Critic, self).__init__()
        self.hidden_size = args.hidden_size
        self._use_orthogonal = args.use_orthogonal
        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
        self._use_recurrent_policy = args.use_recurrent_policy
        self._recurrent_N = args.recurrent_N
        self._use_popart = args.use_popart
        self.tpdv = dict(dtype=torch.float32, device=device)
        # Weight init scheme selected by flag (orthogonal vs. xavier).
        init_method = nn.init.orthogonal_ if self._use_orthogonal else nn.init.xavier_uniform_
        cent_obs_shape = get_shape_from_obs_space(cent_obs_space)
        # Image-like (3-dim) observations go through a CNN trunk, else an MLP.
        trunk_cls = CNNBase if len(cent_obs_shape) == 3 else MLPBase
        self.base = trunk_cls(args, cent_obs_shape)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
        def init_(m):
            return init(m, init_method, lambda x: nn.init.constant_(x, 0))
        # PopArt head keeps running return statistics; otherwise a plain
        # linear value head.
        if self._use_popart:
            head = PopArt(self.hidden_size, 1, device=device)
        else:
            head = nn.Linear(self.hidden_size, 1)
        self.v_out = init_(head)
        self.to(device)
    def forward(self, cent_obs, rnn_states, masks):
        """
        Compute value predictions from the given inputs.
        :param cent_obs: (np.ndarray / torch.Tensor) observation inputs into network.
        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.
        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if RNN states should be reinitialized to zeros.
        :return values: (torch.Tensor) value function predictions.
        :return rnn_states: (torch.Tensor) updated RNN hidden states.
        """
        cent_obs = check(cent_obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        features = self.base(cent_obs)
        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            features, rnn_states = self.rnn(features, rnn_states, masks)
        return self.v_out(features), rnn_states
| 11,332 | 45.069106 | 121 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/r_mappo/algorithm/rMAPPOPolicy.py | import torch
from onpolicy.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic
from onpolicy.algorithms.r_mappo.algorithm.r_actor_critic_cadp import R_Actor as R_Actor_CADP
from onpolicy.algorithms.r_mappo.algorithm.r_actor_critic_cadp import R_Critic as R_Critic_CADP
from onpolicy.utils.util import update_linear_schedule, get_params_size
class R_MAPPOPolicy:
"""
MAPPO Policy class. Wraps actor and critic networks to compute actions and value function predictions.
:param args: (argparse.Namespace) arguments containing relevant model and policy information.
:param obs_space: (gym.Space) observation space.
:param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO).
:param action_space: (gym.Space) action space.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
    def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")):
        """
        Build actor/critic networks (CADP variants when args.use_CADP is set)
        and their Adam optimizers.
        """
        self.device = device
        self.lr = args.lr
        self.critic_lr = args.critic_lr
        self.opti_eps = args.opti_eps
        self.weight_decay = args.weight_decay
        self.obs_space = obs_space
        self.share_obs_space = cent_obs_space
        self.act_space = act_space
        # Select the CADP (attention-augmented) or vanilla network variants.
        if getattr(args, 'use_CADP', False):
            self.actor = R_Actor_CADP(args, self.obs_space, self.act_space, self.device)
            self.critic = R_Critic_CADP(args, self.share_obs_space, self.device)
        else:
            self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)
            self.critic = R_Critic(args, self.share_obs_space, self.device)
        # Log the total parameter count of both networks.
        params = list(self.actor.parameters()) + list(self.critic.parameters())
        params_size = get_params_size(params)
        print(("params_size: {}".format(params_size)))
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
                                                lr=self.lr, eps=self.opti_eps,
                                                weight_decay=self.weight_decay)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
                                                 lr=self.critic_lr,
                                                 eps=self.opti_eps,
                                                 weight_decay=self.weight_decay)
def lr_decay(self, episode, episodes):
"""
Decay the actor and critic learning rates.
:param episode: (int) current training episode.
:param episodes: (int) total number of training episodes.
"""
update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)
update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)
def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None,
deterministic=False):
"""
Compute actions and value function predictions for the given inputs.
:param cent_obs (np.ndarray): centralized input to the critic.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
:return values: (torch.Tensor) value function predictions.
:return actions: (torch.Tensor) actions to take.
:return action_log_probs: (torch.Tensor) log probabilities of chosen actions.
:return rnn_states_actor: (torch.Tensor) updated actor network RNN states.
:return rnn_states_critic: (torch.Tensor) updated critic network RNN states.
"""
actions, action_log_probs, rnn_states_actor = self.actor(obs,
rnn_states_actor,
masks,
available_actions,
deterministic)
values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)
return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic
def get_values(self, cent_obs, rnn_states_critic, masks):
"""
Get value function predictions.
:param cent_obs (np.ndarray): centralized input to the critic.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:return values: (torch.Tensor) value function predictions.
"""
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values
def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks,
available_actions=None, active_masks=None):
"""
Get action logprobs / entropy and value function predictions for actor update.
:param cent_obs (np.ndarray): centralized input to the critic.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param action: (np.ndarray) actions whose log probabilites and entropy to compute.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
:return values: (torch.Tensor) value function predictions.
:return action_log_probs: (torch.Tensor) log probabilities of the input actions.
:return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
"""
action_log_probs, dist_entropy = self.actor.evaluate_actions(obs,
rnn_states_actor,
action,
masks,
available_actions,
active_masks)
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values, action_log_probs, dist_entropy
def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False):
"""
Compute actions using the given inputs.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
"""
actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
return actions, rnn_states_actor
| 7,896 | 56.224638 | 120 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/utils/distributions.py | import torch
import torch.nn as nn
from .util import init
"""
Modify standard PyTorch distributions to make them compatible with this codebase.
"""
#
# Standardize distribution interfaces
#
# Categorical
class FixedCategorical(torch.distributions.Categorical):
    """Categorical distribution whose action interface uses (batch, 1) tensors."""

    def sample(self):
        # Append a trailing dim so sampled actions are shaped (batch, 1).
        return super().sample().unsqueeze(-1)

    def log_probs(self, actions):
        # Drop the trailing action dim, sum per-sample log-probs, restore the dim.
        flat = super().log_prob(actions.squeeze(-1))
        return flat.view(actions.size(0), -1).sum(-1).unsqueeze(-1)

    def mode(self):
        # Deterministic action: index of the highest-probability category.
        return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
    """Diagonal Gaussian with per-sample scalar log-prob/entropy reductions."""

    def log_probs(self, actions):
        """Sum log-probs over the action dim, keeping shape (batch, 1)."""
        return super().log_prob(actions).sum(-1, keepdim=True)

    def entropy(self):
        # BUG FIX: was `super.entropy()` (missing call parentheses), which
        # raised AttributeError on the `super` builtin at runtime.
        return super().entropy().sum(-1)

    def mode(self):
        """Deterministic action: the distribution mean."""
        return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
    """Independent Bernoulli actions with per-sample scalar reductions."""

    def log_probs(self, actions):
        # BUG FIX: was `super.log_prob(...)` (missing call parentheses), which
        # raised AttributeError on the `super` builtin at runtime.
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)

    def entropy(self):
        """Sum entropy over the action dim."""
        return super().entropy().sum(-1)

    def mode(self):
        """Deterministic action: 1 where p > 0.5, else 0."""
        return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
    """Linear action head producing a FixedCategorical over discrete actions."""

    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
        super(Categorical, self).__init__()
        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
        self.linear = init(nn.Linear(num_inputs, num_outputs), init_method,
                           lambda x: nn.init.constant_(x, 0), gain)

    def forward(self, x, available_actions=None):
        logits = self.linear(x)
        if available_actions is not None:
            # Mask unavailable actions with a very large negative logit.
            logits[available_actions == 0] = -1e10
        return FixedCategorical(logits=logits)
class DiagGaussian(nn.Module):
    """Action head producing a FixedNormal with a state-independent log-std."""

    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
        super(DiagGaussian, self).__init__()
        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
        self.fc_mean = init(nn.Linear(num_inputs, num_outputs), init_method,
                            lambda x: nn.init.constant_(x, 0), gain)
        # Learned log-std, shared across the batch (AddBias over zeros).
        self.logstd = AddBias(torch.zeros(num_outputs))

    def forward(self, x):
        action_mean = self.fc_mean(x)
        # An ugly hack for my KFAC implementation: feed zeros through AddBias
        # so the bias itself becomes the log-std.
        zeros = torch.zeros(action_mean.size())
        if x.is_cuda:
            zeros = zeros.cuda()
        action_logstd = self.logstd(zeros)
        return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
    """Linear action head producing a FixedBernoulli over multi-binary actions."""

    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
        super(Bernoulli, self).__init__()
        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
        self.linear = init(nn.Linear(num_inputs, num_outputs), init_method,
                           lambda x: nn.init.constant_(x, 0), gain)

    def forward(self, x):
        return FixedBernoulli(logits=self.linear(x))
class AddBias(nn.Module):
    """Adds a learned per-channel bias to its input (used for the Gaussian log-std)."""

    def __init__(self, bias):
        super(AddBias, self).__init__()
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        # Broadcast over 2-D (batch, C) or 4-D (batch, C, H, W) inputs.
        shape = (1, -1) if x.dim() == 2 else (1, -1, 1, 1)
        return x + self._bias.t().view(*shape)
| 3,475 | 28.210084 | 86 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/utils/cnn.py | import torch.nn as nn
from .util import init
"""CNN Modules and utils."""
class Flatten(nn.Module):
    """Collapses all non-batch dimensions into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class CNNLayer(nn.Module):
    """Conv -> flatten -> two-layer MLP feature extractor for image observations.

    :param obs_shape: (tuple) observation shape as (channels, width, height).
    :param hidden_size: (int) output feature dimension; the conv uses hidden_size // 2 channels.
    :param use_orthogonal: (bool) orthogonal (True) vs xavier-uniform (False) weight init.
    :param use_ReLU: (bool) ReLU (True) vs Tanh (False) activations.
    :param kernel_size: (int) conv kernel size.
    :param stride: (int) conv stride.
    """
    def __init__(self, obs_shape, hidden_size, use_orthogonal, use_ReLU, kernel_size=3, stride=1):
        super(CNNLayer, self).__init__()

        active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]
        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
        # Gain matched to the chosen activation.
        gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])

        def init_(m):
            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)

        input_channel = obs_shape[0]
        input_width = obs_shape[1]
        input_height = obs_shape[2]

        # NOTE(review): the flattened size (W - k + s) * (H - k + s) equals the
        # conv output size only for stride == 1 (floor((W - k) / s) + 1 in
        # general) — confirm callers never pass stride > 1.
        self.cnn = nn.Sequential(
            init_(nn.Conv2d(in_channels=input_channel,
                            out_channels=hidden_size // 2,
                            kernel_size=kernel_size,
                            stride=stride)
                  ),
            active_func,
            Flatten(),
            init_(nn.Linear(hidden_size // 2 * (input_width - kernel_size + stride) * (input_height - kernel_size + stride),
                            hidden_size)
                  ),
            active_func,
            init_(nn.Linear(hidden_size, hidden_size)), active_func)

    def forward(self, x):
        # Scale raw pixel values into [0, 1] (assumes 0-255 input range).
        x = x / 255.0
        x = self.cnn(x)
        return x
class CNNBase(nn.Module):
    """Thin wrapper building a CNNLayer from the shared args namespace."""

    def __init__(self, args, obs_shape):
        super(CNNBase, self).__init__()

        self._use_orthogonal = args.use_orthogonal
        self._use_ReLU = args.use_ReLU
        self.hidden_size = args.hidden_size

        self.cnn = CNNLayer(obs_shape, self.hidden_size, self._use_orthogonal, self._use_ReLU)

    def forward(self, x):
        return self.cnn(x)
| 1,852 | 30.40678 | 124 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/utils/mlp.py | import torch.nn as nn
from .util import init, get_clones
"""MLP modules."""
class MLPLayer(nn.Module):
    """Input layer plus ``layer_N`` hidden layers, each Linear -> activation -> LayerNorm."""

    def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, use_ReLU):
        super(MLPLayer, self).__init__()
        self._layer_N = layer_N

        active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]
        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
        gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])

        def init_(m):
            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)

        self.fc1 = nn.Sequential(
            init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))
        self.fc_h = nn.Sequential(
            init_(nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))
        # layer_N independent deep copies of the hidden block.
        self.fc2 = get_clones(self.fc_h, self._layer_N)

    def forward(self, x):
        out = self.fc1(x)
        for hidden in self.fc2:
            out = hidden(out)
        return out
class MLPBase(nn.Module):
    """MLP feature extractor with optional LayerNorm applied to the raw input."""

    def __init__(self, args, obs_shape, cat_self=True, attn_internal=False):
        super(MLPBase, self).__init__()

        self._use_feature_normalization = args.use_feature_normalization
        self._use_orthogonal = args.use_orthogonal
        self._use_ReLU = args.use_ReLU
        self._stacked_frames = args.stacked_frames
        self._layer_N = args.layer_N
        self.hidden_size = args.hidden_size

        obs_dim = obs_shape[0]
        if self._use_feature_normalization:
            self.feature_norm = nn.LayerNorm(obs_dim)
        self.mlp = MLPLayer(obs_dim, self.hidden_size,
                            self._layer_N, self._use_orthogonal, self._use_ReLU)

    def forward(self, x):
        if self._use_feature_normalization:
            x = self.feature_norm(x)
        return self.mlp(x)
CADP | CADP-main/CADP-PG/onpolicy/algorithms/utils/popart.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PopArt(torch.nn.Module):
    """PopArt value head: a linear layer plus running target statistics.

    The running mean / mean-square of the targets are tracked with an
    exponential moving average (rate ``beta``) and debiased before use;
    ``update`` rescales weight and bias so that unnormalized outputs are
    preserved when the statistics change.
    """
    def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5, device=torch.device("cpu")):
        super(PopArt, self).__init__()
        # EMA decay rate for the running statistics.
        self.beta = beta
        # Floor on the debiasing term to avoid dividing by ~0 early in training.
        self.epsilon = epsilon
        # Number of leading axes treated as batch axes when averaging targets.
        self.norm_axes = norm_axes
        self.tpdv = dict(dtype=torch.float32, device=device)
        self.input_shape = input_shape
        self.output_shape = output_shape
        # NOTE(review): `.to(...)` on a freshly built nn.Parameter can return a
        # plain (unregistered, possibly non-leaf) tensor when it actually moves
        # device/dtype — confirm these still behave as parameters on GPU.
        self.weight = nn.Parameter(torch.Tensor(output_shape, input_shape)).to(**self.tpdv)
        self.bias = nn.Parameter(torch.Tensor(output_shape)).to(**self.tpdv)
        # Running statistics; excluded from gradient descent.
        self.stddev = nn.Parameter(torch.ones(output_shape), requires_grad=False).to(**self.tpdv)
        self.mean = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
        self.mean_sq = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
        self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialize the linear layer (kaiming/fan-in) and zero the statistics."""
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            torch.nn.init.uniform_(self.bias, -bound, bound)
        self.mean.zero_()
        self.mean_sq.zero_()
        self.debiasing_term.zero_()
    def forward(self, input_vector):
        """Plain linear transform; accepts numpy or torch input. No normalization here."""
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)
        return F.linear(input_vector, self.weight, self.bias)
    @torch.no_grad()
    def update(self, input_vector):
        """Fold a batch of targets into the EMA statistics, then rescale the layer.

        The rescaling (w' = w * sigma_old / sigma_new, analogous for bias)
        keeps the layer's unnormalized outputs unchanged across the update.
        """
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)
        old_mean, old_var = self.debiased_mean_var()
        old_stddev = torch.sqrt(old_var)
        # Average over the leading norm_axes batch axes.
        batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
        batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
        self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
        self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
        self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))
        self.stddev = (self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4)
        new_mean, new_var = self.debiased_mean_var()
        new_stddev = torch.sqrt(new_var)
        # NOTE(review): these assignments replace the Parameter attributes with
        # plain tensors — verify any optimizer holding references still updates
        # the tensors actually used by forward().
        self.weight = self.weight * old_stddev / new_stddev
        self.bias = (old_stddev * self.bias + old_mean - new_mean) / new_stddev
    def debiased_mean_var(self):
        """Return EMA-bias-corrected mean and variance (variance floored at 1e-2)."""
        debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
        debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
        return debiased_mean, debiased_var
    def normalize(self, input_vector):
        """Map raw targets into normalized space: (x - mean) / std. Returns a tensor."""
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)
        mean, var = self.debiased_mean_var()
        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
        return out
    def denormalize(self, input_vector):
        """Map normalized predictions back to the raw target scale. Returns numpy."""
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)
        mean, var = self.debiased_mean_var()
        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
        out = out.cpu().numpy()
        return out
| 3,944 | 38.848485 | 119 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/utils/util.py | import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
    """Initialize *module*'s weight and bias in place and return the module."""
    bias_init(module.bias.data)
    weight_init(module.weight.data, gain=gain)
    return module
def get_clones(module, N):
    """Return a ModuleList holding N independent deep copies of *module*."""
    clones = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(clones)
def check(input):
    """Convert a numpy array to a torch tensor; pass anything else through unchanged."""
    if type(input) == np.ndarray:
        return torch.from_numpy(input)
    return input
| 425 | 22.666667 | 76 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/utils/act.py | from .distributions import Bernoulli, Categorical, DiagGaussian
import torch
import torch.nn as nn
class ACTLayer(nn.Module):
    """
    MLP Module to compute actions.
    :param action_space: (gym.Space) action space.
    :param inputs_dim: (int) dimension of network input.
    :param use_orthogonal: (bool) whether to use orthogonal initialization.
    :param gain: (float) gain of the output layer of the network.
    """
    def __init__(self, action_space, inputs_dim, use_orthogonal, gain):
        super(ACTLayer, self).__init__()
        # Flags for the two composite action-space cases handled below.
        self.mixed_action = False
        self.multi_discrete = False
        if action_space.__class__.__name__ == "Discrete":
            # Single discrete action: one Categorical head.
            action_dim = action_space.n
            self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)
        elif action_space.__class__.__name__ == "Box":
            # Continuous actions: diagonal Gaussian head.
            action_dim = action_space.shape[0]
            self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain)
        elif action_space.__class__.__name__ == "MultiBinary":
            # Independent binary actions: Bernoulli head.
            action_dim = action_space.shape[0]
            self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)
        elif action_space.__class__.__name__ == "MultiDiscrete":
            # One Categorical head per discrete dimension.
            self.multi_discrete = True
            action_dims = action_space.high - action_space.low + 1
            self.action_outs = []
            for action_dim in action_dims:
                self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))
            self.action_outs = nn.ModuleList(self.action_outs)
        else:  # discrete + continous
            # Tuple space assumed to be [Box, Discrete]: Gaussian + Categorical heads.
            self.mixed_action = True
            continous_dim = action_space[0].shape[0]
            discrete_dim = action_space[1].n
            self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continous_dim, use_orthogonal, gain), Categorical(
                inputs_dim, discrete_dim, use_orthogonal, gain)])
    def forward(self, x, available_actions=None, deterministic=False):
        """
        Compute actions and action logprobs from given input.
        :param x: (torch.Tensor) input to network.
        :param available_actions: (torch.Tensor) denotes which actions are available to agent
                                  (if None, all actions available)
        :param deterministic: (bool) whether to sample from action distribution or return the mode.

        :return actions: (torch.Tensor) actions to take.
        :return action_log_probs: (torch.Tensor) log probabilities of taken actions.
        """
        if self.mixed_action :
            # Sample each head and concatenate; log-probs are summed into one scalar per sample.
            actions = []
            action_log_probs = []
            for action_out in self.action_outs:
                action_logit = action_out(x)
                action = action_logit.mode() if deterministic else action_logit.sample()
                action_log_prob = action_logit.log_probs(action)
                actions.append(action.float())
                action_log_probs.append(action_log_prob)
            actions = torch.cat(actions, -1)
            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
        elif self.multi_discrete:
            # Sample each head; keep per-dimension log-probs side by side.
            actions = []
            action_log_probs = []
            for action_out in self.action_outs:
                action_logit = action_out(x)
                action = action_logit.mode() if deterministic else action_logit.sample()
                action_log_prob = action_logit.log_probs(action)
                actions.append(action)
                action_log_probs.append(action_log_prob)
            actions = torch.cat(actions, -1)
            action_log_probs = torch.cat(action_log_probs, -1)
        else:
            # Single head (Discrete / Box / MultiBinary).
            action_logits = self.action_out(x, available_actions)
            actions = action_logits.mode() if deterministic else action_logits.sample()
            action_log_probs = action_logits.log_probs(actions)
        return actions, action_log_probs
    def get_probs(self, x, available_actions=None):
        """
        Compute action probabilities from inputs.
        :param x: (torch.Tensor) input to network.
        :param available_actions: (torch.Tensor) denotes which actions are available to agent
                                  (if None, all actions available)

        :return action_probs: (torch.Tensor) per-action probabilities (heads concatenated
                              along the last dim for composite spaces).
        """
        if self.mixed_action or self.multi_discrete:
            action_probs = []
            for action_out in self.action_outs:
                action_logit = action_out(x)
                action_prob = action_logit.probs
                action_probs.append(action_prob)
            action_probs = torch.cat(action_probs, -1)
        else:
            action_logits = self.action_out(x, available_actions)
            action_probs = action_logits.probs
        return action_probs
    def evaluate_actions(self, x, action, available_actions=None, active_masks=None):
        """
        Compute log probability and entropy of given actions.
        :param x: (torch.Tensor) input to network.
        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.
        :param available_actions: (torch.Tensor) denotes which actions are available to agent
                                  (if None, all actions available)
        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.

        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.
        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
        """
        if self.mixed_action:
            # Split the flat action back into its continuous (2) and discrete (1) parts.
            a, b = action.split((2, 1), -1)
            b = b.long()
            action = [a, b]
            action_log_probs = []
            dist_entropy = []
            for action_out, act in zip(self.action_outs, action):
                action_logit = action_out(x)
                action_log_probs.append(action_logit.log_probs(act))
                if active_masks is not None:
                    # Mask out dead agents when averaging entropy; entropy shape
                    # differs between the Gaussian and Categorical heads.
                    if len(action_logit.entropy().shape) == len(active_masks.shape):
                        dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum())
                    else:
                        dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum())
                else:
                    dist_entropy.append(action_logit.entropy().mean())
            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
            # NOTE(review): ad-hoc entropy weighting (continuous/2.0 + discrete/0.98);
            # flagged as questionable by the original author.
            dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[1] / 0.98
        elif self.multi_discrete:
            # One action column per head: iterate dims in lockstep with the heads.
            action = torch.transpose(action, 0, 1)
            action_log_probs = []
            dist_entropy = []
            for action_out, act in zip(self.action_outs, action):
                action_logit = action_out(x)
                action_log_probs.append(action_logit.log_probs(act))
                if active_masks is not None:
                    dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum())
                else:
                    dist_entropy.append(action_logit.entropy().mean())
            # NOTE(review): per-dim log-probs are concatenated, not summed — original
            # author flagged this as possibly wrong.
            action_log_probs = torch.cat(action_log_probs, -1)
            dist_entropy = sum(dist_entropy)/len(dist_entropy)
        else:
            action_logits = self.action_out(x, available_actions)
            action_log_probs = action_logits.log_probs(action)
            if active_masks is not None:
                dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()
            else:
                dist_entropy = action_logits.entropy().mean()
        return action_log_probs, dist_entropy
| 7,872 | 47.300613 | 121 | py |
CADP | CADP-main/CADP-PG/onpolicy/algorithms/utils/rnn.py | import torch
import torch.nn as nn
"""RNN modules."""
class RNNLayer(nn.Module):
    """GRU wrapper that resets hidden state via masks and LayerNorms its outputs.

    Supports two call patterns in ``forward``:
    * rollout: one timestep per environment (x and hxs have equal batch size);
    * training: a flattened (T * N, -1) chunk plus hidden states for the N
      environments, processed in segments between episode-reset steps.
    """
    def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):
        super(RNNLayer, self).__init__()
        # Number of stacked GRU layers.
        self._recurrent_N = recurrent_N
        # Orthogonal (True) vs xavier-uniform (False) weight initialization.
        self._use_orthogonal = use_orthogonal
        self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)
        for name, param in self.rnn.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0)
            elif 'weight' in name:
                if self._use_orthogonal:
                    nn.init.orthogonal_(param)
                else:
                    nn.init.xavier_uniform_(param)
        self.norm = nn.LayerNorm(outputs_dim)
    def forward(self, x, hxs, masks):
        """Run the GRU over x, zeroing hidden state wherever masks == 0.

        :param x: inputs, either (N, -1) for a single step or flattened (T * N, -1).
        :param hxs: hidden states, (N, recurrent_N, hidden) layout (batch first).
        :param masks: 0 at episode-reset steps, 1 otherwise.
        :return: (LayerNorm-ed outputs, updated hidden states).
        """
        if x.size(0) == hxs.size(0):
            # Single-step path: multiply hxs by the mask so reset envs start from zeros.
            x, hxs = self.rnn(x.unsqueeze(0),
                              (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())
            x = x.squeeze(0)
            hxs = hxs.transpose(0, 1)
        else:
            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
            N = hxs.size(0)
            T = int(x.size(0) / N)
            # unflatten
            x = x.view(T, N, x.size(1))
            # Same deal with masks
            masks = masks.view(T, N)
            # Let's figure out which steps in the sequence have a zero for any agent
            # We will always assume t=0 has a zero in it as that makes the logic cleaner
            has_zeros = ((masks[1:] == 0.0)
                         .any(dim=-1)
                         .nonzero()
                         .squeeze()
                         .cpu())
            # +1 to correct the masks[1:]
            if has_zeros.dim() == 0:
                # Deal with scalar
                has_zeros = [has_zeros.item() + 1]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # add t=0 and t=T to the list
            has_zeros = [0] + has_zeros + [T]
            hxs = hxs.transpose(0, 1)
            outputs = []
            for i in range(len(has_zeros) - 1):
                # We can now process steps that don't have any zeros in masks together!
                # This is much faster
                start_idx = has_zeros[i]
                end_idx = has_zeros[i + 1]
                # Zero the hidden state at the segment boundary where a reset occurred.
                temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()
                rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)
                outputs.append(rnn_scores)
            # assert len(outputs) == T
            # x is a (T, N, -1) tensor
            x = torch.cat(outputs, dim=0)
            # flatten
            x = x.reshape(T * N, -1)
            hxs = hxs.transpose(0, 1)
        x = self.norm(x)
        return x, hxs
| 2,849 | 34.185185 | 116 | py |
CADP | CADP-main/CADP-PG/onpolicy/scripts/train/train_smac.py | #!/usr/bin/env python
import sys
import os
sys.path.append("../")
import wandb
import socket
import setproctitle
import numpy as np
from pathlib import Path
import torch
from onpolicy.config import get_config
from onpolicy.envs.starcraft2.StarCraft2_Env import StarCraft2Env
from onpolicy.envs.starcraft2.smac_maps import get_map_params
from onpolicy.envs.env_wrappers import ShareSubprocVecEnv, ShareDummyVecEnv
"""Train script for SMAC."""
def make_train_env(all_args):
    """Build the vectorized training env: Dummy for 1 rollout thread, Subproc otherwise."""
    def get_env_fn(rank):
        def init_env():
            if all_args.env_name != "StarCraft2":
                print("Can not support the " + all_args.env_name + "environment.")
                raise NotImplementedError
            env = StarCraft2Env(all_args)
            # Give each worker a distinct seed derived from the base seed.
            env.seed(all_args.seed + rank * 1000)
            return env
        return init_env

    n = all_args.n_rollout_threads
    if n == 1:
        return ShareDummyVecEnv([get_env_fn(0)])
    return ShareSubprocVecEnv([get_env_fn(i) for i in range(n)])
def make_eval_env(all_args):
    """Build the vectorized evaluation env with a seed range disjoint from training."""
    def get_env_fn(rank):
        def init_env():
            if all_args.env_name != "StarCraft2":
                print("Can not support the " + all_args.env_name + "environment.")
                raise NotImplementedError
            env = StarCraft2Env(all_args)
            # Offset eval seeds far away from the training seeds.
            env.seed(all_args.seed * 50000 + rank * 10000)
            return env
        return init_env

    n = all_args.n_eval_rollout_threads
    if n == 1:
        return ShareDummyVecEnv([get_env_fn(0)])
    return ShareSubprocVecEnv([get_env_fn(i) for i in range(n)])
def parse_args(args, parser):
    """Register SMAC-specific options on *parser* and return the parsed known args."""
    parser.add_argument('--map_name', type=str, default='3m',
                        help="Which smac map to run on")
    # State-augmentation switches, all off by default.
    for flag in ("--add_move_state", "--add_local_obs", "--add_distance_state",
                 "--add_enemy_action_state", "--add_agent_id",
                 "--add_visible_state", "--add_xy_state"):
        parser.add_argument(flag, action='store_true', default=False)
    # Switches that default to on; passing the flag turns them off.
    for flag in ("--use_state_agent", "--use_mustalive", "--add_center_xy"):
        parser.add_argument(flag, action='store_false', default=True)
    parser.add_argument("--sight_range", type=int, default=9)

    return parser.parse_known_args(args)[0]
def main(args):
    """Entry point: parse args, configure device/seed/logging, build envs and run training.

    :param args: (list[str]) command-line arguments (without the program name).
    """
    parser = get_config()
    all_args = parse_args(args, parser)
    # Draw a random seed unless the caller pinned one explicitly.
    if not all_args.seed_specify:
        all_args.seed = np.random.randint(10000, 100000)
    print("seed is :", all_args.seed)
    # Derive recurrence / centralized-critic settings from the algorithm name.
    if all_args.algorithm_name == "rmappo":
        print("u are choosing to use rmappo, we set use_recurrent_policy to be True")
        all_args.use_recurrent_policy = True
        all_args.use_naive_recurrent_policy = False
    elif all_args.algorithm_name == "mappo":
        print("u are choosing to use mappo, we set use_recurrent_policy & use_naive_recurrent_policy to be False")
        all_args.use_recurrent_policy = False
        all_args.use_naive_recurrent_policy = False
    elif all_args.algorithm_name == "ippo":
        print("u are choosing to use ippo, we set use_centralized_V to be False")
        all_args.use_centralized_V = False
    else:
        raise NotImplementedError
    # cuda
    if all_args.cuda and torch.cuda.is_available():
        print("choose to use gpu...")
        device = torch.device("cuda:0")
        torch.set_num_threads(all_args.n_training_threads)
        if all_args.cuda_deterministic:
            # Trade throughput for reproducible cuDNN kernels.
            torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.deterministic = True
    else:
        print("choose to use cpu...")
        device = torch.device("cpu")
        torch.set_num_threads(all_args.n_training_threads)
    # Results layout: results/<env>/<map>/<algo>/<experiment>[/runN]
    run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[
                       0] + "/results") / all_args.env_name / all_args.map_name / all_args.algorithm_name / all_args.experiment_name
    if not run_dir.exists():
        os.makedirs(str(run_dir))
    if all_args.use_wandb:
        run = wandb.init(config=all_args,
                         project=all_args.env_name,
                         entity=all_args.user_name,
                         notes=socket.gethostname(),
                         name=str(all_args.algorithm_name) + "_" +
                              str(all_args.experiment_name) +
                              "_seed" + str(all_args.seed),
                         group=all_args.map_name,
                         dir=str(run_dir),
                         job_type="training",
                         reinit=True)
    else:
        # Without wandb, create a fresh runN subdirectory for TensorBoard logs.
        if not run_dir.exists():
            curr_run = 'run1'
        else:
            exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if
                             str(folder.name).startswith('run')]
            if len(exst_run_nums) == 0:
                curr_run = 'run1'
            else:
                curr_run = 'run%i' % (max(exst_run_nums) + 1)
        run_dir = run_dir / curr_run
        if not run_dir.exists():
            os.makedirs(str(run_dir))
    # Name the process so it is identifiable in `ps`/`top`.
    setproctitle.setproctitle(
        str(all_args.algorithm_name) + "-" + str(all_args.env_name) + "-" + str(all_args.experiment_name) + "@" + str(
            all_args.user_name))
    # seed
    torch.manual_seed(all_args.seed)
    torch.cuda.manual_seed_all(all_args.seed)
    np.random.seed(all_args.seed)
    # env
    envs = make_train_env(all_args)
    eval_envs = make_eval_env(all_args) if all_args.use_eval else None
    num_agents = get_map_params(all_args.map_name)["n_agents"]
    config = {
        "all_args": all_args,
        "envs": envs,
        "eval_envs": eval_envs,
        "num_agents": num_agents,
        "device": device,
        "run_dir": run_dir
    }
    # run experiments
    if all_args.share_policy:
        from onpolicy.runner.shared.smac_runner import SMACRunner as Runner
    else:
        from onpolicy.runner.separated.smac_runner import SMACRunner as Runner
    runner = Runner(config)
    runner.run()
    # post process
    envs.close()
    if all_args.use_eval and eval_envs is not envs:
        eval_envs.close()
    if all_args.use_wandb:
        run.finish()
    else:
        runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json'))
        runner.writter.close()
# Script entry point: forward CLI args (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 6,859 | 35.296296 | 132 | py |
CADP | CADP-main/CADP-PG/onpolicy/runner/shared/smac_runner.py | import time
import wandb
import numpy as np
from functools import reduce
import torch
from onpolicy.runner.shared.base_runner import Runner
def _t2n(x):
    """Detach *x* from the autograd graph and return it as a CPU numpy array."""
    return x.detach().cpu().numpy()
class SMACRunner(Runner):
"""Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details."""
    def __init__(self, config):
        """Initialize the SMAC runner; all setup is performed by the shared base Runner."""
        super(SMACRunner, self).__init__(config)
    def run(self):
        """Main training loop: collect rollouts, train, checkpoint, log and evaluate."""
        self.warmup()

        start = time.time()
        # Number of PPO updates given steps-per-update = episode_length * n_rollout_threads.
        episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads
        # Timestep at which the last periodic CADP checkpoint was saved.
        tmp_timestep = 0

        last_battles_game = np.zeros(self.n_rollout_threads, dtype=np.float32)
        last_battles_won = np.zeros(self.n_rollout_threads, dtype=np.float32)

        for episode in range(episodes):
            if self.use_linear_lr_decay:
                self.trainer.policy.lr_decay(episode, episodes)

            for step in range(self.episode_length):
                # Sample actions
                values, actions, action_log_probs, rnn_states, rnn_states_critic = self.collect(step)

                # Obser reward and next obs
                obs, share_obs, rewards, dones, infos, available_actions = self.envs.step(actions)

                data = obs, share_obs, rewards, dones, infos, available_actions, \
                       values, actions, action_log_probs, \
                       rnn_states, rnn_states_critic

                # insert data into buffer
                self.insert(data)

            # compute return and update network
            self.compute()
            train_infos = self.train()

            # post process
            total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads
            # save model
            if (episode % self.save_interval == 0 or episode == episodes - 1):
                self.save()
            if getattr(self.all_args, 'use_CADP', False):
                # CADP bookkeeping: checkpoint roughly every 500k steps and enable
                # the CADP loss once the configured breakpoint is passed.
                if (total_num_steps - tmp_timestep) - 500000 > 0:
                    tmp_timestep = total_num_steps
                    self.save_timestep(total_num_steps)
                if total_num_steps > self.all_args.cadp_breakpoint:
                    self.trainer.use_cadp_loss = True

            # log information
            if episode % self.log_interval == 0:
                end = time.time()
                print("\n Map {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
                        .format(self.all_args.map_name,
                                self.algorithm_name,
                                self.experiment_name,
                                episode,
                                episodes,
                                total_num_steps,
                                self.num_env_steps,
                                int(total_num_steps / (end - start))))

                if self.env_name == "StarCraft2":
                    battles_won = []
                    battles_game = []
                    incre_battles_won = []
                    incre_battles_game = []

                    # Per-thread cumulative battle counters; take deltas since last log.
                    for i, info in enumerate(infos):
                        if 'battles_won' in info[0].keys():
                            battles_won.append(info[0]['battles_won'])
                            incre_battles_won.append(info[0]['battles_won']-last_battles_won[i])
                        if 'battles_game' in info[0].keys():
                            battles_game.append(info[0]['battles_game'])
                            incre_battles_game.append(info[0]['battles_game']-last_battles_game[i])

                    incre_win_rate = np.sum(incre_battles_won)/np.sum(incre_battles_game) if np.sum(incre_battles_game)>0 else 0.0
                    print("incre win rate is {}.".format(incre_win_rate))
                    if self.use_wandb:
                        wandb.log({"incre_win_rate": incre_win_rate}, step=total_num_steps)
                    else:
                        self.writter.add_scalars("incre_win_rate", {"incre_win_rate": incre_win_rate}, total_num_steps)

                    last_battles_game = battles_game
                    last_battles_won = battles_won

                # Fraction of agent-steps where agents were dead/inactive.
                train_infos['dead_ratio'] = 1 - self.buffer.active_masks.sum() / reduce(lambda x, y: x*y, list(self.buffer.active_masks.shape))

                self.log_train(train_infos, total_num_steps)

            # eval
            if episode % self.eval_interval == 0 and self.use_eval:
                if getattr(self.all_args, 'use_CADP', False):
                    # Evaluate both the student (attention-value on) and teacher policies.
                    self.policy.actor.use_att_v = True
                    self.eval(total_num_steps, "student_")
                    self.policy.actor.use_att_v = False
                    self.eval(total_num_steps, "teacher_")
                    pass
                else:
                    self.eval(total_num_steps)
def warmup(self):
# reset env
obs, share_obs, available_actions = self.envs.reset()
# replay buffer
if not self.use_centralized_V:
share_obs = obs
self.buffer.share_obs[0] = share_obs.copy()
self.buffer.obs[0] = obs.copy()
self.buffer.available_actions[0] = available_actions.copy()
    @torch.no_grad()
    def collect(self, step):
        """Run the policy on buffer slot `step` for all rollout threads and agents.

        Returns (values, actions, action_log_probs, rnn_states, rnn_states_critic),
        each reshaped back to [n_rollout_threads, num_agents, ...].
        """
        self.trainer.prep_rollout()
        # Flatten [threads, agents, ...] -> [threads*agents, ...] for a single batched forward pass.
        value, action, action_log_prob, rnn_state, rnn_state_critic \
            = self.trainer.policy.get_actions(np.concatenate(self.buffer.share_obs[step]),
                                              np.concatenate(self.buffer.obs[step]),
                                              np.concatenate(self.buffer.rnn_states[step]),
                                              np.concatenate(self.buffer.rnn_states_critic[step]),
                                              np.concatenate(self.buffer.masks[step]),
                                              np.concatenate(self.buffer.available_actions[step]))
        # [self.envs, agents, dim]
        # Undo the flattening: split the batch back into per-thread chunks.
        values = np.array(np.split(_t2n(value), self.n_rollout_threads))
        actions = np.array(np.split(_t2n(action), self.n_rollout_threads))
        action_log_probs = np.array(np.split(_t2n(action_log_prob), self.n_rollout_threads))
        rnn_states = np.array(np.split(_t2n(rnn_state), self.n_rollout_threads))
        rnn_states_critic = np.array(np.split(_t2n(rnn_state_critic), self.n_rollout_threads))
        return values, actions, action_log_probs, rnn_states, rnn_states_critic
    def insert(self, data):
        """Unpack one environment step and push it into the shared replay buffer.

        Builds the three mask arrays from `dones`/`infos`:
        - masks: 0 where the whole episode (all agents done) ended, else 1.
        - active_masks: 0 for individually dead agents; reset to 1 on episode end so
          every agent starts the next episode as alive.
        - bad_masks: 0 where termination came from the episode time limit
          ('bad_transition'), so returns can bootstrap instead of treating it as terminal.
        """
        obs, share_obs, rewards, dones, infos, available_actions, \
        values, actions, action_log_probs, rnn_states, rnn_states_critic = data
        # An env is finished only when every agent in it is done.
        dones_env = np.all(dones, axis=1)
        # Zero RNN states for finished envs so the next episode starts from a clean hidden state.
        rnn_states[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
        rnn_states_critic[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, *self.buffer.rnn_states_critic.shape[3:]), dtype=np.float32)
        masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
        masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)
        active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
        active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)
        active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)
        bad_masks = np.array([[[0.0] if info[agent_id]['bad_transition'] else [1.0] for agent_id in range(self.num_agents)] for info in infos])
        if not self.use_centralized_V:
            share_obs = obs
        self.buffer.insert(share_obs, obs, rnn_states, rnn_states_critic,
                           actions, action_log_probs, values, rewards, masks, bad_masks, active_masks, available_actions)
def log_train(self, train_infos, total_num_steps):
train_infos["average_step_rewards"] = np.mean(self.buffer.rewards)
for k, v in train_infos.items():
if self.use_wandb:
wandb.log({k: v}, step=total_num_steps)
else:
self.writter.add_scalars(k, {k: v}, total_num_steps)
    @torch.no_grad()
    def eval(self, total_num_steps, log_prefix=""):
        """Run deterministic evaluation episodes and log win rate / episode rewards.

        Loops until `self.all_args.eval_episodes` episodes have finished across the
        eval threads, then logs `<log_prefix>eval_win_rate` and average episode rewards.
        """
        eval_battles_won = 0
        eval_episode = 0
        eval_episode_rewards = []
        # NOTE(review): one_episode_rewards accumulates rewards from ALL eval threads and is
        # reset whenever ANY thread finishes — with n_eval_rollout_threads > 1 the per-episode
        # totals mix threads together; confirm this is intended.
        one_episode_rewards = []
        eval_obs, eval_share_obs, eval_available_actions = self.eval_envs.reset()
        eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
        eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)
        while True:
            self.trainer.prep_rollout()
            # Deterministic (greedy) action selection for evaluation.
            eval_actions, eval_rnn_states = \
                self.trainer.policy.act(np.concatenate(eval_obs),
                                        np.concatenate(eval_rnn_states),
                                        np.concatenate(eval_masks),
                                        np.concatenate(eval_available_actions),
                                        deterministic=True)
            eval_actions = np.array(np.split(_t2n(eval_actions), self.n_eval_rollout_threads))
            eval_rnn_states = np.array(np.split(_t2n(eval_rnn_states), self.n_eval_rollout_threads))
            # Obser reward and next obs
            eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, eval_available_actions = self.eval_envs.step(eval_actions)
            one_episode_rewards.append(eval_rewards)
            eval_dones_env = np.all(eval_dones, axis=1)
            # Reset hidden states and masks for threads whose episode just ended.
            eval_rnn_states[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
            # NOTE(review): self.all_args.n_eval_rollout_threads here vs self.n_eval_rollout_threads
            # above — same value, inconsistent style.
            eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)
            eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)
            for eval_i in range(self.n_eval_rollout_threads):
                if eval_dones_env[eval_i]:
                    eval_episode += 1
                    eval_episode_rewards.append(np.sum(one_episode_rewards, axis=0))
                    one_episode_rewards = []
                    if eval_infos[eval_i][0]['won']:
                        eval_battles_won += 1
            if eval_episode >= self.all_args.eval_episodes:
                eval_episode_rewards = np.array(eval_episode_rewards)
                eval_env_infos = {log_prefix + 'eval_average_episode_rewards': eval_episode_rewards}
                self.log_env(eval_env_infos, total_num_steps)
                eval_win_rate = eval_battles_won/eval_episode
                print((log_prefix + "eval win rate is {}.").format(eval_win_rate))
                if self.use_wandb:
                    wandb.log({log_prefix + "eval_win_rate": eval_win_rate}, step=total_num_steps)
                else:
                    self.writter.add_scalars(log_prefix + "eval_win_rate", {log_prefix + "eval_win_rate": eval_win_rate}, total_num_steps)
                break
| 11,392 | 48.320346 | 167 | py |
CADP | CADP-main/CADP-PG/onpolicy/runner/shared/base_runner.py | import wandb
import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
from onpolicy.utils.shared_buffer import SharedReplayBuffer
def _t2n(x):
"""Convert torch tensor to a numpy array."""
return x.detach().cpu().numpy()
class Runner(object):
    """
    Base class for training recurrent policies.
    :param config: (dict) Config dictionary containing parameters for training.
    """
    def __init__(self, config):
        self.all_args = config['all_args']
        self.envs = config['envs']
        self.eval_envs = config['eval_envs']
        self.device = config['device']
        self.num_agents = config['num_agents']
        if config.__contains__("render_envs"):
            self.render_envs = config['render_envs']
        # Mirror the agent count back onto args so downstream modules can read it there.
        self.all_args.num_agents = config['num_agents']
        # parameters
        self.env_name = self.all_args.env_name
        self.algorithm_name = self.all_args.algorithm_name
        self.experiment_name = self.all_args.experiment_name
        self.use_centralized_V = self.all_args.use_centralized_V
        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
        self.num_env_steps = self.all_args.num_env_steps
        self.episode_length = self.all_args.episode_length
        self.n_rollout_threads = self.all_args.n_rollout_threads
        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
        self.n_render_rollout_threads = self.all_args.n_render_rollout_threads
        self.use_linear_lr_decay = self.all_args.use_linear_lr_decay
        self.hidden_size = self.all_args.hidden_size
        self.use_wandb = self.all_args.use_wandb
        self.use_render = self.all_args.use_render
        self.recurrent_N = self.all_args.recurrent_N
        # interval
        self.save_interval = self.all_args.save_interval
        self.use_eval = self.all_args.use_eval
        self.eval_interval = self.all_args.eval_interval
        self.log_interval = self.all_args.log_interval
        # dir
        self.model_dir = self.all_args.model_dir
        # With wandb, everything lives under the wandb run dir; otherwise build
        # logs/ and models/ subdirectories and a tensorboard writer.
        if self.use_wandb:
            self.save_dir = str(wandb.run.dir)
            self.run_dir = str(wandb.run.dir)
        else:
            self.run_dir = config["run_dir"]
            self.log_dir = str(self.run_dir / 'logs')
            if not os.path.exists(self.log_dir):
                os.makedirs(self.log_dir)
            self.writter = SummaryWriter(self.log_dir)
            self.save_dir = str(self.run_dir / 'models')
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)
        # Imported here (not at module top) — presumably to avoid a circular import; confirm.
        from onpolicy.algorithms.r_mappo.r_mappo import R_MAPPO as TrainAlgo
        from onpolicy.algorithms.r_mappo.algorithm.rMAPPOPolicy import R_MAPPOPolicy as Policy
        share_observation_space = self.envs.share_observation_space[0] if self.use_centralized_V else self.envs.observation_space[0]
        # policy network
        self.policy = Policy(self.all_args,
                             self.envs.observation_space[0],
                             share_observation_space,
                             self.envs.action_space[0],
                             device = self.device)
        # algorithm
        self.trainer = TrainAlgo(self.all_args, self.policy, device = self.device)
        if self.model_dir is not None:
            # self.restore()
            # NOTE(review): checkpoint timestep is hardcoded — looks like a debug/experiment
            # leftover; confirm before reusing this class with a different model_dir.
            self.restore_timestep(timestep=5526400)
        # buffer
        self.buffer = SharedReplayBuffer(self.all_args,
                                         self.num_agents,
                                         self.envs.observation_space[0],
                                         share_observation_space,
                                         self.envs.action_space[0])
    def run(self):
        """Collect training data, perform training updates, and evaluate policy."""
        raise NotImplementedError
    def warmup(self):
        """Collect warmup pre-training data."""
        raise NotImplementedError
    def collect(self, step):
        """Collect rollouts for training."""
        raise NotImplementedError
    def insert(self, data):
        """
        Insert data into buffer.
        :param data: (Tuple) data to insert into training buffer.
        """
        raise NotImplementedError
    @torch.no_grad()
    def compute(self):
        """Calculate returns for the collected data."""
        self.trainer.prep_rollout()
        # Bootstrap value for the state after the final step, batched over threads*agents.
        next_values = self.trainer.policy.get_values(np.concatenate(self.buffer.share_obs[-1]),
                                                     np.concatenate(self.buffer.rnn_states_critic[-1]),
                                                     np.concatenate(self.buffer.masks[-1]))
        next_values = np.array(np.split(_t2n(next_values), self.n_rollout_threads))
        self.buffer.compute_returns(next_values, self.trainer.value_normalizer)
    def train(self):
        """Train policies with data in buffer. """
        self.trainer.prep_training()
        train_infos = self.trainer.train(self.buffer)
        # Roll the last timestep over to index 0 for the next collection phase.
        self.buffer.after_update()
        return train_infos
    def save(self):
        """Save policy's actor and critic networks."""
        policy_actor = self.trainer.policy.actor
        torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor.pt")
        policy_critic = self.trainer.policy.critic
        torch.save(policy_critic.state_dict(), str(self.save_dir) + "/critic.pt")
        if self.trainer._use_valuenorm:
            policy_vnorm = self.trainer.value_normalizer
            torch.save(policy_vnorm.state_dict(), str(self.save_dir) + "/vnorm.pt")
    def save_timestep(self, timestep):
        """Save policy's actor and critic networks."""
        # Same as save() but with the timestep embedded in the filenames, so multiple
        # checkpoints can coexist; restore_timestep() expects this naming scheme.
        policy_actor = self.trainer.policy.actor
        torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor" + str(timestep) + ".pt")
        policy_critic = self.trainer.policy.critic
        torch.save(policy_critic.state_dict(), str(self.save_dir) + "/critic" + str(timestep) + ".pt")
        if self.trainer._use_valuenorm:
            policy_vnorm = self.trainer.value_normalizer
            torch.save(policy_vnorm.state_dict(), str(self.save_dir) + "/vnorm" + str(timestep) + ".pt")
    def restore(self):
        """Restore policy's networks from a saved model."""
        policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt')
        self.policy.actor.load_state_dict(policy_actor_state_dict)
        # Critic and value-normalizer are only needed for training, not rendering.
        if not self.all_args.use_render:
            policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic.pt')
            self.policy.critic.load_state_dict(policy_critic_state_dict)
            if self.trainer._use_valuenorm:
                policy_vnorm_state_dict = torch.load(str(self.model_dir) + '/vnorm.pt')
                self.trainer.value_normalizer.load_state_dict(policy_vnorm_state_dict)
    def restore_timestep(self, timestep):
        """Restore policy's networks from a saved model."""
        # Counterpart to save_timestep(): loads the checkpoint tagged with `timestep`.
        policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor' + str(timestep) + '.pt')
        self.policy.actor.load_state_dict(policy_actor_state_dict)
        if not self.all_args.use_render:
            policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic' + str(timestep) + '.pt')
            self.policy.critic.load_state_dict(policy_critic_state_dict)
            if self.trainer._use_valuenorm:
                policy_vnorm_state_dict = torch.load(str(self.model_dir) + '/vnorm' + str(timestep) + '.pt')
                self.trainer.value_normalizer.load_state_dict(policy_vnorm_state_dict)
    def log_train(self, train_infos, total_num_steps):
        """
        Log training info.
        :param train_infos: (dict) information about training update.
        :param total_num_steps: (int) total number of training env steps.
        """
        for k, v in train_infos.items():
            if self.use_wandb:
                wandb.log({k: v}, step=total_num_steps)
            else:
                self.writter.add_scalars(k, {k: v}, total_num_steps)
    def log_env(self, env_infos, total_num_steps):
        """
        Log env info.
        :param env_infos: (dict) information about env state.
        :param total_num_steps: (int) total number of training env steps.
        """
        for k, v in env_infos.items():
            # Skip empty collections — np.mean([]) would warn and produce NaN.
            if len(v)>0:
                if self.use_wandb:
                    wandb.log({k: np.mean(v)}, step=total_num_steps)
                else:
                    self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps)
| 8,591 | 42.836735 | 132 | py |
CADP | CADP-main/CADP-PG/onpolicy/runner/separated/base_runner.py |
import time
import wandb
import os
import numpy as np
from itertools import chain
import torch
from tensorboardX import SummaryWriter
from onpolicy.utils.separated_buffer import SeparatedReplayBuffer
from onpolicy.utils.util import update_linear_schedule
def _t2n(x):
return x.detach().cpu().numpy()
class Runner(object):
    """Base runner for the *separated* setting: one policy/trainer/buffer per agent.

    :param config: (dict) config with 'all_args', 'envs', 'eval_envs', 'device',
        'num_agents' and (when not using wandb/render) 'run_dir'.
    """
    def __init__(self, config):
        self.all_args = config['all_args']
        self.envs = config['envs']
        self.eval_envs = config['eval_envs']
        self.device = config['device']
        self.num_agents = config['num_agents']
        # parameters
        self.env_name = self.all_args.env_name
        self.algorithm_name = self.all_args.algorithm_name
        self.experiment_name = self.all_args.experiment_name
        self.use_centralized_V = self.all_args.use_centralized_V
        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
        self.num_env_steps = self.all_args.num_env_steps
        self.episode_length = self.all_args.episode_length
        self.n_rollout_threads = self.all_args.n_rollout_threads
        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
        self.use_linear_lr_decay = self.all_args.use_linear_lr_decay
        self.hidden_size = self.all_args.hidden_size
        self.use_wandb = self.all_args.use_wandb
        self.use_render = self.all_args.use_render
        self.recurrent_N = self.all_args.recurrent_N
        # interval
        self.save_interval = self.all_args.save_interval
        self.use_eval = self.all_args.use_eval
        self.eval_interval = self.all_args.eval_interval
        self.log_interval = self.all_args.log_interval
        # dir
        self.model_dir = self.all_args.model_dir
        if self.use_render:
            # NOTE(review): imageio appears unused in this method — presumably needed by
            # subclasses' render path; confirm before removing.
            import imageio
            self.run_dir = config["run_dir"]
            self.gif_dir = str(self.run_dir / 'gifs')
            if not os.path.exists(self.gif_dir):
                os.makedirs(self.gif_dir)
        else:
            if self.use_wandb:
                self.save_dir = str(wandb.run.dir)
            else:
                self.run_dir = config["run_dir"]
                self.log_dir = str(self.run_dir / 'logs')
                if not os.path.exists(self.log_dir):
                    os.makedirs(self.log_dir)
                self.writter = SummaryWriter(self.log_dir)
                self.save_dir = str(self.run_dir / 'models')
                if not os.path.exists(self.save_dir):
                    os.makedirs(self.save_dir)
        # Imported here (not at module top) — presumably to avoid a circular import; confirm.
        from onpolicy.algorithms.r_mappo.r_mappo import R_MAPPO as TrainAlgo
        from onpolicy.algorithms.r_mappo.algorithm.rMAPPOPolicy import R_MAPPOPolicy as Policy
        # One policy network per agent (separated, i.e. no parameter sharing).
        self.policy = []
        for agent_id in range(self.num_agents):
            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id]
            # policy network
            po = Policy(self.all_args,
                        self.envs.observation_space[agent_id],
                        share_observation_space,
                        self.envs.action_space[agent_id],
                        device = self.device)
            self.policy.append(po)
        if self.model_dir is not None:
            self.restore()
        self.trainer = []
        self.buffer = []
        for agent_id in range(self.num_agents):
            # algorithm
            tr = TrainAlgo(self.all_args, self.policy[agent_id], device = self.device)
            # buffer
            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id]
            bu = SeparatedReplayBuffer(self.all_args,
                                       self.envs.observation_space[agent_id],
                                       share_observation_space,
                                       self.envs.action_space[agent_id])
            self.buffer.append(bu)
            self.trainer.append(tr)
    def run(self):
        """Main train loop — implemented by environment-specific subclasses."""
        raise NotImplementedError
    def warmup(self):
        """Collect warmup pre-training data — implemented by subclasses."""
        raise NotImplementedError
    def collect(self, step):
        """Collect rollouts for training — implemented by subclasses."""
        raise NotImplementedError
    def insert(self, data):
        """Insert a step of data into each agent's buffer — implemented by subclasses."""
        raise NotImplementedError
    @torch.no_grad()
    def compute(self):
        """Compute bootstrap values and returns for each agent's buffer."""
        for agent_id in range(self.num_agents):
            self.trainer[agent_id].prep_rollout()
            next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1],
                                                                self.buffer[agent_id].rnn_states_critic[-1],
                                                                self.buffer[agent_id].masks[-1])
            next_value = _t2n(next_value)
            self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer)
    def train(self):
        """Run one PPO update per agent; returns the list of per-agent train infos."""
        train_infos = []
        for agent_id in range(self.num_agents):
            self.trainer[agent_id].prep_training()
            train_info = self.trainer[agent_id].train(self.buffer[agent_id])
            train_infos.append(train_info)
            self.buffer[agent_id].after_update()
        return train_infos
    def save(self):
        """Save every agent's actor/critic (and value normalizer) state dicts."""
        # NOTE(review): "vnrom" is a typo for "vnorm", but restore() uses the same
        # spelling, so existing checkpoints load fine — renaming would break them.
        for agent_id in range(self.num_agents):
            policy_actor = self.trainer[agent_id].policy.actor
            torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor_agent" + str(agent_id) + ".pt")
            policy_critic = self.trainer[agent_id].policy.critic
            torch.save(policy_critic.state_dict(), str(self.save_dir) + "/critic_agent" + str(agent_id) + ".pt")
            if self.trainer[agent_id]._use_valuenorm:
                policy_vnrom = self.trainer[agent_id].value_normalizer
                torch.save(policy_vnrom.state_dict(), str(self.save_dir) + "/vnrom_agent" + str(agent_id) + ".pt")
    def restore(self):
        """Load every agent's networks from self.model_dir (counterpart to save())."""
        for agent_id in range(self.num_agents):
            policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt')
            self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict)
            policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt')
            self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict)
            if self.trainer[agent_id]._use_valuenorm:
                policy_vnrom_state_dict = torch.load(str(self.model_dir) + '/vnrom_agent' + str(agent_id) + '.pt')
                self.trainer[agent_id].value_normalizer.load_state_dict(policy_vnrom_state_dict)
    def log_train(self, train_infos, total_num_steps):
        """Log per-agent training metrics, prefixed with 'agent<i>/'."""
        for agent_id in range(self.num_agents):
            for k, v in train_infos[agent_id].items():
                agent_k = "agent%i/" % agent_id + k
                if self.use_wandb:
                    wandb.log({agent_k: v}, step=total_num_steps)
                else:
                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)
    def log_env(self, env_infos, total_num_steps):
        """Log environment metrics (mean of each non-empty collection)."""
        for k, v in env_infos.items():
            if len(v) > 0:
                if self.use_wandb:
                    wandb.log({k: np.mean(v)}, step=total_num_steps)
                else:
                    self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps)
| 7,371 | 42.364706 | 150 | py |
CADP | CADP-main/CADP-PG/onpolicy/utils/valuenorm.py |
import numpy as np
import torch
import torch.nn as nn
class ValueNorm(nn.Module):
    """Normalize a vector of observations - across the first norm_axes dimensions.

    Running first/second moments are tracked with an exponential moving average
    (decay ``beta``) plus a debiasing term (Adam-style bias correction).
    """

    def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5):
        """
        :param input_shape: shape of the trailing (per-element) dimensions being normalized.
        :param norm_axes: number of leading axes treated as batch axes.
        :param beta: EMA decay for the running moments.
        :param per_element_update: if True, apply the decay once per batch element
            (large batches move the statistics further).
        :param epsilon: lower clamp for the debiasing term to avoid division by ~0.
        """
        super(ValueNorm, self).__init__()

        self.input_shape = input_shape
        self.norm_axes = norm_axes
        self.epsilon = epsilon
        self.beta = beta
        self.per_element_update = per_element_update

        # Parameters with requires_grad=False: they follow .to(device)/state_dict()
        # but are never touched by the optimizer.
        self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False)
        self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False)
        self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False)

        self.reset_parameters()

    def reset_parameters(self):
        """Zero all running statistics."""
        self.running_mean.zero_()
        self.running_mean_sq.zero_()
        self.debiasing_term.zero_()

    def _to_device_tensor(self, input_vector):
        """Coerce numpy input to a torch tensor on the running stats' device.

        Uses isinstance (not ``type(...) ==``) so np.ndarray subclasses are accepted.
        """
        if isinstance(input_vector, np.ndarray):
            input_vector = torch.from_numpy(input_vector)
        return input_vector.to(self.running_mean.device)

    def running_mean_var(self):
        """Return the debiased (mean, var); var is clamped to >= 1e-2 for stability."""
        debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)
        debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)
        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
        return debiased_mean, debiased_var

    @torch.no_grad()
    def update(self, input_vector):
        """Fold a batch of values into the running first/second moments."""
        input_vector = self._to_device_tensor(input_vector)

        batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
        batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))

        if self.per_element_update:
            batch_size = np.prod(input_vector.size()[:self.norm_axes])
            weight = self.beta ** batch_size
        else:
            weight = self.beta

        self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
        self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
        self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))

    def normalize(self, input_vector):
        """Return (x - mean) / std as a torch tensor on the stats' device."""
        input_vector = self._to_device_tensor(input_vector)

        mean, var = self.running_mean_var()
        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
        return out

    def denormalize(self, input_vector):
        """ Transform normalized data back into original distribution """
        input_vector = self._to_device_tensor(input_vector)

        mean, var = self.running_mean_var()
        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]

        # NOTE: returns numpy (unlike normalize, which returns a tensor) —
        # callers rely on this asymmetry.
        out = out.cpu().numpy()
        return out
| 3,132 | 38.658228 | 105 | py |
CADP | CADP-main/CADP-PG/onpolicy/utils/shared_buffer.py | import torch
import numpy as np
from onpolicy.utils.util import get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])
class SharedReplayBuffer(object):
"""
Buffer to store training data.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param num_agents: (int) number of agents in the env.
:param obs_space: (gym.Space) observation space of agents.
:param cent_obs_space: (gym.Space) centralized observation space of agents.
:param act_space: (gym.Space) action space for agents.
"""
    def __init__(self, args, num_agents, obs_space, cent_obs_space, act_space):
        """Allocate all rollout storage arrays.

        Per-state arrays (obs, rnn states, value preds, masks) hold episode_length + 1
        entries — the extra slot stores the bootstrap step after the final transition.
        Per-transition arrays (actions, log probs, rewards) hold episode_length entries.
        """
        self.episode_length = args.episode_length
        self.n_rollout_threads = args.n_rollout_threads
        self.hidden_size = args.hidden_size
        self.recurrent_N = args.recurrent_N
        self.gamma = args.gamma
        self.gae_lambda = args.gae_lambda
        self._use_gae = args.use_gae
        self._use_popart = args.use_popart
        self._use_valuenorm = args.use_valuenorm
        self._use_proper_time_limits = args.use_proper_time_limits
        obs_shape = get_shape_from_obs_space(obs_space)
        share_obs_shape = get_shape_from_obs_space(cent_obs_space)
        # A list in the last slot marks a composite space — keep only the leading dim.
        if type(obs_shape[-1]) == list:
            obs_shape = obs_shape[:1]
        if type(share_obs_shape[-1]) == list:
            share_obs_shape = share_obs_shape[:1]
        self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape),
                                  dtype=np.float32)
        self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)
        self.rnn_states = np.zeros(
            (self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size),
            dtype=np.float32)
        self.rnn_states_critic = np.zeros_like(self.rnn_states)
        self.value_preds = np.zeros(
            (self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
        self.returns = np.zeros_like(self.value_preds)
        # Action-availability masks only make sense for discrete action spaces.
        if act_space.__class__.__name__ == 'Discrete':
            self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n),
                                             dtype=np.float32)
        else:
            self.available_actions = None
        act_shape = get_shape_from_act_space(act_space)
        self.actions = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.action_log_probs = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.rewards = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
        # masks: 0 where the env terminated; active_masks: 0 for dead agents;
        # bad_masks: 0 where termination came from a time limit (truncation).
        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
        self.bad_masks = np.ones_like(self.masks)
        self.active_masks = np.ones_like(self.masks)
        # Write cursor, advanced (mod episode_length) by insert().
        self.step = 0
def insert(self, share_obs, obs, rnn_states_actor, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
"""
Insert data into the buffer.
:param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param obs: (np.ndarray) local agent observations.
:param rnn_states_actor: (np.ndarray) RNN states for actor network.
:param rnn_states_critic: (np.ndarray) RNN states for critic network.
:param actions:(np.ndarray) actions taken by agents.
:param action_log_probs:(np.ndarray) log probs of actions taken by agents
:param value_preds: (np.ndarray) value function prediction at each step.
:param rewards: (np.ndarray) reward collected at each step.
:param masks: (np.ndarray) denotes whether the environment has terminated or not.
:param bad_masks: (np.ndarray) action space for agents.
:param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
:param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
"""
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states_actor.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
"""
Insert data into the buffer. This insert function is used specifically for Hanabi, which is turn based.
:param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param obs: (np.ndarray) local agent observations.
:param rnn_states_actor: (np.ndarray) RNN states for actor network.
:param rnn_states_critic: (np.ndarray) RNN states for critic network.
:param actions:(np.ndarray) actions taken by agents.
:param action_log_probs:(np.ndarray) log probs of actions taken by agents
:param value_preds: (np.ndarray) value function prediction at each step.
:param rewards: (np.ndarray) reward collected at each step.
:param masks: (np.ndarray) denotes whether the environment has terminated or not.
:param bad_masks: (np.ndarray) denotes indicate whether whether true terminal state or due to episode limit
:param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
:param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
"""
self.share_obs[self.step] = share_obs.copy()
self.obs[self.step] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def after_update(self):
"""Copy last timestep data to first index. Called after update to model."""
self.share_obs[0] = self.share_obs[-1].copy()
self.obs[0] = self.obs[-1].copy()
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
self.active_masks[0] = self.active_masks[-1].copy()
if self.available_actions is not None:
self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
"""Copy last timestep data to first index. This method is used for Hanabi."""
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
    def compute_returns(self, next_value, value_normalizer=None):
        """
        Compute returns either as discounted sum of rewards, or using GAE.
        :param next_value: (np.ndarray) value predictions for the step after the last episode step.
        :param value_normalizer: (PopArt) If not None, PopArt value normalizer instance.

        Four branches: {proper time limits on/off} x {GAE on/off}. With popart/valuenorm
        enabled, stored value_preds are in normalized space, so they are denormalized
        before being combined with raw rewards. bad_masks == 0 marks a time-limit
        truncation: the GAE accumulator is cut there and the return falls back to the
        value prediction instead of treating it as a true terminal state.
        """
        if self._use_proper_time_limits:
            if self._use_gae:
                self.value_preds[-1] = next_value
                gae = 0
                # Backward recursion over the rollout.
                for step in reversed(range(self.rewards.shape[0])):
                    if self._use_popart or self._use_valuenorm:
                        # step + 1
                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
                            self.value_preds[step + 1]) * self.masks[step + 1] \
                                - value_normalizer.denormalize(self.value_preds[step])
                        gae = delta + self.gamma * self.gae_lambda * gae * self.masks[step + 1]
                        # Zero the accumulator across time-limit truncations.
                        gae = gae * self.bad_masks[step + 1]
                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                    else:
                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
                                self.value_preds[step]
                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                        gae = gae * self.bad_masks[step + 1]
                        self.returns[step] = gae + self.value_preds[step]
            else:
                self.returns[-1] = next_value
                for step in reversed(range(self.rewards.shape[0])):
                    if self._use_popart or self._use_valuenorm:
                        # On truncation (bad_masks == 0) bootstrap from the value prediction.
                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
                            step]) * self.bad_masks[step + 1] \
                                             + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(
                            self.value_preds[step])
                    else:
                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
                            step]) * self.bad_masks[step + 1] \
                                             + (1 - self.bad_masks[step + 1]) * self.value_preds[step]
        else:
            if self._use_gae:
                self.value_preds[-1] = next_value
                gae = 0
                for step in reversed(range(self.rewards.shape[0])):
                    if self._use_popart or self._use_valuenorm:
                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
                            self.value_preds[step + 1]) * self.masks[step + 1] \
                                - value_normalizer.denormalize(self.value_preds[step])
                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                    else:
                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
                                self.value_preds[step]
                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                        self.returns[step] = gae + self.value_preds[step]
            else:
                self.returns[-1] = next_value
                # Plain discounted-return recursion, cut at terminations by masks.
                for step in reversed(range(self.rewards.shape[0])):
                    self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
"""
Yield training data for MLP policies.
:param advantages: (np.ndarray) advantage estimates.
:param num_mini_batch: (int) number of minibatches to split the batch into.
:param mini_batch_size: (int) number of samples in each minibatch.
"""
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) * number of agents ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(n_rollout_threads, episode_length, num_agents,
n_rollout_threads * episode_length * num_agents,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
rand = torch.randperm(batch_size).numpy()
sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]
share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])
rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
value_preds = self.value_preds[:-1].reshape(-1, 1)
returns = self.returns[:-1].reshape(-1, 1)
masks = self.masks[:-1].reshape(-1, 1)
active_masks = self.active_masks[:-1].reshape(-1, 1)
action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, 1)
for indices in sampler:
# obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
share_obs_batch = share_obs[indices]
obs_batch = obs[indices]
rnn_states_batch = rnn_states[indices]
rnn_states_critic_batch = rnn_states_critic[indices]
actions_batch = actions[indices]
if self.available_actions is not None:
available_actions_batch = available_actions[indices]
else:
available_actions_batch = None
value_preds_batch = value_preds[indices]
return_batch = returns[indices]
masks_batch = masks[indices]
active_masks_batch = active_masks[indices]
old_action_log_probs_batch = action_log_probs[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages[indices]
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
adv_targ, available_actions_batch
    def naive_recurrent_generator(self, advantages, num_mini_batch):
        """
        Yield training data for non-chunked RNN training.
        :param advantages: (np.ndarray) advantage estimates.
        :param num_mini_batch: (int) number of minibatches to split the batch into.
        """
        episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
        # One sample unit is a whole (thread, agent) episode, so the batch is N*M.
        batch_size = n_rollout_threads * num_agents
        assert n_rollout_threads * num_agents >= num_mini_batch, (
            "PPO requires the number of processes ({})* number of agents ({}) "
            "to be greater than or equal to the number of "
            "PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch))
        num_envs_per_batch = batch_size // num_mini_batch
        perm = torch.randperm(batch_size).numpy()
        # Fold the (thread, agent) axes into a single batch axis of size N*M.
        share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])
        obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])
        rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])
        rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])
        actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
        if self.available_actions is not None:
            available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])
        value_preds = self.value_preds.reshape(-1, batch_size, 1)
        returns = self.returns.reshape(-1, batch_size, 1)
        masks = self.masks.reshape(-1, batch_size, 1)
        active_masks = self.active_masks.reshape(-1, batch_size, 1)
        action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])
        advantages = advantages.reshape(-1, batch_size, 1)
        for start_ind in range(0, batch_size, num_envs_per_batch):
            share_obs_batch = []
            obs_batch = []
            rnn_states_batch = []
            rnn_states_critic_batch = []
            actions_batch = []
            available_actions_batch = []
            value_preds_batch = []
            return_batch = []
            masks_batch = []
            active_masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []
            for offset in range(num_envs_per_batch):
                ind = perm[start_ind + offset]
                # Full episode for one (thread, agent) pair; drop the trailing
                # bootstrap entry from the (T+1)-length arrays.
                share_obs_batch.append(share_obs[:-1, ind])
                obs_batch.append(obs[:-1, ind])
                # Only the initial RNN state is kept; later states are recomputed.
                rnn_states_batch.append(rnn_states[0:1, ind])
                rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
                actions_batch.append(actions[:, ind])
                if self.available_actions is not None:
                    available_actions_batch.append(available_actions[:-1, ind])
                value_preds_batch.append(value_preds[:-1, ind])
                return_batch.append(returns[:-1, ind])
                masks_batch.append(masks[:-1, ind])
                active_masks_batch.append(active_masks[:-1, ind])
                old_action_log_probs_batch.append(action_log_probs[:, ind])
                adv_targ.append(advantages[:, ind])
            # [N[T, dim]]
            T, N = self.episode_length, num_envs_per_batch
            # These are all from_numpys of size (T, N, -1)
            share_obs_batch = np.stack(share_obs_batch, 1)
            obs_batch = np.stack(obs_batch, 1)
            actions_batch = np.stack(actions_batch, 1)
            if self.available_actions is not None:
                available_actions_batch = np.stack(available_actions_batch, 1)
            value_preds_batch = np.stack(value_preds_batch, 1)
            return_batch = np.stack(return_batch, 1)
            masks_batch = np.stack(masks_batch, 1)
            active_masks_batch = np.stack(active_masks_batch, 1)
            old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
            adv_targ = np.stack(adv_targ, 1)
            # States is just a (N, dim) from_numpy [N[1,dim]]
            rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
            rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
            # Flatten the (T, N, ...) from_numpys to (T * N, ...)
            share_obs_batch = _flatten(T, N, share_obs_batch)
            obs_batch = _flatten(T, N, obs_batch)
            actions_batch = _flatten(T, N, actions_batch)
            if self.available_actions is not None:
                available_actions_batch = _flatten(T, N, available_actions_batch)
            else:
                available_actions_batch = None
            value_preds_batch = _flatten(T, N, value_preds_batch)
            return_batch = _flatten(T, N, return_batch)
            masks_batch = _flatten(T, N, masks_batch)
            active_masks_batch = _flatten(T, N, active_masks_batch)
            old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
            adv_targ = _flatten(T, N, adv_targ)
            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
                  value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
                  adv_targ, available_actions_batch
    def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
        """
        Yield training data for chunked RNN training.
        :param advantages: (np.ndarray) advantage estimates.
        :param num_mini_batch: (int) number of minibatches to split the batch into.
        :param data_chunk_length: (int) length of sequence chunks with which to train RNN.
        """
        episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
        batch_size = n_rollout_threads * episode_length * num_agents
        data_chunks = batch_size // data_chunk_length  # [C=r*T*M/L]
        mini_batch_size = data_chunks // num_mini_batch
        rand = torch.randperm(data_chunks).numpy()
        sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]
        # Reorder to (thread, agent, time, ...) and flatten so consecutive rows
        # within a chunk come from the same (thread, agent) trajectory.
        if len(self.share_obs.shape) > 4:
            # Image-like observations carry extra trailing dims.
            share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
            obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])
        else:
            share_obs = _cast(self.share_obs[:-1])
            obs = _cast(self.obs[:-1])
        actions = _cast(self.actions)
        action_log_probs = _cast(self.action_log_probs)
        advantages = _cast(advantages)
        value_preds = _cast(self.value_preds[:-1])
        returns = _cast(self.returns[:-1])
        masks = _cast(self.masks[:-1])
        active_masks = _cast(self.active_masks[:-1])
        # rnn_states = _cast(self.rnn_states[:-1])
        # rnn_states_critic = _cast(self.rnn_states_critic[:-1])
        # RNN states have an extra (recurrent_N, hidden) tail, hence the explicit transpose.
        rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])
        rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1,
                                                                                         *self.rnn_states_critic.shape[
                                                                                          3:])
        if self.available_actions is not None:
            available_actions = _cast(self.available_actions[:-1])
        for indices in sampler:
            share_obs_batch = []
            obs_batch = []
            rnn_states_batch = []
            rnn_states_critic_batch = []
            actions_batch = []
            available_actions_batch = []
            value_preds_batch = []
            return_batch = []
            masks_batch = []
            active_masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []
            for index in indices:
                # Each sampled index addresses one contiguous chunk of length L.
                ind = index * data_chunk_length
                # size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
                share_obs_batch.append(share_obs[ind:ind + data_chunk_length])
                obs_batch.append(obs[ind:ind + data_chunk_length])
                actions_batch.append(actions[ind:ind + data_chunk_length])
                if self.available_actions is not None:
                    available_actions_batch.append(available_actions[ind:ind + data_chunk_length])
                value_preds_batch.append(value_preds[ind:ind + data_chunk_length])
                return_batch.append(returns[ind:ind + data_chunk_length])
                masks_batch.append(masks[ind:ind + data_chunk_length])
                active_masks_batch.append(active_masks[ind:ind + data_chunk_length])
                old_action_log_probs_batch.append(action_log_probs[ind:ind + data_chunk_length])
                adv_targ.append(advantages[ind:ind + data_chunk_length])
                # size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
                rnn_states_batch.append(rnn_states[ind])
                rnn_states_critic_batch.append(rnn_states_critic[ind])
            L, N = data_chunk_length, mini_batch_size
            # These are all from_numpys of size (L, N, Dim)
            share_obs_batch = np.stack(share_obs_batch, axis=1)
            obs_batch = np.stack(obs_batch, axis=1)
            actions_batch = np.stack(actions_batch, axis=1)
            if self.available_actions is not None:
                available_actions_batch = np.stack(available_actions_batch, axis=1)
            value_preds_batch = np.stack(value_preds_batch, axis=1)
            return_batch = np.stack(return_batch, axis=1)
            masks_batch = np.stack(masks_batch, axis=1)
            active_masks_batch = np.stack(active_masks_batch, axis=1)
            old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)
            adv_targ = np.stack(adv_targ, axis=1)
            # States is just a (N, -1) from_numpy
            rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
            rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
            # Flatten the (L, N, ...) from_numpys to (L * N, ...)
            share_obs_batch = _flatten(L, N, share_obs_batch)
            obs_batch = _flatten(L, N, obs_batch)
            actions_batch = _flatten(L, N, actions_batch)
            if self.available_actions is not None:
                available_actions_batch = _flatten(L, N, available_actions_batch)
            else:
                available_actions_batch = None
            value_preds_batch = _flatten(L, N, value_preds_batch)
            return_batch = _flatten(L, N, return_batch)
            masks_batch = _flatten(L, N, masks_batch)
            active_masks_batch = _flatten(L, N, active_masks_batch)
            old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
            adv_targ = _flatten(L, N, adv_targ)
            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
                  value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
                  adv_targ, available_actions_batch
| 27,173 | 53.89697 | 120 | py |
CADP | CADP-main/CADP-PG/onpolicy/utils/util.py | import numpy as np
import math
import torch
def get_params_size(params_list):
    """Return the total parameter count of *params_list*, formatted in thousands (e.g. "42K")."""
    total = 0
    for p in params_list:
        total += np.prod(list(p.size()))
    return "{:.0f}K".format(total / 1000)
def check(input):
    """Convert a numpy array to a torch tensor; pass any other value through unchanged.

    Fix: the previous implementation implicitly returned ``None`` for any
    non-ndarray input (e.g. a value that is already a tensor), silently
    dropping the value at call sites. Non-ndarray inputs are now returned
    as-is, which is backward-compatible for every ndarray caller.
    """
    if type(input) == np.ndarray:
        return torch.from_numpy(input)
    return input
def get_gard_norm(it):
    """Return the global L2 norm over the gradients of the given parameters.

    Parameters whose ``.grad`` is ``None`` are skipped. (The "gard" spelling
    is kept because callers reference this name.)
    """
    total = sum(p.grad.norm() ** 2 for p in it if p.grad is not None)
    return math.sqrt(total)
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
    """Linearly anneal the optimizer's learning rate from *initial_lr* toward zero."""
    fraction_done = epoch / float(total_num_epochs)
    new_lr = initial_lr - initial_lr * fraction_done
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def huber_loss(e, d):
    """Elementwise Huber loss: quadratic for |e| <= d, linear (slope d) beyond."""
    abs_e = abs(e)
    quad_mask = (abs_e <= d).float()
    lin_mask = (abs_e > d).float()
    quadratic = quad_mask * e ** 2 / 2
    linear = lin_mask * d * (abs_e - d / 2)
    return quadratic + linear
def mse_loss(e):
    """Half of the squared error: e**2 / 2."""
    return (e * e) / 2
def get_shape_from_obs_space(obs_space):
    """Return the observation shape for a gym ``Box`` space or a raw list of dims.

    Raises NotImplementedError for any other space type.
    """
    kind = obs_space.__class__.__name__
    if kind == 'Box':
        return obs_space.shape
    if kind == 'list':
        return obs_space
    raise NotImplementedError
def get_shape_from_act_space(act_space):
    """Return the flat action dimension for the supported gym action spaces."""
    kind = act_space.__class__.__name__
    if kind == 'Discrete':
        return 1
    if kind == "MultiDiscrete":
        return act_space.shape
    if kind == "Box" or kind == "MultiBinary":
        return act_space.shape[0]
    # any other space (agar): continuous dims of the first sub-space plus one
    return act_space[0].shape[0] + 1
def tile_images(img_nhwc):
    """Tile a batch of N images (N, h, w, c) into one near-square (H*h, W*w, c) image.

    H and W are chosen as close to sqrt(N) as possible (so P=Q when N is a
    perfect square); missing grid cells are padded with all-zero frames.
    """
    imgs = np.asarray(img_nhwc)
    n, h, w, c = imgs.shape
    grid_h = int(np.ceil(np.sqrt(n)))
    grid_w = int(np.ceil(float(n) / grid_h))
    # pad the batch with zero frames so it fills the grid exactly
    padding = [imgs[0] * 0 for _ in range(n, grid_h * grid_w)]
    imgs = np.array(list(imgs) + padding)
    grid = imgs.reshape(grid_h, grid_w, h, w, c)
    grid = grid.transpose(0, 2, 1, 3, 4)
    return grid.reshape(grid_h * h, grid_w * w, c)
| 2,520 | 30.5125 | 82 | py |
CADP | CADP-main/CADP-PG/onpolicy/utils/separated_buffer.py | import torch
import numpy as np
from collections import defaultdict
from onpolicy.utils.util import check, get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1,0,2).reshape(-1, *x.shape[2:])
class SeparatedReplayBuffer(object):
    """Rollout storage for one agent (separated training keeps one buffer per agent).

    Layout is time-major: arrays holding per-state data (obs, rnn states, value
    preds, returns, masks) have ``episode_length + 1`` entries so a bootstrap
    value can sit at the end; per-step data (actions, log-probs, rewards) have
    ``episode_length`` entries. The second axis is the rollout thread.
    """
    def __init__(self, args, obs_space, share_obs_space, act_space):
        """Allocate all storage given the training args and this agent's spaces."""
        self.episode_length = args.episode_length
        self.n_rollout_threads = args.n_rollout_threads
        self.rnn_hidden_size = args.hidden_size
        self.recurrent_N = args.recurrent_N
        self.gamma = args.gamma
        self.gae_lambda = args.gae_lambda
        self._use_gae = args.use_gae
        self._use_popart = args.use_popart
        self._use_valuenorm = args.use_valuenorm
        self._use_proper_time_limits = args.use_proper_time_limits
        obs_shape = get_shape_from_obs_space(obs_space)
        share_obs_shape = get_shape_from_obs_space(share_obs_space)
        # A trailing list element marks a composite space; keep only the leading dim.
        if type(obs_shape[-1]) == list:
            obs_shape = obs_shape[:1]
        if type(share_obs_shape[-1]) == list:
            share_obs_shape = share_obs_shape[:1]
        self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *share_obs_shape), dtype=np.float32)
        self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32)
        self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, self.recurrent_N, self.rnn_hidden_size), dtype=np.float32)
        self.rnn_states_critic = np.zeros_like(self.rnn_states)
        self.value_preds = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)
        self.returns = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)
        # Availability masks only exist for discrete action spaces.
        if act_space.__class__.__name__ == 'Discrete':
            self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, act_space.n), dtype=np.float32)
        else:
            self.available_actions = None
        act_shape = get_shape_from_act_space(act_space)
        self.actions = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32)
        self.action_log_probs = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32)
        self.rewards = np.zeros((self.episode_length, self.n_rollout_threads, 1), dtype=np.float32)
        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)
        self.bad_masks = np.ones_like(self.masks)
        self.active_masks = np.ones_like(self.masks)
        # Circular write cursor over [0, episode_length).
        self.step = 0
    def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
               value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
        """Store one env step; next-state data goes to step+1, per-step data to step."""
        self.share_obs[self.step + 1] = share_obs.copy()
        self.obs[self.step + 1] = obs.copy()
        self.rnn_states[self.step + 1] = rnn_states.copy()
        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
        self.actions[self.step] = actions.copy()
        self.action_log_probs[self.step] = action_log_probs.copy()
        self.value_preds[self.step] = value_preds.copy()
        self.rewards[self.step] = rewards.copy()
        self.masks[self.step + 1] = masks.copy()
        if bad_masks is not None:
            self.bad_masks[self.step + 1] = bad_masks.copy()
        if active_masks is not None:
            self.active_masks[self.step + 1] = active_masks.copy()
        if available_actions is not None:
            self.available_actions[self.step + 1] = available_actions.copy()
        self.step = (self.step + 1) % self.episode_length
    def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
                     value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
        """Variant of insert: obs/available_actions/active_masks are written at the
        current step index rather than step+1."""
        self.share_obs[self.step] = share_obs.copy()
        self.obs[self.step] = obs.copy()
        self.rnn_states[self.step + 1] = rnn_states.copy()
        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
        self.actions[self.step] = actions.copy()
        self.action_log_probs[self.step] = action_log_probs.copy()
        self.value_preds[self.step] = value_preds.copy()
        self.rewards[self.step] = rewards.copy()
        self.masks[self.step + 1] = masks.copy()
        if bad_masks is not None:
            self.bad_masks[self.step + 1] = bad_masks.copy()
        if active_masks is not None:
            self.active_masks[self.step] = active_masks.copy()
        if available_actions is not None:
            self.available_actions[self.step] = available_actions.copy()
        self.step = (self.step + 1) % self.episode_length
    def after_update(self):
        """Carry the final entries over to index 0 as the start of the next rollout."""
        self.share_obs[0] = self.share_obs[-1].copy()
        self.obs[0] = self.obs[-1].copy()
        self.rnn_states[0] = self.rnn_states[-1].copy()
        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
        self.masks[0] = self.masks[-1].copy()
        self.bad_masks[0] = self.bad_masks[-1].copy()
        self.active_masks[0] = self.active_masks[-1].copy()
        if self.available_actions is not None:
            self.available_actions[0] = self.available_actions[-1].copy()
    def chooseafter_update(self):
        """Like after_update, but only carries over RNN states and masks."""
        self.rnn_states[0] = self.rnn_states[-1].copy()
        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
        self.masks[0] = self.masks[-1].copy()
        self.bad_masks[0] = self.bad_masks[-1].copy()
    def compute_returns(self, next_value, value_normalizer=None):
        """Fill self.returns backwards from *next_value*, via GAE or plain
        discounted sums; with popart/valuenorm the stored value predictions are
        denormalized first, and with proper time limits the GAE term is zeroed
        across truncated episodes via bad_masks."""
        if self._use_proper_time_limits:
            if self._use_gae:
                self.value_preds[-1] = next_value
                gae = 0
                for step in reversed(range(self.rewards.shape[0])):
                    if self._use_popart or self._use_valuenorm:
                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[
                            step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step])
                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                        # bad_masks==0 marks a time-limit truncation: reset the GAE accumulator.
                        gae = gae * self.bad_masks[step + 1]
                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                    else:
                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                        gae = gae * self.bad_masks[step + 1]
                        self.returns[step] = gae + self.value_preds[step]
            else:
                self.returns[-1] = next_value
                for step in reversed(range(self.rewards.shape[0])):
                    if self._use_popart:
                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
                            + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])
                    else:
                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
                            + (1 - self.bad_masks[step + 1]) * self.value_preds[step]
        else:
            if self._use_gae:
                self.value_preds[-1] = next_value
                gae = 0
                for step in reversed(range(self.rewards.shape[0])):
                    if self._use_popart or self._use_valuenorm:
                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step])
                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                    else:
                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                        self.returns[step] = gae + self.value_preds[step]
            else:
                self.returns[-1] = next_value
                for step in reversed(range(self.rewards.shape[0])):
                    self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
    def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
        """Yield random minibatches of flattened (T*N) transitions for MLP training."""
        episode_length, n_rollout_threads = self.rewards.shape[0:2]
        batch_size = n_rollout_threads * episode_length
        if mini_batch_size is None:
            assert batch_size >= num_mini_batch, (
                "PPO requires the number of processes ({}) "
                "* number of steps ({}) = {} "
                "to be greater than or equal to the number of PPO mini batches ({})."
                "".format(n_rollout_threads, episode_length, n_rollout_threads * episode_length,
                          num_mini_batch))
            mini_batch_size = batch_size // num_mini_batch
        rand = torch.randperm(batch_size).numpy()
        sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
        share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[2:])
        obs = self.obs[:-1].reshape(-1, *self.obs.shape[2:])
        rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[2:])
        rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[2:])
        actions = self.actions.reshape(-1, self.actions.shape[-1])
        if self.available_actions is not None:
            available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
        value_preds = self.value_preds[:-1].reshape(-1, 1)
        returns = self.returns[:-1].reshape(-1, 1)
        masks = self.masks[:-1].reshape(-1, 1)
        active_masks = self.active_masks[:-1].reshape(-1, 1)
        action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
        advantages = advantages.reshape(-1, 1)
        for indices in sampler:
            # obs size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[index,Dim]
            share_obs_batch = share_obs[indices]
            obs_batch = obs[indices]
            rnn_states_batch = rnn_states[indices]
            rnn_states_critic_batch = rnn_states_critic[indices]
            actions_batch = actions[indices]
            if self.available_actions is not None:
                available_actions_batch = available_actions[indices]
            else:
                available_actions_batch = None
            value_preds_batch = value_preds[indices]
            return_batch = returns[indices]
            masks_batch = masks[indices]
            active_masks_batch = active_masks[indices]
            old_action_log_probs_batch = action_log_probs[indices]
            if advantages is None:
                adv_targ = None
            else:
                adv_targ = advantages[indices]
            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
    def naive_recurrent_generator(self, advantages, num_mini_batch):
        """Yield minibatches of whole per-thread episodes for non-chunked RNN training."""
        n_rollout_threads = self.rewards.shape[1]
        assert n_rollout_threads >= num_mini_batch, (
            "PPO requires the number of processes ({}) "
            "to be greater than or equal to the number of "
            "PPO mini batches ({}).".format(n_rollout_threads, num_mini_batch))
        num_envs_per_batch = n_rollout_threads // num_mini_batch
        perm = torch.randperm(n_rollout_threads).numpy()
        for start_ind in range(0, n_rollout_threads, num_envs_per_batch):
            share_obs_batch = []
            obs_batch = []
            rnn_states_batch = []
            rnn_states_critic_batch = []
            actions_batch = []
            available_actions_batch = []
            value_preds_batch = []
            return_batch = []
            masks_batch = []
            active_masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []
            for offset in range(num_envs_per_batch):
                ind = perm[start_ind + offset]
                # Whole episode for one thread; only the initial RNN state is kept.
                share_obs_batch.append(self.share_obs[:-1, ind])
                obs_batch.append(self.obs[:-1, ind])
                rnn_states_batch.append(self.rnn_states[0:1, ind])
                rnn_states_critic_batch.append(self.rnn_states_critic[0:1, ind])
                actions_batch.append(self.actions[:, ind])
                if self.available_actions is not None:
                    available_actions_batch.append(self.available_actions[:-1, ind])
                value_preds_batch.append(self.value_preds[:-1, ind])
                return_batch.append(self.returns[:-1, ind])
                masks_batch.append(self.masks[:-1, ind])
                active_masks_batch.append(self.active_masks[:-1, ind])
                old_action_log_probs_batch.append(self.action_log_probs[:, ind])
                adv_targ.append(advantages[:, ind])
            # [N[T, dim]]
            T, N = self.episode_length, num_envs_per_batch
            # These are all from_numpys of size (T, N, -1)
            share_obs_batch = np.stack(share_obs_batch, 1)
            obs_batch = np.stack(obs_batch, 1)
            actions_batch = np.stack(actions_batch, 1)
            if self.available_actions is not None:
                available_actions_batch = np.stack(available_actions_batch, 1)
            value_preds_batch = np.stack(value_preds_batch, 1)
            return_batch = np.stack(return_batch, 1)
            masks_batch = np.stack(masks_batch, 1)
            active_masks_batch = np.stack(active_masks_batch, 1)
            old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
            adv_targ = np.stack(adv_targ, 1)
            # States is just a (N, -1) from_numpy [N[1,dim]]
            rnn_states_batch = np.stack(rnn_states_batch, 1).reshape(N, *self.rnn_states.shape[2:])
            rnn_states_critic_batch = np.stack(rnn_states_critic_batch, 1).reshape(N, *self.rnn_states_critic.shape[2:])
            # Flatten the (T, N, ...) from_numpys to (T * N, ...)
            share_obs_batch = _flatten(T, N, share_obs_batch)
            obs_batch = _flatten(T, N, obs_batch)
            actions_batch = _flatten(T, N, actions_batch)
            if self.available_actions is not None:
                available_actions_batch = _flatten(T, N, available_actions_batch)
            else:
                available_actions_batch = None
            value_preds_batch = _flatten(T, N, value_preds_batch)
            return_batch = _flatten(T, N, return_batch)
            masks_batch = _flatten(T, N, masks_batch)
            active_masks_batch = _flatten(T, N, active_masks_batch)
            old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
            adv_targ = _flatten(T, N, adv_targ)
            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
    def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
        """Yield minibatches of fixed-length sequence chunks for chunked RNN training."""
        episode_length, n_rollout_threads = self.rewards.shape[0:2]
        batch_size = n_rollout_threads * episode_length
        data_chunks = batch_size // data_chunk_length  # [C=r*T/L]
        mini_batch_size = data_chunks // num_mini_batch
        assert episode_length * n_rollout_threads >= data_chunk_length, (
            "PPO requires the number of processes ({}) * episode length ({}) "
            "to be greater than or equal to the number of "
            "data chunk length ({}).".format(n_rollout_threads, episode_length, data_chunk_length))
        assert data_chunks >= 2, ("need larger batch size")
        rand = torch.randperm(data_chunks).numpy()
        sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
        # Reorder to (thread, time, ...) so each chunk is contiguous within one thread.
        if len(self.share_obs.shape) > 3:
            share_obs = self.share_obs[:-1].transpose(1, 0, 2, 3, 4).reshape(-1, *self.share_obs.shape[2:])
            obs = self.obs[:-1].transpose(1, 0, 2, 3, 4).reshape(-1, *self.obs.shape[2:])
        else:
            share_obs = _cast(self.share_obs[:-1])
            obs = _cast(self.obs[:-1])
        actions = _cast(self.actions)
        action_log_probs = _cast(self.action_log_probs)
        advantages = _cast(advantages)
        value_preds = _cast(self.value_preds[:-1])
        returns = _cast(self.returns[:-1])
        masks = _cast(self.masks[:-1])
        active_masks = _cast(self.active_masks[:-1])
        # rnn_states = _cast(self.rnn_states[:-1])
        # rnn_states_critic = _cast(self.rnn_states_critic[:-1])
        rnn_states = self.rnn_states[:-1].transpose(1, 0, 2, 3).reshape(-1, *self.rnn_states.shape[2:])
        rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 0, 2, 3).reshape(-1, *self.rnn_states_critic.shape[2:])
        if self.available_actions is not None:
            available_actions = _cast(self.available_actions[:-1])
        for indices in sampler:
            share_obs_batch = []
            obs_batch = []
            rnn_states_batch = []
            rnn_states_critic_batch = []
            actions_batch = []
            available_actions_batch = []
            value_preds_batch = []
            return_batch = []
            masks_batch = []
            active_masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []
            for index in indices:
                ind = index * data_chunk_length
                # size [T+1 N M Dim]-->[T N Dim]-->[N T Dim]-->[T*N,Dim]-->[L,Dim]
                share_obs_batch.append(share_obs[ind:ind+data_chunk_length])
                obs_batch.append(obs[ind:ind+data_chunk_length])
                actions_batch.append(actions[ind:ind+data_chunk_length])
                if self.available_actions is not None:
                    available_actions_batch.append(available_actions[ind:ind+data_chunk_length])
                value_preds_batch.append(value_preds[ind:ind+data_chunk_length])
                return_batch.append(returns[ind:ind+data_chunk_length])
                masks_batch.append(masks[ind:ind+data_chunk_length])
                active_masks_batch.append(active_masks[ind:ind+data_chunk_length])
                old_action_log_probs_batch.append(action_log_probs[ind:ind+data_chunk_length])
                adv_targ.append(advantages[ind:ind+data_chunk_length])
                # size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[1,Dim]
                rnn_states_batch.append(rnn_states[ind])
                rnn_states_critic_batch.append(rnn_states_critic[ind])
            L, N = data_chunk_length, mini_batch_size
            # These are all from_numpys of size (N, L, Dim)
            # NOTE(review): np.stack here defaults to axis 0, giving (N, L, Dim),
            # but _flatten(L, N, ...) below assumes an (L, N, ...) layout; the
            # shared-buffer variant of this method stacks with axis=1. The
            # reshape sizes match either way, but the row ordering differs —
            # verify this is intentional before relying on chunk ordering.
            share_obs_batch = np.stack(share_obs_batch)
            obs_batch = np.stack(obs_batch)
            actions_batch = np.stack(actions_batch)
            if self.available_actions is not None:
                available_actions_batch = np.stack(available_actions_batch)
            value_preds_batch = np.stack(value_preds_batch)
            return_batch = np.stack(return_batch)
            masks_batch = np.stack(masks_batch)
            active_masks_batch = np.stack(active_masks_batch)
            old_action_log_probs_batch = np.stack(old_action_log_probs_batch)
            adv_targ = np.stack(adv_targ)
            # States is just a (N, -1) from_numpy
            rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[2:])
            rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[2:])
            # Flatten the (L, N, ...) from_numpys to (L * N, ...)
            share_obs_batch = _flatten(L, N, share_obs_batch)
            obs_batch = _flatten(L, N, obs_batch)
            actions_batch = _flatten(L, N, actions_batch)
            if self.available_actions is not None:
                available_actions_batch = _flatten(L, N, available_actions_batch)
            else:
                available_actions_batch = None
            value_preds_batch = _flatten(L, N, value_preds_batch)
            return_batch = _flatten(L, N, return_batch)
            masks_batch = _flatten(L, N, masks_batch)
            active_masks_batch = _flatten(L, N, active_masks_batch)
            old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
            adv_targ = _flatten(L, N, adv_targ)
            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
| 21,466 | 53.484772 | 231 | py |
CADP | CADP-main/CADP-VD/src/main.py | import numpy as np
import os
import collections
from os.path import dirname, abspath
from copy import deepcopy
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import sys
import torch as th
from utils.logging import get_logger
import yaml
from run import run
# Sacred experiment setup: capture stdout/stderr at the file-descriptor level.
SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
logger = get_logger()
# The sacred Experiment object drives config capture and dispatches my_main below.
ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds
# All results (sacred run files, tensorboard logs) live under <repo root>/results.
results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")
@ex.main
def my_main(_run, _config, _log):
    """Sacred entry point: seed all RNGs from the config, then launch the run."""
    # Setting the random seed throughout the modules
    cfg = config_copy(_config)
    seed = cfg["seed"]
    np.random.seed(seed)
    th.manual_seed(seed)
    cfg['env_args']['seed'] = seed
    # run the framework
    run(_run, cfg, _log)
def _get_config(params, arg_name, subfolder):
    """Pop ``arg_name=<name>`` from the CLI params and load config/<subfolder>/<name>.yaml.

    :param params: (list) mutable CLI argument list; the matched entry is removed.
    :param arg_name: (str) flag to look for, e.g. "--config".
    :param subfolder: (str) subdirectory of config/ to load from.
    :return: (dict) parsed YAML config, or None when the flag is absent.
    """
    config_name = None
    for _i, _v in enumerate(params):
        if _v.split("=")[0] == arg_name:
            config_name = _v.split("=")[1]
            del params[_i]
            break

    if config_name is not None:
        with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)), "r") as f:
            try:
                # Fix: yaml.load without an explicit Loader is deprecated and
                # raises TypeError on PyYAML >= 6. FullLoader keeps the prior
                # parsing behavior while avoiding arbitrary object construction.
                config_dict = yaml.load(f, Loader=yaml.FullLoader)
            except yaml.YAMLError as exc:
                assert False, "{}.yaml error: {}".format(config_name, exc)
        return config_dict
def recursive_dict_update(d, u):
    """Deep-merge mapping *u* into dict *d* (in place) and return *d*.

    Nested mappings are merged recursively; any other value in *u* overwrites
    the corresponding entry in *d*.
    """
    # Fix: collections.Mapping was an alias removed in Python 3.10; the ABC
    # lives in collections.abc. Imported locally so this edit is self-contained.
    from collections.abc import Mapping
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = recursive_dict_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def config_copy(config):
    """Recursively deep-copy a config made of nested dicts, lists and leaf values."""
    if isinstance(config, dict):
        copied = {}
        for key, value in config.items():
            copied[key] = config_copy(value)
        return copied
    if isinstance(config, list):
        return [config_copy(item) for item in config]
    return deepcopy(config)
if __name__ == '__main__':
    # Work on a copy so _get_config can pop its flags without touching sys.argv.
    params = deepcopy(sys.argv)

    # Get the defaults from default.yaml
    with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
        try:
            # Fix: yaml.load without an explicit Loader is deprecated and a
            # TypeError on PyYAML >= 6; FullLoader preserves prior behavior.
            config_dict = yaml.load(f, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            assert False, "default.yaml error: {}".format(exc)

    # Load algorithm and env base configs
    env_config = _get_config(params, "--env-config", "envs")
    alg_config = _get_config(params, "--config", "algs")
    # config_dict = {**config_dict, **env_config, **alg_config}
    config_dict = recursive_dict_update(config_dict, env_config)
    config_dict = recursive_dict_update(config_dict, alg_config)

    # now add all the config to sacred
    ex.add_config(config_dict)

    # Save to disk by default for sacred
    logger.info("Saving to FileStorageObserver in results/sacred.")
    file_obs_path = os.path.join(results_path, "sacred")
    ex.observers.append(FileStorageObserver.create(file_obs_path))

    ex.run_commandline(params)
| 3,050 | 29.51 | 121 | py |
CADP | CADP-main/CADP-VD/src/run.py | import datetime
import os
import pprint
import time
import threading
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger
from utils.timehelper import time_left, time_str
from os.path import dirname, abspath
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
def run(_run, _config, _log):
    """Sacred entry point: sanity-check the config, wire up logging, run
    training, then force-exit the process.

    Args:
        _run: sacred Run object for this experiment (forwarded to the logger).
        _config: dict of all experiment parameters assembled in __main__.
        _log: logger instance provided by sacred.
    """
    # check args sanity
    _config = args_sanity_check(_config, _log)
    # Namespace access (args.foo) is used everywhere downstream.
    args = SN(**_config)
    args.device = "cuda" if args.use_cuda else "cpu"
    # setup loggers
    logger = Logger(_log)
    _log.info("Experiment Parameters:")
    experiment_params = pprint.pformat(_config,
                                       indent=4,
                                       width=1)
    _log.info("\n\n" + experiment_params + "\n")
    # configure tensorboard logger; unique_token also names the model save dir
    unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    args.unique_token = unique_token
    if args.use_tensorboard:
        tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "tb_logs")
        tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
        logger.setup_tb(tb_exp_direc)
    # sacred is on by default
    logger.setup_sacred(_run)
    # Run and train
    run_sequential(args=args, logger=logger)
    # Clean up after finishing
    print("Exiting Main")
    print("Stopping all threads")
    # Give any lingering non-main threads a bounded chance to finish.
    for t in threading.enumerate():
        if t.name != "MainThread":
            print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
            t.join(timeout=1)
            print("Thread joined")
    print("Exiting script")
    # Making sure framework really exits
    # NOTE(review): os._exit skips atexit/cleanup handlers, and os.EX_OK is
    # POSIX-only (absent on Windows) — confirm this trade-off is intended.
    os._exit(os.EX_OK)
def evaluate_sequential(args, runner):
    """Run `args.test_nepisode` evaluation episodes, optionally save a
    replay, then close the environment."""
    episodes_left = args.test_nepisode
    while episodes_left > 0:
        runner.run(test_mode=True)
        episodes_left -= 1
    if args.save_replay:
        runner.save_replay()
    runner.close_env()
def run_sequential(args, logger):
    """Build the runner, replay buffer, controller (mac) and learner, then
    train until `args.t_max` environment steps.

    Also handles: loading from `args.checkpoint_path` (optionally switching to
    pure evaluation), periodic test runs, periodic model saving and stat
    logging.

    Args:
        args: SimpleNamespace of experiment parameters (mutated here with
            env-derived fields: n_agents, n_actions, state_shape, unit_dim).
        logger: utils.logging.Logger wrapper (console / tb / sacred).
    """
    # Init runner so we can get env info
    runner = r_REGISTRY[args.runner](args=args, logger=logger)
    # Set up schemes and groups here
    env_info = runner.get_env_info()
    args.n_agents = env_info["n_agents"]
    args.n_actions = env_info["n_actions"]
    args.state_shape = env_info["state_shape"]
    args.unit_dim = env_info["unit_dim"]
    # Default/Base scheme: field name -> per-timestep tensor layout.
    scheme = {
        "state": {"vshape": env_info["state_shape"]},
        "obs": {"vshape": env_info["obs_shape"], "group": "agents"},
        "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
        "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
        "reward": {"vshape": (1,)},
        "terminated": {"vshape": (1,), "dtype": th.uint8},
    }
    groups = {
        "agents": args.n_agents
    }
    # Actions are additionally stored one-hot for the learner/mixer.
    preprocess = {
        "actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
    }
    # +1 on episode_limit leaves room for the terminal transition.
    buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
                          preprocess=preprocess,
                          device="cpu" if args.buffer_cpu_only else args.device)
    # Setup multiagent controller here
    mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
    # Give runner the scheme
    runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
    # Learner
    learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
    if args.use_cuda:
        learner.cuda()
    if args.checkpoint_path != "":
        timesteps = []
        timestep_to_load = 0
        if not os.path.isdir(args.checkpoint_path):
            logger.console_logger.info("Checkpoint directiory {} doesn't exist".format(args.checkpoint_path))
            return
        # Go through all files in args.checkpoint_path
        for name in os.listdir(args.checkpoint_path):
            full_name = os.path.join(args.checkpoint_path, name)
            # Check if they are dirs the names of which are numbers
            if os.path.isdir(full_name) and name.isdigit():
                timesteps.append(int(name))
        if args.load_step == 0:
            # choose the max timestep
            timestep_to_load = max(timesteps)
        else:
            # choose the timestep closest to load_step
            timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
        model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
        logger.console_logger.info("Loading model from {}".format(model_path))
        learner.load_models(model_path)
        # Resume step counting from the loaded checkpoint.
        runner.t_env = timestep_to_load
        if args.evaluate or args.save_replay:
            evaluate_sequential(args, runner)
            return
    # start training
    episode = 0
    # Forces a test pass on the very first loop iteration.
    last_test_T = -args.test_interval - 1
    last_log_T = 0
    model_save_time = 0
    start_time = time.time()
    last_time = start_time
    logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
    while runner.t_env <= args.t_max:
        # Run for a whole episode at a time
        episode_batch = runner.run(test_mode=False)
        buffer.insert_episode_batch(episode_batch)
        if buffer.can_sample(args.batch_size):
            episode_sample = buffer.sample(args.batch_size)
            # Truncate batch to only filled timesteps
            max_ep_t = episode_sample.max_t_filled()
            episode_sample = episode_sample[:, :max_ep_t]
            if episode_sample.device != args.device:
                episode_sample.to(args.device)
            learner.train(episode_sample, runner.t_env, episode)
        # Execute test runs once in a while
        n_test_runs = max(1, args.test_nepisode // runner.batch_size)
        if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
            logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
            logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
                time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
            last_time = time.time()
            last_test_T = runner.t_env
            # CADP runs evaluate both heads: the "student" (q_v, attention on
            # own values) and the "teacher" (full attention output).
            if ("CADP" in args.name):
                mac.agent.use_q_v = True
                for _ in range(n_test_runs):
                    runner.run(test_mode=True,which='student')
                mac.agent.use_q_v = False
                for _ in range(n_test_runs):
                    runner.run(test_mode=True,which='teacher')
            else:
                for _ in range(n_test_runs):
                    runner.run(test_mode=True)
        if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):
            model_save_time = runner.t_env
            save_path = os.path.join(args.local_results_path, "models", args.unique_token, str(runner.t_env))
            os.makedirs(save_path, exist_ok=True)
            logger.console_logger.info("Saving models to {}".format(save_path))
            # learner should handle saving/loading -- delegate actor save/load to mac,
            # use appropriate filenames to do critics, optimizer states
            learner.save_models(save_path)
        episode += args.batch_size_run
        if (runner.t_env - last_log_T) >= args.log_interval:
            logger.log_stat("episode", episode, runner.t_env)
            logger.print_recent_stats()
            last_log_T = runner.t_env
    runner.close_env()
    logger.console_logger.info("Finished Training")
def args_sanity_check(config, _log):
    """Validate and patch the experiment config in place, returning it.

    - Disables CUDA when no device is available.
    - Rounds `test_nepisode` to a multiple of `batch_size_run` (at least one
      batch worth of episodes).
    """
    # set CUDA flags
    if config["use_cuda"] and not th.cuda.is_available():
        config["use_cuda"] = False
        _log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")
    n_test = config["test_nepisode"]
    batch_run = config["batch_size_run"]
    if n_test < batch_run:
        config["test_nepisode"] = batch_run
    else:
        config["test_nepisode"] = (n_test // batch_run) * batch_run
    return config
| 8,281 | 33.65272 | 116 | py |
CADP | CADP-main/CADP-VD/src/modules/mixers/qmix.py | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class QMixer(nn.Module):
    """QMIX mixing network.

    Combines per-agent Q-values into Q_tot through a two-layer feed-forward
    mixer whose weights are produced by state-conditioned hypernetworks.
    Absolute values on the mixing weights enforce monotonicity of Q_tot in
    each agent's Q.
    """

    def __init__(self, args):
        super(QMixer, self).__init__()
        self.args = args
        self.n_agents = args.n_agents
        self.state_dim = int(np.prod(args.state_shape))
        self.embed_dim = args.mixing_embed_dim
        num_hyper_layers = getattr(args, "hypernet_layers", 1)
        if num_hyper_layers == 1:
            self.hyper_w_1 = nn.Linear(self.state_dim, self.embed_dim * self.n_agents)
            self.hyper_w_final = nn.Linear(self.state_dim, self.embed_dim)
        elif num_hyper_layers == 2:
            hypernet_embed = self.args.hypernet_embed
            self.hyper_w_1 = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
                                           nn.ReLU(),
                                           nn.Linear(hypernet_embed, self.embed_dim * self.n_agents))
            self.hyper_w_final = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
                                               nn.ReLU(),
                                               nn.Linear(hypernet_embed, self.embed_dim))
        elif num_hyper_layers > 2:
            raise Exception("Sorry >2 hypernet layers is not implemented!")
        else:
            raise Exception("Error setting number of hypernet layers.")
        # State dependent bias for hidden layer
        self.hyper_b_1 = nn.Linear(self.state_dim, self.embed_dim)
        # V(s) instead of a bias for the last layers
        self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),
                               nn.ReLU(),
                               nn.Linear(self.embed_dim, 1))

    def forward(self, agent_qs, states):
        """Mix agent Qs (bs, T, n_agents) with states (bs, T, state_dim)
        into Q_tot of shape (bs, T, 1)."""
        batch = agent_qs.size(0)
        flat_state = states.reshape(-1, self.state_dim)
        qs = agent_qs.view(-1, 1, self.n_agents)
        # First mixing layer: non-negative weights, unconstrained bias.
        w1 = th.abs(self.hyper_w_1(flat_state)).view(-1, self.n_agents, self.embed_dim)
        b1 = self.hyper_b_1(flat_state).view(-1, 1, self.embed_dim)
        hidden = F.elu(th.bmm(qs, w1) + b1)
        # Second mixing layer plus state-value bias.
        w2 = th.abs(self.hyper_w_final(flat_state)).view(-1, self.embed_dim, 1)
        state_value = self.V(flat_state).view(-1, 1, 1)
        mixed = th.bmm(hidden, w2) + state_value
        return mixed.view(batch, -1, 1)
| 2,505 | 40.081967 | 101 | py |
CADP | CADP-main/CADP-VD/src/modules/mixers/vdn.py | import torch as th
import torch.nn as nn
class VDNMixer(nn.Module):
    """Value-Decomposition Network mixer: Q_tot is the plain sum of the
    per-agent Q-values along the agent dimension."""

    def __init__(self):
        super(VDNMixer, self).__init__()

    def forward(self, agent_qs, batch):
        # `batch` is unused; it keeps the signature compatible with other
        # mixers that condition on the state.
        total_q = agent_qs.sum(dim=2, keepdim=True)
        return total_q
CADP | CADP-main/CADP-VD/src/modules/mixers/qplex.py | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class DMAQ_QattenMixer(nn.Module):
    """QPLEX duplex-dueling mixer.

    Transforms per-agent Qs with attention-derived weights (Qatten_Weight),
    then returns either the value term V (sum of transformed Qs) or the
    weighted advantage term, plus the attention regulariser and per-head
    entropies.
    """

    def __init__(self, args):
        super(DMAQ_QattenMixer, self).__init__()
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        self.state_dim = int(np.prod(args.state_shape))
        self.action_dim = args.n_agents * self.n_actions
        self.state_action_dim = self.state_dim + self.action_dim + 1
        self.attention_weight = Qatten_Weight(args)
        self.si_weight = DMAQ_SI_Weight(args)

    def calc_v(self, agent_qs):
        """Value term: sum of (transformed) agent Qs, shape (bs*T,)."""
        return agent_qs.view(-1, self.n_agents).sum(dim=-1)

    def calc_adv(self, agent_qs, states, actions, max_q_i):
        """Advantage term; the raw advantage (q - max_q) is detached so
        gradients flow only through the importance weights."""
        flat_states = states.reshape(-1, self.state_dim)
        flat_actions = actions.reshape(-1, self.action_dim)
        qs = agent_qs.view(-1, self.n_agents)
        q_max = max_q_i.view(-1, self.n_agents)
        adv_q = (qs - q_max).view(-1, self.n_agents).clone().detach()
        adv_w_final = self.si_weight(flat_states, flat_actions).view(-1, self.n_agents)
        if self.args.is_minus_one:
            return th.sum(adv_q * (adv_w_final - 1.), dim=1)
        return th.sum(adv_q * adv_w_final, dim=1)

    def calc(self, agent_qs, states, actions=None, max_q_i=None, is_v=False):
        """Dispatch to the value or the advantage computation."""
        if is_v:
            return self.calc_v(agent_qs)
        return self.calc_adv(agent_qs, states, actions, max_q_i)

    def forward(self, agent_qs, states, actions=None, max_q_i=None, is_v=False):
        """Return (term, attention_regulariser, head_entropies) where `term`
        has shape (bs, T, 1)."""
        bs = agent_qs.size(0)
        w_final, v, attend_mag_regs, head_entropies = self.attention_weight(agent_qs, states, actions)
        # Small epsilon keeps the attention weights strictly positive.
        w_final = w_final.view(-1, self.n_agents) + 1e-10
        # Spread the state value evenly across agents.
        v = v.view(-1, 1).repeat(1, self.n_agents)
        v /= self.n_agents
        agent_qs = w_final * agent_qs.view(-1, self.n_agents) + v
        if not is_v:
            max_q_i = w_final * max_q_i.view(-1, self.n_agents) + v
        y = self.calc(agent_qs, states, actions=actions, max_q_i=max_q_i, is_v=is_v)
        return y.view(bs, -1, 1), attend_mag_regs, head_entropies
class Qatten_Weight(nn.Module):
    """Multi-head attention that produces per-agent mixing weights and a
    state value for the QPLEX/Qatten mixer.

    Queries come from the global state; keys come from each agent's own slice
    of the state (optionally concatenated with its Q when `args.nonlinear`).
    Returns (weights (bs, n_agents), v (bs, 1), attention-magnitude
    regulariser, per-head attention entropies).
    """

    def __init__(self, args):
        super(Qatten_Weight, self).__init__()
        self.name = 'qatten_weight'
        self.args = args
        self.n_agents = args.n_agents
        self.state_dim = int(np.prod(args.state_shape))
        # NOTE(review): assumes the state layout begins with unit_dim features
        # per agent (agent-own features first) — confirm against the env.
        self.unit_dim = args.unit_dim
        self.n_actions = args.n_actions
        self.sa_dim = self.state_dim + self.n_agents * self.n_actions
        self.n_head = args.n_head # attention head num
        self.embed_dim = args.mixing_embed_dim
        self.attend_reg_coef = args.attend_reg_coef
        self.key_extractors = nn.ModuleList()
        self.selector_extractors = nn.ModuleList()
        hypernet_embed = self.args.hypernet_embed
        for i in range(self.n_head):  # multi-head attention
            selector_nn = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
                                        nn.ReLU(),
                                        nn.Linear(hypernet_embed, self.embed_dim, bias=False))
            self.selector_extractors.append(selector_nn)  # query
            if self.args.nonlinear:  # add qs
                self.key_extractors.append(nn.Linear(self.unit_dim + 1, self.embed_dim, bias=False))  # key
            else:
                self.key_extractors.append(nn.Linear(self.unit_dim, self.embed_dim, bias=False))  # key
        if self.args.weighted_head:
            # Per-head positive scaling of the attention weights.
            self.hyper_w_head = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed),
                                              nn.ReLU(),
                                              nn.Linear(hypernet_embed, self.n_head))
        # V(s) instead of a bias for the last layers
        self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),
                               nn.ReLU(),
                               nn.Linear(self.embed_dim, 1))

    def forward(self, agent_qs, states, actions):
        """Compute attention weights over agents given state (and actions,
        used only for dead-unit masking when `args.mask_dead`)."""
        states = states.reshape(-1, self.state_dim)
        unit_states = states[:, : self.unit_dim * self.n_agents]  # get agent own features from state
        unit_states = unit_states.reshape(-1, self.n_agents, self.unit_dim)
        unit_states = unit_states.permute(1, 0, 2)
        agent_qs = agent_qs.view(-1, 1, self.n_agents)  # agent_qs: (batch_size, 1, agent_num)
        if self.args.nonlinear:
            # Append each agent's Q to its key input.
            unit_states = th.cat((unit_states, agent_qs.permute(2, 0, 1)), dim=2)
        # states: (batch_size, state_dim)
        all_head_selectors = [sel_ext(states) for sel_ext in self.selector_extractors]
        # all_head_selectors: (head_num, batch_size, embed_dim)
        # unit_states: (agent_num, batch_size, unit_dim)
        all_head_keys = [[k_ext(enc) for enc in unit_states] for k_ext in self.key_extractors]
        # all_head_keys: (head_num, agent_num, batch_size, embed_dim)
        # calculate attention per head
        head_attend_logits = []
        head_attend_weights = []
        for curr_head_keys, curr_head_selector in zip(all_head_keys, all_head_selectors):
            # curr_head_keys: (agent_num, batch_size, embed_dim)
            # curr_head_selector: (batch_size, embed_dim)
            # (batch_size, 1, embed_dim) * (batch_size, embed_dim, agent_num)
            attend_logits = th.matmul(curr_head_selector.view(-1, 1, self.embed_dim),
                                      th.stack(curr_head_keys).permute(1, 2, 0))
            # attend_logits: (batch_size, 1, agent_num)
            # scale dot-products by size of key (from Attention is All You Need)
            scaled_attend_logits = attend_logits / np.sqrt(self.embed_dim)
            if self.args.mask_dead:
                # actions: (episode_batch, episode_length - 1, agent_num, 1)
                actions = actions.reshape(-1, 1, self.n_agents)
                # actions: (batch_size, 1, agent_num)
                scaled_attend_logits[actions == 0] = -99999999  # action == 0 means the unit is dead
            attend_weights = F.softmax(scaled_attend_logits, dim=2)  # (batch_size, 1, agent_num)
            head_attend_logits.append(attend_logits)
            head_attend_weights.append(attend_weights)
        head_attend = th.stack(head_attend_weights, dim=1)  # (batch_size, self.n_head, self.n_agents)
        head_attend = head_attend.view(-1, self.n_head, self.n_agents)
        v = self.V(states).view(-1, 1)  # v: (bs, 1)
        # head_qs: [head_num, bs, 1]
        if self.args.weighted_head:
            w_head = th.abs(self.hyper_w_head(states))  # w_head: (bs, head_num)
            w_head = w_head.view(-1, self.n_head, 1).repeat(1, 1, self.n_agents)  # w_head: (bs, head_num, self.n_agents)
            head_attend *= w_head
        # Sum the (possibly re-weighted) heads into one weight per agent.
        head_attend = th.sum(head_attend, dim=1)
        if not self.args.state_bias:
            # Zero out the state value while keeping it in the graph.
            v *= 0.
        # regularize magnitude of attention logits
        attend_mag_regs = self.attend_reg_coef * sum((logit ** 2).mean() for logit in head_attend_logits)
        head_entropies = [(-((probs + 1e-8).log() * probs).squeeze().sum(1).mean()) for probs in head_attend_weights]
        return head_attend, v, attend_mag_regs, head_entropies
class DMAQ_SI_Weight(nn.Module):
    """State/action-conditioned advantage weights for the QPLEX mixer.

    Produces one strictly-positive weight per agent as a sum over
    `args.num_kernel` multiplicative heads:
    |key(s)| * sigmoid(agents(s)) * sigmoid(action(s, a)).
    Each extractor is an MLP with 1, 2 or 3 layers depending on
    `args.adv_hypernet_layers`.
    """

    def __init__(self, args):
        super(DMAQ_SI_Weight, self).__init__()
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        self.state_dim = int(np.prod(args.state_shape))
        self.action_dim = args.n_agents * self.n_actions
        self.state_action_dim = self.state_dim + self.action_dim
        self.num_kernel = args.num_kernel
        self.key_extractors = nn.ModuleList()
        self.agents_extractors = nn.ModuleList()
        self.action_extractors = nn.ModuleList()
        adv_hypernet_embed = self.args.adv_hypernet_embed
        for i in range(self.num_kernel):  # multi-head attention
            if getattr(args, "adv_hypernet_layers", 1) == 1:
                self.key_extractors.append(nn.Linear(self.state_dim, 1))  # key
                self.agents_extractors.append(nn.Linear(self.state_dim, self.n_agents))  # agent
                self.action_extractors.append(nn.Linear(self.state_action_dim, self.n_agents))  # action
            elif getattr(args, "adv_hypernet_layers", 1) == 2:
                self.key_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
                                                         nn.ReLU(),
                                                         nn.Linear(adv_hypernet_embed, 1)))  # key
                self.agents_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
                                                            nn.ReLU(),
                                                            nn.Linear(adv_hypernet_embed, self.n_agents)))  # agent
                self.action_extractors.append(nn.Sequential(nn.Linear(self.state_action_dim, adv_hypernet_embed),
                                                            nn.ReLU(),
                                                            nn.Linear(adv_hypernet_embed, self.n_agents)))  # action
            elif getattr(args, "adv_hypernet_layers", 1) == 3:
                self.key_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
                                                         nn.ReLU(),
                                                         nn.Linear(adv_hypernet_embed, adv_hypernet_embed),
                                                         nn.ReLU(),
                                                         nn.Linear(adv_hypernet_embed, 1)))  # key
                self.agents_extractors.append(nn.Sequential(nn.Linear(self.state_dim, adv_hypernet_embed),
                                                            nn.ReLU(),
                                                            nn.Linear(adv_hypernet_embed, adv_hypernet_embed),
                                                            nn.ReLU(),
                                                            nn.Linear(adv_hypernet_embed, self.n_agents)))  # agent
                self.action_extractors.append(nn.Sequential(nn.Linear(self.state_action_dim, adv_hypernet_embed),
                                                            nn.ReLU(),
                                                            nn.Linear(adv_hypernet_embed, adv_hypernet_embed),
                                                            nn.ReLU(),
                                                            nn.Linear(adv_hypernet_embed, self.n_agents)))  # action
            else:
                raise Exception("Error setting number of adv hypernet layers.")

    def forward(self, states, actions):
        """Return per-agent weights of shape (bs, n_agents)."""
        states = states.reshape(-1, self.state_dim)
        actions = actions.reshape(-1, self.action_dim)
        data = th.cat([states, actions], dim=1)
        all_head_key = [k_ext(states) for k_ext in self.key_extractors]
        all_head_agents = [k_ext(states) for k_ext in self.agents_extractors]
        all_head_action = [sel_ext(data) for sel_ext in self.action_extractors]
        head_attend_weights = []
        for curr_head_key, curr_head_agents, curr_head_action in zip(all_head_key, all_head_agents, all_head_action):
            # |key| keeps the head positive; 1e-10 avoids exact zeros.
            x_key = th.abs(curr_head_key).repeat(1, self.n_agents) + 1e-10
            # th.sigmoid replaces the deprecated F.sigmoid (same numerics).
            x_agents = th.sigmoid(curr_head_agents)
            x_action = th.sigmoid(curr_head_action)
            weights = x_key * x_agents * x_action
            head_attend_weights.append(weights)
        head_attend = th.stack(head_attend_weights, dim=1)
        head_attend = head_attend.view(-1, self.num_kernel, self.n_agents)
        head_attend = th.sum(head_attend, dim=1)
        return head_attend
CADP | CADP-main/CADP-VD/src/modules/agents/rnn_agent.py | import torch.nn as nn
import torch.nn.functional as F
class RNNAgent(nn.Module):
    """Per-agent recurrent Q-network: Linear -> ReLU -> GRUCell -> Linear,
    mapping an observation plus previous hidden state to per-action
    Q-values and the next hidden state."""

    def __init__(self, input_shape, args):
        super(RNNAgent, self).__init__()
        self.args = args
        self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)

    def init_hidden(self):
        """Return a zeroed (1, rnn_hidden_dim) hidden state on the same
        device/dtype as the model parameters."""
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()

    def forward(self, inputs, hidden_state):
        """Return (q_values, next_hidden) for a batch of observations."""
        embedded = F.relu(self.fc1(inputs))
        prev_h = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        next_h = self.rnn(embedded, prev_h)
        q_values = self.fc2(next_h)
        return q_values, next_h
| 770 | 31.125 | 71 | py |
CADP | CADP-main/CADP-VD/src/modules/agents/atten_rnn_agent.py | import torch.nn as nn
import torch.nn.functional as F
import torch as th
import numpy as np
import torch.nn.init as init
from modules.layers.self_atten import SelfAttention
class ATTRNNAgent(nn.Module):
    """CADP agent: GRU over own observation plus self-attention over all
    agents' observations.

    Computes two Q heads sharing fc_inter/fc_last:
    - `q`   (teacher): GRU state concatenated with the attention output;
    - `q_v` (student): GRU state concatenated with the agent's own value
      projections (``self.att.values``), i.e. without cross-agent mixing.
    ``use_q_v`` selects which head `forward` returns; it is toggled from
    outside (set False here). The last student Qs are kept in ``self.q_v``.
    """

    def __init__(self, input_shape, args):
        super(ATTRNNAgent, self).__init__()
        self.args = args
        self.use_q_v = False
        self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        self.att = SelfAttention(input_shape, args.att_heads, args.att_embed_dim)
        # Projects the (heads * embed) attention output back to hidden size.
        self.fc2 = nn.Linear(args.att_heads * args.att_embed_dim, args.rnn_hidden_dim)
        self.fc_inter = nn.Sequential(nn.Linear(args.rnn_hidden_dim * 2, args.rnn_hidden_dim),
                                      nn.ReLU(inplace=True))
        self.fc_last = nn.Sequential(nn.Linear(args.rnn_hidden_dim, args.rnn_hidden_dim),
                                     nn.ReLU(inplace=True),
                                     nn.Linear(args.rnn_hidden_dim,args.n_actions))

    def init_hidden(self):
        # make hidden states on same device as model
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()

    def forward(self, inputs, hidden_state):
        """Return (q, inter, h), each reshaped to (batch, n_agents, -1).

        `q` is the selected head's Q-values, `inter` the shared intermediate
        features, `h` the next GRU hidden state.
        """
        # INPUT: regroup the flat batch into (batch, n_agents, features)
        e = inputs.shape[-1]
        inputs = inputs.reshape(-1, self.args.n_agents,e)
        b, a, e = inputs.size()
        # RNN over each agent's own observation
        x = F.relu(self.fc1(inputs.view(-1, e)), inplace=True)
        h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        h = self.rnn(x, h_in)
        # ATT: cross-agent attention output, and the agent's own value
        # projections stashed by SelfAttention.forward (self.att.values)
        att = self.att(inputs.view(b, a, -1))
        att = F.relu(self.fc2(att), inplace=True).view(-1, self.args.rnn_hidden_dim)
        att_v = F.relu(self.fc2(self.att.values), inplace=True).view(-1, self.args.rnn_hidden_dim)
        # Q: both heads go through the same fc_inter/fc_last stack
        q = th.cat((h, att), dim=-1)
        q_v = th.cat((h, att_v), dim=-1)
        inter = self.fc_inter(q)
        q = self.fc_last(inter)
        inter_v = self.fc_inter(q_v)
        q_v = self.fc_last(inter_v).view(b,a,-1)
        # Student Qs kept for the distillation loss in the learner
        # (NOTE(review): inferred from CADP usage — confirm).
        self.q_v = q_v
        if self.use_q_v:
            return q_v.view(b, a, -1), inter.view(b, a, -1), h.view(b, a, -1)
        else:
            return q.view(b, a, -1), inter.view(b,a,-1), h.view(b, a, -1)
| 2,252 | 37.844828 | 98 | py |
CADP | CADP-main/CADP-VD/src/modules/layers/self_atten.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention over a set of tokens.

    Input:  x of shape (batch, tokens, input_size).
    Output: attention-weighted values of shape (batch, tokens, heads * embed_size).

    Side effects: ``self.dot`` holds the last attention matrix of shape
    (batch * heads, tokens, tokens) and ``self.values`` the last
    head-concatenated value projections of shape
    (batch, tokens, heads * embed_size); callers read these after ``forward``.
    """

    def __init__(self, input_size, heads, embed_size):
        super().__init__()
        self.input_size = input_size
        self.heads = heads
        self.emb_size = embed_size
        # Separate, bias-free projections for keys / queries / values.
        self.tokeys = nn.Linear(self.input_size, self.emb_size * heads, bias=False)
        self.toqueries = nn.Linear(self.input_size, self.emb_size * heads, bias=False)
        self.tovalues = nn.Linear(self.input_size, self.emb_size * heads, bias=False)

    def forward(self, x):
        b, t, hin = x.size()
        # Bug fix: the original f-string used doubled braces ({{hin}}), so the
        # message printed the literal text "{hin}" instead of the sizes.
        assert hin == self.input_size, f'Input size {hin} should match {self.input_size}'
        h = self.heads
        e = self.emb_size
        keys = self.tokeys(x).view(b, t, h, e)
        queries = self.toqueries(x).view(b, t, h, e)
        values = self.tovalues(x).view(b, t, h, e)
        # dot-product attention; fold heads into the batch dimension
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
        values = values.transpose(1, 2).contiguous().view(b * h, t, e)
        # Scale queries and keys by e**(1/4) each, i.e. their product by
        # sqrt(e), as in "Attention is All You Need".
        queries = queries / (e ** (1 / 4))
        keys = keys / (e ** (1 / 4))
        dot = torch.bmm(queries, keys.transpose(1, 2))
        assert dot.size() == (b * h, t, t)
        # row-wise self-attention probabilities
        dot = F.softmax(dot, dim=2)
        self.dot = dot
        out = torch.bmm(dot, values).view(b, h, t, e)
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        # Stash the per-token value projections (heads concatenated).
        values = values.view(b, h, t, e)
        values = values.transpose(1, 2).contiguous().view(b, t, h * e)
        self.values = values
        return out
CADP | CADP-main/CADP-VD/src/envs/gfootball/gfootball.py | import numpy as np
import gfootball.env as football_env
from gfootball.env import observation_preprocessing
from ..multiagentenv import MultiAgentEnv
import gym
import torch as th
import logging.config
# Disable loggers created before this point (e.g. by the gfootball import)
# so only the framework's own handlers emit output.
logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': True,
})
class GoogleFootballEnv(MultiAgentEnv):
    """MultiAgentEnv wrapper around Google Research Football academy
    scenarios, with CDS-style rewards and termination.

    Supports three maps (agent/enemy counts and the flat obs/state dim are
    hard-coded per map). Observations and the global state are hand-built
    from the raw gfootball observation: the state uses absolute positions,
    each agent's observation uses positions relative to itself.
    """

    def __init__(
            self,
            write_full_episode_dumps=False,
            write_goal_dumps=False,
            dump_freq=0,
            render=False,
            time_limit=150,
            time_step=0,
            map_name='academy_counterattack_easy',
            stacked=False,
            representation="simple115v2",
            rewards='scoring',
            logdir='football_dumps',
            write_video=False,
            number_of_right_players_agent_controls=0,
            seed=0,
    ):
        # Per-map dimensions: obs_dim is the flat size of the hand-built
        # observation/state vectors below.
        if map_name == 'academy_3_vs_1_with_keeper':
            self.obs_dim = 26
            self.n_agents = 3
            self.n_enemies = 2
        elif map_name == 'academy_counterattack_hard':
            self.obs_dim = 34
            self.n_agents = 4
            self.n_enemies = 3
        elif map_name == 'academy_counterattack_easy':
            self.obs_dim = 30
            self.n_agents = 4
            self.n_enemies = 2
        else:
            raise ValueError("Not Support Map")
        self.write_full_episode_dumps = write_full_episode_dumps
        self.write_goal_dumps = write_goal_dumps
        self.dump_freq = dump_freq
        # NOTE(review): this bool attribute shadows the render() method below;
        # instance.render() would hit the attribute, not the method — confirm.
        self.render = render
        self.episode_limit = time_limit
        self.time_step = time_step
        self.env_name = map_name
        self.stacked = stacked
        self.representation = representation
        self.rewards = rewards
        self.logdir = logdir
        self.write_video = write_video
        self.number_of_right_players_agent_controls = number_of_right_players_agent_controls
        # NOTE(review): likewise shadows the seed() method below.
        self.seed = seed
        self.env = football_env.create_environment(
            write_full_episode_dumps=self.write_full_episode_dumps,
            write_goal_dumps=self.write_goal_dumps,
            env_name=self.env_name,
            stacked=self.stacked,
            representation=self.representation,
            rewards=self.rewards,
            logdir=self.logdir,
            render=self.render,
            write_video=self.write_video,
            dump_frequency=self.dump_freq,
            number_of_left_players_agent_controls=self.n_agents,
            number_of_right_players_agent_controls=self.number_of_right_players_agent_controls,
            channel_dimensions=(observation_preprocessing.SMM_WIDTH, observation_preprocessing.SMM_HEIGHT))
        self.env.seed(self.seed)
        # Clip the underlying obs-space bounds to the hand-built obs size.
        obs_space_low = self.env.observation_space.low[0][:self.obs_dim]
        obs_space_high = self.env.observation_space.high[0][:self.obs_dim]
        self.action_space = [gym.spaces.Discrete(
            self.env.action_space.nvec[1]) for _ in range(self.n_agents)]
        self.observation_space = [
            gym.spaces.Box(low=obs_space_low, high=obs_space_high, dtype=self.env.observation_space.dtype) for _ in range(self.n_agents)
        ]
        self.n_actions = self.action_space[0].n
        # Last raw observation returned by env.step (unused by get_obs,
        # which queries env.unwrapped directly).
        self.obs = None

    def check_if_done(self):
        """Return True if the ball or any controlled player is back in our
        half-court (loss condition for half-court offensive scenarios)."""
        cur_obs = self.env.unwrapped.observation()[0]
        ball_loc = cur_obs['ball']
        ours_loc = cur_obs['left_team'][-self.n_agents:]
        if ball_loc[0] < 0 or any(ours_loc[:, 0] < 0):
            """
            This is based on the CDS paper:
            'We make a small and reasonable change to the half-court offensive scenarios: our players will lose if
            they or the ball returns to our half-court.'
            """
            return True
        return False

    def step(self, _actions):
        """Returns reward, terminated, info."""
        if th.is_tensor(_actions):
            actions = _actions.cpu().numpy()
        else:
            actions = _actions
        self.time_step += 1
        obs, rewards, done, info = self.env.step(actions.tolist())
        info["battle_won"] = False
        self.obs = obs
        if self.time_step >= self.episode_limit:
            info["episode_limit"] = True
            done = True
        if self.env_name in ['academy_3_vs_1_with_keeper', 'academy_counterattack_hard', 'academy_counterattack_easy']:
            if self.check_if_done():
                done = True
        """
        This is based on the CDS paper:
        "Environmental reward only occurs at the end of the game.
        They will get +100 if they win, else get -1."
        If done=False, the reward is -1,
        If done=True and sum(rewards)<=0 the reward is 1.
        If done=True and sum(rewards)>0 the reward is 100.
        """
        if sum(rewards) <= 0:
            # -int(done): 0 while running, -1 on a terminal non-scoring step.
            return -int(done), done, info
        info["battle_won"] = True
        return 100, done, info

    def get_simple_obs(self, index=-1):
        """Build the flat observation vector for agent `index`, or the global
        state when index == -1 (absolute positions instead of relative)."""
        full_obs = self.env.unwrapped.observation()[0]
        simple_obs = []
        if self.env_name == 'academy_3_vs_1_with_keeper':
            if index == -1:
                # global state, absolute position
                simple_obs.append(full_obs['left_team'][-self.n_agents:].reshape(-1))
                simple_obs.append(full_obs['left_team_direction'][-self.n_agents:].reshape(-1))
                simple_obs.append(full_obs['right_team'].reshape(-1))
                simple_obs.append(full_obs['right_team_direction'].reshape(-1))
                simple_obs.append(full_obs['ball'])
                simple_obs.append(full_obs['ball_direction'])
            else:
                # local state, relative position
                ego_position = full_obs['left_team'][-self.n_agents + index].reshape(-1)
                simple_obs.append(ego_position)
                simple_obs.append(
                    (np.delete(full_obs['left_team'][-self.n_agents:], index, axis=0) - ego_position).reshape(-1)
                )
                simple_obs.append(full_obs['left_team_direction'][-self.n_agents + index].reshape(-1))
                simple_obs.append(
                    np.delete(full_obs['left_team_direction'][-self.n_agents:], index, axis=0).reshape(-1)
                )
                simple_obs.append((full_obs['right_team'] - ego_position).reshape(-1))
                simple_obs.append(full_obs['right_team_direction'].reshape(-1))
                # Ball: xy relative to ego, absolute z, then velocity.
                simple_obs.append(full_obs['ball'][:2] - ego_position)
                simple_obs.append(full_obs['ball'][-1].reshape(-1))
                simple_obs.append(full_obs['ball_direction'])
        elif self.env_name == 'academy_counterattack_hard':
            if index == -1:
                # global state, absolute position
                simple_obs.append(full_obs['left_team'][-self.n_agents:].reshape(-1))
                simple_obs.append(full_obs['left_team_direction'][-self.n_agents:].reshape(-1))
                simple_obs.append(full_obs['right_team'][0])
                simple_obs.append(full_obs['right_team'][1])
                simple_obs.append(full_obs['right_team'][2])
                simple_obs.append(full_obs['right_team_direction'][0])
                simple_obs.append(full_obs['right_team_direction'][1])
                simple_obs.append(full_obs['right_team_direction'][2])
                simple_obs.append(full_obs['ball'])
                simple_obs.append(full_obs['ball_direction'])
            else:
                # local state, relative position
                ego_position = full_obs['left_team'][-self.n_agents + index].reshape(-1)
                simple_obs.append(ego_position)
                simple_obs.append(
                    (np.delete(full_obs['left_team'][-self.n_agents:], index, axis=0) - ego_position).reshape(-1)
                )
                simple_obs.append(full_obs['left_team_direction'][-self.n_agents + index].reshape(-1))
                simple_obs.append(
                    np.delete(full_obs['left_team_direction'][-self.n_agents:], index, axis=0).reshape(-1)
                )
                simple_obs.append(full_obs['right_team'][0] - ego_position)
                simple_obs.append(full_obs['right_team'][1] - ego_position)
                simple_obs.append(full_obs['right_team'][2] - ego_position)
                simple_obs.append(full_obs['right_team_direction'][0])
                simple_obs.append(full_obs['right_team_direction'][1])
                simple_obs.append(full_obs['right_team_direction'][2])
                simple_obs.append(full_obs['ball'][:2] - ego_position)
                simple_obs.append(full_obs['ball'][-1].reshape(-1))
                simple_obs.append(full_obs['ball_direction'])
        elif self.env_name == 'academy_counterattack_easy':
            if index == -1:
                # global state, absolute position
                simple_obs.append(full_obs['left_team'][-self.n_agents:].reshape(-1))
                simple_obs.append(full_obs['left_team_direction'][-self.n_agents:].reshape(-1))
                simple_obs.append(full_obs['right_team'][0])
                simple_obs.append(full_obs['right_team'][1])
                simple_obs.append(full_obs['right_team_direction'][0])
                simple_obs.append(full_obs['right_team_direction'][1])
                simple_obs.append(full_obs['ball'])
                simple_obs.append(full_obs['ball_direction'])
            else:
                # local state, relative position
                ego_position = full_obs['left_team'][-self.n_agents + index].reshape(-1)
                simple_obs.append(ego_position)
                simple_obs.append(
                    (np.delete(full_obs['left_team'][-self.n_agents:], index, axis=0) - ego_position).reshape(-1)
                )
                simple_obs.append(full_obs['left_team_direction'][-self.n_agents + index].reshape(-1))
                simple_obs.append(
                    np.delete(full_obs['left_team_direction'][-self.n_agents:], index, axis=0).reshape(-1)
                )
                simple_obs.append(full_obs['right_team'][0] - ego_position)
                simple_obs.append(full_obs['right_team'][1] - ego_position)
                simple_obs.append(full_obs['right_team_direction'][0])
                simple_obs.append(full_obs['right_team_direction'][1])
                simple_obs.append(full_obs['ball'][:2] - ego_position)
                simple_obs.append(full_obs['ball'][-1].reshape(-1))
                simple_obs.append(full_obs['ball_direction'])
        simple_obs = np.concatenate(simple_obs)
        return simple_obs

    def get_obs(self):
        """Returns all agent observations in a list."""
        obs = [self.get_simple_obs(i) for i in range(self.n_agents)]
        return obs

    def get_obs_agent(self, agent_id):
        """Returns observation for agent_id."""
        return self.get_simple_obs(agent_id)

    def get_obs_size(self):
        """Returns the size of the observation."""
        return self.obs_dim

    def get_state(self):
        """Returns the global state."""
        return self.get_simple_obs(-1)

    def get_state_size(self):
        """Returns the size of the global state."""
        return self.obs_dim

    def get_avail_actions(self):
        """Returns the available actions of all agents in a list."""
        # All actions are always available in these scenarios.
        return [[1 for _ in range(self.n_actions)] for agent_id in range(self.n_agents)]

    def get_avail_agent_actions(self, agent_id):
        """Returns the available actions for agent_id."""
        return self.get_avail_actions()[agent_id]

    def get_total_actions(self):
        """Returns the total number of actions an agent could ever take."""
        return self.action_space[0].n

    def reset(self):
        """Returns initial observations and states."""
        self.time_step = 0
        self.env.reset()
        return self.get_obs(), self.get_state()

    def render(self):
        # No-op; see the NOTE in __init__ about attribute shadowing.
        pass

    def close(self):
        self.env.close()

    def seed(self):
        # No-op; see the NOTE in __init__ about attribute shadowing.
        pass

    def save_replay(self):
        """Save a replay."""
        pass

    def get_env_info(self):
        """Extend the base env-info dict with agent/enemy counts."""
        env_info = super().get_env_info()
        env_info["n_agents"] = self.n_agents
        env_info["n_enemies"] = self.n_enemies
        return env_info
CADP | CADP-main/CADP-VD/src/components/episode_buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class EpisodeBatch:
    """Batched tensor storage for (partial) episodes.

    Built from a `scheme` dict mapping field names to specs with keys:
      - "vshape": per-entry value shape (int or tuple),
      - "dtype": torch dtype (default float32),
      - "group": optional group name (adds a leading per-member dim, e.g.
        one slot per agent; sizes come from `groups`),
      - "episode_const": if True the field is stored once per episode.
    Per-timestep fields live in data.transition_data with shape
    (batch, max_seq_length, *shape); per-episode fields in data.episode_data
    with shape (batch, *shape). `preprocess` maps a source key to
    (new_key, [Transform, ...]) applied on every update of that key.
    """
    def __init__(self,
                 scheme,
                 groups,
                 batch_size,
                 max_seq_length,
                 data=None,
                 preprocess=None,
                 device="cpu"):
        self.scheme = scheme.copy()
        self.groups = groups
        self.batch_size = batch_size
        self.max_seq_length = max_seq_length
        self.preprocess = {} if preprocess is None else preprocess
        self.device = device
        if data is not None:
            # Adopt existing storage (used when slicing produces a view).
            self.data = data
        else:
            self.data = SN()
            self.data.transition_data = {}
            self.data.episode_data = {}
            self._setup_data(self.scheme, self.groups, batch_size, max_seq_length, self.preprocess)
    def _setup_data(self, scheme, groups, batch_size, max_seq_length, preprocess):
        """Extend the scheme with preprocessed fields and allocate zeroed tensors."""
        if preprocess is not None:
            for k in preprocess:
                assert k in scheme
                new_k = preprocess[k][0]
                transforms = preprocess[k][1]
                # Chain the transforms to infer the derived field's spec.
                vshape = self.scheme[k]["vshape"]
                dtype = self.scheme[k]["dtype"]
                for transform in transforms:
                    vshape, dtype = transform.infer_output_info(vshape, dtype)
                self.scheme[new_k] = {
                    "vshape": vshape,
                    "dtype": dtype
                }
                # Derived field inherits grouping/episode-const from its source.
                if "group" in self.scheme[k]:
                    self.scheme[new_k]["group"] = self.scheme[k]["group"]
                if "episode_const" in self.scheme[k]:
                    self.scheme[new_k]["episode_const"] = self.scheme[k]["episode_const"]
        assert "filled" not in scheme, '"filled" is a reserved key for masking.'
        # "filled" marks which (batch, t) slots hold real data.
        scheme.update({
            "filled": {"vshape": (1,), "dtype": th.long},
        })
        for field_key, field_info in scheme.items():
            assert "vshape" in field_info, "Scheme must define vshape for {}".format(field_key)
            vshape = field_info["vshape"]
            episode_const = field_info.get("episode_const", False)
            group = field_info.get("group", None)
            dtype = field_info.get("dtype", th.float32)
            if isinstance(vshape, int):
                vshape = (vshape,)
            if group:
                assert group in groups, "Group {} must have its number of members defined in _groups_".format(group)
                shape = (groups[group], *vshape)
            else:
                shape = vshape
            if episode_const:
                self.data.episode_data[field_key] = th.zeros((batch_size, *shape), dtype=dtype, device=self.device)
            else:
                self.data.transition_data[field_key] = th.zeros((batch_size, max_seq_length, *shape), dtype=dtype, device=self.device)
    def extend(self, scheme, groups=None):
        """Allocate storage for additional fields after construction."""
        self._setup_data(scheme, self.groups if groups is None else groups, self.batch_size, self.max_seq_length)
    def to(self, device):
        """Move all stored tensors to `device` in place."""
        for k, v in self.data.transition_data.items():
            self.data.transition_data[k] = v.to(device)
        for k, v in self.data.episode_data.items():
            self.data.episode_data[k] = v.to(device)
        self.device = device
    def update(self, data, bs=slice(None), ts=slice(None), mark_filled=True):
        """Write `data` (dict of field -> array) into the selected batch/time slots.

        Marks "filled" once per call (on the first transition field) when
        mark_filled is True, and re-applies any registered preprocess
        transforms to keep derived fields in sync.
        """
        slices = self._parse_slices((bs, ts))
        for k, v in data.items():
            if k in self.data.transition_data:
                target = self.data.transition_data
                if mark_filled:
                    target["filled"][slices] = 1
                    mark_filled = False
                _slices = slices
            elif k in self.data.episode_data:
                target = self.data.episode_data
                # Episode-constant fields are indexed by batch only.
                _slices = slices[0]
            else:
                raise KeyError("{} not found in transition or episode data".format(k))
            dtype = self.scheme[k].get("dtype", th.float32)
            v = th.tensor(v, dtype=dtype, device=self.device)
            self._check_safe_view(v, target[k][_slices])
            target[k][_slices] = v.view_as(target[k][_slices])
            if k in self.preprocess:
                new_k = self.preprocess[k][0]
                v = target[k][_slices]
                for transform in self.preprocess[k][1]:
                    v = transform.transform(v)
                target[new_k][_slices] = v.view_as(target[new_k][_slices])
    def _check_safe_view(self, v, dest):
        """Raise if reshaping v into dest's shape could silently scramble data.

        Walks both shapes from the right; mismatches are only tolerated where
        dest has a size-1 dimension that v omits.
        """
        idx = len(v.shape) - 1
        for s in dest.shape[::-1]:
            if v.shape[idx] != s:
                if s != 1:
                    raise ValueError("Unsafe reshape of {} to {}".format(v.shape, dest.shape))
            else:
                idx -= 1
    def __getitem__(self, item):
        """Index by field name, tuple of names, or (batch, time) slices.

        String / tuple-of-strings indexing returns the raw tensor(s) or a
        scheme-restricted EpisodeBatch sharing storage; slice indexing
        returns a sliced EpisodeBatch view.
        """
        if isinstance(item, str):
            if item in self.data.episode_data:
                return self.data.episode_data[item]
            elif item in self.data.transition_data:
                return self.data.transition_data[item]
            else:
                raise ValueError
        elif isinstance(item, tuple) and all([isinstance(it, str) for it in item]):
            new_data = self._new_data_sn()
            for key in item:
                if key in self.data.transition_data:
                    new_data.transition_data[key] = self.data.transition_data[key]
                elif key in self.data.episode_data:
                    new_data.episode_data[key] = self.data.episode_data[key]
                else:
                    raise KeyError("Unrecognised key {}".format(key))
            # Update the scheme to only have the requested keys
            new_scheme = {key: self.scheme[key] for key in item}
            new_groups = {self.scheme[key]["group"]: self.groups[self.scheme[key]["group"]]
                          for key in item if "group" in self.scheme[key]}
            ret = EpisodeBatch(new_scheme, new_groups, self.batch_size, self.max_seq_length, data=new_data, device=self.device)
            return ret
        else:
            item = self._parse_slices(item)
            new_data = self._new_data_sn()
            for k, v in self.data.transition_data.items():
                new_data.transition_data[k] = v[item]
            for k, v in self.data.episode_data.items():
                new_data.episode_data[k] = v[item[0]]
            ret_bs = self._get_num_items(item[0], self.batch_size)
            ret_max_t = self._get_num_items(item[1], self.max_seq_length)
            ret = EpisodeBatch(self.scheme, self.groups, ret_bs, ret_max_t, data=new_data, device=self.device)
            return ret
    def _get_num_items(self, indexing_item, max_size):
        """Number of elements an index (list/array/slice) selects out of max_size."""
        if isinstance(indexing_item, list) or isinstance(indexing_item, np.ndarray):
            return len(indexing_item)
        elif isinstance(indexing_item, slice):
            _range = indexing_item.indices(max_size)
            # Ceil-divide the slice span by its step.
            return 1 + (_range[1] - _range[0] - 1)//_range[2]
    def _new_data_sn(self):
        """Fresh empty SimpleNamespace with the two storage dicts."""
        new_data = SN()
        new_data.transition_data = {}
        new_data.episode_data = {}
        return new_data
    def _parse_slices(self, items):
        """Normalise indexing input to a [batch_slice, time_slice] pair."""
        parsed = []
        # Only batch slice given, add full time slice
        if (isinstance(items, slice)  # slice a:b
            or isinstance(items, int)  # int i
            or (isinstance(items, (list, np.ndarray, th.LongTensor, th.cuda.LongTensor)))  # [a,b,c]
            ):
            items = (items, slice(None))
        # Need the time indexing to be contiguous
        if isinstance(items[1], list):
            raise IndexError("Indexing across Time must be contiguous")
        for item in items:
            #TODO: stronger checks to ensure only supported options get through
            if isinstance(item, int):
                # Convert single indices to slices
                parsed.append(slice(item, item+1))
            else:
                # Leave slices and lists as is
                parsed.append(item)
        return parsed
    def max_t_filled(self):
        """Length (in timesteps) of the longest filled episode in the batch."""
        return th.sum(self.data.transition_data["filled"], 1).max(0)[0]
    def __repr__(self):
        return "EpisodeBatch. Batch Size:{} Max_seq_len:{} Keys:{} Groups:{}".format(self.batch_size,
                                                                                     self.max_seq_length,
                                                                                     self.scheme.keys(),
                                                                                     self.groups.keys())
class ReplayBuffer(EpisodeBatch):
    """FIFO ring buffer of whole episodes, stored as one big EpisodeBatch.

    `buffer_index` is the next write position; once the buffer wraps, new
    episodes overwrite the oldest ones. Sampling is uniform without
    replacement over the episodes currently stored.
    """
    def __init__(self, scheme, groups, buffer_size, max_seq_length, preprocess=None, device="cpu"):
        super(ReplayBuffer, self).__init__(scheme, groups, buffer_size, max_seq_length, preprocess=preprocess, device=device)
        self.buffer_size = buffer_size  # same as self.batch_size but more explicit
        self.buffer_index = 0
        self.episodes_in_buffer = 0
    def insert_episode_batch(self, ep_batch):
        """Copy ep_batch's episodes into the buffer, wrapping around if needed."""
        if self.buffer_index + ep_batch.batch_size <= self.buffer_size:
            self.update(ep_batch.data.transition_data,
                        slice(self.buffer_index, self.buffer_index + ep_batch.batch_size),
                        slice(0, ep_batch.max_seq_length),
                        mark_filled=False)
            self.update(ep_batch.data.episode_data,
                        slice(self.buffer_index, self.buffer_index + ep_batch.batch_size))
            self.buffer_index = (self.buffer_index + ep_batch.batch_size)
            self.episodes_in_buffer = max(self.episodes_in_buffer, self.buffer_index)
            self.buffer_index = self.buffer_index % self.buffer_size
            assert self.buffer_index < self.buffer_size
        else:
            # Not enough room before the end: split the insert at the wrap point
            # and recurse on each half.
            buffer_left = self.buffer_size - self.buffer_index
            self.insert_episode_batch(ep_batch[0:buffer_left, :])
            self.insert_episode_batch(ep_batch[buffer_left:, :])
    def can_sample(self, batch_size):
        """True once at least batch_size episodes have been stored."""
        return self.episodes_in_buffer >= batch_size
    def sample(self, batch_size):
        """Return batch_size episodes sampled uniformly without replacement."""
        assert self.can_sample(batch_size)
        if self.episodes_in_buffer == batch_size:
            return self[:batch_size]
        else:
            # Uniform sampling only atm
            ep_ids = np.random.choice(self.episodes_in_buffer, batch_size, replace=False)
            return self[ep_ids]
    def __repr__(self):
        return "ReplayBuffer. {}/{} episodes. Keys:{} Groups:{}".format(self.episodes_in_buffer,
                                                                        self.buffer_size,
                                                                        self.scheme.keys(),
                                                                        self.groups.keys())
# CADP-main/CADP-VD/src/components/action_selectors.py
import torch as th
from torch.distributions import Categorical
from .epsilon_schedules import DecayThenFlatSchedule
REGISTRY = {}
class MultinomialActionSelector():
    """Samples actions from per-agent policy probabilities (categorical)."""
    def __init__(self, args):
        self.args = args
        # Epsilon is tracked on a linear decay-then-flat schedule (kept for
        # logging parity with the epsilon-greedy selector; sampling below
        # uses the masked policy probabilities directly).
        self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
                                              decay="linear")
        self.epsilon = self.schedule.eval(0)
        # If True (default), act greedily during evaluation.
        self.test_greedy = getattr(args, "test_greedy", True)
    def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        """Return action indices; argmax over dim 2 at test time, sampled otherwise."""
        masked_policies = agent_inputs.clone()
        # Zero out unavailable actions; Categorical renormalises the remainder.
        masked_policies[avail_actions == 0.0] = 0.0
        self.epsilon = self.schedule.eval(t_env)
        if test_mode and self.test_greedy:
            picked_actions = masked_policies.max(dim=2)[1]
        else:
            picked_actions = Categorical(masked_policies).sample().long()
        return picked_actions
REGISTRY["multinomial"] = MultinomialActionSelector
class EpsilonGreedyActionSelector():
    """Epsilon-greedy selection over per-agent Q-values with availability masking."""
    def __init__(self, args):
        self.args = args
        # Linear decay from epsilon_start to epsilon_finish over
        # epsilon_anneal_time environment steps, then flat.
        self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
                                              decay="linear")
        self.epsilon = self.schedule.eval(0)
    def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        """Return action indices; never picks an unavailable action."""
        # Assuming agent_inputs is a batch of Q-Values for each agent bav
        self.epsilon = self.schedule.eval(t_env)
        if test_mode:
            # Greedy action selection only
            self.epsilon = 0.0
        # mask actions that are excluded from selection
        masked_q_values = agent_inputs.clone()
        masked_q_values[avail_actions == 0.0] = -float("inf")  # should never be selected!
        # One Bernoulli(epsilon) draw per (batch, agent): explore or exploit.
        random_numbers = th.rand_like(agent_inputs[:, :, 0])
        pick_random = (random_numbers < self.epsilon).long()
        # Random exploration is uniform over AVAILABLE actions only.
        random_actions = Categorical(avail_actions.float()).sample().long()
        picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
        return picked_actions
REGISTRY["epsilon_greedy"] = EpsilonGreedyActionSelector
# CADP-main/CADP-VD/src/components/transforms.py
import torch as th
class Transform:
    """Interface for tensor field preprocessors (used as batch `preprocess` steps)."""
    def transform(self, tensor):
        """Apply the transform to `tensor`; implemented by subclasses."""
        raise NotImplementedError
    def infer_output_info(self, vshape_in, dtype_in):
        """Return (vshape_out, dtype_out) for input of the given shape/dtype."""
        raise NotImplementedError
class OneHot(Transform):
    """One-hot encode an integer index held in the input's last dimension.

    Fix: the `infer_output_info` return line had dataset-dump residue
    (`| 568 | 24.863636 | ...`) fused onto it, turning the statement into a
    broken expression (NameError on `py`); restored to the plain return.
    """
    def __init__(self, out_dim):
        # Number of categories, i.e. the size of the one-hot dimension.
        self.out_dim = out_dim
    def transform(self, tensor):
        """Return a float one-hot tensor; input's last dim holds the index."""
        y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
        y_onehot.scatter_(-1, tensor.long(), 1)
        return y_onehot.float()
    def infer_output_info(self, vshape_in, dtype_in):
        """Output feature shape is (out_dim,) with dtype float32."""
        return (self.out_dim,), th.float32
# CADP-main/CADP-VD/src/runners/parallel_runner.py
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunner:
    """Runs `batch_size_run` environment instances in parallel subprocesses.

    Each env lives in its own process driven by env_worker; communication is
    over Pipes. `run()` collects one full episode per env into an
    EpisodeBatch and maintains train/test return and stat logging.
    """
    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.batch_size = self.args.batch_size_run
        # Make subprocesses for the envs
        self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
        env_fn = env_REGISTRY[self.args.env]
        # CloudpickleWrapper lets the partial (which may close over
        # non-picklable objects) survive the trip to the subprocess.
        self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
                            for worker_conn in self.worker_conns]
        for p in self.ps:
            p.daemon = True
            p.start()
        # Env info (episode limit etc.) is the same for all envs; ask one.
        self.parent_conns[0].send(("get_env_info", None))
        self.env_info = self.parent_conns[0].recv()
        self.episode_limit = self.env_info["episode_limit"]
        self.t = 0
        self.t_env = 0
        self.train_returns = []
        self.test_returns = []
        self.train_stats = {}
        self.test_stats = {}
        self.log_train_stats_t = -100000
    def setup(self, scheme, groups, preprocess, mac):
        """Bind the batch factory and the multi-agent controller."""
        # +1 timestep so the final pre-transition data has a slot.
        self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
                                 preprocess=preprocess, device=self.args.device)
        self.mac = mac
        self.scheme = scheme
        self.groups = groups
        self.preprocess = preprocess
    def get_env_info(self):
        return self.env_info
    def save_replay(self):
        # Replay saving not implemented for the parallel runner.
        pass
    def close_env(self):
        """Ask every worker process to close its env and shut down."""
        for parent_conn in self.parent_conns:
            parent_conn.send(("close", None))
    def reset(self):
        """Start a fresh EpisodeBatch and reset all envs, storing t=0 data."""
        self.batch = self.new_batch()
        # Reset the envs
        for parent_conn in self.parent_conns:
            parent_conn.send(("reset", None))
        pre_transition_data = {
            "state": [],
            "avail_actions": [],
            "obs": []
        }
        # Get the obs, state and avail_actions back
        for parent_conn in self.parent_conns:
            data = parent_conn.recv()
            pre_transition_data["state"].append(data["state"])
            pre_transition_data["avail_actions"].append(data["avail_actions"])
            pre_transition_data["obs"].append(data["obs"])
        self.batch.update(pre_transition_data, ts=0)
        self.t = 0
        self.env_steps_this_run = 0
    def run(self, test_mode=False):
        """Roll out one episode in every env; return the filled EpisodeBatch."""
        self.reset()
        all_terminated = False
        episode_returns = [0 for _ in range(self.batch_size)]
        episode_lengths = [0 for _ in range(self.batch_size)]
        self.mac.init_hidden(batch_size=self.batch_size)
        terminated = [False for _ in range(self.batch_size)]
        envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
        final_env_infos = []  # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
        while True:
            # Pass the entire batch of experiences up till now to the agents
            # Receive the actions for each agent at this timestep in a batch for each un-terminated env
            actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
            cpu_actions = actions.to("cpu").numpy()
            # Update the actions taken
            actions_chosen = {
                "actions": actions.unsqueeze(1)
            }
            self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            # Send actions to each env
            action_idx = 0
            for idx, parent_conn in enumerate(self.parent_conns):
                if idx in envs_not_terminated:  # We produced actions for this env
                    if not terminated[idx]:  # Only send the actions to the env if it hasn't terminated
                        parent_conn.send(("step", cpu_actions[action_idx]))
                    action_idx += 1  # actions is not a list over every env
            # Update envs_not_terminated
            envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
            all_terminated = all(terminated)
            if all_terminated:
                break
            # Post step data we will insert for the current timestep
            post_transition_data = {
                "reward": [],
                "terminated": []
            }
            # Data for the next step we will insert in order to select an action
            pre_transition_data = {
                "state": [],
                "avail_actions": [],
                "obs": []
            }
            # Receive data back for each unterminated env
            for idx, parent_conn in enumerate(self.parent_conns):
                if not terminated[idx]:
                    data = parent_conn.recv()
                    # Remaining data for this current timestep
                    post_transition_data["reward"].append((data["reward"],))
                    episode_returns[idx] += data["reward"]
                    episode_lengths[idx] += 1
                    if not test_mode:
                        self.env_steps_this_run += 1
                    # An episode cut off by the time limit is NOT marked
                    # terminated for bootstrapping purposes.
                    env_terminated = False
                    if data["terminated"]:
                        final_env_infos.append(data["info"])
                    if data["terminated"] and not data["info"].get("episode_limit", False):
                        env_terminated = True
                    terminated[idx] = data["terminated"]
                    post_transition_data["terminated"].append((env_terminated,))
                    # Data for the next timestep needed to select an action
                    pre_transition_data["state"].append(data["state"])
                    pre_transition_data["avail_actions"].append(data["avail_actions"])
                    pre_transition_data["obs"].append(data["obs"])
            # Add post_transiton data into the batch
            self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            # Move onto the next timestep
            self.t += 1
            # Add the pre-transition data
            self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
        if not test_mode:
            self.t_env += self.env_steps_this_run
        # Get stats back for each env
        for parent_conn in self.parent_conns:
            parent_conn.send(("get_stats",None))
        env_stats = []
        for parent_conn in self.parent_conns:
            env_stat = parent_conn.recv()
            env_stats.append(env_stat)
        cur_stats = self.test_stats if test_mode else self.train_stats
        cur_returns = self.test_returns if test_mode else self.train_returns
        log_prefix = "test_" if test_mode else ""
        # Sum each stat key across the accumulated stats and this run's infos.
        infos = [cur_stats] + final_env_infos
        cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
        cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
        cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
        cur_returns.extend(episode_returns)
        n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
        if test_mode and (len(self.test_returns) == n_test_runs):
            self._log(cur_returns, cur_stats, log_prefix)
        elif self.t_env - self.log_stats_t >= self.args.runner_log_interval if False else self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
            self._log(cur_returns, cur_stats, log_prefix)
            if hasattr(self.mac.action_selector, "epsilon"):
                self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
            self.log_train_stats_t = self.t_env
        return self.batch
    def _log(self, returns, stats, prefix):
        """Log mean/std of returns and per-episode means of stats, then clear them."""
        self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
        self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
        returns.clear()
        for k, v in stats.items():
            if k != "n_episodes":
                self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
        stats.clear()
def env_worker(remote, env_fn):
    """Subprocess loop: own one environment and serve commands over `remote`.

    `env_fn` is a CloudpickleWrapper whose .x attribute is a zero-arg env
    constructor. Recognised commands: "step", "reset", "close",
    "get_env_info", "get_stats". The loop exits on "close".
    """
    # Make environment
    env = env_fn.x()
    while True:
        cmd, data = remote.recv()
        if cmd == "step":
            actions = data
            # Take a step in the environment
            reward, terminated, env_info = env.step(actions)
            # Return the observations, avail_actions and state to make the next action
            state = env.get_state()
            avail_actions = env.get_avail_actions()
            obs = env.get_obs()
            remote.send({
                # Data for the next timestep needed to pick an action
                "state": state,
                "avail_actions": avail_actions,
                "obs": obs,
                # Rest of the data for the current timestep
                "reward": reward,
                "terminated": terminated,
                "info": env_info
            })
        elif cmd == "reset":
            env.reset()
            remote.send({
                "state": env.get_state(),
                "avail_actions": env.get_avail_actions(),
                "obs": env.get_obs()
            })
        elif cmd == "close":
            env.close()
            remote.close()
            break
        elif cmd == "get_env_info":
            remote.send(env.get_env_info())
        elif cmd == "get_stats":
            remote.send(env.get_stats())
        else:
            raise NotImplementedError
class CloudpickleWrapper():
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """
    def __init__(self, x):
        # x: the wrapped payload (here, a partial that constructs the env).
        self.x = x
    def __getstate__(self):
        # Serialise with cloudpickle so closures/lambdas survive the trip.
        import cloudpickle
        return cloudpickle.dumps(self.x)
    def __setstate__(self, ob):
        # cloudpickle output is a regular pickle stream, so plain pickle loads it.
        import pickle
        self.x = pickle.loads(ob)
# CADP-main/CADP-VD/src/controllers/basic_controller.py
from modules.agents import REGISTRY as agent_REGISTRY
from components.action_selectors import REGISTRY as action_REGISTRY
import torch as th
# This multi-agent controller shares parameters between agents
# This multi-agent controller shares parameters between agents
class BasicMAC:
    """Parameter-shared multi-agent controller.

    One agent network processes all agents' inputs (flattened to
    batch*n_agents rows); outputs can be Q-values or policy logits
    depending on args.agent_output_type.
    """
    def __init__(self, scheme, groups, args):
        self.n_agents = args.n_agents
        self.args = args
        input_shape = self._get_input_shape(scheme)
        self._build_agents(input_shape)
        self.agent_output_type = args.agent_output_type
        self.action_selector = action_REGISTRY[args.action_selector](args)
        self.hidden_states = None
    def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
        """Run a forward pass at t_ep and pick actions for batch elements in bs."""
        # Only select actions for the selected batch elements in bs
        avail_actions = ep_batch["avail_actions"][:, t_ep]
        agent_outs = self.forward(ep_batch, t_ep, test_mode=test_mode)
        chosen_actions = self.action_selector.select_action(agent_outs[bs], avail_actions[bs], t_env, test_mode=test_mode)
        return chosen_actions
    def forward(self, ep_batch, t, test_mode=False):
        """Return per-agent outputs for timestep t, shape (batch, n_agents, -1)."""
        agent_inputs = self._build_inputs(ep_batch, t)
        avail_actions = ep_batch["avail_actions"][:, t]
        agent_outs, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
        # Softmax the agent outputs if they're policy logits
        if self.agent_output_type == "pi_logits":
            if getattr(self.args, "mask_before_softmax", True):
                # Make the logits for unavailable actions very negative to minimise their affect on the softmax
                reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
                agent_outs[reshaped_avail_actions == 0] = -1e10
            agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
            if not test_mode:
                # Epsilon floor
                epsilon_action_num = agent_outs.size(-1)
                if getattr(self.args, "mask_before_softmax", True):
                    # With probability epsilon, we will pick an available action uniformly
                    epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
                agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
                               + th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
                if getattr(self.args, "mask_before_softmax", True):
                    # Zero out the unavailable actions
                    agent_outs[reshaped_avail_actions == 0] = 0.0
        return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
    def init_hidden(self, batch_size):
        """Reset the shared agent's RNN state, one state per (batch, agent)."""
        self.hidden_states = self.agent.init_hidden().unsqueeze(0).expand(batch_size, self.n_agents, -1)  # bav
    def parameters(self):
        return self.agent.parameters()
    def load_state(self, other_mac):
        """Copy the agent weights from another controller (e.g. target sync)."""
        self.agent.load_state_dict(other_mac.agent.state_dict())
    def cuda(self):
        self.agent.cuda()
    def save_models(self, path):
        th.save(self.agent.state_dict(), "{}/agent.th".format(path))
    def load_models(self, path):
        self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
    def _build_agents(self, input_shape):
        self.agent = agent_REGISTRY[self.args.agent](input_shape, self.args)
    def _build_inputs(self, batch, t):
        """Build flat (batch*n_agents, features) inputs: obs [+ last action] [+ agent id]."""
        # Assumes homogenous agents with flat observations.
        # Other MACs might want to e.g. delegate building inputs to each agent
        bs = batch.batch_size
        inputs = []
        inputs.append(batch["obs"][:, t])  # b1av
        if self.args.obs_last_action:
            if t == 0:
                # No previous action at the first step: feed zeros.
                inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
            else:
                inputs.append(batch["actions_onehot"][:, t-1])
        if self.args.obs_agent_id:
            # One-hot agent id, identical across the batch dimension.
            inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
        inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
        return inputs
    def _get_input_shape(self, scheme):
        """Input feature size matching the layout built by _build_inputs."""
        input_shape = scheme["obs"]["vshape"]
        if self.args.obs_last_action:
            input_shape += scheme["actions_onehot"]["vshape"][0]
        if self.args.obs_agent_id:
            input_shape += self.n_agents
        return input_shape
# CADP-main/CADP-VD/src/controllers/n_controller.py
from modules.agents import REGISTRY as agent_REGISTRY
from components.action_selectors import REGISTRY as action_REGISTRY
from .basic_controller import BasicMAC
import torch as th
import numpy as np
# This multi-agent controller shares parameters between agents
class NMAC(BasicMAC):
    """Controller returning raw per-agent outputs (no softmax/epsilon floor).

    Unlike BasicMAC.forward, this agent also returns an intermediate
    (attention) tensor, cached on self.inter for the learner to read.

    Fixes vs. original:
      - the final return line carried dataset-dump residue
        (`| 1,211 | 43.888889 | ...`) fused onto it, making it a broken
        expression; restored to a plain return.
      - removed the unused `avail_actions` local in forward (masking is
        done by the action selector / learner, not here).
    """
    def __init__(self, scheme, groups, args):
        super(NMAC, self).__init__(scheme, groups, args)
    def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
        """Forward at t_ep and pick actions for batch elements in bs."""
        # Only select actions for the selected batch elements in bs
        avail_actions = ep_batch["avail_actions"][:, t_ep]
        qvals = self.forward(ep_batch, t_ep, test_mode=test_mode)
        chosen_actions = self.action_selector.select_action(qvals[bs], avail_actions[bs], t_env, test_mode=test_mode)
        return chosen_actions
    def forward(self, ep_batch, t, test_mode=False):
        """Run the shared agent for timestep t; caches attention on self.inter."""
        if test_mode:
            self.agent.eval()
        agent_inputs = self._build_inputs(ep_batch, t)
        agent_outs, agent_inter, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
        # Keep the agent's intermediate (attention) output for the learner.
        self.inter = agent_inter
        return agent_outs
# CADP-main/CADP-VD/src/utils/th_utils.py
import torch
import numpy as np
from torch import nn
def get_params_size(params_list):
    """Return the float32 memory footprint of the parameters as a 'NKB' string."""
    # numel() == product of size(); 4 bytes per float32 element.
    total_kb = sum(p.numel() for p in params_list) * 4 / 1024
    return "{:.0f}KB".format(total_kb)
def clip_by_tensor(t, t_min, t_max):
    """Element-wise clip of tensor ``t`` into the range [t_min, t_max].

    :param t: tensor to clip
    :param t_min: tensor of per-element lower bounds
    :param t_max: tensor of per-element upper bounds
    :return: clipped float tensor
    """
    t = t.float()
    lo = t_min.float()
    hi = t_max.float()
    # Raise everything below the lower bound, then cap at the upper bound,
    # using mask arithmetic (same operations as a where(), kept explicit).
    raised = (t >= lo).float() * t + (t < lo).float() * lo
    return (raised <= hi).float() * raised + (raised > hi).float() * hi
def get_parameters_num(param_list):
    """Return the total element count of the parameters, formatted in thousands."""
    n_params = sum(p.numel() for p in param_list)
    return "{}K".format(n_params / 1000)
def init(module, weight_init, bias_init, gain=1):
    """Apply weight/bias initialisers to `module` in place and return it.

    weight_init must accept a `gain` keyword (e.g. nn.init.orthogonal_);
    bias_init is called with the bias tensor only.
    """
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module
def orthogonal_init_(m, gain=1):
    """Orthogonally initialise a Linear layer (zero bias); no-op for other modules.

    Intended for use with Module.apply(). Fix: the original call's closing
    line had dataset-dump residue (`| 1,031 | 24.8 | ...`) fused onto it,
    turning the statement into a broken expression; restored.
    """
    if isinstance(m, nn.Linear):
        init(m, nn.init.orthogonal_,
             lambda x: nn.init.constant_(x, 0), gain=gain)
# CADP-main/CADP-VD/src/utils/rl_utils.py
import torch as th
def build_td_lambda_targets(rewards, terminated, mask, target_qs, n_agents, gamma, td_lambda):
    """Compute TD(lambda) return targets by backward recursion.

    Each step mixes the bootstrapped one-step target (weight 1-lambda) with
    the next step's lambda-return (weight lambda), masked so padded steps
    contribute nothing. Returns targets for t = 0..T-2.
    """
    # Assumes <target_qs > in B*T*A and <reward >, <terminated >, <mask > in (at least) B*T-1*1
    # Initialise last lambda -return for not terminated episodes
    ret = target_qs.new_zeros(*target_qs.shape)
    ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
    # Backwards recursive update of the "forward view"
    for t in range(ret.shape[1] - 2, -1, -1):
        ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] \
                    * (rewards[:, t] + (1 - td_lambda) * gamma * target_qs[:, t + 1] * (1 - terminated[:, t]))
    # Returns lambda-return from t=0 to t=T-1, i.e. in B*T-1*A
    return ret[:, 0:-1]
# CADP-main/CADP-VD/src/learners/q_learner_teacher.py
import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.vdn import VDNMixer
from modules.mixers.qmix import QMixer
import torch as th
from torch.optim import RMSprop, Adam
import numpy as np
import torch.nn.functional as F
class QLearner:
    """Q-learning trainer for the CADP teacher phase (VDN/QMix mixers).

    Standard (double) Q-learning over an EpisodeBatch with a target
    network, plus a CADP-specific regulariser that pushes the agent's
    attention towards the identity matrix after `args.breakpoint` steps.
    """
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger
        self.params = list(mac.parameters())
        self.last_target_update_episode = 0
        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)
        if self.args.optimizer == 'adam':
            self.optimiser = Adam(params=self.params, lr=args.lr, weight_decay=getattr(args, "weight_decay", 0))
        else:
            self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)
        self.log_stats_t = -self.args.learner_log_interval - 1
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        """One gradient step on a batch of episodes."""
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        # Mask out everything after (and including the step after) termination.
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]
        # Calculate estimated Q-Values
        mac_out = []
        onehot_out = []
        eps = 1e-8
        # Identity-attention target: flattened n_agents x n_agents identity,
        # repeated once per attention head.
        eye = th.eye(self.args.n_agents).reshape(-1).to(self.args.device)
        eye = th.cat([eye] * self.args.att_heads, dim=0)
        self.mac.init_hidden(batch.batch_size)
        att_out = []
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            # Per-step KL divergence between the agent's attention weights
            # and the identity target (used by the teacher regulariser below).
            att = self.mac.agent.att.dot
            att = att.view(batch.batch_size,-1)
            onehot_out.append(F.kl_div((att+eps).log(), eye, reduction='none').mean(dim=-1))
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        onehot_out = th.stack(onehot_out, dim=1)
        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim
        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)
        # We don't need the first timesteps Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[1:], dim=1)  # Concat across time
        # Mask out unavailable actions
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999
        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]
        # Mix
        if self.mixer is not None:
            target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])
            chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
        # Td-error
        td_error = (chosen_action_qvals - targets.detach())  # targets detached: the target network gets no gradient
        mask = mask.expand_as(td_error)  # broadcast the fill mask over the error
        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask
        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error ** 2).sum() / mask.sum()
        # CADP teacher phase: after `breakpoint` env steps, add the
        # attention-to-identity KL penalty (computed above) weighted by alpha.
        if self.args.learner=="q_learner_teacher" and t_env>self.args.breakpoint:
            onehot_out = onehot_out[:,:-1].unsqueeze(-1) * mask
            loss = loss + self.args.alpha*onehot_out.sum()/mask.sum()
        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()
        if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm.item(), t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
            self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
    def _update_targets(self):
        """Hard-copy the live MAC and mixer into their target counterparts."""
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")
    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
# CADP-main/CADP-VD/src/learners/qplex_learner_teacher.py
import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.qplex import DMAQ_QattenMixer
import torch as th
import numpy as np
from torch.optim import RMSprop, Adam
from utils.th_utils import get_params_size
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
def entropy(x, dim=-1):
    """Shannon entropy of x along `dim`, normalised so uniform -> 1.0."""
    # Normalise (with epsilon smoothing) so slices along dim sum to one,
    # then divide by log(K) to map the maximum possible entropy to 1.
    norm_const = np.log(x.shape[dim])
    probs = (x + 1e-8) / th.sum(x + 1e-8, dim, keepdim=True)
    plogp = -th.log(probs) * probs
    return plogp.sum(dim) / norm_const
class DMAQ_qattenLearner:
    def __init__(self, mac, scheme, logger, args):
        """Set up the QPLEX (dmaq_qatten) learner: mixer, target nets, optimiser."""
        self.args = args
        self.mac = mac
        self.logger = logger
        self.params = list(mac.parameters())
        self.last_target_update_episode = 0
        self.mixer = None
        if args.mixer is not None:
            # Only the QPLEX attention mixer is supported by this learner.
            if args.mixer == 'dmaq_qatten':
                self.mixer = DMAQ_QattenMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)
        if self.args.optimizer == 'adam':
            self.optimiser = Adam(params=self.params, lr=args.lr)
        else:
            self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)
        self.log_stats_t = -self.args.learner_log_interval - 1
        self.n_actions = self.args.n_actions
def sub_train(self, batch: EpisodeBatch, t_env: int, episode_num: int, mac, mixer, optimiser, params,
show_demo=False, save_data=None):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
actions_onehot = batch["actions_onehot"][:, :-1]
# Calculate estimated Q-Values
mac_out = []
mac_out = []
onehot_out = []
eps = 1e-8
eye = th.eye(self.args.n_agents).reshape(-1).to(self.args.device)
eye = th.cat([eye] * self.args.att_heads, dim=0)
self.mac.init_hidden(batch.batch_size)
att_out = []
for t in range(batch.max_seq_length):
agent_outs = mac.forward(batch, t=t)
att = self.mac.agent.att.dot
att = att.view(batch.batch_size,-1)
onehot_out.append(F.kl_div((att+eps).log(), eye, reduction='none').mean(dim=-1))
mac_out.append(agent_outs)
mac_out = th.stack(mac_out, dim=1) # Concat over time
onehot_out = th.stack(onehot_out, dim=1)
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
x_mac_out = mac_out.clone().detach()
x_mac_out[avail_actions == 0] = -9999999
max_action_qvals, max_action_index = x_mac_out[:, :-1].max(dim=3)
max_action_index = max_action_index.detach().unsqueeze(3)
is_max_action = (max_action_index == actions).int().float()
if show_demo:
q_i_data = chosen_action_qvals.detach().cpu().numpy()
q_data = (max_action_qvals - chosen_action_qvals).detach().cpu().numpy()
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
# We don't need the first timesteps Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[avail_actions[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = mac_out.clone().detach()
mac_out_detach[avail_actions == 0] = -9999999
cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
target_chosen_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
target_max_qvals = target_mac_out.max(dim=3)[0]
target_next_actions = cur_max_actions.detach()
cur_max_actions_onehot = th.zeros(cur_max_actions.squeeze(3).shape + (self.n_actions,)).cuda()
cur_max_actions_onehot = cur_max_actions_onehot.scatter_(3, cur_max_actions, 1)
else:
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
# We don't need the first timesteps Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if mixer is not None:
if self.args.mixer == "dmaq_qatten":
ans_chosen, q_attend_regs, head_entropies = \
mixer(chosen_action_qvals, batch["state"][:, :-1], is_v=True)
ans_adv, _, _ = mixer(chosen_action_qvals, batch["state"][:, :-1], actions=actions_onehot,
max_q_i=max_action_qvals, is_v=False)
chosen_action_qvals = ans_chosen + ans_adv
else:
ans_chosen = mixer(chosen_action_qvals, batch["state"][:, :-1], is_v=True)
ans_adv = mixer(chosen_action_qvals, batch["state"][:, :-1], actions=actions_onehot,
max_q_i=max_action_qvals, is_v=False)
chosen_action_qvals = ans_chosen + ans_adv
if self.args.double_q:
if self.args.mixer == "dmaq_qatten":
target_chosen, _, _ = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:], is_v=True)
target_adv, _, _ = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:],
actions=cur_max_actions_onehot,
max_q_i=target_max_qvals, is_v=False)
target_max_qvals = target_chosen + target_adv
else:
target_chosen = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:], is_v=True)
target_adv = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:],
actions=cur_max_actions_onehot,
max_q_i=target_max_qvals, is_v=False)
target_max_qvals = target_chosen + target_adv
else:
target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:], is_v=True)
# Calculate 1-step Q-Learning targets
targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
if show_demo:
tot_q_data = chosen_action_qvals.detach().cpu().numpy()
tot_target = targets.detach().cpu().numpy()
print('action_pair_%d_%d' % (save_data[0], save_data[1]), np.squeeze(q_data[:, 0]),
np.squeeze(q_i_data[:, 0]), np.squeeze(tot_q_data[:, 0]), np.squeeze(tot_target[:, 0]))
self.logger.log_stat('action_pair_%d_%d' % (save_data[0], save_data[1]),
np.squeeze(tot_q_data[:, 0]), t_env)
return
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
if self.args.mixer == "dmaq_qatten":
loss = (masked_td_error ** 2).sum() / mask.sum() + q_attend_regs
else:
loss = (masked_td_error ** 2).sum() / mask.sum()
masked_hit_prob = th.mean(is_max_action, dim=2) * mask
hit_prob = masked_hit_prob.sum() / mask.sum()
# Optimise
if self.args.learner == "qplex_learner" and t_env > self.args.breakpoint:
onehot_out = onehot_out[:, :-1].unsqueeze(-1) * mask
loss = loss + self.args.alpha * onehot_out.sum() / mask.sum()
optimiser.zero_grad()
loss.backward(retain_graph=True)
grad_norm = th.nn.utils.clip_grad_norm_(params, self.args.grad_norm_clip)
optimiser.step()
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("train/loss", loss.item(), t_env)
self.logger.log_stat("train/hit_prob", hit_prob.item(), t_env)
self.logger.log_stat("train/grad_norm", grad_norm.item(), t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("train/td_error_abs", (masked_td_error.abs().sum().item() / mask_elems), t_env)
self.logger.log_stat("train/q_taken_mean", (chosen_action_qvals * mask).sum().item() / (mask_elems * self.args.n_agents), t_env)
self.logger.log_stat("train/target_mean", (targets * mask).sum().item() / (mask_elems * self.args.n_agents), t_env)
self.log_stats_t = t_env
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int, show_demo=False, save_data=None):
self.sub_train(batch, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
show_demo=show_demo, save_data=save_data)
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
self._update_targets()
self.last_target_update_episode = episode_num
def sub_evaluate(self, batch: EpisodeBatch, t_env: int, episode_num: int, mac, mixer, optimiser, params,
show_demo=False, save_data=None):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
actions_onehot = batch["actions_onehot"][:, :-1]
# Calculate estimated Q-Values
mac_out = []
mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
agent_outs = mac.forward(batch, t=t)
mac_out.append(agent_outs)
mac_out = th.stack(mac_out, dim=1) # Concat over time
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
x_mac_out = mac_out.clone().detach()
x_mac_out[avail_actions == 0] = -9999999
max_action_qvals, max_action_index = x_mac_out[:, :-1].max(dim=3)
max_action_index = max_action_index.detach().unsqueeze(3)
is_max_action = (max_action_index == actions).int().float()
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
# We don't need the first timesteps Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[avail_actions[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = mac_out.clone().detach()
mac_out_detach[avail_actions == 0] = -9999999
cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
target_chosen_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
target_max_qvals = target_mac_out.max(dim=3)[0]
target_next_actions = cur_max_actions.detach()
cur_max_actions_onehot = th.zeros(cur_max_actions.squeeze(3).shape + (self.n_actions,)).cuda()
cur_max_actions_onehot = cur_max_actions_onehot.scatter_(3, cur_max_actions, 1)
else:
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
# We don't need the first timesteps Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
for i in range(chosen_action_qvals.shape[-2]):
# print("i=",i," agent_qvals= ",chosen_action_qvals[0][i])
draw.value.append(chosen_action_qvals[0][i])
draw.step.append(i)
if mixer is not None:
if self.args.mixer == "dmaq_qatten":
ans_chosen, q_attend_regs, head_entropies = \
mixer(chosen_action_qvals, batch["state"][:, :-1], is_v=True)
ans_adv, _, _ = mixer(chosen_action_qvals, batch["state"][:, :-1], actions=actions_onehot,
max_q_i=max_action_qvals, is_v=False)
chosen_action_qvals = ans_chosen + ans_adv
for t in range(batch.max_seq_length - 1):
td_error1 = (chosen_action_qvals[0][t][0])
loss1 = td_error1
# Optimise
self.optimiser.zero_grad()
loss1.backward(retain_graph=True)
k = copy.deepcopy(global_Grad.x.grad[0,t])
k = k.view(global_Grad.x.grad.shape[-1])
draw.value_grad.append(k)
En = th.stack(draw.value_grad)
grad_entropy = entropy(En).unsqueeze(-1) * mask
grad_entropy = grad_entropy.sum(dim=-1) # / mask.sum()
fl = 0
for i in range(batch.max_seq_length - 1):
if(mask[0][i][0]==0.0):
fl =i
break
grad_entropy = grad_entropy[0][:fl]
print("grad_entropy:", grad_entropy.mean())
draw.main(self.args)
return
else:
ans_chosen = mixer(chosen_action_qvals, batch["state"][:, :-1], is_v=True)
ans_adv = mixer(chosen_action_qvals, batch["state"][:, :-1], actions=actions_onehot,
max_q_i=max_action_qvals, is_v=False)
chosen_action_qvals = ans_chosen + ans_adv
if self.args.double_q:
if self.args.mixer == "dmaq_qatten":
target_chosen, _, _ = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:], is_v=True)
target_adv, _, _ = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:],
actions=cur_max_actions_onehot,
max_q_i=target_max_qvals, is_v=False)
target_max_qvals = target_chosen + target_adv
else:
target_chosen = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:], is_v=True)
target_adv = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:],
actions=cur_max_actions_onehot,
max_q_i=target_max_qvals, is_v=False)
target_max_qvals = target_chosen + target_adv
else:
target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:], is_v=True)
def evaluate(self,batch: EpisodeBatch, t_env = None, episode_num = None, show_demo=False, save_data=None):
self.sub_evaluate(batch, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
show_demo=show_demo, save_data=save_data)
def output_grad(self,mac_out,batch):
actions = batch["actions"][:, :-1]
avail_actions = batch["avail_actions"]
actions_onehot = batch["actions_onehot"][:, :-1]
x_mac_out = mac_out.clone().detach()
x_mac_out[avail_actions == 0] = -9999999
max_action_qvals, max_action_index = x_mac_out[:, :-1].max(dim=3)
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
qvals = chosen_action_qvals
ans_chosen, q_attend_regs, head_entropies = \
self.mixer(qvals, batch["state"][:, :-1], is_v=True)
ans_adv, _, _ = self.mixer(qvals, batch["state"][:, :-1], actions=actions_onehot,
max_q_i=max_action_qvals, is_v=False)
chosen_action_qvals = ans_chosen + ans_adv
# for classifier
grad = th.autograd.grad(chosen_action_qvals.sum(), qvals, create_graph=True)[0]
return grad
def _update_targets(self):
self.target_mac.load_state(self.mac)
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
self.logger.console_logger.info("Updated target network")
def cuda(self):
#self.classifier.cuda()
self.mac.cuda()
self.target_mac.cuda()
if self.mixer is not None:
self.mixer.cuda()
self.target_mixer.cuda()
def save_models(self, path):
self.mac.save_models(path)
if self.mixer is not None:
th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
if self.mixer is not None:
self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
self.target_mixer.load_state_dict(th.load("{}/mixer.th".format(path),
map_location=lambda storage, loc: storage))
| 19,168 | 48.5323 | 140 | py |
CADP | CADP-main/CADP-VD/src/learners/q_learner.py | import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.vdn import VDNMixer
from modules.mixers.qmix import QMixer
import torch as th
from torch.optim import RMSprop
class QLearner:
    """Standard PyMARL Q-learner: trains a multi-agent controller (``mac``)
    with an optional VDN/QMIX mixer using masked 1-step Q-learning."""
    def __init__(self, mac, scheme, logger, args):
        # ``scheme`` is accepted for learner-interface compatibility but unused here.
        self.args = args
        self.mac = mac
        self.logger = logger
        self.params = list(mac.parameters())
        self.last_target_update_episode = 0
        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            # NOTE(review): target_mixer only exists when args.mixer is not None.
            self.target_mixer = copy.deepcopy(self.mixer)
        self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)
        # Initialised so the first call to train() always logs.
        self.log_stats_t = -self.args.learner_log_interval - 1
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        """Run one gradient step of (double) Q-learning on ``batch`` and
        periodically hard-update the target networks."""
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]
        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim
        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)
        # We don't need the first timesteps Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[1:], dim=1)  # Concat across time
        # Mask out unavailable actions
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999
        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]
        # Mix per-agent Qs into a joint Q conditioned on the global state.
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])
        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
        # Td-error
        td_error = (chosen_action_qvals - targets.detach())
        mask = mask.expand_as(td_error)
        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask
        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error ** 2).sum() / mask.sum()
        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()
        if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm.item(), t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
            self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
    def _update_targets(self):
        """Hard-copy live mac/mixer weights into the target networks."""
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")
    def cuda(self):
        """Move all live and target networks to the GPU."""
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
    def save_models(self, path):
        """Save mac, mixer, and optimiser state under ``path``."""
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
    def load_models(self, path):
        """Restore mac/mixer/optimiser state from ``path``."""
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
            self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
| 5,998 | 40.659722 | 132 | py |
lbox-open | lbox-open-main/lbox_open/pipeline/lbox_open_pipeline.py | # LBox Open
# Copyright (c) 2022-present LBox Co. Ltd.
# CC BY-NC 4.0
from pathlib import Path
import pytorch_lightning as pl
import torch
from lbox_open import openprompt_wrapper
from lbox_open.data_module.data_precedent import PrecedentDataModule
from lbox_open.model.generative_baseline_model import GenerativeParser
from lbox_open.template import prompt_generation_utils
from lbox_open.utils import general_utils as gu
def get_data_module(
    cfg,
    plm_tokenizer,
    TokenizerWrapper,
    input_templates,
):
    """Build the datamodule for the configured task.

    When ``cfg.data.use_local_data`` is set, the train/valid (and optionally
    test) JSONL splits are loaded from disk; otherwise ``raw_data`` is left
    as ``None`` and the datamodule is expected to fetch data itself.

    Raises:
        NotImplementedError: if ``cfg.model.task`` is not one of the
            precedent-based tasks handled by ``PrecedentDataModule``.
    """
    raw_data = None
    if cfg.data.use_local_data:
        raw_data = {
            split: gu.load_jsonl(path, None)
            for split, path in (
                ("train", cfg.data.path_train),
                ("valid", cfg.data.path_valid),
            )
        }
        # The test split is optional.
        if cfg.data.path_test is not None:
            raw_data["test"] = gu.load_jsonl(cfg.data.path_test, None)
    supported_tasks = (
        "ljp_civil",
        "ljp_criminal",
        "casename_classification",
        "statute_classification",
        "summarization",
    )
    if cfg.model.task not in supported_tasks:
        raise NotImplementedError
    return PrecedentDataModule(
        cfg,
        plm_tokenizer,
        TokenizerWrapper,
        input_templates,
        raw_data,
    )
def get_plm(cfg):
    """Load the pretrained LM plus its tokenizer, config, and wrapper class.

    Returns:
        tuple: (plm, plm_tokenizer, plm_model_config, TokenizerWrapperClass)
    """
    plm_cfg = cfg.model.plm
    plm, plm_tokenizer, plm_model_config, TokenizerWrapperClass = \
        openprompt_wrapper.load_plm_wrapper(
            model_name=plm_cfg.name,
            model_path=plm_cfg.path,
            revision=plm_cfg.revision,
            do_not_load_pretrained_weight=cfg.train.weight.do_not_load_pretrained_weight,
            use_custom_loader=True,
        )
    return plm, plm_tokenizer, plm_model_config, TokenizerWrapperClass
def gen_input_templates(cfg, plm, plm_tokenizer):
    """Create one prompt template per target parse.

    Only the keys of ``cfg.model.target_parses_dict`` are consumed; the
    per-parse sub-parse values are not needed at template-generation time.
    """
    return {
        target_parse: prompt_generation_utils.gen_template(
            cfg.model.task,
            target_parse,
            cfg.model.input_template_type,
            plm,
            plm_tokenizer,
        )
        for target_parse in cfg.model.target_parses_dict
    }
def get_model(cfg, plm, plm_tokenizer, input_templates):
    """Instantiate the parser model and, if configured, restore trained weights.

    Raises:
        NotImplementedError: for non-generative model types, or when loading
            weights for a task outside the supported set.
    """
    if cfg.model.model_type != "generative":
        raise NotImplementedError
    model = GenerativeParser(cfg, plm, plm_tokenizer, input_templates)
    if cfg.train.weight.trained:
        path_load = Path(cfg.train.weight.path)
        supported_tasks = (
            "ljp_civil",
            "ljp_criminal",
            "casename_classification",
            "statute_classification",
            "summarization",
        )
        if cfg.model.task not in supported_tasks:
            raise NotImplementedError
        ckpt = torch.load(path_load)
        # Lightning checkpoints nest the weights under "state_dict".
        state_dict = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
        # strict=False: tolerate missing/extra keys from older checkpoints.
        model.load_state_dict(state_dict, strict=False)
        print(f"The model weights are loaded from {path_load}.")
    return model
def get_trainer(cfg):
    """Assemble the PyTorch-Lightning trainer: logger, global seed, checkpoint
    (and optional SWA) callbacks, and all run-control knobs from ``cfg``."""
    from pytorch_lightning import loggers as pl_loggers
    train_cfg = cfg.train
    model_cfg = cfg.model
    tb_logger = pl_loggers.TensorBoardLogger(Path(train_cfg.log_dir) / cfg.name)
    pl.utilities.seed.seed_everything(seed=train_cfg.seed, workers=False)
    # Keep only the single best checkpoint w.r.t. the validation metric.
    checkpoint_cb = pl.callbacks.ModelCheckpoint(
        monitor=f"{cfg.train.validation_metric}_{cfg.train.validation_sub_param.method}",
        dirpath=gu.get_model_saving_path(train_cfg.weight.save_path_dir, cfg.name),
        save_top_k=1,
        mode="max",
        save_last=False,
    )
    callbacks = [checkpoint_cb]
    if train_cfg.optim.swa.use:
        callbacks.append(
            pl.callbacks.StochasticWeightAveraging(
                swa_epoch_start=train_cfg.optim.swa.swa_epoch_start,
                swa_lrs=train_cfg.optim.swa.lr,
                annealing_epochs=train_cfg.optim.swa.annealing_epochs,
            )
        )
    return pl.Trainer(
        logger=tb_logger,
        accelerator=train_cfg.accelerator,
        strategy=train_cfg.strategy,
        max_epochs=train_cfg.max_epochs,
        # Mixed precision only makes sense on GPU.
        precision=model_cfg.precision if torch.cuda.is_available() else 32,
        num_sanity_val_steps=train_cfg.num_sanity_val_steps,
        gpus=torch.cuda.device_count(),
        check_val_every_n_epoch=train_cfg.check_val_every_n_epoch,
        gradient_clip_val=train_cfg.optim.gradient_clip_val,
        gradient_clip_algorithm=train_cfg.optim.gradient_clip_algorithm,
        accumulate_grad_batches=train_cfg.accumulate_grad_batches,
        val_check_interval=train_cfg.val_check_interval,
        profiler=train_cfg.profiler,
        fast_dev_run=train_cfg.fast_dev_run,
        callbacks=callbacks,
        limit_train_batches=train_cfg.get("limit_train_batches", 1.0),
        limit_val_batches=train_cfg.get("limit_val_batches", 1.0),
    )
def prepare_modules(mode, cfg):
    """Wire up everything needed for a run: PLM, templates, data, model, trainer.

    ``mode`` is accepted for caller compatibility but not used here.

    Returns:
        tuple: (data_module, model, trainer)
    """
    plm, plm_tokenizer, plm_model_config, TokenizerWrapperClass = get_plm(cfg)
    input_templates = gen_input_templates(cfg, plm, plm_tokenizer)
    data_module = get_data_module(
        cfg, plm_tokenizer, TokenizerWrapperClass, input_templates
    )
    model = get_model(cfg, plm, plm_tokenizer, input_templates)
    trainer = get_trainer(cfg)
    return data_module, model, trainer
| 5,491 | 28.212766 | 93 | py |
lbox-open | lbox-open-main/lbox_open/openprompt_wrapper/pipeline_base.py | # Wonseok add PromptForGenerationCustom by copying and tweak OpenPrompt-v1.0.0 PromptForGeneration class.
# We modify two things: (1) L343--L345 for the compatibility with transformesr 4.19.4, and
# (2) recover "confidences" which was available in the initial version of OpenPrompt
from copy import deepcopy
from typing import Any, Dict, Optional, Union
import numpy as np
import torch
from openprompt.data_utils import InputFeatures
from openprompt.pipeline_base import PromptForGeneration, PromptModel
from openprompt.prompt_base import Template, Verbalizer
from openprompt.utils import round_list, signature
from openprompt.utils.logging import logger
from torch import nn
from transformers.generation_utils import GenerationMixin
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils.dummy_pt_objects import PreTrainedModel
from yacs.config import CfgNode
class PromptForGenerationCustom(torch.nn.Module, GenerationMixin):
r"""``PromptModel`` with generation loss caculation and generation utils integrated.
Args:
plm (:obj:`PretrainedModel`): A pre-traiend model you decide to use for generation, e.g. GPT.
template (:obj:`Template`): A ``Template`` object you use to wrap the input text for classification, e.g. ``PrefixTemplate``.
tokenizer (:obj:`Tokenizer`): A ``Tokenizer`` of the current model.
gen_config (:obj:`CfgNode`): The generation configs to pass into `GenerationMixin.generate <https://huggingface.co/transformers/_modules/transformers/generation_utils.html#GenerationMixin.generate>`_
freeze_plm (:obj:`bool`): whether or not to freeze the pretrained language model
plm_eval_mode (:obj:`bool`): this is a stronger freezing mode than freeze_plm, i.e. the dropout of the model is turned off. No matter whether the other part is set to train.
"""
def __init__(
self,
plm: PreTrainedModel,
template: Template,
freeze_plm: bool = False,
plm_eval_mode: bool = False,
gen_config: Optional[CfgNode] = None,
tokenizer: Optional[PreTrainedTokenizer] = None,
):
super().__init__()
self.freeze_plm = freeze_plm
if tokenizer is None:
assert (
template.tokenizer is not None
), "Tokenizer can't be set from input args or template"
self.tokenizer = template.tokenizer
else:
self.tokenizer = tokenizer
self.prompt_model = PromptModel(plm, template, freeze_plm, plm_eval_mode)
self.loss_fct = nn.CrossEntropyLoss(reduction="none")
self.config = plm.config
if gen_config:
for key in gen_config:
setattr(self.config, key, gen_config[key])
self.in_generation_function = False
self.main_input_name = (
self.prompt_model.main_input_name
) # for transformers 4.17.0 and higher.
    @property
    def plm(self):
        """The wrapped pretrained language model."""
        return self.prompt_model.plm

    @property
    def template(self):
        """The prompt template held by the inner prompt model."""
        return self.prompt_model.template

    @property
    def device(self):
        """Device of the underlying plm."""
        return self.plm.device
def shift_logits_and_labels(self, logits, loss_ids, reference_ids):
r"""
Left shift the label, and make label of the positions that are
not loss position to -100, which is the ignore index in pytorch's
loss function.
Args:
logits (:obj:`torch.Tensor`):
batch (:obj:`InputFeatures`): The input features of batchified data sequences.
Returns:
shift_logits (:obj:`torch.Tensor`):
shift_input_ids (:obj:`List[int]`):
"""
shift_logits = logits[..., :-1, :].contiguous()
shift_loss_ids = loss_ids[..., 1:].contiguous()
shift_input_ids = reference_ids[..., 1:].contiguous()
shift_input_ids = torch.where(shift_loss_ids > 0, shift_input_ids, -100)
return shift_logits, shift_input_ids
def forward(self, *args, **kwargs):
r"""In generation process, it will use the plm's forward function.
This is because, in the first step we will directly call the process_batch function to
generate initial input with the template, after that the all template
have been processed into the past_key_value,
then we can use the normal generation function.
In learning process, the forward is linked to ``_forward`` functions.
in which the loss will be calculated for all the positions in the same time.
"""
if self.in_generation_function:
return self.plm.forward(*args, **kwargs)
else:
return self._forward(*args, **kwargs)
def _forward(self, batch: Union[Dict, InputFeatures]) -> torch.Tensor:
r"""
This is the forward method of the training of generation in prompt-learning framework.
Args:
batch (:obj:`Union[Dict, InputFeatures]`): The input features of batchified data sequences.
Returns:
loss(:obj:torch.Tensor): The loss of the current generation procedure.
"""
if self.config.is_encoder_decoder:
reference_ids = batch["decoder_input_ids"]
else:
reference_ids = batch[
"input_ids"
] # in case in some template, these field is dropped
outputs = self.prompt_model(batch)
logits = outputs.logits
logits, labels = self.shift_logits_and_labels(
logits, batch["loss_ids"], reference_ids
)
batch_size, seq_len, vocab_size = logits.shape
loss = self.loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
loss = loss.view(batch_size, -1).sum(dim=-1) # TODO support more objectives
loss = loss.mean()
return loss
    def generate(
        self,
        batch: Union[Dict, InputFeatures],
        verbose: Optional[bool] = False,
        **generation_kwargs,
    ):
        r"""This function wraps the generate() methods in parent class ``GenerationMixin``.
        Forward uses the ``PretrainedModel``'s forward method.
        generation_kwargs include all the parameters that are passed in to
        ``transformers.generation_util.GenerationMixin.generate``

        Args:
            batch (:obj:`Union[Dict, InputFeatures]`): The input features of batchified data sequences.
            verbose (:obj:`Optional[bool]`): Set to true to verbose the generated sentence.

        Returns:
            output_sequences (:obj:`List[torch.Tensor]`): The raw sequences generated by the generation model.
            generated_sentences (:obj:`List[torch.Tensor]`): The generated sentences that have been post-processed.
            confidences: Per-sentence confidence values derived from the generation scores
                (see ``post_processing_with_confidence``).
        """
        # Keep only kwargs that GenerationMixin.generate actually accepts.
        input_generation_kwargs = {
            key: value
            for key, value in generation_kwargs.items()
            if key in signature(GenerationMixin.generate).args
        }
        if self.config.is_encoder_decoder:
            # Encoder-decoder: truncate decoder input at the first loss position
            # and generate the rest. NOTE(review): mutates batch["decoder_input_ids"] in place.
            loss_ids_start = batch["loss_ids"].argmax(dim=-1)
            assert (
                loss_ids_start.min() == loss_ids_start.max()
            ), "The generation start from different position in a batch."
            batch["decoder_input_ids"] = batch["decoder_input_ids"][
                :, : loss_ids_start.min() + 1
            ]
            input_length = batch["decoder_input_ids"].size(1)
            batch_size = batch["decoder_input_ids"].size(0)
            self.generate_ith_token = 0
            # Flag makes self.forward route straight to the plm during decoding.
            self.in_generation_function = True
            output_dict = super().generate(
                **batch,
                **input_generation_kwargs,
                pad_token_id=self.tokenizer.pad_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
                output_scores=True,
                return_dict_in_generate=True,
            )
            output_sequences = output_dict["sequences"]
            output_scores = output_dict[
                "scores"
            ]  # (L tuples, (B batches, N tokens)). each tuple = (B,
            self.in_generation_function = False
            output_sequences = output_sequences.cpu().tolist()
            generated_sentences, confidences = self.post_processing_with_confidence(
                output_sequences=output_sequences,
                input_lengths=input_length,
                output_scores=output_scores,
            )
            # output_sequences = super().generate(**batch, **input_generation_kwargs, pad_token_id=self.tokenizer.pad_token_id, eos_token_id=self.tokenizer.eos_token_id)
            # self.in_generation_function = False
            # output_sequences = output_sequences.cpu().tolist()
            # generated_sentences = self.post_processing(output_sequences=output_sequences, input_lengths=input_length)
        else:
            input_length = batch["input_ids"].size(1)
            batch_size = batch["input_ids"].size(0)
            # Currently huggingface transformers only support single sample generation, or padding to the left (instead of the right).
            # because it will only extract the last position of the output
            # generate one_by_one
            if "input_ids_len" in batch:
                input_real_lens = batch["input_ids_len"]
            else:
                # Infer unpadded lengths by counting non-pad tokens.
                input_real_lens = torch.sum(
                    (batch["input_ids"] != self.tokenizer.pad_token_id).to(torch.int),
                    dim=-1,
                )
            output_sequences = []
            output_scores = []
            for instance_id in range(batch_size):
                # remove the pad token
                instance = {
                    key: batch[key][instance_id : instance_id + 1][
                        :, : input_real_lens[instance_id]
                    ]
                    for key in batch
                    if isinstance(batch[key], torch.Tensor)
                    and batch[key].shape[:2] == torch.Size([batch_size, input_length])
                }
                self.generate_ith_token = 0
                self.in_generation_function = True
                output_dict = super().generate(
                    **instance,
                    **input_generation_kwargs,
                    pad_token_id=self.tokenizer.pad_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                    output_scores=True,
                    return_dict_in_generate=True,
                )
                output_sequence = output_dict["sequences"]
                self.in_generation_function = False
                output_sequences.extend(
                    output_sequence.cpu().tolist()
                )  # TODO: to support generate multiple sentence
                output_score = output_dict["scores"]
                output_scores.append(output_score)
            generated_sentences, confidences = self.post_processing_with_confidence(
                output_sequences=output_sequences,
                input_lengths=input_real_lens.cpu().tolist(),
                output_scores=output_scores,
            )
            # for instance_id in range(batch_size):
            #     # remove the pad token
            #     instance = {key: batch[key][instance_id:instance_id+1][:,:input_real_lens[instance_id]] for key in batch if isinstance(batch[key], torch.Tensor) and batch[key].shape[:2]==torch.Size([batch_size, input_length])}
            #     self.generate_ith_token = 0
            #     self.in_generation_function = True
            #     output_sequence = super().generate(**instance, **input_generation_kwargs, pad_token_id=self.tokenizer.pad_token_id, eos_token_id=self.tokenizer.eos_token_id)
            #     self.in_generation_function = False
            #     output_sequences.extend(output_sequence.cpu().tolist()) # TODO: to support generate multiple sentence
            #     generated_sentences = self.post_processing(output_sequences=output_sequences, input_lengths=input_real_lens.cpu().tolist())
        if verbose:
            logger.info(f"Generated:{generated_sentences}")
        return output_sequences, generated_sentences, confidences
def post_processing(self, output_sequences, input_lengths):
r"""
Post-process the sequences generated by the generation model.
Args:
output_sequences (:obj:`torch.Tensor`): The raw sequences generated by the generation model.
input_lengths (:obj:`int` or `list`): The length(s) of the input sequence.
Returns:
:obj:`List`: The generated sentences that have been post-processed.
"""
generated_sentences = []
if type(input_lengths) == int:
input_lengths = [input_lengths] * len(output_sequences)
for sent_id, seq in enumerate(output_sequences):
seq = seq[input_lengths[sent_id] :]
if (
hasattr(self.tokenizer, "eos_token")
and self.tokenizer.eos_token is not None
):
text_output = self.tokenizer.decode(
seq, clean_up_tokenization_spaces=True, skip_special_tokens=False
)
idx = text_output.find(self.tokenizer.eos_token)
if idx >= 0:
text_output = text_output[:idx]
else:
text_output = self.tokenizer.decode(
seq, clean_up_tokenization_spaces=True, skip_special_tokens=True
)
text_output = text_output.strip()
generated_sentences.append(text_output)
return generated_sentences
    def prepare_inputs_for_generation(
        self, input_ids: Optional[torch.Tensor] = None, **model_kwargs
    ):
        r"""Wrap the ``prepare_inputs_for_generation`` function of huggingface transformers.

        On the very first decoding step (decoder-only setting, no cached
        ``encoder_outputs``), the template-wrapped input must be rebuilt from
        scratch via ``prompt_model.prepare_model_inputs``.  For all later steps
        the inner plm's own ``prepare_inputs_for_generation`` is used.

        ``model_kwargs`` includes all the arguments passed in the ``batch``
        (``InputFeatures``) except ``input_ids``, as long as they do not
        conflict with keywords in ``generation_kwargs``.

        Args:
            input_ids(:obj:`torch.Tensor`): Indices of input sequence tokens in the vocabulary.
        """
        if (
            self.generate_ith_token == 0 and "encoder_outputs" not in model_kwargs
        ):  # generating the first token in decoder only setting.
            batch = InputFeatures(input_ids=input_ids, **model_kwargs)
            model_inputs = self.prompt_model.prepare_model_inputs(batch)
            # check the compatibility for more models. Having checked gpt2, T5
        else:  # subsequent generation steps can use the default plm behavior
            model_inputs = self.plm.prepare_inputs_for_generation(
                input_ids, **model_kwargs
            )
        self.last_model_inputs = model_inputs  # to update the model_kwargs in _update_model_kwargs_for_generation, in-place operation.
        return model_inputs
    def _update_model_kwargs_for_generation(
        self, outputs, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False
    ) -> Dict[str, Any]:
        r"""The parent class's ``_update_model_kwargs_for_generation`` method will
        add ``past_key_values`` to model_kwargs, and update ``token_type_ids``, and ``attention_mask_ids``.

        In case some of the model_kwargs are modified in the prepare_inputs_for_generation function
        and should be used as the subsequent model_kwargs, we update these kwargs before the parent class
        call.
        Other updates should be added here after the parent's function call.

        Args:
            outputs (:obj:`torch.Tensor`): Model outputs of the current generation step.
            is_encoder_decoder (:obj:`bool`, defaults to False): Whether the plm is an encoder-decoder.
        """
        if self.generate_ith_token == 0:
            # Sync kwargs the template may have modified on the first step
            # (e.g. the attention mask) so later steps see adjusted values.
            for key in self.last_model_inputs:
                if key in model_kwargs:
                    model_kwargs[key] = self.last_model_inputs[key]
        model_kwargs = super(
            PromptForGeneration, PromptForGeneration
        )._update_model_kwargs_for_generation(
            outputs=outputs,
            model_kwargs=model_kwargs,
            is_encoder_decoder=is_encoder_decoder,
        )
        self.generate_ith_token += 1
        return model_kwargs
    def _prepare_encoder_decoder_kwargs_for_generation(
        self,
        input_ids: torch.LongTensor,
        model_kwargs,
        model_input_name: Optional[str] = None,
    ) -> Dict[str, Any]:
        r"""This function resembles the one in huggingface's ``GenerationMixin``.

        The difference is that the batch is first routed through the prompt
        template (``prompt_model.prepare_model_inputs``) before the encoder
        runs, and template-modified kwargs are synced back into ``model_kwargs``.

        Args:
            input_ids (:obj:`torch.LongTensor`): The input ids fed to the encoder.
            model_kwargs: Extra model kwargs; ``encoder_outputs`` is added in place.
            model_input_name (:obj:`str`, optional): Name of the main input field;
                falls back to ``self.main_input_name``.
        """
        if "encoder_outputs" not in model_kwargs:
            # retrieve encoder hidden states
            encoder = self.plm.get_encoder()
            # Drop decoder-/cross-attention-specific kwargs for the encoder pass.
            encoder_kwargs = {
                argument: value
                for argument, value in model_kwargs.items()
                if not (
                    argument.startswith("decoder_") or argument.startswith("cross_attn")
                )
            }
            model_input_name = (
                model_input_name
                if model_input_name is not None
                else self.main_input_name
            )
            batch = {model_input_name: input_ids, **encoder_kwargs}
            model_inputs = self.prompt_model.prepare_model_inputs(
                batch
            )  # This line differs from the original code base: we must process the input
            # with our template, then pass it into the model.
            # some of the arguments may have been changed by the template,
            # e.g. the attention mask. Here we update the model_kwargs
            for key in model_kwargs:
                if key in model_inputs:
                    model_kwargs[key] = model_inputs[key]
            # use_cache is forced off for the standalone encoder pass
            model_inputs_with_use_cache_false = deepcopy(model_inputs)
            model_inputs_with_use_cache_false["use_cache"] = False
            model_kwargs["encoder_outputs"] = encoder(
                return_dict=True, **model_inputs_with_use_cache_false
            )
        return model_kwargs
## We comment this code since it conflict with [OpenDelta](https://github.com/thunlp/OpenDelta)
# def state_dict(self, *args, **kwargs):
# """ Save the model using template and plm's save methods. """
# _state_dict = {}
# if not self.prompt_model.freeze_plm:
# _state_dict['plm'] = self.plm.state_dict(*args, **kwargs)
# _state_dict['template'] = self.template.state_dict(*args, **kwargs)
# return _state_dict
# def load_state_dict(self, state_dict, *args, **kwargs):
# """ Load the model using template and plm's load methods. """
# if 'plm' in state_dict and not self.prompt_model.freeze_plm:
# self.plm.load_state_dict(state_dict['plm'], *args, **kwargs)
# self.template.load_state_dict(state_dict['template'], *args, **kwargs)
    def _reorder_cache(self, past, beam_idx):
        r"""Delegate beam-search cache reordering to the plm's default ``_reorder_cache``."""
        return self.plm._reorder_cache(past, beam_idx)
def parallelize(self, device_map=None):
r"""Parallelize the model across device"""
if hasattr(self.plm, "parallelize"):
self.plm.parallelize(device_map)
self.device_map = self.plm.device_map
else:
raise NotImplementedError(
"parallelize method was not implemented for this plm."
)
def deparallelize(self):
r"""Deparallelize the model across device"""
if hasattr(self.plm, "deparallelize"):
self.plm.deparallelize()
self.device_map = None
else:
raise NotImplementedError(
"parallelize method was not implemented for this plm."
)
    def post_processing_with_confidence(
        self, output_sequences, input_lengths, output_scores
    ):
        r"""
        Post-process the sequences generated by the generation model and
        compute a per-sequence confidence (mean softmax probability of the
        sampled tokens, EOS excluded).

        NOTE(review): unlike ``post_processing``, this assumes
        ``self.tokenizer.eos_token`` exists — confirm for tokenizers without
        an EOS token.  ``confidences_list`` (per-token probabilities) is built
        but not returned; presumably kept for debugging.

        Args:
            output_sequences (:obj:`torch.Tensor`): The raw sequences generated by the generation model.
            input_lengths (:obj:`int` or `list`): The length(s) of the input sequence.
            output_scores: Generation scores; layout differs by model type (see below).
        Returns:
            :obj:`List`: The generated sentences that have been post-processed.
        """
        generated_sentences = []
        if type(input_lengths) == int:
            input_lengths = [input_lengths] * len(output_sequences)
        confidences = []
        confidences_list = []
        for sent_id, seq in enumerate(output_sequences):
            # Strip the prompt tokens; keep only the generated continuation.
            seq = seq[input_lengths[sent_id] :]
            if self.config.is_encoder_decoder:
                # [T, B, Ntoken]
                assert len(seq) == len(
                    output_scores
                )  # (T, B, Ntoken), T is a length of sequence.
            else:
                # [B, T, Ntoken]
                assert len(seq) == len(output_scores[sent_id])
            text_output = self.tokenizer.decode(seq, clean_up_tokenization_spaces=True)
            idx = text_output.find(self.tokenizer.eos_token)
            if idx >= 0:
                text_output = text_output[:idx]
            text_output = text_output.strip()
            generated_sentences.append(text_output)

            # Confidence is computed only over tokens before the first EOS.
            if self.tokenizer.eos_token_id in seq:
                idx_token = seq.index(self.tokenizer.eos_token_id)
            else:
                idx_token = -1

            if idx_token >= 0:
                seq_trimmed = seq[:idx_token]
            else:
                seq_trimmed = seq

            confidence_list = []
            for i_tok, tok_id in enumerate(seq_trimmed):
                if self.config.is_encoder_decoder:
                    # [T, B, Ntoken]
                    scores = output_scores[i_tok]  # [B, Ntok]
                    prob = scores[sent_id, :].softmax(-1)
                else:
                    # [B, T, Ntoken]
                    scores = output_scores[sent_id]  # [L, Ntok]
                    prob = scores[i_tok].softmax(-1)[0]

                confidence_list.append(prob[tok_id].item())
            confidences_list.append(confidence_list)
            # NOTE(review): np.mean([]) is NaN when the first token is EOS.
            confidences.append(np.mean(confidence_list))

        return generated_sentences, confidences
| 22,706 | 44.143141 | 228 | py |
lbox-open | lbox-open-main/lbox_open/utils/general_utils.py | # LBox Open
# Copyright (c) 2022-present LBox Co. Ltd.
# CC BY-NC 4.0
import functools
import json
import os
import pickle
import subprocess
import time
from pathlib import Path

from tqdm import tqdm
def stop_flag(idx, toy_size):
    """Return True once the 0-based position ``idx`` has yielded ``toy_size`` items.

    A ``toy_size`` of ``None`` disables early stopping (always False).
    """
    if toy_size is None:
        return False
    # idx is 0-based, so idx + 1 items have been consumed so far.
    return toy_size <= idx + 1
def save_pkl(path_save, data):
    """Serialize ``data`` to ``path_save`` with pickle."""
    with open(path_save, "wb") as fout:
        pickle.dump(data, fout)
def load_pkl(path_load):
    """Deserialize and return the pickled object stored at ``path_load``."""
    with open(path_load, "rb") as fin:
        return pickle.load(fin)
def save_json(path_save, data):
    """Write ``data`` as JSON to ``path_save``, keeping non-ASCII unescaped."""
    with open(path_save, "w") as fout:
        json.dump(data, fout, ensure_ascii=False)
def load_json(fpath):
    """Read and return the JSON document stored at ``fpath``."""
    with open(fpath) as fin:
        return json.load(fin)
def save_jsonl(path_save, data):
    """Write ``data`` as JSON Lines: one JSON document per line.

    Args:
        path_save: Destination file path.
        data: Iterable of JSON-serializable records.
    """
    with open(path_save, "w") as fout:
        for record in data:
            # write(), not writelines(): each record is a single string
            # (writelines would iterate the string character by character).
            fout.write(json.dumps(record, ensure_ascii=False))
            fout.write("\n")
def load_jsonl(fpath, toy_size=None):
    """Load a JSON Lines file, skipping (with a warning) unparsable lines.

    Args:
        fpath: Path of the ``.jsonl`` file.
        toy_size: If given, stop after reading this many lines.

    Returns:
        list: The successfully parsed records.
    """
    data = []
    with open(fpath) as f:
        for i, line in tqdm(enumerate(f)):
            try:
                data1 = json.loads(line)
            except Exception:
                # Fixed: was a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit) and misspelled "skip".
                print(f"{i}th sample failed.")
                print("We will skip this!")
                print(line)
                data1 = None
            if data1 is not None:
                data.append(data1)
            if stop_flag(i, toy_size):
                break
    return data
def my_timeit(func):
    """Decorator that prints the wall-clock duration of each call to ``func``.

    The wrapped function's return value is passed through unchanged.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapped_func(*args, **kwargs):
        st = time.time()
        results = func(*args, **kwargs)
        ed = time.time()
        # Fixed message typo: "taks" -> "takes".
        print(f"func {func.__name__} takes {ed - st} sec.")
        return results

    return wrapped_func
def flatten_list(list_):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Non-list elements (including tuples) are kept as-is.
    """
    flat = []
    for item in list_:
        if isinstance(item, list):
            flat.extend(flatten_list(item))
        else:
            flat.append(item)
    return flat
def load_cfg(path_cfg):
    """Load a YAML config as a Munch (attribute-accessible dict).

    The config's ``name`` field is set to the file's basename.
    """
    import munch
    import yaml

    with open(path_cfg) as stream:
        raw_cfg = yaml.full_load(stream)
    cfg = munch.munchify(raw_cfg)
    cfg.name = str(path_cfg).split("/")[-1]
    return cfg
def get_model_saving_path(save_dir, cfg_name):
    """Return ``save_dir / cfg_name`` as a :class:`pathlib.Path`."""
    return Path(save_dir) / cfg_name
def download_url(path_save, url):
    """Download ``url`` to ``path_save`` using the external ``wget`` binary.

    Blocks until the download process exits.  NOTE(review): the exit status
    is not checked, so failures are silent — consider raising on non-zero.
    """
    proc = subprocess.Popen(["wget", "-q", "-O", str(path_save), url])
    proc.wait()  # equivalent to os.waitpid(proc.pid, 0), but portable
def get_local_rank():
    """Return this process's local rank (``LOCAL_RANK`` env var, default 0).

    PyTorch Lightning exports the local rank to the ``LOCAL_RANK``
    environment variable (see ``rank_zero_only``).
    """
    return int(os.environ.get("LOCAL_RANK", 0))
| 2,569 | 20.239669 | 75 | py |
lbox-open | lbox-open-main/lbox_open/model/model_optimizer.py | # LBox Open
# Copyright (c) 2022-present LBox Co. Ltd.
# CC BY-NC 4.0
import torch
import transformers
# Maps optimizer names (as written in config files) to torch optimizer classes.
map_optimizers_name_to_type = {
    "sgd": torch.optim.SGD,
    "adam": torch.optim.Adam,
    "adamw": torch.optim.AdamW,
}
def get_optimizer(mparam, tparam, model):
    """Build the optimizer over the prompt (and optionally plm) parameters.

    Parameter groups:
      * plm parameters (only when ``mparam.plm.freeze`` is False), with the
        plm-specific learning rate;
      * per-target-parse template parameters, excluding any named
        ``raw_embedding`` parameters.

    Args:
        mparam: Model section of the config (``cfg.model``).
        tparam: Training section of the config (``cfg.train``).
        model: The ``GenerativeParser``-style Lightning module.

    Returns:
        A torch optimizer of the configured type.
    """
    # todo: plm training part
    # NOTE(review): _lr_type / lr_param are fetched but never used here.
    _lr_type, lr_param = get_lr_type_and_param(tparam, "prompt")

    # prompt
    optimizer_type = map_optimizers_name_to_type[tparam.optim.prompt.optimizer_type]

    if model.task in [
        "ljp_civil",
        "ljp_criminal",
        "casename_classification",
        "statute_classification",
        "summarization",
    ]:
        optimizer_grouped_parameters = []
        if not mparam.plm.freeze:
            optimizer_grouped_parameters.append(
                {
                    "params": list(
                        filter(lambda p: p.requires_grad, model.plm.parameters())
                    ),
                    "lr": tparam.optim.plm.lr,
                }
            )

        for target_parse, _target_sub_parses in model.target_parses_dict.items():
            # Template (soft-prompt) parameters; raw_embedding is excluded.
            optimizer_grouped_parameters.append(
                {
                    "params": [
                        p
                        for n, p in model.prompt_models[
                            target_parse
                        ].template.named_parameters()
                        if "raw_embedding" not in n
                    ]
                }
            )

        optimizer = optimizer_type(
            optimizer_grouped_parameters, lr=tparam.optim.prompt.lr, weight_decay=0
        )

    else:
        raise NotImplementedError
    return optimizer
def get_lr_type_and_param(tparam, key):
    """Return the scheduler type and its parameter dict for optimizer ``key``."""
    optim_cfg = tparam.optim[key]
    lr_type = optim_cfg.lr_scheduler_type
    return lr_type, optim_cfg.lr_scheduler_param[lr_type]
def gen_lr_scheduler(tparam, optimizer, lr_type, lr_param):
    """Instantiate a learning-rate scheduler of the configured ``lr_type``.

    Supported types: ``constant``, ``multi_step_lr``, ``warmup_constant``,
    ``cos_with_hard_restarts``, ``linear``.

    Args:
        tparam: Training config (used only by the ``linear`` branch).
        optimizer: Optimizer whose learning rate will be scheduled.
        lr_type: One of the supported scheduler names.
        lr_param: Scheduler-specific parameters from the config.
    """
    if lr_type == "constant":
        # NOTE(review): the lambda list assumes exactly two parameter groups;
        # LambdaLR requires one lambda per group — confirm against get_optimizer.
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lr_lambda=[lambda epoch: 1, lambda epoch: 1], verbose=True
        )
    elif lr_type == "multi_step_lr":
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=lr_param["milestones"],
            gamma=lr_param["gamma"],
            verbose=True,
        )
    elif lr_type == "warmup_constant":
        lr_scheduler = transformers.get_constant_schedule_with_warmup(
            optimizer, num_warmup_steps=lr_param.num_warmup_steps
        )
    elif lr_type == "cos_with_hard_restarts":
        lr_scheduler = transformers.get_cosine_with_hard_restarts_schedule_with_warmup(
            optimizer,
            num_warmup_steps=lr_param.num_warmup_steps,
            num_training_steps=lr_param.num_training_steps,
            num_cycles=lr_param.num_cycles,
        )
    elif lr_type == "linear":
        # NOTE(review): num_training_steps comes from tparam.max_epochs here,
        # not from lr_param — verify this matches the epoch-interval scheduling.
        lr_scheduler = transformers.get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=lr_param.num_warmup_steps,
            num_training_steps=tparam.max_epochs,
        )
    else:
        raise NotImplementedError
    return lr_scheduler
def get_lr_dict(optimizer, tparam, key):
    """Build the PyTorch Lightning ``lr_scheduler`` config dict for ``optimizer``.

    The scheduler steps once per epoch and monitors ``val_loss``.
    """
    lr_type, lr_param = get_lr_type_and_param(tparam, key)
    scheduler = gen_lr_scheduler(tparam, optimizer, lr_type, lr_param)
    return {
        "scheduler": scheduler,
        "interval": "epoch",
        "frequency": 1,
        "monitor": "val_loss",
        "strict": True,
        "name": None,
    }
| 3,533 | 28.949153 | 87 | py |
lbox-open | lbox-open-main/lbox_open/model/generative_baseline_model.py | # LBox Open
# Copyright (c) 2022-present LBox Co. Ltd.
# CC BY-NC 4.0
import os
from collections import defaultdict
from itertools import zip_longest
from pathlib import Path
from pprint import pprint
import datasets
import pytorch_lightning as pl
import torch
from openprompt.utils.metrics import generation_metric
from transformers.generation_utils import GenerationMixin
from rouge_score import rouge_scorer
import numpy as np
import lbox_open.utils.general_utils as gu
from lbox_open import openprompt_wrapper
from lbox_open.model.model_optimizer import get_lr_dict, get_optimizer
from lbox_open.parser.output_parser_utils import (
cal_em_from_parses,
get_parses_from_eval_results,
)
from lbox_open.metric import rouge_metric_utils
class GenerativeParser(pl.LightningModule, GenerationMixin):
    """Prompt-based generative parser for LBox legal-NLP tasks.

    Wraps one ``PromptForGenerationCustom`` per target parse around a shared
    pretrained LM and implements the Lightning train/validation/test loops,
    plus metric computation (sentence BLEU, ROUGE-L, or exact match).
    """

    def __init__(self, cfg, plm, plm_tokenizer, input_templates):
        super().__init__()
        self.task = cfg.model.task
        self.mparam = cfg.model
        self.tparam = cfg.train
        self.iparam = cfg.infer
        self.cfg_name = cfg.name
        self.target_parses_dict = cfg.model.target_parses_dict
        self.prompt_models = {}
        self.plm = plm
        for target_parse, target_sub_parses in cfg.model.target_parses_dict.items():
            # keep them for just in case we tune plm
            prompt_model = openprompt_wrapper.PromptForGenerationCustom(
                plm=plm,
                template=input_templates[target_parse],
                freeze_plm=cfg.model.plm.freeze,
                tokenizer=plm_tokenizer,
                plm_eval_mode=cfg.model.plm.eval_mode,
            )
            self.prompt_models[target_parse] = prompt_model
        self.prompt_models = torch.nn.ModuleDict(self.prompt_models)

        # if self.plm.config.is_encoder_decoder:
        self.generation_arguments = {
            "max_length": cfg.infer.max_length,
            "max_new_tokens": cfg.infer.get("max_new_tokens", None),
            "min_length": cfg.infer.min_length,
            "temperature": cfg.infer.temperature,
            "do_sample": cfg.infer.do_sample,
            "top_k": cfg.infer.top_k,
            "top_p": cfg.infer.top_p,
            "repetition_penalty": cfg.infer.repetition_penalty,
            "num_beams": cfg.infer.num_beams,
            "bad_words_ids": cfg.infer.bad_words_ids,
            "use_cache": True,
        }
        if plm.config.is_encoder_decoder:
            # remove max_new_tokens
            print(f"The model is of is_encoder_decoder. Thus we remove max new tokens.")
            self.generation_arguments.pop("max_new_tokens")
        else:
            if cfg.infer.get("max_new_tokens", None):
                print(
                    f"Max length in generation option shall be ignored as max_new_tokens presents."
                )
            # NOTE(review): max_length is cleared even when max_new_tokens is
            # absent — confirm both are not meant to be None at the same time.
            self.generation_arguments["max_length"] = None

        self.rouge_scorer = rouge_scorer.RougeScorer(
            ["rouge1", "rouge2", "rougeL"], tokenizer=rouge_metric_utils.WhiteSpaceTokenizer()
        )

    def forward(self, target_parse, batch):
        """Return the prompt model's training loss for one target parse."""
        loss = self.prompt_models[target_parse](batch[target_parse])
        return loss

    def training_step(self, batch, batch_idx):
        """Average the per-target-parse losses for one batch."""
        n_keys = len(self.target_parses_dict)
        loss = 0
        for i_target, (target_parse, _) in enumerate(self.target_parses_dict.items()):
            loss += self.forward(target_parse, batch)
        return {"loss": loss / n_keys}

    def training_epoch_end(self, outputs):
        """Log the epoch-averaged training loss."""
        loss_all = torch.stack(self.gather_loss(outputs))
        ave_loss = torch.mean(loss_all)
        self.log("training__ave_loss", ave_loss)

    def gather_loss(self, outputs):
        """Collect per-step losses from Lightning step outputs."""
        loss_all = []
        for output in outputs:
            loss_all.append(output["loss"])
        return loss_all

    def validation_step(self, batch, batch_idx):
        return self._eval_step(batch, batch_idx)

    def validation_epoch_end(self, outputs):
        """Compute validation metrics and log the configured validation score."""
        (
            eval_score,
            doc_ids_all,
            pr_texts_all,
            gt_texts_all,
            confidences_all,
        ) = self._eval_epoch_end(outputs)

        print("\nValidation!-----------------------------------------")
        pprint(eval_score)
        pprint(f"GT: {gt_texts_all[self.tparam.validation_target_parse][0:2]}")
        pprint(f"PR: {pr_texts_all[self.tparam.validation_target_parse][0:2]}")
        if self.tparam.validation_metric in ["sentence_bleu"]:
            validation_score = eval_score[self.tparam.validation_target_parse]
        elif self.tparam.validation_metric in ["rougeL"]:
            validation_score = eval_score[self.tparam.validation_target_parse]
        elif self.tparam.validation_metric in ["em"]:
            # Exact match has several aggregation modes over sub-parses.
            if self.tparam.validation_sub_param.method == "single_parse":
                sub_parse_name = self.tparam.validation_sub_param.target_sub_parse
                validation_score = eval_score[self.tparam.validation_target_parse][
                    "f1"
                ][sub_parse_name]
            elif self.tparam.validation_sub_param.method == "average":
                validation_score = 0
                cnt = 0
                for sub_parse_name, score in eval_score[
                    self.tparam.validation_target_parse
                ]["f1"].items():
                    validation_score += score
                    cnt += 1
                validation_score /= cnt
            elif self.tparam.validation_sub_param.method == "text_em":
                validation_score = eval_score[self.tparam.validation_target_parse][
                    "text_em"
                ]
            else:
                raise ValueError
            for sub_parse_name, score in eval_score[
                self.tparam.validation_target_parse
            ]["f1"].items():
                self.log(sub_parse_name, score)
            self.log(
                f"{self.tparam.validation_target_parse}_text_em",
                eval_score[self.tparam.validation_target_parse]["text_em"],
            )
        else:
            raise ValueError
        self.log(
            f"{self.tparam.validation_metric}_{self.tparam.validation_sub_param.method}",
            validation_score,
        )

    def test_step(self, batch, batch_idx):
        return self._eval_step(batch, batch_idx)

    def test_epoch_end(self, outputs):
        """Compute test metrics and persist scores/predictions/confidences as JSON."""
        output_save_dir = (
            Path(self.tparam.weight.path).parent / "analysis" / self.cfg_name
        )
        os.makedirs(output_save_dir, exist_ok=True)
        (
            eval_score,
            doc_ids_all,
            pr_texts_all,
            gt_texts_all,
            confidences_all,
        ) = self._eval_epoch_end(
            outputs, save=True, output_save_dir=output_save_dir, verbose=True
        )
        print("Test!-----------------------------------------------")
        print(eval_score)

        output_save_path_eval_score = output_save_dir / "eval_score.json"
        gu.save_json(output_save_path_eval_score, eval_score)

        eval_result = {
            "doc_ids": doc_ids_all,
            "pr_texts": pr_texts_all,
            "gt_texts": gt_texts_all,
        }
        output_save_path_eval_result = output_save_dir / "eval_result.json"
        gu.save_json(output_save_path_eval_result, eval_result)

        output_save_path_confidences = output_save_dir / "confidences.json"
        gu.save_json(output_save_path_confidences, confidences_all)

        # add doc_ids to confidences_all
        confidences_all_with_doc_ids = {}
        for key_target_parse, confidences in confidences_all.items():
            c_with_ids = [
                (doc_id, c)
                for doc_id, c in zip_longest(doc_ids_all[key_target_parse], confidences)
            ]
            confidences_all_with_doc_ids[key_target_parse] = c_with_ids
        output_save_path_confidences_with_doc_ids = (
            output_save_dir / "confidences_with_doc_ids.json"
        )
        gu.save_json(
            output_save_path_confidences_with_doc_ids, confidences_all_with_doc_ids
        )

    def _eval_step(self, batch, batch_idx):
        """Generate predictions for one batch; return texts, ids, confidences."""
        out = defaultdict(dict)
        for target_parse, _ in self.target_parses_dict.items():
            _prs, _gts, confidences = self.evaluate(target_parse, batch)
            # add confidences as a saved output.
            out[target_parse]["pr_texts"] = _prs
            out[target_parse]["gt_texts"] = _gts
            out[target_parse]["doc_ids"] = batch[target_parse]["guid"]
            out[target_parse]["confidences"] = confidences
        return out

    def _eval_epoch_end(self, outputs, save=False, output_save_dir=None, verbose=False):
        """Aggregate step outputs over the epoch and compute eval scores."""
        # outputs = [list of each step outputs]
        pr_texts_all = self.gather_step_outputs("pr_texts", outputs)
        gt_texts_all = self.gather_step_outputs("gt_texts", outputs)
        doc_ids_all = self.gather_step_outputs("doc_ids", outputs)
        confidences_all = self.gather_step_outputs("confidences", outputs)
        # NOTE(review): `verbose` is hard-coded False below, so the caller's
        # flag (e.g. test_epoch_end's verbose=True) is ignored — confirm.
        eval_score = self.cal_score(
            doc_ids_all,
            pr_texts_all,
            gt_texts_all,
            save=save,
            output_save_dir=output_save_dir,
            confidences=confidences_all,
            threshold=0.0,
            verbose=False,
        )
        return eval_score, doc_ids_all, pr_texts_all, gt_texts_all, confidences_all

    def cal_score(
        self,
        doc_ids_all,
        pr_texts_all,
        gt_texts_all,
        save=False,
        output_save_dir=None,
        confidences=None,
        threshold=0.0,
        verbose=False,
        input_texts=None,
    ):
        """Compute the configured validation metric over all target parses.

        Supports ``sentence_bleu``, ``rougeL``, and ``em`` (exact match, which
        also adds a whole-text exact-match score per target parse).
        """
        if self.tparam.validation_metric == "sentence_bleu":
            eval_score = {}
            for target_parse, _ in self.target_parses_dict.items():
                groundtruth_sentence = gt_texts_all[target_parse]
                generated_sentence = pr_texts_all[target_parse]
                eval_score[target_parse] = generation_metric(
                    generated_sentence, groundtruth_sentence, "sentence_bleu"
                )
        elif self.tparam.validation_metric == "rougeL":
            eval_score = {}
            for target_parse, _ in self.target_parses_dict.items():
                pr_texts = pr_texts_all[target_parse]
                gt_texts = gt_texts_all[target_parse]

                target_scores = []
                for pr_text, gt_text in zip_longest(pr_texts, gt_texts):
                    r_score = self.rouge_scorer.score(
                        prediction=pr_text, target=gt_text
                    )
                    target_scores.append(
                        r_score[self.tparam.validation_metric].fmeasure
                    )
                eval_score[target_parse] = np.mean(
                    target_scores
                )
                print(eval_score)
        elif self.tparam.validation_metric == "em":
            # EM score
            parses = get_parses_from_eval_results(
                self.iparam,
                self.target_parses_dict,
                doc_ids_all,
                gt_texts_all,
                pr_texts_all,
            )

            # analysis
            eval_score = cal_em_from_parses(
                self.iparam,
                self.target_parses_dict,
                parses,
                verbose=verbose,
                save=save,
                output_save_dir=output_save_dir,
                input_texts=input_texts,
                confidences=confidences,
                threshold=threshold,
            )

            # text exact matching
            for target_parse, target_sub_parses in self.target_parses_dict.items():
                gt_texts = gt_texts_all[target_parse]
                pr_texts = pr_texts_all[target_parse]
                corrects = [str(x) == str(y) for x, y in zip(gt_texts, pr_texts)]
                text_em_score = sum(corrects) / len(corrects)
                eval_score[target_parse]["text_em"] = text_em_score
        else:
            raise ValueError
        return eval_score

    def gather_step_outputs(self, key, outputs):
        """Concatenate ``key`` lists across all step outputs, per target parse."""
        outputs_all = defaultdict(list)
        for target_parse, _ in self.target_parses_dict.items():
            for output in outputs:
                outputs_all[target_parse] += output[target_parse][key]
        return outputs_all

    def configure_optimizers(self):
        """Lightning hook: build the optimizer and its LR-scheduler dict."""
        optimizer = get_optimizer(self.mparam, self.tparam, self)
        lr_dict = get_lr_dict(optimizer, self.tparam, "prompt")
        return {"optimizer": optimizer, "lr_scheduler": lr_dict}

    def evaluate(self, target_parse, batch):
        """Run generation for one target parse; return (pr, gt, confidences)."""
        generated_sentence = []
        groundtruth_sentence = []

        seqs, output_sentence, confidences = self.prompt_models[target_parse].generate(
            batch[target_parse], **self.generation_arguments
        )
        generated_sentence.extend(output_sentence)
        groundtruth_sentence.extend(batch[target_parse]["tgt_text"])
        return generated_sentence, groundtruth_sentence, confidences
| 13,092 | 35.985876 | 99 | py |
lbox-open | lbox-open-main/lbox_open/data_module/data_precedent.py | # LBox Open
# Copyright (c) 2022-present LBox Co. Ltd.
# CC BY-NC 4.0
import datasets
import pytorch_lightning as pl
from openprompt import PromptDataLoader
from pytorch_lightning.trainer.supporters import CombinedLoader
from lbox_open import openprompt_wrapper
from lbox_open.template import prompt_generation_utils
class PrecedentData(object):
    """Turns raw precedent records into OpenPrompt ``InputExampleWrapper`` features.

    The input text is assembled from the configured target field(s) and the
    label is rendered through the task-specific output template.
    """

    def __init__(self, cfg, mode, target_parse, target_sub_parses, raw_data):
        assert mode in ["train", "valid", "test", "predict"]
        self.cfg = cfg
        self.mode = mode
        self.target_parse = target_parse
        self.label_key = self._get_label_key(target_parse)
        self.target_sub_parses = target_sub_parses
        self.data_aug_param = cfg.train.get("data_aug_param", None)
        self.doc_id_key = self._get_doc_id(cfg.model.task)

        if raw_data is not None:
            self.features = self._gen_input_features(raw_data)

    def __getitem__(self, idx):
        return self.features[idx]

    def get_text_a(self, raw_data1):
        """Build the model input text from the configured target field(s)."""
        if isinstance(self.cfg.model.target_field, list):
            text_a = ""
            if self.cfg.model.task == "ljp_civil":
                for i, k in enumerate(self.cfg.model.target_field):
                    if k == "facts":
                        text_a += f"사실관계: {raw_data1[k]}\n"
                    elif k == "claim":
                        text_a += f"청구취지: {raw_data1[k]['text']}\n"
                    else:
                        raise NotImplementedError
                text_a = text_a.strip()
            else:
                for i, k in enumerate(self.cfg.model.target_field):
                    text_a += f"{raw_data1[k]}\n"
                text_a = text_a.strip()
        else:
            text_a = raw_data1[self.cfg.model.target_field]
        return text_a

    def _get_label_key(self, target_parse):
        """Map a target parse name to the raw-record key that holds its label."""
        if target_parse in ["claim_acceptance_lv"]:
            label_key = "claim_acceptance_lv"
        elif target_parse in ["casename_classification"]:
            label_key = "casename"
        elif target_parse in ["statute_classification"]:
            label_key = "statutes"
        elif target_parse in ["summarization"]:
            label_key = "summary"
        else:
            label_key = "label"
        return label_key

    def _gen_input_features(self, raw_data):
        """Convert raw records into a list of ``InputExampleWrapper`` features."""
        features = []
        for i, raw_data1 in enumerate(raw_data):
            try:
                text_a = self.get_text_a(raw_data1)
                if self.label_key in raw_data1:
                    tgt_text = prompt_generation_utils.gen_output_template(
                        self.cfg.model.task,
                        self.target_parse,
                        self.target_sub_parses,
                        raw_data1[self.label_key],
                        self.cfg.infer.parse_sep_token,
                    )
                else:
                    # Unlabeled records are only acceptable at prediction time.
                    assert self.mode == "predict"
                    tgt_text = "This is a dummy text."
                feature = openprompt_wrapper.InputExampleWrapper(
                    text_a=text_a,
                    text_b="",
                    tgt_text=tgt_text,
                    guid=str(raw_data1[self.doc_id_key]),
                )
            except Exception as e:
                print(f"doc_id: {self.doc_id_key}")
                print(repr(e))
                raise e
            features.append(feature)
        return features

    def __len__(self):
        # NOTE(review): prediction mode reports length 0 even when features
        # exist — confirm this is intentional before relying on len().
        if self.mode != "predict":
            return len(self.features)
        else:
            return 0

    def __iter__(self):
        # Bug fix: the iterator was previously created but not returned, so
        # iterating an instance raised ``TypeError: iter() returned
        # non-iterator`` (the method implicitly returned None).
        return iter(self.features)

    def _get_doc_id(self, task):
        """Return the raw-record key used as the document id for ``task``."""
        if task in [
            "ljp_civil",
            "ljp_criminal",
            "casename_classification",
            "statute_classification",
            "summarization",
        ]:
            doc_id_key = "id"
        else:
            raise NotImplementedError
        return doc_id_key
class PrecedentDataModule(pl.LightningDataModule):
    """Lightning data module that serves ``PrecedentData`` through OpenPrompt loaders.

    Raw data is either supplied directly (``raw_data``) or downloaded from the
    configured huggingface ``datasets`` card.  One ``PromptDataLoader`` is
    built per target parse and per split.
    """

    def __init__(
        self, cfg, plm_tokenizer, TokenizerWrapper, input_templates, raw_data=None
    ):
        super().__init__()
        self.cfg = cfg
        self.task = cfg.model.task
        self.raw_data = raw_data
        self.plm_tokenizer = plm_tokenizer
        self.TokenizerWrapperClass = TokenizerWrapper
        self.data_ts = {}
        self.data_vs = {}
        self.data_es = {}
        self.input_templates = input_templates
        self.target_parses_dict = cfg.model.target_parses_dict
        if len(self.target_parses_dict) > 1:
            raise Exception("Multitask learning is currently not supported!")

        self.use_local_data = cfg.data.use_local_data
        self.dataset_card = cfg.data.dataset_card
        self.training_set_name = cfg.data.training_set_name
        self.validation_set_name = cfg.data.validation_set_name
        self.test_set_name = cfg.data.test_set_name

    def setup(self, stage):
        """Lightning hook: materialize train/valid(/test) features per target parse."""
        if not self.use_local_data:
            assert self.raw_data is None
            self.raw_data = datasets.load_dataset(self.dataset_card, self.task)
        # Assign train/val datasets for use in dataloaders
        if stage in ["fit", "test"] or stage is None:
            for target_parse, target_sub_parses in self.target_parses_dict.items():
                self.data_ts[target_parse] = PrecedentData(
                    self.cfg,
                    "train",
                    target_parse,
                    target_sub_parses,
                    self.raw_data[self.training_set_name],
                ).features
                self.data_vs[target_parse] = PrecedentData(
                    self.cfg,
                    "valid",
                    target_parse,
                    target_sub_parses,
                    self.raw_data[self.validation_set_name],
                ).features
                if "test" in self.raw_data:
                    self.data_es[target_parse] = PrecedentData(
                        self.cfg,
                        "test",
                        target_parse,
                        target_sub_parses,
                        self.raw_data[self.test_set_name],
                    ).features

    def train_dataloader(self):
        """Shuffled, teacher-forced loaders, one per target parse."""
        data_loaders = {}
        for target_parse, target_sub_parses in self.target_parses_dict.items():
            data_loaders[target_parse] = PromptDataLoader(
                dataset=self.data_ts[target_parse],
                template=self.input_templates[target_parse],
                tokenizer=self.plm_tokenizer,
                tokenizer_wrapper_class=self.TokenizerWrapperClass,
                max_seq_length=self.cfg.model.max_seq_length,
                decoder_max_length=self.cfg.model.decoder_max_length,
                batch_size=self.cfg.train.batch_size,
                shuffle=True,
                teacher_forcing=True,
                predict_eos_token=True,
                truncate_method="head",
            ).dataloader
        return data_loaders

    def val_dataloader(self):
        """Deterministic loaders without teacher forcing, combined per Lightning."""
        data_loaders = {}
        for target_parse, target_sub_parses in self.target_parses_dict.items():
            data_loaders[target_parse] = PromptDataLoader(
                dataset=self.data_vs[target_parse],
                template=self.input_templates[target_parse],
                tokenizer=self.plm_tokenizer,
                tokenizer_wrapper_class=self.TokenizerWrapperClass,
                max_seq_length=self.cfg.model.max_seq_length,
                decoder_max_length=self.cfg.model.decoder_max_length,
                batch_size=self.cfg.train.batch_size_prediction,
                shuffle=False,
                teacher_forcing=False,
                predict_eos_token=True,
                truncate_method="head",
            ).dataloader
        data_loaders = CombinedLoader(data_loaders)
        return data_loaders

    def test_dataloader(self):
        """Same configuration as validation, over the test split."""
        data_loaders = {}
        for target_parse, target_sub_parses in self.target_parses_dict.items():
            data_loaders[target_parse] = PromptDataLoader(
                dataset=self.data_es[target_parse],
                template=self.input_templates[target_parse],
                tokenizer=self.plm_tokenizer,
                tokenizer_wrapper_class=self.TokenizerWrapperClass,
                max_seq_length=self.cfg.model.max_seq_length,
                decoder_max_length=self.cfg.model.decoder_max_length,
                batch_size=self.cfg.train.batch_size_prediction,
                shuffle=False,
                teacher_forcing=False,
                predict_eos_token=True,
                truncate_method="head",
            ).dataloader
        data_loaders = CombinedLoader(data_loaders)
        return data_loaders
| 8,799 | 35.514523 | 83 | py |
x-transformers | x-transformers-main/setup.py | from setuptools import setup, find_packages
# Packaging metadata for the x-transformers PyPI distribution.
setup(
  name = 'x-transformers',
  packages = find_packages(exclude=['examples']),
  version = '1.16.21',
  license='MIT',
  description = 'X-Transformers - Pytorch',
  author = 'Phil Wang',
  author_email = 'lucidrains@gmail.com',
  url = 'https://github.com/lucidrains/x-transformers',
  long_description_content_type = 'text/markdown',
  keywords = [
    'artificial intelligence',
    'attention mechanism',
    'transformers'
  ],
  install_requires=[
    'torch>=1.6',
    'einops>=0.6.1'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
| 803 | 25.8 | 65 | py |
x-transformers | x-transformers-main/examples/enwik8_simple/train_nar.py | from x_transformers import (
TransformerWrapper,
Encoder,
NonAutoregressiveWrapper
)
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e8)  # effectively "train forever"; stop manually
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4  # effective batch size = BATCH_SIZE * 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 250
SEQ_LEN = 256
# helpers
def cycle(loader):
    """Yield batches from ``loader`` forever, restarting it when exhausted."""
    while True:
        yield from loader
def decode_token(token):
    """Map a byte-level token id to a printable character (control chars -> space)."""
    # chr() already returns a str; the original's extra str() wrap was redundant.
    return chr(max(32, token))
def decode_tokens(tokens):
    """Decode a sequence of byte-level token ids into a string."""
    # str.join consumes the map iterator directly; no intermediate list needed.
    return ''.join(map(decode_token, tokens))
# Encoder backbone: one extra token id (256) is reserved for the mask token,
# while logits are still produced over the 256 byte values only.
model = TransformerWrapper(
    num_tokens = 256 + 1,
    logits_dim = 256,
    max_seq_len = SEQ_LEN,
    attn_layers = Encoder(
        dim = 512,
        depth = 8,
        heads = 8,
        dynamic_pos_bias = True
    )
)

# Non-autoregressive (iterative mask-predict) wrapper around the encoder.
model = NonAutoregressiveWrapper(
    model,
    steps = 18,
    schedule = 'cosine',
    mask_id = 256, # mask id is last token, which is why num_tokens above has a +1 (special token)
    self_token_critic = True
)

model.cuda()
# prepare enwik8 data
# Load the first 95MB of enwik8 as raw bytes: 90MB train / 5MB validation.
with gzip.open('./data/enwik8.gz') as file:
    data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
    train_x, valid_x = np.split(data, [int(90e6)])
    data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
    """Dataset serving random fixed-length windows from a 1-D token tensor."""

    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        # Sample a uniformly random window start; `index` is deliberately ignored.
        start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
        window = self.data[start : start + self.seq_len].long()
        return window.cuda()

    def __len__(self):
        return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader)).loss
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
val_data = next(val_loader)
loss = model(val_data).loss
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
sample = model.generate()
output_str = decode_tokens(sample)
print(output_str)
| 2,980 | 24.478632 | 112 | py |
x-transformers | x-transformers-main/examples/enwik8_simple/train.py | from x_transformers import TransformerWrapper, Decoder
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4  # micro-batches per optimizer step
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100  # steps between validation-loss prints
GENERATE_EVERY = 500  # steps between sample generations
GENERATE_LENGTH = 1024  # number of tokens sampled per generation
SEQ_LEN = 1024  # training window length, also the model's max_seq_len
# helpers
def cycle(loader):
    """Yield batches from `loader` forever, restarting whenever it runs out."""
    while True:
        yield from loader
def decode_token(token):
    """Turn a byte value into its character, clamping control codes to a space."""
    code = max(32, token)
    return chr(code)
def decode_tokens(tokens):
    """Decode a sequence of byte values into a printable string."""
    chars = [chr(t) if t >= 32 else ' ' for t in tokens]
    return ''.join(chars)
# instantiate GPT-like decoder model (byte-level, 256 possible tokens)
model = TransformerWrapper(
    num_tokens = 256,
    max_seq_len = SEQ_LEN,
    attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    # first 90M bytes for training, next 5M for validation
    data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
    train_x, valid_x = np.split(data, [int(90e6)])
    data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
    """Random contiguous windows of seq_len + 1 bytes (input plus next-token targets).
    Note: `index` is ignored — every access samples a fresh random offset."""
    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len
    def __getitem__(self, index):
        # +1 so the autoregressive wrapper can shift input/target by one position
        rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
        full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
        return full_seq.cuda()
    def __len__(self):
        return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE, drop_last = True))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE, drop_last = True))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
    model.train()
    # accumulate gradients over several micro-batches before stepping
    for __ in range(GRADIENT_ACCUMULATE_EVERY):
        loss = model(next(train_loader))
        (loss / GRADIENT_ACCUMULATE_EVERY).backward()
    print(f'training loss: {loss.item()}')
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
    optim.step()
    optim.zero_grad()
    if i % VALIDATE_EVERY == 0:
        model.eval()
        with torch.no_grad():
            loss = model(next(val_loader))
            print(f'validation loss: {loss.item()}')
    if i % GENERATE_EVERY == 0:
        model.eval()
        # prime with a random validation window (drop last byte so we can continue it)
        inp = random.choice(val_dataset)[:-1]
        prime = decode_tokens(inp)
        # BUG FIX: the original `print(f'%s \n\n %s', (prime, '*' * 100))` mixed
        # %-style placeholders into an f-string and passed the values as a second
        # positional argument, so it printed the literal format string plus a tuple.
        print(f'{prime} \n\n {"*" * 100}')
        sample = model.generate(inp, GENERATE_LENGTH)
        output_str = decode_tokens(sample)
        print(output_str)
| 2,942 | 26.504673 | 91 | py |
x-transformers | x-transformers-main/examples/toy_tasks/enc_dec_copy.py | import tqdm
import torch
import torch.optim as optim
from x_transformers import XTransformer
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 3e-4
GENERATE_EVERY = 100  # steps between greedy-decode evaluations
NUM_TOKENS = 16 + 2  # 16 content tokens plus two reserved ids (0 pad, 1 start)
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1  # decoder sees [start] + src + src
# helpers
def cycle():
    """Endless stream of synthetic copy-task batches.
    src holds random tokens in [2, NUM_TOKENS); tgt is [start] + src + src, so the
    decoder must reproduce the source sequence twice after the start token."""
    while True:
        prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
        src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
        tgt = torch.cat((prefix, src, src), 1)
        src_mask = torch.ones(BATCH_SIZE, src.shape[1]).bool().cuda()
        yield (src, tgt, src_mask)
# instantiate model: small encoder-decoder with tied token embeddings
model = XTransformer(
    dim = 512,
    tie_token_emb = True,
    return_tgt_loss = True,
    enc_num_tokens=NUM_TOKENS,
    enc_depth = 3,
    enc_heads = 8,
    enc_max_seq_len = ENC_SEQ_LEN,
    dec_num_tokens = NUM_TOKENS,
    dec_depth = 3,
    dec_heads = 8,
    dec_max_seq_len = DEC_SEQ_LEN
).cuda()
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
# NOTE(review): next(cycle()) builds a brand-new generator every step; holding a
# single generator instance would avoid the per-step churn (behavior is the same).
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
    model.train()
    src, tgt, src_mask = next(cycle())
    loss = model(src, tgt, mask=src_mask)
    loss.backward()
    print(f'{i}: {loss.item()}')
    optim.step()
    optim.zero_grad()
    if i != 0 and i % GENERATE_EVERY == 0:
        model.eval()
        src, _, src_mask = next(cycle())
        # evaluate on a single example
        src, src_mask = src[:1], src_mask[:1]
        start_tokens = (torch.ones((1, 1)) * 1).long().cuda()
        sample = model.generate(src, start_tokens, ENC_SEQ_LEN, mask = src_mask)
        # count positions where the generated copy differs from the source
        incorrects = (src != sample).abs().sum()
        print(f"input: ", src)
        print(f"predicted output: ", sample)
        print(f"incorrects: {incorrects}")
| 1,739 | 23.166667 | 83 | py |
x-transformers | x-transformers-main/x_transformers/x_transformers.py | import math
from random import random
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from collections import namedtuple
from dataclasses import dataclass
from typing import List
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from x_transformers.attend import Attend, Intermediates, CascadingHeads
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
# constants
DEFAULT_DIM_HEAD = 64  # per-head dimension used when attn_dim_head is not specified
@dataclass
class LayerIntermediates:
    # hidden states captured before each self-attention layer (when return_hiddens)
    hiddens: List[Tensor] = None
    # per attention layer Intermediates (e.g. pre-softmax attention) for downstream use
    attn_intermediates: List[Intermediates] = None
# helpers
def exists(val):
    """Return True when `val` is not None."""
    return val is not None
def default(val, d):
    """Return `val` when it is not None; otherwise the fallback `d`.
    Plain functions/lambdas are called so the fallback is computed lazily."""
    if val is None:
        return d() if isfunction(d) else d
    return val
def cast_tuple(val, depth):
    """Promote `val` to a tuple of length `depth`, unless it already is a tuple."""
    if isinstance(val, tuple):
        return val
    return (val,) * depth
def maybe(fn):
    """Decorator: make `fn` a passthrough no-op when its first argument is None."""
    @wraps(fn)
    def inner(x, *args, **kwargs):
        if x is None:
            return x
        return fn(x, *args, **kwargs)
    return inner
class always():
    """Callable that returns a fixed value, ignoring any arguments."""
    def __init__(self, val):
        self.val = val
    def __call__(self, *args, **kwargs):
        return self.val
class not_equals():
    """Predicate object: true when the argument differs from the stored value."""
    def __init__(self, val):
        self.val = val
    def __call__(self, x, *args, **kwargs):
        return x != self.val
class equals():
    """Predicate object: true when the argument equals the stored value."""
    def __init__(self, val):
        self.val = val
    def __call__(self, x, *args, **kwargs):
        return x == self.val
# tensor helpers
def max_neg_value(tensor):
    """Most negative finite value for the tensor's dtype (stand-in for -inf in masking)."""
    return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
    """L2-normalize the last dimension in `groups` equal chunks.
    With groups = 1 this is plain unit-normalization of the last dim."""
    *lead, last = t.shape
    t = t.reshape(*lead, groups, last // groups)
    t = F.normalize(t, p = 2, dim = -1)
    return t.reshape(*lead, last)
def pad_at_dim(t, pad, dim = -1, value = 0.):
    """Pad tensor `t` along a single dimension `dim` with `pad = (left, right)`,
    filling new entries with `value`."""
    right_dims = (t.ndim - dim - 1) if dim >= 0 else (-dim - 1)
    return F.pad(t, (0, 0) * right_dims + tuple(pad), value = value)
def or_reduce(masks):
    """Fold a non-empty sequence of boolean masks into their elementwise OR."""
    first, *remaining = masks
    for m in remaining:
        first = first | m
    return first
# init helpers
def init_zero_(layer):
    """Zero a layer's weight and (if present) bias, in place."""
    nn.init.zeros_(layer.weight)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias)
# keyword argument helpers
def pick_and_pop(keys, d):
    """Remove `keys` from dict `d` (mutating it) and return them as a new dict."""
    return {key: d.pop(key) for key in keys}
def group_dict_by_key(cond, d):
    """Split `d` into two dicts: (entries whose key satisfies `cond`, the rest)."""
    matched, unmatched = dict(), dict()
    for key, value in d.items():
        target = matched if cond(key) else unmatched
        target[key] = value
    return matched, unmatched
def string_begins_with(prefix, s):
    """Whether string `s` starts with `prefix`.
    Argument order (prefix first) suits `partial(string_begins_with, prefix)`.
    The second parameter was renamed from `str`, which shadowed the builtin;
    all call sites in this file pass it positionally."""
    return s.startswith(prefix)
def group_by_key_prefix(prefix, d):
    """Split `d` into (entries whose key starts with `prefix`, the rest)."""
    with_prefix, without_prefix = dict(), dict()
    for key, value in d.items():
        target = with_prefix if key.startswith(prefix) else without_prefix
        target[key] = value
    return with_prefix, without_prefix
def groupby_prefix_and_trim(prefix, d):
    """Pull out entries whose key begins with `prefix`, strip the prefix from
    those keys, and return (trimmed_prefixed_dict, remaining_dict)."""
    trimmed, rest = dict(), dict()
    cut = len(prefix)
    for key, value in d.items():
        if key.startswith(prefix):
            trimmed[key[cut:]] = value
        else:
            rest[key] = value
    return trimmed, rest
# initializations
def deepnorm_init(
    transformer,
    beta,
    module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
    """DeepNet-style initialization (https://arxiv.org/abs/2203.00555): xavier-normal
    with gain `beta` on feedforward / value / output projections, gain 1 elsewhere."""
    for name, module in transformer.named_modules():
        # exact-type check on purpose: only plain nn.Linear layers are re-initialized
        if type(module) != nn.Linear:
            continue
        needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
        gain = beta if needs_beta_gain else 1
        nn.init.xavier_normal_(module.weight.data, gain = gain)
        if exists(module.bias):
            nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
    """Structured sequence dropout: keep a random subset of positions per batch row.
    Returns the gathered subsequence and the correspondingly gathered mask."""
    b, n, *_, device = *seq.shape, seq.device
    # random scores decide which positions survive
    logits = torch.randn(b, n, device = device)
    if exists(mask):
        # push already-masked positions to the bottom of the top-k ranking
        mask_value = max_neg_value(logits)
        logits = logits.masked_fill(~mask, mask_value)
    keep_prob = 1. - dropout
    num_keep = max(1, int(keep_prob * n))
    keep_indices = logits.topk(num_keep, dim = 1).indices
    batch_indices = torch.arange(b, device = device)
    batch_indices = rearrange(batch_indices, 'b -> b 1')
    seq = seq[batch_indices, keep_indices]
    if exists(mask):
        # each row keeps ceil(valid_count * keep_prob) real tokens; the rest of the
        # kept slots (which came from padded positions) get masked out again
        seq_counts = mask.sum(dim = -1)
        seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
        keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
        mask = mask[batch_indices, keep_indices] & keep_mask
    return seq, mask
# activations
class ReluSquared(nn.Module):
    """Activation from the Primer paper: relu(x) squared."""
    def forward(self, x):
        rectified = F.relu(x)
        return rectified * rectified
# embedding
class TokenEmbedding(nn.Module):
    """Token embedding table, optionally unit-normalized (for l2norm-embed training)."""
    def __init__(self, dim, num_tokens, l2norm_embed = False):
        super().__init__()
        self.l2norm_embed = l2norm_embed
        self.emb = nn.Embedding(num_tokens, dim)
    def forward(self, x):
        token_emb = self.emb(x)
        # l2-normalize each embedding vector when l2norm_embed is enabled
        return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
    """Learned absolute positional embedding, scaled by dim**-0.5 unless l2norm_embed."""
    def __init__(self, dim, max_seq_len, l2norm_embed = False):
        super().__init__()
        self.scale = dim ** -0.5 if not l2norm_embed else 1.
        self.max_seq_len = max_seq_len
        self.l2norm_embed = l2norm_embed
        self.emb = nn.Embedding(max_seq_len, dim)
    def forward(self, x, pos = None):
        # only x's (batch, seq) shape and device are used here
        seq_len, device = x.shape[1], x.device
        assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
        if not exists(pos):
            pos = torch.arange(seq_len, device = device)
        pos_emb = self.emb(pos)
        pos_emb = pos_emb * self.scale
        return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
    """Fixed sinusoidal positional embedding with a single learned scalar scale."""
    def __init__(self, dim, theta = 10000):
        super().__init__()
        assert (dim % 2) == 0
        self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
        # half the dims carry sin, the other half cos, over log-spaced frequencies
        half_dim = dim // 2
        freq_seq = torch.arange(half_dim).float() / half_dim
        inv_freq = theta ** -freq_seq
        self.register_buffer('inv_freq', inv_freq, persistent = False)
    def forward(self, x, pos = None):
        seq_len, device = x.shape[1], x.device
        if not exists(pos):
            pos = torch.arange(seq_len, device = device)
        emb = einsum('i, j -> i j', pos, self.inv_freq)
        emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
        return emb * self.scale
class RelativePositionBias(nn.Module):
    """T5-style relative position bias: relative distances are bucketed and each
    bucket maps to a learned per-head scalar added to the attention logits."""
    def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
        super().__init__()
        self.scale = scale
        self.causal = causal
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.relative_attention_bias = nn.Embedding(num_buckets, heads)
    @staticmethod
    def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
        # exact buckets for small distances, log-spaced buckets out to max_distance;
        # non-causal splits the bucket range between past and future offsets
        ret = 0
        n = -relative_position
        if not causal:
            num_buckets //= 2
            ret += (n < 0).long() * num_buckets
            n = torch.abs(n)
        else:
            n = torch.max(n, torch.zeros_like(n))
        max_exact = num_buckets // 2
        is_small = n < max_exact
        val_if_large = max_exact + (
            torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
        ).long()
        val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
        ret += torch.where(is_small, n, val_if_large)
        return ret
    @property
    def device(self):
        return next(self.parameters()).device
    def forward(self, i, j):
        # i: query length, j: key length; queries are the last i of the j positions
        device = self.device
        q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
        k_pos = torch.arange(j, dtype = torch.long, device = device)
        rel_pos = k_pos[None, :] - q_pos[:, None]
        rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
        values = self.relative_attention_bias(rp_bucket)
        bias = rearrange(values, 'i j h -> h i j')
        return bias * self.scale
class DynamicPositionBias(nn.Module):
    """Attention bias produced by a small MLP over the signed relative distance,
    shared across all query/key pairs at the same offset."""
    def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
        super().__init__()
        assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
        self.log_distance = log_distance
        self.mlp = nn.ModuleList([])
        self.mlp.append(nn.Sequential(
            nn.Linear(1, dim),
            nn.LayerNorm(dim) if norm else nn.Identity(),
            nn.SiLU()
        ))
        for _ in range(depth - 1):
            self.mlp.append(nn.Sequential(
                nn.Linear(dim, dim),
                nn.LayerNorm(dim) if norm else nn.Identity(),
                nn.SiLU()
            ))
        # final projection to one bias value per head
        self.mlp.append(nn.Linear(dim, heads))
    @property
    def device(self):
        return next(self.parameters()).device
    def forward(self, i, j):
        # only supports square (i == j) attention maps
        assert i == j
        n, device = j, self.device
        # get the (n x n) matrix of distances
        seq_arange = torch.arange(n, device = device)
        context_arange = torch.arange(n, device = device)
        indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
        indices += (n - 1)
        # input to continuous positions MLP: all 2n-1 possible offsets
        pos = torch.arange(-n + 1, n, device = device).float()
        pos = rearrange(pos, '... -> ... 1')
        if self.log_distance:
            pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
        for layer in self.mlp:
            pos = layer(pos)
        # get position biases by indexing the per-offset MLP outputs
        bias = pos[indices]
        bias = rearrange(bias, 'i j h -> h i j')
        return bias
class AlibiPositionalBias(nn.Module):
    """ALiBi (https://arxiv.org/abs/2108.12409): a fixed linear distance penalty per head.
    Only the first `heads` of `total_heads` receive a slope; the rest get zero bias."""
    def __init__(self, heads, total_heads, **kwargs):
        super().__init__()
        self.heads = heads
        self.total_heads = total_heads
        slopes = Tensor(self._get_slopes(heads))
        slopes = rearrange(slopes, 'h -> h 1 1')
        self.register_buffer('slopes', slopes, persistent = False)
        # lazily-computed bias cache, grown on demand in forward
        self.register_buffer('bias', None, persistent = False)
    def get_bias(self, i, j, device):
        # -|distance| between the last i query positions and all j key positions
        i_arange = torch.arange(j - i, j, device = device)
        j_arange = torch.arange(j, device = device)
        bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
        return bias
    @staticmethod
    def _get_slopes(heads):
        # geometric slope schedule from the ALiBi paper, interpolated for head counts
        # that are not powers of two
        def get_slopes_power_of_2(n):
            start = (2**(-2**-(math.log2(n)-3)))
            ratio = start
            return [start*ratio**i for i in range(n)]
        if math.log2(heads).is_integer():
            return get_slopes_power_of_2(heads)
        closest_power_of_2 = 2 ** math.floor(math.log2(heads))
        return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
    @property
    def device(self):
        return next(self.buffers()).device
    def forward(self, i, j):
        h, device = self.total_heads, self.device
        # reuse the cached bias when it already covers the requested (i, j) extent
        if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
            return self.bias[..., :i, :j]
        bias = self.get_bias(i, j, device)
        bias = bias * self.slopes
        # pad with zero-bias rows for the heads beyond the alibi'ed ones
        num_heads_unalibied = h - bias.shape[0]
        bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
        self.register_buffer('bias', bias, persistent = False)
        return self.bias
class LearnedAlibiPositionalBias(AlibiPositionalBias):
    """ALiBi variant whose slopes are learned (stored as log-slopes so they stay positive)."""
    def __init__(self, heads, total_heads):
        super().__init__(heads, total_heads)
        log_slopes = torch.log(self.slopes)
        self.learned_logslopes = nn.Parameter(log_slopes)
    def forward(self, i, j):
        h, device = self.heads, self.device
        def get_slopes(param):
            # exponentiate the log-slopes and pad zero slopes up to the full head count
            return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim = -2)
        # cached distance matrix is slope-independent, so it can be reused across steps
        if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
            bias = self.bias[..., :i, :j]
        else:
            bias = self.get_bias(i, j, device)
            self.register_buffer('bias', bias, persistent = False)
        slopes = get_slopes(self.learned_logslopes)
        bias = bias * slopes
        return bias
class RotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE), with optional xpos length-extrapolation
    scaling and position interpolation via `interpolation_factor`."""
    def __init__(
        self,
        dim,
        use_xpos = False,
        scale_base = 512,
        interpolation_factor = 1.
    ):
        super().__init__()
        inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer('inv_freq', inv_freq)
        assert interpolation_factor >= 1.
        self.interpolation_factor = interpolation_factor
        if not use_xpos:
            # no xpos: forward will return a scale of 1.
            self.register_buffer('scale', None)
            return
        scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
        self.scale_base = scale_base
        self.register_buffer('scale', scale)
    def forward(self, seq_len, device):
        t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
        # stretch positions to interpolate to contexts longer than trained on
        t = t / self.interpolation_factor
        freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
        freqs = torch.cat((freqs, freqs), dim = -1)
        if not exists(self.scale):
            return freqs, 1.
        # xpos decay, centered on the middle of the sequence
        power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
        scale = self.scale ** rearrange(power, 'n -> n 1')
        scale = torch.cat((scale, scale), dim = -1)
        return freqs, scale
def rotate_half(x):
    """RoPE helper: map (x1, x2) halves of the last dim to (-x2, x1)."""
    x1, x2 = x.chunk(2, dim = -1)
    return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
    """Apply rotary embeddings `freqs` (aligned to the last t.shape[-2] positions)
    to `t`, with an optional xpos `scale`."""
    freqs = freqs[-t.shape[-2]:, :]
    # inline rotate-half: (t1, t2) -> (-t2, t1) on halves of the last dim
    t1, t2 = t.chunk(2, dim = -1)
    rotated = torch.cat((-t2, t1), dim = -1)
    return (t * freqs.cos() * scale) + (rotated * freqs.sin() * scale)
# norms
class Scale(nn.Module):
    """Wrap `fn` and multiply its output (the first element, if it returns a tuple)
    by a fixed constant `value`."""
    def __init__(self, value, fn):
        super().__init__()
        self.value = value
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(x, **kwargs)
        if isinstance(out, tuple):
            head, *tail = out
            return (head * self.value, *tail)
        return out * self.value
class ScaleNorm(nn.Module):
    """ScaleNorm: divide by the L2 norm of the last dim (clamped at eps) and
    multiply by a single learned scalar gain, initialized to dim**-0.5."""
    def __init__(self, dim, eps = 1e-5):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))

    def forward(self, x):
        denom = x.norm(dim = -1, keepdim = True).clamp(min = self.eps)
        return x / denom * self.g
class RMSNorm(nn.Module):
    """RMSNorm: divide by the root-mean-square of the last dim (clamped at eps)
    and multiply by a learned per-dimension gain."""
    def __init__(self, dim, eps = 1e-8):
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        # ||x|| * dim**-0.5 equals the RMS of x over the last dimension
        rms = x.norm(dim = -1, keepdim = True) * self.scale
        return x / rms.clamp(min = self.eps) * self.g
# residual and residual gates
class Residual(nn.Module):
    """Plain residual connection, optionally scaling the residual branch by a
    learned per-dimension vector and/or a fixed constant (deepnorm)."""
    def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
        super().__init__()
        self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
        self.scale_residual_constant = scale_residual_constant

    def forward(self, x, residual):
        if self.residual_scale is not None:
            residual = residual * self.residual_scale
        if self.scale_residual_constant != 1:
            residual = residual * self.scale_residual_constant
        return x + residual
class GRUGating(nn.Module):
    """Residual combiner that merges the branch output into the residual with a
    GRU cell (gated residual), optionally scaling the residual first."""
    def __init__(self, dim, scale_residual = False, **kwargs):
        super().__init__()
        self.gru = nn.GRUCell(dim, dim)
        self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
    def forward(self, x, residual):
        if exists(self.residual_scale):
            residual = residual * self.residual_scale
        # flatten (batch, seq) so each token position is one independent GRU step
        gated_output = self.gru(
            rearrange(x, 'b n d -> (b n) d'),
            rearrange(residual, 'b n d -> (b n) d')
        )
        return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
    """Shift `t` by `amount` positions along dim -2 (the sequence dim), padding the
    front with zeros and trimming the tail; masked positions are zeroed first."""
    if amount == 0:
        return t
    amount = min(amount, t.shape[1])
    if mask is not None:
        t = t.masked_fill(~mask[..., None], 0.)
    # pad `amount` zeros at the front of dim -2 and drop `amount` from the back
    return F.pad(t, (0, 0, amount, -amount), value = 0.)
class ShiftTokens(nn.Module):
    """Token-shift wrapper: split the feature dim into len(shifts) segments, shift
    each segment along the sequence dim by its amount, then run `fn`."""
    def __init__(self, shifts, fn):
        super().__init__()
        self.fn = fn
        self.shifts = tuple(shifts)
    def forward(self, x, **kwargs):
        mask = kwargs.get('mask', None)
        shifts = self.shifts
        segments = len(shifts)
        feats_per_shift = x.shape[-1] // segments
        splitted = x.split(feats_per_shift, dim = -1)
        # any leftover features (when the dim is not divisible) pass through unshifted
        segments_to_shift, rest = splitted[:segments], splitted[segments:]
        segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
        x = torch.cat((*segments_to_shift, *rest), dim = -1)
        return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
    """Gated linear unit: project to twice the output width, then gate one half
    with `activation` applied to the other half (https://arxiv.org/abs/2002.05202)."""
    def __init__(self, dim_in, dim_out, activation):
        super().__init__()
        self.act = activation
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        projected = self.proj(x)
        value, gate = projected.chunk(2, dim = -1)
        return value * self.act(gate)
class FeedForward(nn.Module):
    """Transformer feedforward block: Linear -> activation [-> LayerNorm] -> Dropout
    -> Linear, with optional GLU gating on the input projection.
    Activation priority: relu-squared, then SiLU (swish), then GELU (default)."""
    def __init__(
        self,
        dim,
        dim_out = None,
        mult = 4,
        glu = False,
        swish = False,
        relu_squared = False,
        post_act_ln = False,
        dropout = 0.,
        no_bias = False,
        zero_init_output = False
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        if relu_squared:
            activation = ReluSquared()
        elif swish:
            activation = nn.SiLU()
        else:
            activation = nn.GELU()
        # GLU replaces the plain Linear+activation input projection when requested
        project_in = nn.Sequential(
            nn.Linear(dim, inner_dim, bias = not no_bias),
            activation
        ) if not glu else GLU(dim, inner_dim, activation)
        self.ff = nn.Sequential(
            project_in,
            nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim_out, bias = not no_bias)
        )
        # init last linear layer to 0
        if zero_init_output:
            init_zero_(self.ff[-1])
    def forward(self, x):
        return self.ff(x)
# attention. it is all we need
class Attention(nn.Module):
    """Multi-head attention with many optional extensions: flash attention, talking
    heads, qk (cosine-sim) normalization, learned memory key/values, relative
    position bias and rotary hooks, alphafold2-style value gating, shared or
    single-headed key/values, head scaling, and a tensor-product 'relations'
    residual (https://arxiv.org/abs/2208.06061).
    The core softmax-attention computation lives in the Attend module."""
    def __init__(
        self,
        dim,
        dim_head = DEFAULT_DIM_HEAD,
        heads = 8,
        causal = False,
        flash = False,
        talking_heads = False,
        head_scale = False,
        sparse_topk = None,
        num_mem_kv = 0,
        dropout = 0.,
        on_attn = False,
        gate_values = False,
        zero_init_output = False,
        max_attend_past = None,
        qk_norm = False,
        qk_norm_groups = 1,
        qk_norm_scale = 10,
        qk_norm_dim_scale = False,
        one_kv_head = False,
        shared_kv = False,
        value_dim_head = None,
        tensor_product = False, # https://arxiv.org/abs/2208.06061
        cascading_heads = False,
        onnxable = False
    ):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.causal = causal
        self.max_attend_past = max_attend_past
        value_dim_head = default(value_dim_head, dim_head)
        q_dim = k_dim = dim_head * heads
        v_dim = out_dim = value_dim_head * heads
        # one_kv_head: a single key/value head shared by all query heads (multi-query attention)
        self.one_kv_head = one_kv_head
        if one_kv_head:
            k_dim = dim_head
            v_dim = value_dim_head
            out_dim = v_dim * heads
        self.to_q = nn.Linear(dim, q_dim, bias = False)
        self.to_k = nn.Linear(dim, k_dim, bias = False)
        # shared key / values, for further memory savings during inference
        assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
        self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
        # relations projection from tp-attention
        self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
        # add GLU gating for aggregated values, from alphafold2
        self.to_v_gate = None
        if gate_values:
            self.to_v_gate = nn.Linear(dim, out_dim)
            # initialized so the gate starts fully open (sigmoid(1))
            nn.init.constant_(self.to_v_gate.weight, 0)
            nn.init.constant_(self.to_v_gate.bias, 1)
        # cosine sim attention
        self.qk_norm = qk_norm
        self.qk_norm_groups = qk_norm_groups
        self.qk_norm_scale = qk_norm_scale
        # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
        self.qk_norm_dim_scale = qk_norm_dim_scale
        self.qk_norm_q_scale = self.qk_norm_k_scale = 1
        if qk_norm and qk_norm_dim_scale:
            self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
            self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
        assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
        assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
        # attend class - includes core attention algorithm + talking heads
        self.attend = Attend(
            heads = heads,
            causal = causal,
            talking_heads = talking_heads,
            dropout = dropout,
            sparse_topk = sparse_topk,
            qk_norm = qk_norm,
            scale = qk_norm_scale if qk_norm else self.scale,
            flash = flash,
            onnxable = onnxable
        )
        if cascading_heads:
            # cascading heads - wrap the Attend logic
            self.attend = CascadingHeads(self.attend)
        # head scaling
        self.head_scale = head_scale
        if head_scale:
            self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
        # explicit topk sparse attention
        self.sparse_topk = sparse_topk
        # add memory key / values
        self.num_mem_kv = num_mem_kv
        if num_mem_kv > 0:
            self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
            self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
        # attention on attention
        self.attn_on_attn = on_attn
        self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
        # init output projection 0
        if zero_init_output:
            init_zero_(self.to_out)
    def forward(
        self,
        x,
        context = None,
        mask = None,
        context_mask = None,
        attn_mask = None,
        rel_pos = None,
        rotary_pos_emb = None,
        prev_attn = None,
        mem = None
    ):
        """Run attention over `x` (self-attention) or over `context` (cross-attention).
        Returns (out, intermediates) where intermediates comes from the Attend module."""
        b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
        kv_input = default(context, x)
        q_input = x
        k_input = kv_input
        v_input = kv_input
        r_input = x
        # prepend cached memories (e.g. transformer-xl style) to keys/values
        if exists(mem):
            k_input = torch.cat((mem, k_input), dim = -2)
            v_input = torch.cat((mem, v_input), dim = -2)
        q = self.to_q(q_input)
        k = self.to_k(k_input)
        v = self.to_v(v_input) if exists(self.to_v) else k
        r = self.to_r(r_input) if exists(self.to_r) else None
        q = rearrange(q, 'b n (h d) -> b h n d', h = h)
        if not self.one_kv_head:
            k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
        if self.qk_norm:
            # cosine-sim attention: unit-normalize q and k (per group), optional learned scales
            qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
            q, k = map(qk_l2norm, (q, k))
            scale = self.qk_norm_scale
            q = q * self.qk_norm_q_scale
            k = k * self.qk_norm_k_scale
        if exists(rotary_pos_emb) and not has_context:
            # rotate only the first `l` feature dims; xpos scales q and k reciprocally
            freqs, xpos_scale = rotary_pos_emb
            l = freqs.shape[-1]
            q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
            (ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
            ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
            q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
        input_mask = default(context_mask, mask)
        if self.num_mem_kv > 0:
            # learned memory key/values, always attendable (mask padded with True)
            mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
            if self.qk_norm:
                mem_k = l2norm(mem_k)
                mem_k = mem_k * self.qk_norm_k_scale
            k = torch.cat((mem_k, k), dim = -2)
            v = torch.cat((mem_v, v), dim = -2)
            if exists(input_mask):
                input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
        i, j = map(lambda t: t.shape[-2], (q, k))
        # determine masking
        # NOTE(review): mask_value is unused here — masking is applied inside
        # self.attend; this local looks like a leftover, candidate for removal
        mask_value = max_neg_value(q)
        # each entry in `masks` marks DISALLOWED positions; the OR is inverted at
        # the end so final_attn_mask is True where attention is permitted
        masks = []
        final_attn_mask = None
        if exists(input_mask):
            input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
            masks.append(~input_mask)
        if exists(attn_mask):
            assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
            if attn_mask.ndim == 2:
                attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
            elif attn_mask.ndim == 3:
                attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
            masks.append(~attn_mask)
        if exists(self.max_attend_past):
            # forbid attending further than max_attend_past positions back
            range_q = torch.arange(j - i, j, device = device)
            range_k = torch.arange(j, device = device)
            dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
            max_attend_past_mask = dist > self.max_attend_past
            masks.append(max_attend_past_mask)
        if len(masks) > 0:
            final_attn_mask = ~or_reduce(masks)
        # prepare relative positional bias, if needed
        attn_bias = None
        if exists(rel_pos):
            attn_bias = rel_pos(i, j)
        # attention is all we need
        out, intermediates = self.attend(
            q, k, v,
            mask = final_attn_mask,
            attn_bias = attn_bias,
            prev_attn = prev_attn
        )
        # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
        if exists(r):
            out = out * r + out
        # normformer scaling of heads
        if head_scale:
            out = out * self.head_scale_params
        # merge heads
        out = rearrange(out, 'b h n d -> b n (h d)')
        # alphafold2 styled gating of the values
        if exists(self.to_v_gate):
            gates = self.to_v_gate(x)
            out = out * gates.sigmoid()
        # combine the heads
        out = self.to_out(out)
        if exists(mask):
            # zero out padded query positions in the output
            mask = rearrange(mask, 'b n -> b n 1')
            out = out.masked_fill(~mask, 0.)
        return out, intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
alibi_learned = False,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned else AlibiPositionalBias
self.rel_pos = alibi_pos_klass(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
norm_class = RMSNorm if use_rmsnorm else norm_class
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
    def forward(
        self,
        x,
        context = None,
        mask = None,
        context_mask = None,
        attn_mask = None,
        self_attn_context_mask = None,
        mems = None,
        return_hiddens = False
    ):
        """Run the configured sequence of attention / cross-attention / feedforward layers.

        Args:
            x: input tensor of shape (batch, seq, dim)
            context: external sequence for cross attention ('c' layers); required iff cross_attend
            mask: boolean mask over x's sequence positions
            context_mask: boolean mask over context positions (cross attention)
            attn_mask: explicit attention matrix mask for self attention
            self_attn_context_mask: context_mask routed to the self-attention blocks
            mems: optional per-attention-layer memories (transformer-xl style), one per 'a' layer
            return_hiddens: if True, also return a LayerIntermediates of hiddens / attention maps

        Returns:
            x, or (x, LayerIntermediates) when return_hiddens is True.
        """
        assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
        hiddens = []
        intermediates = []
        prev_attn = None
        prev_cross_attn = None
        # copy so pop(0) below does not mutate the caller's list
        mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
        rotary_pos_emb = None
        if exists(self.rotary_pos_emb):
            # rotary embeddings must cover the longest memory + current sequence length
            max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
            rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
        # resiDual: a second residual stream accumulated outside the blocks
        outer_residual = x * self.resi_dual_scale
        for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
            is_last = ind == (len(self.layers) - 1)
            # stochastic depth: skip this layer entirely with probability layer_dropout
            if self.training and layer_dropout > 0. and random() < layer_dropout:
                continue
            if layer_type == 'a':
                if return_hiddens:
                    hiddens.append(x)
                # one memory per self-attention layer, consumed in order
                layer_mem = mems.pop(0) if mems else None
            if layer_type == 'c':
                # structured dropout of context tokens when cross attending
                if self.training and self.cross_attn_tokens_dropout > 0.:
                    context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
            inner_residual = x
            pre_norm, post_branch_norm, post_main_norm = norm
            if exists(pre_norm):
                x = pre_norm(x)
            if layer_type == 'a':
                out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
            elif layer_type == 'c':
                out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
            elif layer_type == 'f':
                out = block(x)
            if self.resi_dual:
                outer_residual = outer_residual + out * self.resi_dual_scale
            if exists(post_branch_norm):
                out = post_branch_norm(out)
            x = residual_fn(out, inner_residual)
            if layer_type in ('a', 'c') and return_hiddens:
                intermediates.append(inter)
            # residual attention: feed pre-softmax scores into the next layer of same kind
            if layer_type == 'a' and self.residual_attn:
                prev_attn = inter.pre_softmax_attn
            elif layer_type == 'c' and self.cross_residual_attn:
                prev_cross_attn = inter.pre_softmax_attn
            if exists(post_main_norm):
                x = post_main_norm(x)
        if self.resi_dual:
            x = x + self.final_norm(outer_residual)
        else:
            x = self.final_norm(x)
        if return_hiddens:
            intermediates = LayerIntermediates(
                hiddens = hiddens,
                attn_intermediates = intermediates
            )
            return x, intermediates
        return x
class Encoder(AttentionLayers):
    """AttentionLayers preset for bidirectional (non-causal) self attention."""

    def __init__(self, **kwargs):
        # the encoder fixes causality itself; reject any caller-supplied value
        assert 'causal' not in kwargs, 'cannot set causality on encoder'
        super().__init__(**kwargs, causal = False)
class Decoder(AttentionLayers):
    """AttentionLayers preset for causal (autoregressive) self attention."""

    def __init__(self, **kwargs):
        # the decoder fixes causality itself; reject any caller-supplied value
        assert 'causal' not in kwargs, 'cannot set causality on decoder'
        super().__init__(**kwargs, causal = True)
class CrossAttender(AttentionLayers):
    """AttentionLayers preset that only cross attends to an external context."""

    def __init__(self, **kwargs):
        super().__init__(only_cross = True, cross_attend = True, **kwargs)
class ViTransformerWrapper(nn.Module):
    """Vision Transformer: patchify an image, embed the patches, run an Encoder.

    If `num_classes` is given, forward() mean-pools the patch embeddings and
    projects to class logits; otherwise it returns the per-patch embeddings.

    Note: the `dropout` parameter is accepted for backward compatibility but is
    unused — only `emb_dropout` is applied.
    """
    def __init__(
        self,
        *,
        image_size,
        patch_size,
        attn_layers,
        channels = 3,
        num_classes = None,
        dropout = 0.,
        post_emb_norm = False,
        emb_dropout = 0.
    ):
        super().__init__()
        assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
        assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
        dim = attn_layers.dim
        num_patches = (image_size // patch_size) ** 2
        patch_dim = channels * patch_size ** 2

        self.patch_size = patch_size
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))

        self.patch_to_embedding = nn.Sequential(
            nn.LayerNorm(patch_dim),
            nn.Linear(patch_dim, dim),
            nn.LayerNorm(dim)
        )

        self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
        self.dropout = nn.Dropout(emb_dropout)

        self.attn_layers = attn_layers

        # bugfix: this was previously `nn.Identity()` when num_classes was None,
        # which made the `not exists(self.mlp_head)` check in forward() dead code —
        # embeddings were mean-pooled even when no classification head was requested
        self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else None

    def forward(
        self,
        img,
        return_embeddings = False
    ):
        """Return class logits of shape (batch, num_classes), or per-patch
        embeddings when no head exists or `return_embeddings` is True."""
        p = self.patch_size

        x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
        x = self.patch_to_embedding(x)
        n = x.shape[1]

        x = x + self.pos_embedding[:, :n]

        x = self.post_emb_norm(x)
        x = self.dropout(x)

        x = self.attn_layers(x)

        if not exists(self.mlp_head) or return_embeddings:
            return x

        # mean pool over the patch dimension before classifying
        x = x.mean(dim = -2)
        return self.mlp_head(x)
class TransformerWrapper(nn.Module):
    """Token-in, logits-out wrapper around an `AttentionLayers` stack.

    Handles token + positional embeddings, optional memory tokens (a la Memory
    Transformers), transformer-xl style memories, and the final projection to
    logits (optionally tied to the token embedding).
    """
    def __init__(
        self,
        *,
        num_tokens,
        max_seq_len,
        attn_layers,
        emb_dim = None,
        max_mem_len = 0,
        shift_mem_down = 0,
        emb_dropout = 0.,
        post_emb_norm = False,
        num_memory_tokens = None,
        tie_embedding = False,
        logits_dim = None,
        use_abs_pos_emb = True,
        scaled_sinu_pos_emb = False,
        l2norm_embed = False,
        emb_frac_gradient = 1. # GLM-130B and Cogview successfully used this, set at 0.1
    ):
        super().__init__()
        assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'

        dim = attn_layers.dim
        emb_dim = default(emb_dim, dim)
        self.emb_dim = emb_dim
        self.num_tokens = num_tokens

        self.max_seq_len = max_seq_len
        self.max_mem_len = max_mem_len
        self.shift_mem_down = shift_mem_down

        self.l2norm_embed = l2norm_embed
        self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)

        # absolute positions are skipped when the attention stack already
        # supplies its own positional information
        if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
            self.pos_emb = always(0)
        elif scaled_sinu_pos_emb:
            self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
        else:
            self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)

        self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290

        self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
        self.emb_dropout = nn.Dropout(emb_dropout)

        self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
        self.attn_layers = attn_layers

        self.init_()

        logits_dim = default(logits_dim, num_tokens)
        self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()

        # memory tokens (like [cls]) from Memory Transformers paper
        num_memory_tokens = default(num_memory_tokens, 0)
        self.num_memory_tokens = num_memory_tokens
        if num_memory_tokens > 0:
            self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))

    def init_(self):
        # tiny-std init pairs with l2-normalized embeddings; otherwise kaiming
        if self.l2norm_embed:
            nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
            if not isinstance(self.pos_emb, always):
                nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
            return

        nn.init.kaiming_normal_(self.token_emb.emb.weight)

    def forward(
        self,
        x,
        return_embeddings = False,
        return_logits_and_embeddings = False,
        return_intermediates = False,
        mask = None,
        return_mems = False,
        return_attn = False,
        mems = None,
        pos = None,
        prepend_embeds = None,
        sum_embeds = None,
        **kwargs
    ):
        """Embed tokens, run the attention stack, and project to logits.

        Returns logits by default; embeddings, (logits, embeddings),
        intermediates, new memories or attention maps depending on the
        corresponding `return_*` flags.
        """
        b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient

        # bugfix: `return_intermediates` also requires hiddens to be collected —
        # previously `intermediates` was unbound when only that flag was set
        return_hiddens = return_mems | return_attn | return_intermediates

        # absolute positional embedding
        external_pos_emb = exists(pos) and pos.dtype != torch.long
        pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
        x = self.token_emb(x) + pos_emb

        # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
        if exists(sum_embeds):
            x = x + sum_embeds

        # post embedding norm, purportedly leads to greater stabilization
        x = self.post_emb_norm(x)

        # whether to append embeds, as in PaLI, for image embeddings
        if exists(prepend_embeds):
            prepend_seq, prepend_dim = prepend_embeds.shape[1:]
            assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
            x = torch.cat((prepend_embeds, x), dim = -2)

        # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
        if emb_frac_gradient < 1:
            assert emb_frac_gradient > 0
            x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)

        # embedding dropout
        x = self.emb_dropout(x)

        x = self.project_emb(x)

        if num_mem > 0:
            mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
            x = torch.cat((mem, x), dim = 1)

            # auto-handle masking after appending memory tokens
            if exists(mask):
                mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)

        if self.shift_mem_down and exists(mems):
            mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
            mems = [*mems_r, *mems_l]

        if return_hiddens:
            x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
        else:
            x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)

        # strip the memory tokens back off before projecting out
        mem, x = x[:, :num_mem], x[:, num_mem:]

        if return_logits_and_embeddings:
            out = (self.to_logits(x), x)
        elif return_embeddings:
            out = x
        else:
            out = self.to_logits(x)

        if return_intermediates:
            return out, intermediates

        if return_mems:
            hiddens = intermediates.hiddens
            new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
            new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
            return out, new_mems

        if return_attn:
            attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
            return out, attn_maps

        return out
class ContinuousTransformerWrapper(nn.Module):
    """Attention stack over continuous (already-embedded) inputs.

    Unlike `TransformerWrapper` there is no token embedding table: inputs of
    width `dim_in` are linearly projected into the model dimension, and outputs
    are projected to `dim_out` (both projections default to identity).
    """
    def __init__(
        self,
        *,
        max_seq_len,
        attn_layers,
        dim_in = None,
        dim_out = None,
        emb_dim = None,
        max_mem_len = 0,
        post_emb_norm = False,
        emb_dropout = 0.,
        use_abs_pos_emb = True,
        scaled_sinu_pos_emb = False
    ):
        super().__init__()
        assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'

        dim = attn_layers.dim

        self.max_seq_len = max_seq_len
        self.max_mem_len = max_mem_len

        # skip absolute positions when the attention stack already carries
        # its own positional information
        skip_abs_pos_emb = not (use_abs_pos_emb and not attn_layers.has_pos_emb)

        if skip_abs_pos_emb:
            self.pos_emb = always(0)
        elif scaled_sinu_pos_emb:
            self.pos_emb = ScaledSinusoidalEmbedding(dim)
        else:
            self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)

        self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
        self.emb_dropout = nn.Dropout(emb_dropout)

        self.project_in = nn.Identity() if dim_in is None else nn.Linear(dim_in, dim)
        self.attn_layers = attn_layers
        self.project_out = nn.Identity() if dim_out is None else nn.Linear(dim, dim_out)

    def forward(
        self,
        x,
        return_embeddings = False,
        return_intermediates = False,
        return_mems = False,
        mask = None,
        return_attn = False,
        mems = None,
        pos = None,
        prepend_embeds = None,
        **kwargs
    ):
        x = self.project_in(x)
        x = x + self.pos_emb(x, pos = pos)
        x = self.post_emb_norm(x)

        # whether to append embeds, as in PaLI, for image embeddings
        if exists(prepend_embeds):
            _, prepend_dim = prepend_embeds.shape[1:]
            assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as model dimensions'
            x = torch.cat((prepend_embeds, x), dim = -2)

        x = self.emb_dropout(x)

        # hiddens are always collected, since mems / attention maps may be requested
        x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)

        out = x if return_embeddings else self.project_out(x)

        if return_intermediates:
            return out, intermediates

        if return_mems:
            new_mems = [t[..., -self.max_mem_len:, :].detach() for t in intermediates.hiddens]
            return out, new_mems

        if return_attn:
            attn_maps = [inter.post_softmax_attn for inter in intermediates.attn_intermediates]
            return out, attn_maps

        return out
class XTransformer(nn.Module):
    """Encoder-decoder transformer built from two `TransformerWrapper`s.

    Constructor kwargs prefixed with `enc_` / `dec_` are routed to the encoder
    and decoder respectively (e.g. `enc_depth = 6`). The decoder is wrapped in
    `AutoregressiveWrapper` for training / generation.
    """
    def __init__(
        self,
        *,
        dim,
        tie_token_emb = False,
        ignore_index = -100,
        pad_value = 0,
        deepnorm = False,
        cross_attn_tokens_dropout = 0.,
        **kwargs
    ):
        super().__init__()
        # split prefixed kwargs; NOTE: order matters — pick_and_pop / .pop below
        # mutate enc_kwargs / dec_kwargs before the leftovers reach AttentionLayers
        enc_kwargs, kwargs = groupby_prefix_and_trim('enc_', kwargs)
        dec_kwargs, kwargs = groupby_prefix_and_trim('dec_', kwargs)
        assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
        enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
        enc_transformer_kwargs['emb_dropout'] = enc_kwargs.pop('emb_dropout', 0)
        enc_transformer_kwargs['num_memory_tokens'] = enc_kwargs.pop('num_memory_tokens', None)
        enc_transformer_kwargs['scaled_sinu_pos_emb'] = enc_kwargs.pop('scaled_sinu_pos_emb', False)
        enc_transformer_kwargs['use_abs_pos_emb'] = enc_kwargs.pop('use_abs_pos_emb', True)
        dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
        dec_transformer_kwargs['emb_dropout'] = dec_kwargs.pop('emb_dropout', 0)
        dec_transformer_kwargs['scaled_sinu_pos_emb'] = dec_kwargs.pop('scaled_sinu_pos_emb', False)
        dec_transformer_kwargs['use_abs_pos_emb'] = dec_kwargs.pop('use_abs_pos_emb', True)
        self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories
        if deepnorm:
            # deepnorm residual scaling constants (https://arxiv.org/abs/2203.00555)
            enc_kwargs['scale_residual'] = True
            dec_kwargs['scale_residual'] = True
            enc_depth = enc_kwargs['depth']
            dec_depth = dec_kwargs['depth']
            enc_kwargs['scale_residual_constant'] = 0.81 * ((enc_depth ** 4) * dec_depth) ** .0625
            dec_kwargs['scale_residual_constant'] = (3 * dec_depth) ** 0.25
        self.encoder = TransformerWrapper(
            **enc_transformer_kwargs,
            attn_layers = Encoder(dim = dim, **enc_kwargs)
        )
        self.decoder = TransformerWrapper(
            **dec_transformer_kwargs,
            attn_layers = Decoder(dim = dim, cross_attend = True, **dec_kwargs)
        )
        if deepnorm:
            deepnorm_init(self.encoder, 0.87 * ((enc_depth ** 4) * dec_depth) ** -0.0625)
            deepnorm_init(self.decoder, (12 * dec_depth) ** -0.25)
        if tie_token_emb:
            # share one embedding table between encoder and decoder
            self.decoder.token_emb = self.encoder.token_emb
        self.decoder = AutoregressiveWrapper(self.decoder, ignore_index=ignore_index, pad_value=pad_value)
    @torch.no_grad()
    def generate(self, seq_in, seq_out_start, seq_len, mask = None, attn_mask = None, **kwargs):
        """Encode `seq_in`, then autoregressively decode up to `seq_len` tokens."""
        encodings = self.encoder(seq_in, mask = mask, attn_mask = attn_mask, return_embeddings = True)
        return self.decoder.generate(seq_out_start, seq_len, context = encodings, context_mask = mask, **kwargs)
    def forward(self, src, tgt, mask = None, attn_mask = None, src_prepend_embeds = None):
        """Return the decoder (autoregressive) loss for source/target pair."""
        if exists(src_prepend_embeds) and exists(mask):
            # extend the mask to cover the prepended embedding positions
            mask = pad_at_dim(mask, (src_prepend_embeds.shape[-2], 0), dim = -1, value = True)
        enc = self.encoder(src, mask = mask, attn_mask = attn_mask, prepend_embeds = src_prepend_embeds, return_embeddings = True)
        if self.training and self.cross_attn_tokens_dropout > 0:
            enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)
        out = self.decoder(tgt, context = enc, context_mask = mask)
        return out
| 54,584 | 33.635152 | 290 | py |
x-transformers | x-transformers-main/x_transformers/attend.py | from functools import partial
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange
# constants
# per-backend kernel toggles forwarded to torch.backends.cuda.sdp_kernel
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
    """Attention byproducts captured during a forward pass (used for residual
    attention and for returning attention maps to the caller)."""
    qk_similarities: Tensor = None
    pre_softmax_attn: Tensor = None
    post_softmax_attn: Tensor = None
    def to_tuple(self):
        # fixed ordering mirrors the field declaration order above
        return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
    """True when `val` is anything other than None (falsy values count as present)."""
    return not (val is None)

def default(val, d):
    """Return `val` if it is not None, otherwise the fallback `d`."""
    if val is None:
        return d
    return val

def compact(arr):
    """Filter None entries out of `arr`, returning a list in original order."""
    return [el for el in arr if el is not None]
def once(fn):
    """Wrap single-argument `fn` so that only the very first call executes."""
    has_run = False

    @wraps(fn)
    def wrapper(x):
        nonlocal has_run
        if not has_run:
            has_run = True
            return fn(x)
        return None

    return wrapper

# emit informational messages at most once per process
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
    """Boolean (i, j) mask, True where attention must be blocked (strictly
    future positions, with the last i queries aligned to the last i keys)."""
    full = torch.ones((i, j), device = device, dtype = torch.bool)
    # diagonal offset keeps queries aligned with the tail of the key sequence
    return full.triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
    """Causal mask built without .triu, for ONNX CPU export compatibility."""
    r = torch.arange(i, device = device)
    # broadcast (i, 1) < (1, i): True strictly above the diagonal
    causal_mask = r[:, None] < r[None, :]
    # left-pad with False so the last i key positions line up with the queries
    return F.pad(causal_mask, (j - i, 0), value = False)
# main class
class Attend(nn.Module):
    """Core scaled-dot-product attention with optional flash attention,
    talking heads, sparse top-k filtering and residual attention support.

    Operates on already-projected q/k/v of shape (b, h, n, d); k/v may also be
    (b, n, d) for single-headed key/values.
    """
    def __init__(
        self,
        *,
        dropout = 0.,
        causal = False,
        heads = None,
        talking_heads = False,
        sparse_topk = None,
        scale = None,
        qk_norm = False,
        flash = False,
        onnxable = False
    ):
        super().__init__()
        self.scale = scale
        self.qk_norm = qk_norm
        self.causal = causal
        self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
        # softmax in float32 for stability, unless qk norm already bounds the logits
        self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
        self.dropout = dropout
        self.attn_dropout = nn.Dropout(dropout)
        # talking heads
        assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
        self.talking_heads = talking_heads
        if talking_heads:
            self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
            self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
        # sparse topk
        assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
        self.sparse_topk = sparse_topk
        # flash attention
        self.flash = flash
        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
        # determine efficient attention configs for cuda and cpu
        self.cpu_config = EfficientAttentionConfig(True, True, True)
        self.cuda_config = None
        if not torch.cuda.is_available() or not flash:
            return
        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
        if device_properties.major == 8 and device_properties.minor == 0:
            print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
            self.cuda_config = EfficientAttentionConfig(True, False, False)
        else:
            print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
            self.cuda_config = EfficientAttentionConfig(False, True, True)
    def flash_attn(
        self,
        q, k, v,
        mask = None,
        attn_bias = None
    ):
        """Dispatch to torch's scaled_dot_product_attention; returns
        (out, empty Intermediates) since kernels expose no attention maps."""
        batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
        # Recommended for multi-query single-key-value attention by Tri Dao
        # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
        if k.ndim == 3:
            k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
        if v.ndim == 3:
            v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
        # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
        if self.qk_norm:
            default_scale = q.shape[-1] ** -0.5
            q = q * (default_scale / self.scale)
        # Check if mask exists and expand to compatible shape
        # The mask is B L, so it would have to be expanded to B H N L
        causal = self.causal
        if exists(mask):
            assert mask.ndim == 4
            mask = mask.expand(batch, heads, q_len, k_len)
            # manually handle causal mask, if another mask was given
            if causal:
                causal_mask = self.create_causal_mask(q_len, k_len, device = device)
                mask = mask & ~causal_mask
                causal = False
        # handle alibi positional bias
        # convert from bool to float
        if exists(attn_bias):
            attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
            # if mask given, the mask would already contain the causal mask from above logic
            # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
            mask_value = -torch.finfo(q.dtype).max
            if exists(mask):
                attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
            elif causal:
                causal_mask = self.create_causal_mask(q_len, k_len, device = device)
                attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
                causal = False
            # scaled_dot_product_attention handles attn_mask either as bool or additive bias
            # make it an additive bias here
            mask = attn_bias
        # Check if there is a compatible device for flash attention
        config = self.cuda_config if is_cuda else self.cpu_config
        # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask = mask,
                dropout_p = self.dropout if self.training else 0.,
                is_causal = causal
            )
        return out, Intermediates()
    def forward(
        self,
        q, k, v,
        mask = None,
        attn_bias = None,
        prev_attn = None
    ):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """
        n, device = q.shape[-2], q.device
        scale = default(self.scale, q.shape[-1] ** -0.5)
        if self.flash:
            assert not exists(prev_attn), 'residual attention not compatible with flash attention'
            return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
        kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
        dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
        # residual attention: add previous layer's pre-softmax scores
        if exists(prev_attn):
            dots = dots + prev_attn
        qk_similarities = dots.clone()
        if self.talking_heads:
            dots = self.pre_softmax_talking_heads(dots)
        if exists(attn_bias):
            dots = dots + attn_bias
        i, j, dtype = *dots.shape[-2:], dots.dtype
        pre_softmax_attn = dots.clone()
        mask_value = -torch.finfo(dots.dtype).max
        # sparse attention: only keep the top-k scores per query
        if exists(self.sparse_topk) and self.sparse_topk < j:
            top_values, _ = dots.topk(self.sparse_topk, dim = -1)
            sparse_topk_mask = dots < top_values[..., -1:]
            mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
        if exists(mask):
            dots = dots.masked_fill(~mask, mask_value)
        if self.causal:
            causal_mask = self.create_causal_mask(i, j, device = device)
            dots = dots.masked_fill(causal_mask, mask_value)
        attn = self.attn_fn(dots, dim = -1)
        attn = attn.type(dtype)
        post_softmax_attn = attn.clone()
        attn = self.attn_dropout(attn)
        if self.talking_heads:
            attn = self.post_softmax_talking_heads(attn)
        out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
        intermediates = Intermediates(
            qk_similarities = qk_similarities,
            pre_softmax_attn = pre_softmax_attn,
            post_softmax_attn = post_softmax_attn
        )
        return out, intermediates
# cascading heads logic
def to_single_heads(t, dim = 1):
    """Split `t` along its head dimension `dim` into a tuple of tensors,
    each retaining a singleton head dimension."""
    return tuple(head.unsqueeze(dim) for head in t.unbind(dim = dim))
class CascadingHeads(nn.Module):
    """Run an `Attend` module one head at a time, feeding each head's output
    into the next head's queries (cascading), then re-concatenate heads."""
    def __init__(self, attend: Attend):
        super().__init__()
        self.attend = attend
    def forward(
        self,
        q, k, v,
        mask = None,
        attn_bias = None,
        prev_attn = None
    ):
        assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
        # split inputs into per-head inputs
        heads = q.shape[1]
        queries = to_single_heads(q)
        # single-headed k/v (ndim 3) are shared across all heads
        keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
        values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
        mask = (mask,) * heads
        attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
        prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
        # now loop through each head, without output of previous head summed with the next head
        # thus cascading
        all_outs = []
        all_intermediates = []
        prev_head_out = None
        for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
            if exists(prev_head_out):
                h_q = h_q + prev_head_out
            out, intermediates = self.attend(
                h_q, h_k, h_v,
                mask = h_mask,
                attn_bias = h_attn_bias,
                prev_attn = h_prev_attn
            )
            prev_head_out = out
            all_outs.append(out)
            all_intermediates.append(intermediates)
        # cat all output heads
        all_outs = torch.cat(all_outs, dim = 1)
        # cat all intermediates, if they exist
        qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
        qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
        aggregated_intermediates = Intermediates(
            qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
            pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
            post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
        )
        return all_outs, aggregated_intermediates
| 11,319 | 30.887324 | 163 | py |
x-transformers | x-transformers-main/x_transformers/nonautoregressive_wrapper.py | import math
from random import random
from contextlib import nullcontext
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange, repeat, pack, unpack
from x_transformers.x_transformers import TransformerWrapper
from typing import Optional
# constants
# (total loss, generator cross entropy, token-critic loss) triple returned by NonAutoregressiveWrapper.forward
Losses = namedtuple('Losses', ['loss', 'generator_loss', 'critic_loss'])
# helper functions
def exists(val):
    # None is the sole sentinel for "missing"
    return val is not None

def default(val, d):
    """Fall back to `d` only when `val` is absent (None)."""
    return d if val is None else val
# sampling helpers
def top_k(logits, thres = 0.9):
    """Keep only the top ceil((1 - thres) * vocab) logits per position; the
    rest become -inf. Expects (batch, seq, vocab) — scatters along dim 2."""
    num_keep = math.ceil((1 - thres) * logits.shape[-1])
    vals, idx = logits.topk(num_keep, dim = -1)
    filtered = torch.full_like(logits, float('-inf'))
    filtered.scatter_(2, idx, vals)
    return filtered
def log(t, eps = 1e-10):
    """log with an epsilon floor so a zero input does not produce -inf."""
    return (t + eps).log()

def gumbel_noise(t):
    """Sample standard Gumbel noise with the same shape as `t`."""
    u = torch.zeros_like(t).uniform_(0, 1)
    return -log(-log(u))

def gumbel_sample(t, temperature = 1., dim = -1):
    """Gumbel-max sampling of temperature-scaled logits along `dim`."""
    scaled = t / max(temperature, 1e-10)
    return (scaled + gumbel_noise(t)).argmax(dim = dim)
# prob helpers
def sample_prob(prob):
    """Bernoulli trial: True with probability `prob`."""
    draw = random()
    return draw < prob

def coin_flip():
    """Fair coin: True roughly half the time."""
    return sample_prob(0.5)
# tensor helpers
def get_mask_subset_prob(mask, prob, min_mask = 0):
    """Randomly pick a ~`prob` fraction of the True positions of boolean
    `mask` (shape (batch, seq)), at least `min_mask` per row; False positions
    are never selected."""
    batch, seq = mask.shape
    device = mask.device
    # per-row target count of positions to select
    num_to_mask = (mask.sum(dim = -1, keepdim = True) * prob).clamp(min = min_mask)
    # random scores; padding gets -1 so it sorts to the front
    scores = torch.rand((batch, seq), device = device)
    scores = scores.masked_fill(~mask, -1)
    order = scores.argsort(dim = -1).float()
    # shift so valid positions effectively rank from 0 upward
    order -= (~mask).sum(dim = -1, keepdim = True)
    chosen = order < num_to_mask
    chosen.masked_fill_(~mask, False)
    return chosen
# schedules
def linear_schedule(t):
    # mask fraction anneals linearly: 1 at t=0 down to 0 at t=1
    return 1 - t
def cosine_schedule(t):
    """ https://arxiv.org/abs/2202.04200 """
    # cosine anneal: 1 at t=0, 0 at t=1, with slower decay early on
    return torch.cos(t * math.pi / 2)
# self token critic
# inspired by Nijkamp et al. - https://aclanthology.org/2021.naacl-main.409/
class SelfCritic(nn.Module):
    """Token critic that reuses the generator network's trunk, adding only a
    per-token scalar head on top of its embeddings."""
    def __init__(self, net):
        super().__init__()
        self.net = net
        self.to_logits = nn.Linear(net.attn_layers.dim, 1)

    def forward(self, x):
        # score each token: (b, n) ids -> (b, n, 1) critic logits
        return self.to_logits(self.net(x, return_embeddings = True))
class NonAutoregressiveWrapper(nn.Module):
    """
    https://arxiv.org/abs/1904.09324
    https://arxiv.org/abs/2202.04200

    MaskGit-style non-autoregressive training / iterative-demasking generation
    around a `TransformerWrapper`, with optional self-conditioning and an
    optional token critic for re-masking decisions.
    """
    def __init__(
        self,
        net,
        *,
        mask_id,
        steps = 18,
        self_cond = False,
        self_cond_train_prob = 0.75,
        no_replace_prob = 0.15, # which percentage of the tokens masked will stay the same, done in original MLM paper
        random_token_prob = 0.1, # which percentage of tokens to be replaced with random token, done in original MLM paper
        schedule = 'linear',
        can_mask_prev_unmasked = False, # when unmasking, whether it can remask previously unmasked
        token_critic: Optional[TransformerWrapper] = None,
        self_token_critic = False,
        critic_loss_weight = 1.
    ):
        super().__init__()
        assert not (self_token_critic and exists(token_critic))
        self.net = net
        dim = net.emb_dim
        self.dim = dim
        self.num_tokens = net.num_tokens
        self.mask_id = mask_id
        # afaict, maskgit paper did not do this
        # but may help for self conditioning, as used successfully in original BERT
        self.no_replace_prob = no_replace_prob
        self.random_token_prob = random_token_prob
        self.max_seq_len = net.max_seq_len
        self.steps = steps
        # bugfix: previously a callable `schedule` fell through to the string
        # comparisons below and raised ValueError — the chain must be exclusive
        if callable(schedule):
            self.schedule_fn = schedule
        elif schedule == 'linear':
            self.schedule_fn = linear_schedule
        elif schedule == 'cosine':
            self.schedule_fn = cosine_schedule
        else:
            raise ValueError(f'invalid schedule {schedule}')
        self.can_mask_prev_unmasked = can_mask_prev_unmasked
        # self conditioning
        self.self_cond = self_cond
        if self_cond:
            self.null_embed = nn.Parameter(torch.randn(dim))
            self.to_self_cond = nn.Linear(dim, dim, bias = False) if self_cond else None
            self.self_cond_train_prob = self_cond_train_prob
        # token critic
        self.token_critic = token_critic
        if self_token_critic:
            self.token_critic = SelfCritic(net)
        self.critic_loss_weight = critic_loss_weight
    @torch.no_grad()
    def generate(
        self,
        batch_size = None,
        start_temperature = 1.,
        filter_thres = 0.7,
        noise_level_scale = 1.,
        **kwargs
    ):
        """Iteratively demask a fully-masked sequence over `self.steps` rounds.

        Returns a (batch, seq) LongTensor of token ids, or a (seq,) tensor when
        `batch_size` is None.
        """
        sample_one = not exists(batch_size)
        batch_size = default(batch_size, 1)
        device = next(self.net.parameters()).device
        was_training = self.training
        self.eval()
        times = torch.linspace(0., 1., self.steps + 1)
        # sequence starts off as all masked
        shape = (batch_size, self.max_seq_len)
        seq = torch.full(shape, self.mask_id, device = device)
        mask = torch.full(shape, True, device = device)
        # slowly demask
        all_mask_num_tokens = (self.schedule_fn(times[1:]) * self.max_seq_len).long()
        # self conditioning
        has_self_cond = self.self_cond
        last_embed = self.null_embed if has_self_cond else None
        for mask_num_tokens, steps_until_x0 in zip(all_mask_num_tokens.tolist(), reversed(range(self.steps))):
            self_cond = self.to_self_cond(last_embed) if has_self_cond else None
            logits, embeds = self.net(
                seq,
                sum_embeds = self_cond,
                return_logits_and_embeddings = True,
                **kwargs
            )
            if has_self_cond:
                last_embed = embeds
            if exists(filter_thres):
                logits = top_k(logits, filter_thres)
            # temperature anneals toward 0 as the final step approaches
            annealing_scale = steps_until_x0 / self.steps
            temperature = start_temperature * annealing_scale
            sampled_ids = gumbel_sample(logits, temperature = max(temperature, 1e-3))
            # only masked positions are (re)sampled
            seq = torch.where(mask, sampled_ids, seq)
            if exists(self.token_critic):
                scores = self.token_critic(seq)
                scores = rearrange(scores, 'b n 1 -> b n')
                scores = scores + noise_level_scale * gumbel_noise(scores) * annealing_scale
            else:
                # score by the model's own (un)confidence in its samples
                scores = 1 - logits.softmax(dim = -1)
                scores = scores.gather(2, rearrange(sampled_ids, 'b n -> b n 1'))
                scores = rearrange(scores, 'b n 1 -> b n')
            if mask_num_tokens == 0:
                # topk(0) below selects nothing, so the sequence stays fully unmasked
                pass
            if not self.can_mask_prev_unmasked:
                scores = scores.masked_fill(~mask, -torch.finfo(scores.dtype).max)
            mask_indices = scores.topk(mask_num_tokens, dim = -1).indices
            mask = torch.zeros_like(scores, dtype = torch.bool).scatter(1, mask_indices, True)
            seq = seq.masked_fill(mask, self.mask_id)
        self.train(was_training)
        if sample_one:
            seq = rearrange(seq, '1 n -> n')
        return seq
    def forward(
        self,
        x,
        only_train_generator = False,
        only_train_critic = False,
        generator_sample_temperature = None,
        **kwargs
    ):
        """Masked-prediction training step; returns a `Losses` namedtuple of
        (total, generator cross entropy, critic loss)."""
        b, n, device = *x.shape, x.device
        assert n == self.max_seq_len
        orig_seq = x.clone()
        # sample a random time per sequence and mask the scheduled fraction
        rand_times = torch.empty(b, device = device).uniform_(0, 1)
        batched_randperm = torch.rand((b, n), device = device).argsort(dim = -1).float()
        rand_probs = self.schedule_fn(rand_times)
        num_tokens_mask = (rand_probs * n).clamp(min = 1.)
        mask = batched_randperm < rearrange(num_tokens_mask, 'b -> b 1')
        # to ensure all tokens produce embeddings, instead of just the ones with [mask] input, as done in seminal BERT MLM paper
        # potentially needed for self-conditioning (on embedding) to work well
        replace_mask_id_mask = mask.clone()
        frac_seq_left = 1.
        if self.no_replace_prob > 0. and coin_flip():
            frac_seq_left -= self.no_replace_prob
            no_replace_prob_mask = get_mask_subset_prob(mask, self.no_replace_prob)
            replace_mask_id_mask &= ~no_replace_prob_mask
        if self.random_token_prob > 0. and coin_flip():
            random_token_prob_mask = get_mask_subset_prob(replace_mask_id_mask, self.random_token_prob * frac_seq_left)
            random_tokens = torch.randint(0, self.num_tokens, (b, n), device = device)
            x = torch.where(random_token_prob_mask, random_tokens, x)
            replace_mask_id_mask &= ~random_token_prob_mask
        masked = torch.where(replace_mask_id_mask, self.mask_id, x)
        # self conditioning
        if self.self_cond:
            self_cond = self.null_embed
            if sample_prob(self.self_cond_train_prob):
                with torch.no_grad():
                    self_cond = self.net(masked, return_embeddings = True, **kwargs).detach()
            kwargs.update(sum_embeds = self.to_self_cond(self_cond))
        # logits
        context = torch.no_grad if only_train_critic else nullcontext
        with context():
            logits = self.net(masked, **kwargs)
        # cross entropy loss
        loss = F.cross_entropy(
            logits[mask],
            orig_seq[mask]
        )
        if not exists(self.token_critic) or only_train_generator:
            return Losses(loss, loss, None)
        # critic learns to spot which sampled tokens differ from the originals
        sampled_ids = gumbel_sample(logits, temperature = default(generator_sample_temperature, random()))
        generated = torch.where(mask, sampled_ids, orig_seq)
        critic_logits = self.token_critic(generated)
        critic_labels = (sampled_ids != orig_seq).float()
        critic_loss = F.binary_cross_entropy_with_logits(
            rearrange(critic_logits, '... 1 -> ...'),
            critic_labels
        )
        # determine losses to be returned based on what researcher wants to train
        if only_train_critic:
            total_loss = critic_loss
            loss = None
        else:
            total_loss = loss + critic_loss * self.critic_loss_weight
        return Losses(total_loss, loss, critic_loss)
| 10,414 | 29.453216 | 130 | py |
x-transformers | x-transformers-main/x_transformers/autoregressive_wrapper.py | from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
    """Return True for any value except None."""
    return not (val is None)
def eval_decorator(fn):
    """Decorator for methods of modules with ``training``/``eval``/``train``:
    runs ``fn`` in eval mode, then restores the previous training state."""
    def wrapper(self, *args, **kwargs):
        prev_mode = self.training
        self.eval()
        result = fn(self, *args, **kwargs)
        self.train(prev_mode)
        return result
    return wrapper
# nucleus
def top_p(logits, thres = 0.9):
    """Nucleus (top-p) filtering: keep the smallest set of highest logits whose
    cumulative probability reaches 1 - thres; all other logits become -inf."""
    desc_logits, desc_indices = logits.sort(dim=-1, descending=True)
    cumulative = F.softmax(desc_logits, dim=-1).cumsum(dim=-1)
    remove = cumulative > (1 - thres)
    # shift right so the token that crosses the threshold is itself kept
    remove[:, 1:] = remove[:, :-1].clone()
    remove[:, 0] = 0
    desc_logits = desc_logits.masked_fill(remove, float('-inf'))
    # scatter the filtered values back to their original (unsorted) positions
    return desc_logits.scatter(1, desc_indices, desc_logits)
# topk
def top_k(logits, thres = 0.9):
    """Top-k filtering: keep the ceil((1 - thres) * vocab) largest logits and
    set everything else to -inf."""
    num_keep = ceil((1 - thres) * logits.shape[-1])
    top_vals, top_idx = logits.topk(num_keep, dim=-1)
    filtered = torch.full_like(logits, float('-inf'))
    return filtered.scatter_(1, top_idx, top_vals)
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
    """Top-a filtering: drop tokens whose probability falls below
    max(p) ** min_p_pow * min_p_ratio; surviving logits are flattened to 1.

    NOTE: mutates ``logits`` in place and returns the same tensor (matching
    the original behavior)."""
    probs = F.softmax(logits, dim=-1)
    threshold = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
    below = probs < threshold
    logits[below] = float('-inf')
    logits[~below] = 1
    return logits
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
    """Wraps a token-level transformer for autoregressive training and sampling.

    ``forward`` shifts the tokens one step to build (input, target) pairs and
    returns the next-token cross-entropy loss; ``generate`` samples ``seq_len``
    new tokens with top-k / nucleus / top-a filtering.
    """
    def __init__(
        self,
        net,
        ignore_index = -100,
        pad_value = 0,
        mask_prob = 0.
    ):
        super().__init__()
        self.pad_value = pad_value          # used to blank tokens after EOS
        self.ignore_index = ignore_index    # passed to cross entropy
        self.net = net
        self.max_seq_len = net.max_seq_len
        # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
        assert mask_prob < 1.
        self.mask_prob = mask_prob

    @torch.no_grad()
    @eval_decorator
    def generate(
        self,
        start_tokens,
        seq_len,
        eos_token = None,
        temperature = 1.,
        filter_logits_fn = top_k,
        filter_thres = 0.9,
        min_p_pow = 2.0,
        min_p_ratio = 0.02,
        **kwargs
    ):
        """Sample ``seq_len`` tokens continuing ``start_tokens``; stops early
        once every sequence contains ``eos_token`` (if given), padding the
        tail with ``pad_value``."""
        start_tokens, ps = pack([start_tokens], '* n')
        _, t = start_tokens.shape
        out = start_tokens
        for _ in range(seq_len):
            # only the trailing window fits in the network's context
            x = out[:, -self.max_seq_len:]
            logits = self.net(x, **kwargs)[:, -1]
            if filter_logits_fn in {top_k, top_p}:
                filtered_logits = filter_logits_fn(logits, thres = filter_thres)
            elif filter_logits_fn is top_a:
                filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio = min_p_ratio)
            else:
                # BUG FIX: an unrecognized filter previously fell through and
                # crashed with a NameError on `probs`; fail with a clear error
                raise ValueError('filter_logits_fn must be one of top_k, top_p, top_a')
            probs = F.softmax(filtered_logits / temperature, dim=-1)
            sample = torch.multinomial(probs, 1)
            out = torch.cat((out, sample), dim=-1)
            if exists(eos_token):
                is_eos_tokens = (out == eos_token)
                if is_eos_tokens.any(dim = -1).all():
                    # mask out everything after the eos tokens
                    shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
                    mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
                    out = out.masked_fill(mask, self.pad_value)
                    break
        out = out[:, t:]
        out, = unpack(out, ps, '* n')
        return out

    def forward(self, x, **kwargs):
        """Next-token cross-entropy loss for a batch of token ids ``x``."""
        seq, ignore_index = x.shape[1], self.ignore_index
        inp, target = x[:, :-1], x[:, 1:]
        if self.mask_prob > 0.:
            rand = torch.randn(inp.shape, device = x.device)
            rand[:, 0] = -torch.finfo(rand.dtype).max  # first token should not be masked out
            num_mask = min(int(seq * self.mask_prob), seq - 1)
            indices = rand.topk(num_mask, dim = -1).indices
            mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
            kwargs.update(self_attn_context_mask = mask)
        logits = self.net(inp, **kwargs)
        loss = F.cross_entropy(
            rearrange(logits, 'b n c -> b c n'),
            target,
            ignore_index = ignore_index
        )
        return loss
| 4,446 | 28.646667 | 151 | py |
x-transformers | x-transformers-main/x_transformers/xl_autoregressive_wrapper.py | from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
from x_transformers.autoregressive_wrapper import top_p, top_k, eval_decorator
# helper functions
def exists(val):
    """True when ``val`` holds any value other than None."""
    return not (val is None)
def divisible_by(numer, denom):
    """True when ``numer`` is an exact multiple of ``denom``."""
    return not (numer % denom)
# xl autoregressive wrapper class
class XLAutoregressiveWrapper(nn.Module):
    """Autoregressive wrapper for Transformer-XL style recurrence.

    Long sequences are processed in segments of ``net.max_seq_len`` while the
    network's recurrent memories (``mems``) carry context across segment
    boundaries; the wrapped net must accept ``mems=`` and support
    ``return_mems=True``.
    """
    def __init__(
        self,
        net,
        ignore_index = -100,
        pad_value = 0
    ):
        super().__init__()
        # pad_value blanks out tokens after an EOS during generation
        self.pad_value = pad_value
        self.ignore_index = ignore_index
        self.net = net
        self.max_seq_len = net.max_seq_len
    @torch.no_grad()
    @eval_decorator
    def generate(
        self,
        start_tokens,
        seq_len,
        eos_token = None,
        temperature = 1.,
        filter_logits_fn = top_k,
        filter_thres = 0.9,
        mems = None,
        **kwargs
    ):
        """Sample ``seq_len`` tokens continuing ``start_tokens``, feeding all
        complete leading segments through the net first to build up memories,
        then sampling within the current (partial) segment."""
        device, max_seq_len = start_tokens.device, self.max_seq_len
        start_tokens, ps = pack([start_tokens], '* n')
        b, t = start_tokens.shape
        # all full segments before the current partial one
        *all_leading_tokens, _ = start_tokens.split(max_seq_len, dim = -1)
        # catch the memory up to the current segment
        for leading_tokens in all_leading_tokens:
            _, mems = self.net(
                leading_tokens,
                mems = mems,
                return_mems = True,
                **kwargs
            )
        # now start sampling from the current segment
        curr_pos = len(all_leading_tokens) * max_seq_len
        curr_mems = mems
        out = start_tokens
        for _ in range(seq_len):
            curr_segment_len = out.shape[-1]
            # True exactly when the current segment has just been filled
            is_last_segment_tokens = divisible_by(curr_segment_len, max_seq_len)
            x = out[:, curr_pos:]
            logits, mems = self.net(
                x,
                mems = curr_mems,
                return_mems = True,
                **kwargs
            )
            logits = logits[:, -1]
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
            probs = F.softmax(filtered_logits / temperature, dim=-1)
            sample = torch.multinomial(probs, 1)
            if is_last_segment_tokens:
                # segment full: commit its memories and start a fresh segment
                curr_pos = curr_segment_len
                curr_mems = mems
            out = torch.cat((out, sample), dim=-1)
            if exists(eos_token):
                is_eos_tokens = (out == eos_token)
                if is_eos_tokens.any(dim = -1).all():
                    # mask out everything after the eos tokens
                    shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
                    mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
                    out = out.masked_fill(mask, self.pad_value)
                    break
        out = out[:, t:]
        out, = unpack(out, ps, '* n')
        return out
    def forward(
        self,
        x,
        mems = None,
        **kwargs
    ):
        """Segment-wise next-token cross entropy: each chunk's loss is
        weighted by its share of the full sequence length so the total equals
        the loss over the whole sequence."""
        ignore_index, max_seq_len = self.ignore_index, self.max_seq_len
        x, labels = x[:, :-1], x[:, 1:]
        seq_len = x.shape[1]
        # prepare chunks
        split_x = x.split(max_seq_len, dim = -1)
        split_labels = labels.split(max_seq_len, dim = -1)
        loss_weights = tuple(map(lambda t: t.shape[-1] / seq_len, split_x))
        # go through each chunk and derive weighted losses
        total_loss = 0.
        for chunk, chunk_labels, loss_weight in zip(split_x, split_labels, loss_weights):
            logits, mems = self.net(
                chunk,
                mems = mems,
                return_mems = True,
                **kwargs
            )
            loss = F.cross_entropy(
                rearrange(logits, 'b n c -> b c n'),
                chunk_labels,
                ignore_index = ignore_index
            )
            total_loss = total_loss + loss * loss_weight
        return total_loss
| 3,988 | 25.071895 | 89 | py |
x-transformers | x-transformers-main/x_transformers/continuous_autoregressive_wrapper.py | import torch
from torch import nn
import torch.nn.functional as F
def exists(val):
    """Return True unless ``val`` is None."""
    return not (val is None)
class ContinuousAutoregressiveWrapper(nn.Module):
    """Autoregressive wrapper for continuous (real-valued) sequences.

    Training minimizes the MSE between the net's prediction at each position
    and the next-step target; ``generate`` feeds predictions back one step at
    a time.
    """
    def __init__(self, net, ignore_index = -100, pad_value = 0):
        super().__init__()
        self.net = net
        self.max_seq_len = net.max_seq_len
        # BUG FIX: these arguments were previously accepted but silently
        # discarded; store them for API parity with the discrete wrappers
        self.ignore_index = ignore_index
        self.pad_value = pad_value

    @torch.no_grad()
    def generate(self, start_tokens, seq_len, **kwargs):
        """Extend ``start_tokens`` ((b, n, d) or (n, d)) by ``seq_len`` steps
        and return only the newly generated steps."""
        was_training = self.net.training
        num_dims = len(start_tokens.shape)
        assert num_dims >= 2, 'number of dimensions of your start tokens must be greater or equal to 2'
        if num_dims == 2:
            # promote (n, d) to a batch of one
            start_tokens = start_tokens[None, :]
        _, t, _ = start_tokens.shape
        self.net.eval()
        out = start_tokens
        for _ in range(seq_len):
            # only the trailing context window is fed to the net
            x = out[:, -self.max_seq_len:]
            last = self.net(x, **kwargs)[:, -1:]
            out = torch.cat((out, last), dim = -2)
        out = out[:, t:]
        if num_dims == 2:
            out = out.squeeze(0)
        self.net.train(was_training)
        return out

    def forward(self, x, **kwargs):
        """Mean squared next-step prediction error; an optional boolean
        ``mask`` kwarg restricts the loss to unmasked positions."""
        inp, target = x[:, :-1], x[:, 1:]
        mask = kwargs.get('mask', None)
        if mask is not None and mask.shape[1] == x.shape[1]:
            # crop the mask so it lines up with the shifted input
            mask = mask[:, :-1]
            kwargs['mask'] = mask
        out = self.net(inp, **kwargs)
        loss = F.mse_loss(out, target, reduction = 'none')
        if mask is not None:
            loss = loss[mask]
        return loss.mean()
| 1,575 | 25.711864 | 103 | py |
x-transformers | x-transformers-main/x_transformers/__init__.py | import torch
from packaging import version
# einops operations are only traceable inside torch.compile graphs when
# explicitly registered; enable that on PyTorch >= 2.0
if version.parse(torch.__version__) >= version.parse('2.0.0'):
    from einops._torch_specific import allow_ops_in_compiled_graph
    allow_ops_in_compiled_graph()
# re-export the package's public API at the top level
from x_transformers.x_transformers import XTransformer, Encoder, Decoder, CrossAttender, Attention, TransformerWrapper, ViTransformerWrapper, ContinuousTransformerWrapper
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
from x_transformers.nonautoregressive_wrapper import NonAutoregressiveWrapper
from x_transformers.continuous_autoregressive_wrapper import ContinuousAutoregressiveWrapper
from x_transformers.xl_autoregressive_wrapper import XLAutoregressiveWrapper
| 701 | 49.142857 | 170 | py |
PastNet | PastNet-main/utils.py | import os
import logging
import torch
import random
import numpy as np
import torch.backends.cudnn as cudnn
def set_seed(seed):
    """Seed every RNG used in the project (python, numpy, torch) and force
    deterministic cuDNN kernels for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # BUG FIX: CUDA generators were previously left unseeded, so GPU runs
    # were not reproducible; this call is a no-op on CPU-only machines
    torch.cuda.manual_seed_all(seed)
    cudnn.deterministic = True
def print_log(message):
    """Write ``message`` to stdout and to the root logger at INFO level."""
    for emit in (print, logging.info):
        emit(message)
def output_namespace(namespace):
    """Render every attribute of ``namespace`` as a '\\nkey: \\tvalue\\t' run,
    concatenated into one string (used for logging experiment configs)."""
    return ''.join(f"\n{key}: \t{value}\t" for key, value in namespace.__dict__.items())
def check_dir(path):
    """Create ``path`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation (the previous check-then-create pattern
    could raise FileExistsError under that race).
    """
    os.makedirs(path, exist_ok=True)
PastNet | PastNet-main/exp_vq.py | import os
import os.path as osp
import json
import torch
import pickle
import logging
import numpy as np
from models.PastNet_Model import PastNetModel
from tqdm import tqdm
from API import *
from utils import *
def relative_l1_error(true_values, predicted_values):
    """Mean element-wise relative L1 error: mean(|true - pred| / |true|)."""
    return ((true_values - predicted_values).abs() / true_values.abs()).mean()
class PastNet_exp:
    """Experiment driver for PastNet: wires up device, data loaders, model,
    optimizer and checkpoint/log directories, and exposes train/vali/test."""
    def __init__(self, args):
        super(PastNet_exp, self).__init__()
        self.args = args
        self.config = self.args.__dict__
        self.device = self._acquire_device()
        self._preparation()
        print_log(output_namespace(self.args))
        # NOTE(review): _preparation() already calls _get_data(), so the data
        # loaders are built twice here — confirm whether this is intentional
        self._get_data()
        self._select_optimizer()
        self._select_criterion()
    def _acquire_device(self):
        # Pin the visible GPU via CUDA_VISIBLE_DEVICES, then address it as
        # cuda:0 inside the process; fall back to CPU otherwise.
        if self.args.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.args.gpu)
            device = torch.device('cuda:{}'.format(0))
            print_log('Use GPU: {}'.format(self.args.gpu))
        else:
            device = torch.device('cpu')
            print_log('Use CPU')
        return device
    def _preparation(self):
        """Seed RNGs, create result/checkpoint dirs, dump args to JSON,
        (re)configure file logging, then build data loaders and the model."""
        # seed
        set_seed(self.args.seed)
        # log and checkpoint
        self.path = osp.join(self.args.res_dir, self.args.ex_name)
        check_dir(self.path)
        self.checkpoints_path = osp.join(self.path, 'checkpoints')
        check_dir(self.checkpoints_path)
        sv_param = osp.join(self.path, 'model_param.json')
        with open(sv_param, 'w') as file_obj:
            json.dump(self.args.__dict__, file_obj)
        # drop any pre-existing handlers so basicConfig takes effect
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(level=logging.INFO, filename=osp.join(self.path, 'log.log'),
                            filemode='a', format='%(asctime)s - %(message)s')
        # prepare data
        self._get_data()
        # build the model
        self._build_model()
    def _build_model(self):
        args = self.args
        # NOTE(review): `freeze` is computed but never passed to the model
        freeze = args.freeze_vqvae == 1
        self.model = PastNetModel(args,
                                  shape_in=tuple(args.in_shape),
                                  hid_T=args.hid_T,
                                  N_T=args.N_T,
                                  res_units=args.res_units,
                                  res_layers=args.res_layers,
                                  embedding_nums=args.K,
                                  embedding_dim=args.D).to(self.device)
    def _get_data(self):
        # Build train/vali/test loaders; fall back to the test loader when no
        # dedicated validation split is provided.
        config = self.args.__dict__
        self.train_loader, self.vali_loader, self.test_loader, self.data_mean, self.data_std = load_data(**config)
        self.vali_loader = self.test_loader if self.vali_loader is None else self.vali_loader
    def _select_optimizer(self):
        # Adam + one-cycle LR schedule stepped once per training batch.
        self.optimizer = torch.optim.Adam(
            self.model.parameters(), lr=self.args.lr)
        self.scheduler = torch.optim.lr_scheduler.OneCycleLR(
            self.optimizer, max_lr=self.args.lr, steps_per_epoch=len(self.train_loader), epochs=self.args.epochs)
        return self.optimizer
    def _select_criterion(self):
        self.criterion = torch.nn.MSELoss()
    def _save(self, name=''):
        # Persist model weights (<name>.pth) and scheduler state (<name>.pkl).
        # NOTE(review): the pickle file handle is never closed explicitly.
        torch.save(self.model.state_dict(), os.path.join(
            self.checkpoints_path, name + '.pth'))
        state = self.scheduler.state_dict()
        fw = open(os.path.join(self.checkpoints_path, name + '.pkl'), 'wb')
        pickle.dump(state, fw)
    def train(self, args):
        """Full training loop: per-epoch MSE optimization, periodic
        validation/checkpointing, then reload the best checkpoint."""
        config = args.__dict__
        recorder = Recorder(verbose=True)
        for epoch in range(config['epochs']):
            train_loss = []
            self.model.train()
            train_pbar = tqdm(self.train_loader)
            for batch_x, batch_y in train_pbar:
                self.optimizer.zero_grad()
                batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
                pred_y = self.model(batch_x)
                loss = self.criterion(pred_y, batch_y)
                train_loss.append(loss.item())
                train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()
            train_loss = np.average(train_loss)
            if epoch % args.log_step == 0:
                with torch.no_grad():
                    vali_loss = self.vali(self.vali_loader)
                    if epoch % (args.log_step * 100) == 0:
                        self._save(name=str(epoch))
                print_log("Epoch: {0} | Train Loss: {1:.4f} Vali Loss: {2:.4f}\n".format(
                    epoch + 1, train_loss, vali_loss))
                # Recorder tracks the best validation loss and writes
                # checkpoint.pth under self.path
                recorder(vali_loss, self.model, self.path)
        best_model_path = self.path + '/' + 'checkpoint.pth'
        self.model.load_state_dict(torch.load(best_model_path))
        return self.model
    # def vali(self, vali_loader):
    #     self.model.eval()
    #     preds_lst, trues_lst, total_loss = [], [], []
    #     vali_pbar = tqdm(vali_loader)
    #     for i, (batch_x, batch_y) in enumerate(vali_pbar):
    #         if i * batch_x.shape[0] > 1000:
    #             break
    #         batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
    #         pred_y = self.model(batch_x)
    #         list(map(lambda data, lst: lst.append(data.detach().cpu().numpy()), [
    #             pred_y, batch_y], [preds_lst, trues_lst]))
    #         loss = self.criterion(pred_y, batch_y)
    #         vali_pbar.set_description(
    #             'vali loss: {:.4f}'.format(loss.mean().item()))
    #         total_loss.append(loss.mean().item())
    #     total_loss = np.average(total_loss)
    #     preds = np.concatenate(preds_lst, axis=0)
    #     trues = np.concatenate(trues_lst, axis=0)
    #     mse, mae, ssim, psnr = metric(preds, trues, vali_loader.dataset.mean, vali_loader.dataset.std, True)
    #     print_log('vali mse:{:.4f}, mae:{:.4f}, ssim:{:.4f}, psnr:{:.4f}'.format(mse, mae, ssim, psnr))
    #     l2_error = torch.nn.MSELoss()(torch.tensor(preds), torch.tensor(trues)).item()
    #     relative_l2_error = l2_error / torch.nn.MSELoss()(torch.tensor(trues), torch.zeros_like(torch.tensor(trues))).item()
    #     l1_error = torch.nn.L1Loss()(torch.tensor(preds), torch.tensor(trues)).item()
    #     rel_l1_err = relative_l1_error(torch.tensor(trues), torch.tensor(preds)).item()
    #     # compute RMSE
    #     rmse = torch.sqrt(torch.mean((torch.tensor(preds) - torch.tensor(trues)) ** 2))
    #     rmse = rmse.item()
    #     print_log('RMSE: {:.7f}'.format(rmse))
    #     print_log('L1 error: {:.7f}, Relative L1 Error: {:.7f}, L2 error: {:.7f}, Relative L2 error: {:.7f},'.format(l1_error, rel_l1_err, l2_error, relative_l2_error))
    #     self.model.train()
    #     return total_loss
    def vali(self, vali_loader):
        """Validate on up to ~1000 samples and log L1/L2/RMSE/MAPE metrics;
        returns the mean validation loss."""
        self.model.eval()
        preds_lst, trues_lst, total_loss = [], [], []
        vali_pbar = tqdm(vali_loader)
        for i, (batch_x, batch_y) in enumerate(vali_pbar):
            # cap validation at roughly 1000 samples for speed
            if i * batch_x.shape[0] > 1000:
                break
            batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
            pred_y = self.model(batch_x)
            list(map(lambda data, lst: lst.append(data.detach().cpu().numpy()), [
                pred_y, batch_y], [preds_lst, trues_lst]))
            loss = self.criterion(pred_y, batch_y)
            vali_pbar.set_description(
                'vali loss: {:.4f}'.format(loss.mean().item()))
            total_loss.append(loss.mean().item())
        total_loss = np.average(total_loss)
        preds = np.concatenate(preds_lst, axis=0)
        trues = np.concatenate(trues_lst, axis=0)
        mse, mae, ssim, psnr = metric(preds, trues, vali_loader.dataset.mean, vali_loader.dataset.std, True)
        # print_log('vali mse:{:.4f}, mae:{:.4f}, ssim:{:.4f}, psnr:{:.4f}'.format(mse, mae, ssim, psnr))
        l2_error = torch.nn.MSELoss()(torch.tensor(preds), torch.tensor(trues)).item()
        relative_l2_error = l2_error / torch.nn.MSELoss()(torch.tensor(trues),
                                                          torch.zeros_like(torch.tensor(trues))).item()
        l1_error = torch.nn.L1Loss()(torch.tensor(preds), torch.tensor(trues)).item()
        rel_l1_err = relative_l1_error(torch.tensor(trues), torch.tensor(preds)).item()
        # calculate the RMSE, MSE, and MAE (overrides the values from metric())
        rmse = torch.sqrt(torch.mean((torch.tensor(preds) - torch.tensor(trues)) ** 2)).item()
        mse = torch.mean((torch.tensor(preds) - torch.tensor(trues)) ** 2).item()
        mae = torch.mean(torch.abs(torch.tensor(preds) - torch.tensor(trues))).item()
        # NOTE(review): `trues` is a numpy array here while the numerator is a
        # torch tensor — mixing the two in a division is fragile; verify
        ape = torch.abs(torch.tensor(preds) - torch.tensor(trues)) / (trues + 1e-8)
        ape[torch.tensor(trues) == 0] = 0 # set APE to zero where true value is zero
        mape = torch.mean(ape).item() * 100
        # ape = torch.abs(torch.tensor(preds) - torch.tensor(trues)) / torch.abs(torch.tensor(trues))
        # mape = torch.mean(ape).item() * 100
        print_log(
            'L1 error: {:.7f}, Relative L1 Error: {:.7f}, L2 error: {:.7f}, Relative L2 error: {:.7f},'.format(l1_error,
                                                                                                               rel_l1_err,
                                                                                                               l2_error,
                                                                                                               relative_l2_error))
        print_log('RMSE: {:.7f}, MSE: {:.7f}, MAE: {:.7f}, MAPE: {:.7f}%'.format(rmse, mse, mae, mape))
        self.model.train()
        return total_loss
    def test(self, args):
        """Run the model over the whole test set, log metrics and save
        inputs/trues/preds arrays under the experiment's results folder."""
        self.model.eval()
        inputs_lst, trues_lst, preds_lst = [], [], []
        for batch_x, batch_y in self.test_loader:
            pred_y = self.model(batch_x.to(self.device))
            list(map(lambda data, lst: lst.append(data.detach().cpu().numpy()), [
                batch_x, batch_y, pred_y], [inputs_lst, trues_lst, preds_lst]))
        inputs, trues, preds = map(lambda data: np.concatenate(
            data, axis=0), [inputs_lst, trues_lst, preds_lst])
        folder_path = self.path + '/results/{}/sv/'.format(args.ex_name)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        mse, mae, ssim, psnr = metric(preds, trues, self.test_loader.dataset.mean, self.test_loader.dataset.std, True)
        print_log('mse:{:.4f}, mae:{:.4f}, ssim:{:.4f}, psnr:{:.4f}'.format(mse, mae, ssim, psnr))
        l2_error = torch.nn.MSELoss()(torch.tensor(preds), torch.tensor(trues)).item()
        relative_l2_error = l2_error / torch.nn.MSELoss()(torch.tensor(trues),
                                                          torch.zeros_like(torch.tensor(trues))).item()
        l1_error = torch.nn.L1Loss()(torch.tensor(preds), torch.tensor(trues)).item()
        rel_l1_err = relative_l1_error(torch.tensor(trues), torch.tensor(preds)).item()
        # compute RMSE
        # rmse = torch.sqrt(torch.mean((torch.tensor(preds) - torch.tensor(trues)) ** 2))
        # rmse = rmse.item()
        # print_log('RMSE: {:.7f}'.format(rmse))
        # print_log('L1 error: {:.7f}, Relative L1 Error: {:.7f}, L2 error: {:.7f}, Relative L2 error: {:.7f},'.format(l1_error, rel_l1_err, l2_error, relative_l2_error))
        # calculate the RMSE, MSE, and MAE (overrides the values from metric())
        rmse = torch.sqrt(torch.mean((torch.tensor(preds) - torch.tensor(trues)) ** 2)).item()
        mse = torch.mean((torch.tensor(preds) - torch.tensor(trues)) ** 2).item()
        mae = torch.mean(torch.abs(torch.tensor(preds) - torch.tensor(trues))).item()
        # NOTE(review): unlike vali(), zero true values are not masked here, so
        # MAPE can blow up on sparse targets — confirm intended
        ape = torch.abs(torch.tensor(preds) - torch.tensor(trues)) / torch.abs(torch.tensor(trues))
        mape = torch.mean(ape).item() * 100
        print_log(
            'L1 error: {:.7f}, Relative L1 Error: {:.7f}, L2 error: {:.7f}, Relative L2 error: {:.7f},'.format(l1_error,
                                                                                                               rel_l1_err,
                                                                                                               l2_error,
                                                                                                               relative_l2_error))
        print_log('RMSE: {:.7f}, MSE: {:.7f}, MAE: {:.7f}, MAPE: {:.7f}%'.format(rmse, mse, mae, mape))
        for np_data in ['inputs', 'trues', 'preds']:
            np.save(osp.join(folder_path, np_data + '.npy'), vars()[np_data])
        return mse
PastNet | PastNet-main/modules/DiscreteSTModel_modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class VectorQuantizer(nn.Module):
    """VQ-VAE vector quantizer (van den Oord et al., 2017).

    Maps each spatial position of a (B, D, H, W) feature map to its nearest
    codebook vector among ``num_embeddings`` entries of size
    ``embedding_dim``, with a straight-through estimator for gradients.
    """
    def __init__(self, num_embeddings, embedding_dim, commitment_cost):
        super(VectorQuantizer, self).__init__()
        self._embedding_dim = embedding_dim  # D
        self._num_embeddings = num_embeddings  # K
        self._embedding = nn.Embedding(self._num_embeddings, self._embedding_dim)
        self._embedding.weight.data.uniform_(-1 / self._num_embeddings, 1 / self._num_embeddings)
        # weight of the commitment term (beta in the paper)
        self._commitment_cost = commitment_cost

    def forward(self, inputs):
        """Quantize ``inputs`` (B, C, H, W); returns
        (loss, quantized (B, C, H, W), perplexity, one-hot encodings)."""
        # convert inputs from B, C, H, W -> B, H, W, C
        inputs = inputs.permute(0, 2, 3, 1).contiguous()
        input_shape = inputs.shape
        # Flatten input
        flat_input = inputs.view(-1, self._embedding_dim)
        # Squared distances expanded as |x|^2 + |e|^2 - 2 x.e so the full
        # difference tensor never needs to be materialized
        distances = (torch.sum(flat_input ** 2, dim=1, keepdim=True)
                     + torch.sum(self._embedding.weight ** 2, dim=1)
                     - 2 * torch.matmul(flat_input, self._embedding.weight.t()))
        # Encoding: one-hot of the nearest codebook index
        encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1)
        encodings = torch.zeros(encoding_indices.shape[0], self._num_embeddings, device=inputs.device)
        encodings.scatter_(1, encoding_indices, 1)
        # Quantize and unflatten
        quantized = torch.matmul(encodings, self._embedding.weight).view(input_shape)
        # Loss: codebook loss + beta * commitment loss
        e_latent_loss = F.mse_loss(quantized.detach(), inputs)
        q_latent_loss = F.mse_loss(quantized, inputs.detach())
        loss = q_latent_loss + self._commitment_cost * e_latent_loss
        # straight-through estimator: gradients flow through `inputs`
        quantized = inputs + (quantized - inputs).detach()
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        # convert quantized from B, H, W, C -> B, C, H, W
        return loss, quantized.permute(0, 3, 1, 2).contiguous(), perplexity, encodings

    def lookup(self, x):
        """Return the codebook vectors for integer indices ``x``.

        BUG FIX: ``F.embedding`` requires the weight *tensor*; the original
        passed the ``nn.Embedding`` module itself, which raised a TypeError.
        """
        embeddings = F.embedding(x, self._embedding.weight)
        return embeddings
class VectorQuantizerEMA(nn.Module):
    """VQ-VAE quantizer with exponential-moving-average codebook updates.

    Instead of a codebook gradient term, the embedding vectors are updated in
    ``forward`` (training mode only) via EMA statistics of the assignments.
    """
    def __init__(self, num_embeddings, embedding_dim, commitment_cost, decay=0.99, epsilon=1e-5):
        super(VectorQuantizerEMA, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_embeddings = num_embeddings
        self._embedding = nn.Embedding(self._num_embeddings, self._embedding_dim)
        self._embedding.weight.data.normal_()
        self._commitment_cost = commitment_cost
        # EMA accumulators: per-code assignment counts and summed inputs
        self.register_buffer('_ema_cluster_size', torch.zeros(num_embeddings))
        self._ema_w = nn.Parameter(torch.Tensor(num_embeddings, self._embedding_dim))
        self._ema_w.data.normal_()
        self._decay = decay
        self._epsilon = epsilon  # Laplace-smoothing constant

    def forward(self, inputs):
        """Quantize ``inputs`` (B, C, H, W); returns
        (loss, quantized (B, C, H, W), perplexity, one-hot encodings)."""
        # convert inputs from BCHW -> BHWC
        inputs = inputs.permute(0, 2, 3, 1).contiguous()
        input_shape = inputs.shape
        # Flatten input
        flat_input = inputs.view(-1, self._embedding_dim)
        # Squared distances expanded as |x|^2 + |e|^2 - 2 x.e
        distances = (torch.sum(flat_input ** 2, dim=1, keepdim=True)
                     + torch.sum(self._embedding.weight ** 2, dim=1)
                     - 2 * torch.matmul(flat_input, self._embedding.weight.t()))
        # Encoding
        encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1)
        encodings = torch.zeros(encoding_indices.shape[0], self._num_embeddings, device=inputs.device)
        encodings.scatter_(1, encoding_indices, 1)
        # Quantize and unflatten
        quantized = torch.matmul(encodings, self._embedding.weight).view(input_shape)
        # Use EMA to update the embedding vectors (training mode only)
        if self.training:
            self._ema_cluster_size = self._ema_cluster_size * self._decay + \
                                     (1 - self._decay) * torch.sum(encodings, 0)
            # Laplace smoothing of the cluster size
            n = torch.sum(self._ema_cluster_size.data)
            self._ema_cluster_size = (
                    (self._ema_cluster_size + self._epsilon)
                    / (n + self._num_embeddings * self._epsilon) * n)
            dw = torch.matmul(encodings.t(), flat_input)
            # NOTE: reassigning nn.Parameter detaches these tensors from any
            # optimizer state; this mirrors the reference implementation
            self._ema_w = nn.Parameter(self._ema_w * self._decay + (1 - self._decay) * dw)
            self._embedding.weight = nn.Parameter(self._ema_w / self._ema_cluster_size.unsqueeze(1))
        # Loss: only the commitment term — the codebook learns through EMA
        e_latent_loss = F.mse_loss(quantized.detach(), inputs)
        loss = self._commitment_cost * e_latent_loss
        # Straight Through Estimator
        quantized = inputs + (quantized - inputs).detach()
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        # convert quantized from BHWC -> BCHW
        return loss, quantized.permute(0, 3, 1, 2).contiguous(), perplexity, encodings

    def lookup(self, x):
        """Return the codebook vectors for integer indices ``x``.

        BUG FIX: ``F.embedding`` requires the weight *tensor*; the original
        passed the ``nn.Embedding`` module itself, which raised a TypeError.
        """
        embeddings = F.embedding(x, self._embedding.weight)
        return embeddings
class Residual(nn.Module):
    """Residual unit: x + BN(Conv1x1(ReLU(BN(Conv3x3(ReLU(x)))))).

    Output channel count is ``num_hiddens`` and must match ``in_channels``
    for the skip connection to broadcast correctly.
    """
    def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
        super(Residual, self).__init__()
        layers = [
            nn.ReLU(True),
            nn.Conv2d(in_channels=in_channels,
                      out_channels=num_residual_hiddens,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(num_residual_hiddens),
            nn.ReLU(True),
            nn.Conv2d(in_channels=num_residual_hiddens,
                      out_channels=num_hiddens,
                      kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(num_hiddens),
        ]
        self._block = nn.Sequential(*layers)

    def forward(self, x):
        residual = self._block(x)
        return x + residual
class ResidualStack(nn.Module):
    """A chain of ``num_residual_layers`` Residual units followed by a final ReLU."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(ResidualStack, self).__init__()
        self._num_residual_layers = num_residual_layers
        self._layers = nn.ModuleList(
            Residual(in_channels, num_hiddens, num_residual_hiddens)
            for _ in range(num_residual_layers)
        )

    def forward(self, x):
        for layer in self._layers:
            x = layer(x)
        return F.relu(x)
class Encoder(nn.Module):
    """Downsampling CNN encoder: two stride-2 convs (4x spatial reduction),
    a stride-1 conv, then a residual stack."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(Encoder, self).__init__()
        # H, W -> H/2, W/2
        self._conv_1 = nn.Conv2d(in_channels, num_hiddens // 2,
                                 kernel_size=4, stride=2, padding=1)
        # H/2, W/2 -> H/4, W/4
        self._conv_2 = nn.Conv2d(num_hiddens // 2, num_hiddens,
                                 kernel_size=4, stride=2, padding=1)
        self._conv_3 = nn.Conv2d(num_hiddens, num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)

    def forward(self, inputs):
        # input shape: [B, C, W, H]
        hidden = F.relu(self._conv_1(inputs))   # [B, hidden//2, W/2, H/2]
        hidden = F.relu(self._conv_2(hidden))   # [B, hidden, W/4, H/4]
        hidden = self._conv_3(hidden)
        return self._residual_stack(hidden)
class Decoder(nn.Module):
    """Mirror of the Encoder: stride-1 conv + residual stack, then two
    stride-2 transposed convs that upsample 4x back to pixel space."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens, out_channels):
        super(Decoder, self).__init__()
        self._conv_1 = nn.Conv2d(in_channels, num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)
        # each transposed conv doubles the spatial resolution
        self._conv_trans_1 = nn.ConvTranspose2d(num_hiddens, num_hiddens // 2,
                                                kernel_size=4, stride=2, padding=1)
        self._conv_trans_2 = nn.ConvTranspose2d(num_hiddens // 2, out_channels,
                                                kernel_size=4, stride=2, padding=1)

    def forward(self, inputs):
        hidden = self._residual_stack(self._conv_1(inputs))
        hidden = F.relu(self._conv_trans_1(hidden))
        return self._conv_trans_2(hidden)
| 9,313 | 40.212389 | 106 | py |
PastNet | PastNet-main/modules/Fourier_modules.py | from functools import partial
from collections import OrderedDict
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from torch.utils.checkpoint import checkpoint_sequential
from params import get_fourcastnet_args
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fft
import numpy as np
import torch.optim as optimizer
class PatchEmbed(nn.Module):
    """Split an image into non-overlapping patches and linearly embed each
    one, returning tokens of shape (B, num_patches, embed_dim)."""
    def __init__(self, img_size=None, patch_size=8, in_c=13, embed_dim=768, norm_layer=None):
        super(PatchEmbed, self).__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # patches per spatial axis (h, w), and their product
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])  # h, w
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        # conv with kernel == stride == patch size acts as a per-patch linear projection
        self.projection = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Error..."
        # (B, C, H, W) -> (B, D, h, w) -> (B, D, hw) -> (B, hw, D)
        patches = self.projection(x)
        tokens = patches.flatten(2).transpose(1, 2)
        return self.norm(tokens)
class Mlp(nn.Module):
    """Feed-forward block used inside FourierNetBlock.

    NOTE(review): despite the name, the output projection is an
    AdaptiveAvgPool1d (``fc3``) rather than a Linear, and ``fc2`` is created
    but never used in ``forward`` — kept here so checkpoint state_dict keys
    stay compatible; confirm before removing.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super(Mlp, self).__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)  # unused in forward
        self.fc3 = nn.AdaptiveAvgPool1d(out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc3(hidden))
class LearnableFourierPositionalEncoding(nn.Module):
    """Learnable Fourier-feature positional encoding.

    Projects M-dimensional coordinates through a random (learnable) linear
    map, forms [cos, sin] Fourier features of size F_dim, then maps them with
    a small MLP to D-dimensional embeddings: (B, N, M) -> (B, N, D).
    """
    def __init__(self, M: int, F_dim: int, H_dim: int, D: int, gamma: float):
        super().__init__()
        self.M = M
        self.F_dim = F_dim
        self.H_dim = H_dim
        self.D = D
        self.gamma = gamma
        # Fourier projection (no bias) followed by a two-layer MLP
        self.Wr = nn.Linear(self.M, self.F_dim // 2, bias=False)
        self.mlp = nn.Sequential(
            nn.Linear(self.F_dim, self.H_dim, bias=True),
            nn.GELU(),
            nn.Linear(self.H_dim, self.D)
        )
        self.init_weights()

    def init_weights(self):
        # variance scaled by gamma^-2 as in the original paper
        nn.init.normal_(self.Wr.weight.data, mean=0, std=self.gamma ** -2)

    def forward(self, x):
        B, N, _ = x.shape
        proj = self.Wr(x)
        fourier = 1 / np.sqrt(self.F_dim) * torch.cat([torch.cos(proj), torch.sin(proj)], dim=-1)
        return self.mlp(fourier).reshape((B, N, self.D))
class AdativeFourierNeuralOperator(nn.Module):
    """Adaptive Fourier Neural Operator (AFNO) token mixer.

    Tokens (B, N, C) are reshaped to an (h, w) grid, transformed with a 2D
    real FFT, passed through a two-layer block-diagonal complex MLP in the
    frequency domain (optionally soft-shrunk for sparsity), then transformed
    back.
    """
    def __init__(self, dim, h=14, w=14):
        super(AdativeFourierNeuralOperator, self).__init__()
        args = get_fourcastnet_args()
        self.hidden_size = dim
        self.h = h
        self.w = w
        self.num_blocks = args.fno_blocks
        self.block_size = self.hidden_size // self.num_blocks
        assert self.hidden_size % self.num_blocks == 0
        self.scale = 0.02
        # complex block-diagonal weights: index 0 = real part, 1 = imaginary part
        self.w1 = torch.nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size, self.block_size))
        self.b1 = torch.nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size))
        self.w2 = torch.nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size, self.block_size))
        self.b2 = torch.nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size))
        self.relu = nn.ReLU()
        if args.fno_bias:
            self.bias = nn.Conv1d(self.hidden_size, self.hidden_size, 1)
        else:
            self.bias = None
        self.softshrink = args.fno_softshrink

    def multiply(self, input, weights):
        # batched block-diagonal matmul over the last dimension
        return torch.einsum('...bd, bdk->...bk', input, weights)

    def forward(self, x):
        B, N, C = x.shape
        if self.bias:
            bias = self.bias(x.permute(0, 2, 1)).permute(0, 2, 1)
        else:
            bias = torch.zeros(x.shape, device=x.device)
        # tokens -> 2D grid -> frequency domain
        x = x.reshape(B, self.h, self.w, C)
        x = torch.fft.rfft2(x, dim=(1, 2), norm='ortho')
        x = x.reshape(B, x.shape[1], x.shape[2], self.num_blocks, self.block_size)
        # first complex linear layer + ReLU
        x_real = F.relu(self.multiply(x.real, self.w1[0]) - self.multiply(x.imag, self.w1[1]) + self.b1[0], inplace=True)
        x_imag = F.relu(self.multiply(x.real, self.w1[1]) + self.multiply(x.imag, self.w1[0]) + self.b1[1], inplace=True)
        # second complex linear layer.
        # BUG FIX: the original overwrote x_real before computing x_imag, so
        # the imaginary output was built from the already-transformed real
        # part instead of the layer's input; compute both outputs from the
        # same inputs (as in the reference AFNO implementation).
        out_real = self.multiply(x_real, self.w2[0]) - self.multiply(x_imag, self.w2[1]) + self.b2[0]
        out_imag = self.multiply(x_real, self.w2[1]) + self.multiply(x_imag, self.w2[0]) + self.b2[1]
        x = torch.stack([out_real, out_imag], dim=-1)
        # optional frequency-domain sparsification
        x = F.softshrink(x, lambd=self.softshrink) if self.softshrink else x
        x = torch.view_as_complex(x)
        x = x.reshape(B, x.shape[1], x.shape[2], self.hidden_size)
        x = torch.fft.irfft2(x, s=(self.h, self.w), dim=(1, 2), norm='ortho')
        x = x.reshape(B, N, C)
        return x + bias
class FourierNetBlock(nn.Module):
    """Transformer-style block: AFNO token mixing followed by an MLP, each
    with pre-norm, a residual connection and stochastic depth."""
    def __init__(self,
                 dim,
                 mlp_ratio=4.,
                 drop=0.,
                 drop_path=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 h=14,
                 w=14):
        super(FourierNetBlock, self).__init__()
        args = get_fourcastnet_args()
        self.normlayer1 = norm_layer(dim)
        self.filter = AdativeFourierNeuralOperator(dim, h=h, w=w)
        # stochastic depth; identity when drop_path == 0
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.normlayer2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer,
                       drop=drop)
        self.double_skip = args.double_skip

    def forward(self, x):
        x = x + self.drop_path(self.filter(self.normlayer1(x)))
        x = x + self.drop_path(self.mlp(self.normlayer2(x)))
        return x
PastNet | PastNet-main/modules/DiscreteSTModel_modules_BN.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class VectorQuantizer(nn.Module):
    """VQ-VAE vector quantizer (BatchNorm-variant module file).

    Maps each spatial position of a (B, D, H, W) feature map to its nearest
    codebook vector among ``num_embeddings`` entries of size
    ``embedding_dim``, with a straight-through estimator for gradients.
    """
    def __init__(self, num_embeddings, embedding_dim, commitment_cost):
        super(VectorQuantizer, self).__init__()
        self._embedding_dim = embedding_dim  # D
        self._num_embeddings = num_embeddings  # K
        self._embedding = nn.Embedding(self._num_embeddings, self._embedding_dim)
        self._embedding.weight.data.uniform_(-1 / self._num_embeddings, 1 / self._num_embeddings)
        # weight of the commitment term (beta in the paper)
        self._commitment_cost = commitment_cost

    def forward(self, inputs):
        """Quantize ``inputs`` (B, C, H, W); returns
        (loss, quantized (B, C, H, W), perplexity, one-hot encodings)."""
        # convert inputs from B, C, H, W -> B, H, W, C
        inputs = inputs.permute(0, 2, 3, 1).contiguous()
        input_shape = inputs.shape
        # Flatten input
        flat_input = inputs.view(-1, self._embedding_dim)
        # Squared distances via the binomial expansion |x|^2 + |e|^2 - 2 x.e,
        # avoiding materializing the full difference tensor
        distances = (torch.sum(flat_input ** 2, dim=1, keepdim=True)
                     + torch.sum(self._embedding.weight ** 2, dim=1)
                     - 2 * torch.matmul(flat_input, self._embedding.weight.t()))
        # Encoding: one-hot of the nearest codebook index
        encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1)
        encodings = torch.zeros(encoding_indices.shape[0], self._num_embeddings, device=inputs.device)
        encodings.scatter_(1, encoding_indices, 1)
        # Quantize and unflatten
        quantized = torch.matmul(encodings, self._embedding.weight).view(input_shape)
        # Loss: codebook loss + beta * commitment loss
        e_latent_loss = F.mse_loss(quantized.detach(), inputs)
        q_latent_loss = F.mse_loss(quantized, inputs.detach())
        loss = q_latent_loss + self._commitment_cost * e_latent_loss
        # straight-through estimator: gradients flow through `inputs`
        quantized = inputs + (quantized - inputs).detach()
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        # convert quantized from B, H, W, C -> B, C, H, W
        return loss, quantized.permute(0, 3, 1, 2).contiguous(), perplexity, encodings

    def lookup(self, x):
        """Return the codebook vectors for integer indices ``x``.

        BUG FIX: ``F.embedding`` requires the weight *tensor*; the original
        passed the ``nn.Embedding`` module itself, which raised a TypeError.
        """
        embeddings = F.embedding(x, self._embedding.weight)
        return embeddings
class Residual(nn.Module):
    """Pre-activation residual unit: x + BN(Conv1x1(ReLU(BN(Conv3x3(ReLU(x)))))).

    `num_hiddens` must equal `in_channels` for the skip addition to be valid.
    """
    def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
        super(Residual, self).__init__()
        self._block = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(in_channels, num_residual_hiddens,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(num_residual_hiddens),
            nn.ReLU(True),
            nn.Conv2d(num_residual_hiddens, num_hiddens,
                      kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(num_hiddens),
        )

    def forward(self, x):
        return x + self._block(x)
class ResidualStack(nn.Module):
    """A chain of Residual blocks followed by a final ReLU."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(ResidualStack, self).__init__()
        self._num_residual_layers = num_residual_layers
        blocks = [Residual(in_channels, num_hiddens, num_residual_hiddens)
                  for _ in range(num_residual_layers)]
        self._layers = nn.ModuleList(blocks)

    def forward(self, x):
        for block in self._layers:
            x = block(x)
        return F.relu(x)
class Encoder(nn.Module):
    """Downsamples 4x with two stride-2 convs, then refines with residual blocks."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(Encoder, self).__init__()
        self._conv_1 = nn.Conv2d(in_channels, num_hiddens // 2,
                                 kernel_size=4, stride=2, padding=1)
        self.norm_1 = nn.BatchNorm2d(num_hiddens // 2)
        self._conv_2 = nn.Conv2d(num_hiddens // 2, num_hiddens,
                                 kernel_size=4, stride=2, padding=1)
        self.norm_2 = nn.BatchNorm2d(num_hiddens)
        self._conv_3 = nn.Conv2d(num_hiddens, num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self.norm_3 = nn.BatchNorm2d(num_hiddens)
        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)

    def forward(self, inputs):
        # [B, C, H, W] -> [B, hid//2, H//2, W//2]
        feat = F.relu(self.norm_1(self._conv_1(inputs)))
        # -> [B, hid, H//4, W//4]
        feat = F.relu(self.norm_2(self._conv_2(feat)))
        feat = self.norm_3(self._conv_3(feat))
        return self._residual_stack(feat)
class Decoder(nn.Module):
    """Refines with residual blocks, then upsamples 4x via two transposed convs."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens, out_channels):
        super(Decoder, self).__init__()
        self._conv_1 = nn.Conv2d(in_channels, num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self._norm_1 = nn.BatchNorm2d(num_hiddens)
        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)
        self._conv_trans_1 = nn.ConvTranspose2d(num_hiddens, num_hiddens // 2,
                                                kernel_size=4, stride=2, padding=1)
        self._norm_2 = nn.BatchNorm2d(num_hiddens // 2)
        self._conv_trans_2 = nn.ConvTranspose2d(num_hiddens // 2, out_channels,
                                                kernel_size=4, stride=2, padding=1)

    def forward(self, inputs):
        feat = self._residual_stack(self._norm_1(self._conv_1(inputs)))
        feat = F.relu(self._norm_2(self._conv_trans_1(feat)))
        return self._conv_trans_2(feat)
PastNet | PastNet-main/modules/DiscreteSTModel_modules_GN.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class VectorQuantizer(nn.Module):
    """VQ-VAE codebook layer (GroupNorm variant of the module file).

    Maps each spatial feature vector to its nearest codebook entry and returns
    the VQ loss, the (straight-through) quantized tensor, the batch perplexity
    and the one-hot encodings.
    """
    def __init__(self, num_embeddings, embedding_dim, commitment_cost):
        super(VectorQuantizer, self).__init__()
        self._embedding_dim = embedding_dim  # D
        self._num_embeddings = num_embeddings  # K
        self._embedding = nn.Embedding(self._num_embeddings, self._embedding_dim)
        self._embedding.weight.data.uniform_(-1 / self._num_embeddings, 1 / self._num_embeddings)
        self._commitment_cost = commitment_cost

    def forward(self, inputs):
        # convert inputs from B, C, H, W -> B, H, W, C
        inputs = inputs.permute(0, 2, 3, 1).contiguous()
        input_shape = inputs.shape
        # Flatten input to (B*H*W, D)
        flat_input = inputs.view(-1, self._embedding_dim)
        # Squared L2 distance to every codebook vector, expanded via
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b to avoid a huge broadcast.
        distances = (torch.sum(flat_input ** 2, dim=1, keepdim=True)
                     + torch.sum(self._embedding.weight ** 2, dim=1)
                     - 2 * torch.matmul(flat_input, self._embedding.weight.t()))
        # One-hot encode the nearest codebook index per vector.
        encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1)
        encodings = torch.zeros(encoding_indices.shape[0], self._num_embeddings, device=inputs.device)
        encodings.scatter_(1, encoding_indices, 1)
        # Quantize and unflatten
        quantized = torch.matmul(encodings, self._embedding.weight).view(input_shape)
        # Codebook loss pulls embeddings toward encoder outputs; the scaled
        # commitment loss pulls encoder outputs toward their codes.
        e_latent_loss = F.mse_loss(quantized.detach(), inputs)
        q_latent_loss = F.mse_loss(quantized, inputs.detach())
        loss = q_latent_loss + self._commitment_cost * e_latent_loss
        # Straight-through estimator: gradients flow through `inputs`.
        quantized = inputs + (quantized - inputs).detach()
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        # convert quantized from B, H, W, C -> B, C, H, W
        return loss, quantized.permute(0, 3, 1, 2).contiguous(), perplexity, encodings

    def lookup(self, x):
        """Return codebook vectors for an integer index tensor `x`.

        Bug fix: F.embedding expects the weight *tensor*, not the nn.Embedding
        module itself, which previously raised a TypeError.
        """
        return F.embedding(x, self._embedding.weight)
class Residual(nn.Module):
    """Pre-activation residual unit with GroupNorm(2, .) normalisation.

    `num_hiddens` must equal `in_channels` for the skip addition to be valid;
    channel counts must be divisible by 2 for GroupNorm.
    """
    def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
        super(Residual, self).__init__()
        self._block = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(in_channels, num_residual_hiddens,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.GroupNorm(2, num_residual_hiddens),
            nn.ReLU(True),
            nn.Conv2d(num_residual_hiddens, num_hiddens,
                      kernel_size=1, stride=1, bias=False),
            nn.GroupNorm(2, num_hiddens),
        )

    def forward(self, x):
        return x + self._block(x)
class ResidualStack(nn.Module):
    """A chain of Residual blocks followed by a final ReLU."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(ResidualStack, self).__init__()
        self._num_residual_layers = num_residual_layers
        blocks = [Residual(in_channels, num_hiddens, num_residual_hiddens)
                  for _ in range(num_residual_layers)]
        self._layers = nn.ModuleList(blocks)

    def forward(self, x):
        for block in self._layers:
            x = block(x)
        return F.relu(x)
class Encoder(nn.Module):
    """Downsamples 4x with two stride-2 convs (GroupNorm variant), then residual refinement."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(Encoder, self).__init__()
        self._conv_1 = nn.Conv2d(in_channels, num_hiddens // 2,
                                 kernel_size=4, stride=2, padding=1)
        self.norm_1 = nn.GroupNorm(2, num_hiddens // 2)
        self._conv_2 = nn.Conv2d(num_hiddens // 2, num_hiddens,
                                 kernel_size=4, stride=2, padding=1)
        self.norm_2 = nn.GroupNorm(2, num_hiddens)
        self._conv_3 = nn.Conv2d(num_hiddens, num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self.norm_3 = nn.GroupNorm(2, num_hiddens)
        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)

    def forward(self, inputs):
        # [B, C, H, W] -> [B, hid//2, H//2, W//2]
        feat = F.relu(self.norm_1(self._conv_1(inputs)))
        # -> [B, hid, H//4, W//4]
        feat = F.relu(self.norm_2(self._conv_2(feat)))
        feat = self.norm_3(self._conv_3(feat))
        return self._residual_stack(feat)
class Decoder(nn.Module):
    """Residual refinement then 4x upsampling via two transposed convs (GroupNorm variant)."""
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens, out_channels):
        super(Decoder, self).__init__()
        self._conv_1 = nn.Conv2d(in_channels, num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self._norm_1 = nn.GroupNorm(2, num_hiddens)
        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)
        self._conv_trans_1 = nn.ConvTranspose2d(num_hiddens, num_hiddens // 2,
                                                kernel_size=4, stride=2, padding=1)
        self._norm_2 = nn.GroupNorm(2, num_hiddens // 2)
        self._conv_trans_2 = nn.ConvTranspose2d(num_hiddens // 2, out_channels,
                                                kernel_size=4, stride=2, padding=1)

    def forward(self, inputs):
        feat = self._residual_stack(self._norm_1(self._conv_1(inputs)))
        feat = F.relu(self._norm_2(self._conv_trans_1(feat)))
        return self._conv_trans_2(feat)
| 6,722 | 41.283019 | 106 | py |
PastNet | PastNet-main/modules/STConvEncoderDecoder_modules.py | from torch import nn
class BasicConv2d(nn.Module):
    """Conv (or transposed conv) with optional GroupNorm + LeakyReLU post-activation."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, transpose=False, act_norm=False):
        super(BasicConv2d, self).__init__()
        self.act_norm = act_norm
        if transpose:
            # output_padding resolves the output-size ambiguity for stride > 1.
            self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size,
                                           stride=stride, padding=padding,
                                           output_padding=stride // 2)
        else:
            self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                                  stride=stride, padding=padding)
        self.norm = nn.GroupNorm(2, out_channels)
        self.act = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        out = self.conv(x)
        return self.act(self.norm(out)) if self.act_norm else out
class ConvSC(nn.Module):
    """Single BasicConv2d step; transposed conv is disabled when stride == 1."""
    def __init__(self, C_in, C_out, stride, transpose=False, act_norm=True):
        super(ConvSC, self).__init__()
        # Upsampling makes no sense at stride 1, so force a plain conv there.
        self.conv = BasicConv2d(C_in, C_out, kernel_size=3, stride=stride,
                                padding=1, transpose=(transpose and stride != 1),
                                act_norm=act_norm)

    def forward(self, x):
        return self.conv(x)
class GroupConv2d(nn.Module):
    """Grouped conv with optional GroupNorm + LeakyReLU; falls back to groups=1
    when the input channel count is not divisible by `groups`."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups, act_norm=False):
        super(GroupConv2d, self).__init__()
        self.act_norm = act_norm
        if in_channels % groups != 0:
            groups = 1
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, groups=groups)
        self.norm = nn.GroupNorm(groups, out_channels)
        self.activate = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        out = self.conv(x)
        return self.activate(self.norm(out)) if self.act_norm else out
class Inception(nn.Module):
    """1x1 bottleneck followed by parallel grouped convs whose outputs are summed."""
    def __init__(self, C_in, C_hid, C_out, incep_ker=[3,5,7,11], groups=8):
        super(Inception, self).__init__()
        self.conv1 = nn.Conv2d(C_in, C_hid, kernel_size=1, stride=1, padding=0)
        branches = [GroupConv2d(C_hid, C_out, kernel_size=k, stride=1,
                                padding=k // 2, groups=groups, act_norm=True)
                    for k in incep_ker]
        self.layers = nn.Sequential(*branches)

    def forward(self, x):
        reduced = self.conv1(x)
        out = 0
        for branch in self.layers:
            out = out + branch(reduced)
        return out
PastNet | PastNet-main/API/recorder.py | import numpy as np
import torch
class Recorder:
    """Tracks the best validation loss and checkpoints the model on improvement."""
    def __init__(self, verbose=False, delta=0):
        """
        Args:
            verbose: print a message whenever a checkpoint is saved.
            delta: minimum score improvement required to count as "better".
        """
        self.verbose = verbose
        self.best_score = None
        # Bug fix: `np.Inf` was removed in NumPy 2.0; `np.inf` is the
        # portable spelling (identical value on NumPy 1.x).
        self.val_loss_min = np.inf
        self.delta = delta

    def __call__(self, val_loss, model, path):
        # Higher score == lower validation loss.
        score = -val_loss
        if self.best_score is None or score >= self.best_score + self.delta:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)

    def save_checkpoint(self, val_loss, model, path):
        """Persist the model weights under `path` and record the new best loss."""
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), path+'/'+'checkpoint.pth')
        self.val_loss_min = val_loss
PastNet | PastNet-main/API/dataloader_taxibj.py | import torch
import numpy as np
from torch.utils.data import Dataset
class TrafficDataset(Dataset):
    """TaxiBJ flow maps, rescaled from [-1, 1] to [0, 1]."""
    def __init__(self, X, Y):
        super(TrafficDataset, self).__init__()
        # Inputs arrive in [-1, 1]; map them to [0, 1].
        self.X = (X + 1) / 2
        self.Y = (Y + 1) / 2
        self.mean = 0
        self.std = 1

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, index):
        sample = torch.tensor(self.X[index, ::]).float()
        target = torch.tensor(self.Y[index, ::]).float()
        return sample, target
def load_data(
        batch_size, val_batch_size,
        data_root, num_workers):
    """Build TaxiBJ train/test dataloaders from the pre-packed .npz archive.

    Returns (train_loader, None, test_loader, mean, std); there is no
    separate validation split and the data is already scaled, so mean/std
    are the identity (0, 1).
    """
    dataset = np.load(data_root + 'taxibj/dataset.npz')
    X_train, Y_train = dataset['X_train'], dataset['Y_train']
    X_test, Y_test = dataset['X_test'], dataset['Y_test']

    dataloader_train = torch.utils.data.DataLoader(
        TrafficDataset(X=X_train, Y=Y_train),
        batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=num_workers)
    dataloader_test = torch.utils.data.DataLoader(
        TrafficDataset(X=X_test, Y=Y_test),
        batch_size=val_batch_size, shuffle=False, pin_memory=True, num_workers=num_workers)
    return dataloader_train, None, dataloader_test, 0, 1
PastNet | PastNet-main/API/dataloader_caltech0.py |
import os
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
# JPEG SOI marker + JFIF APP0 header prefix; a .seq stream is split on this
# byte pattern to recover the individual JPEG-encoded frames.
split_string = "\xFF\xD8\xFF\xE0\x00\x10\x4A\x46\x49\x46"
def read_seq(path):
    """Split a Caltech .seq file on the JPEG/JFIF header.

    Returns the raw latin-1 chunks (first element is the file header,
    not a frame) together with their count.
    """
    with open(path, 'rb+') as handle:
        raw = handle.read().decode('latin-1')
    chunks = raw.split(split_string)
    print(len(chunks))
    return chunks, len(chunks)
def seq_to_images(bytes_string):
    """Re-attach the JPEG header, decode one frame, and scale it to [0, 1]."""
    payload = (split_string + bytes_string).encode('latin-1')
    frame = cv2.imdecode(np.frombuffer(payload, np.uint8), cv2.IMREAD_COLOR)
    return frame / 255.0
def load_caltech(root):
    """Load the frames of the first .seq file found under `root`.

    Returns an array of shape (num_chunks - 1, 3, 480, 640) with values
    in [0, 1], or None when no .seq file exists.

    Fix: the locals previously shadowed the builtins `len` and `str`.
    """
    file_list = [f for f in os.listdir(root) if f.split('.')[-1] == "seq"]
    print(file_list)
    for file in file_list[:1]:
        path = os.path.join(root, file)
        chunks, num_chunks = read_seq(path)
        imgs = np.zeros([num_chunks - 1, 480, 640, 3])
        # The first chunk is the .seq header, not an image.
        for idx, chunk in enumerate(chunks[1:]):
            imgs[idx] = seq_to_images(chunk)
        return imgs.transpose(0, 3, 1, 2)
class Caltech(Dataset):
    """Streams (input, output) frame windows from Caltech Pedestrian .seq files.

    Files are decoded lazily one at a time; `current_seq` holds the frames of
    the file currently being served and `get_next` flags when the loader must
    advance to the next file.
    """
    def __init__(self, root, is_train=True, n_frames_input=4, n_frames_output=1):
        super().__init__()
        self.root = root
        print("loading .seq file list")
        self.file_list = [file for file in os.listdir(self.root) if file.split('.')[-1] == "seq"]
        # Train split: all files but the last; test split: only the last file.
        if is_train:
            self.file_list = self.file_list[:-1]
        else:
            self.file_list = self.file_list[-1:]
        print("loading file list done, file list: ", self.file_list)
        self.length = 0              # total windows across all files (see get_total_len)
        self.input_length = n_frames_input
        self.output_length = n_frames_output
        self.current_seq = None      # decoded frames of the active file
        self.current_length = 0      # number of windows in the active file
        self.current_file_index = 0
        self.get_next = True         # when True, __getitem__ loads the next file
        self.get_total_len(root)
        self.get_current_data()
    def get_total_len(self, root):
        # Count sliding windows over all files; each file of N chunks yields
        # N - 5 windows (5 = presumably input+output window size — TODO confirm
        # against n_frames_input/n_frames_output, which default to 4 + 1).
        print("calculating total length")
        count = 0
        for file in self.file_list:
            path = os.path.join(root, file)
            # NOTE(review): `len` shadows the builtin within this loop body.
            _, len = read_seq(path)
            count += (len - 5)
        self.length = count
        print("calculating total length done, total length: ", self.length)
    def get_current_data(self):
        # Decode every frame of the file at current_file_index into current_seq.
        print("getting current sequence")
        if self.current_file_index >= len(self.file_list):
            # Ran out of files: stop advancing.
            self.get_next = False
            return
        current_file = os.path.join(self.root, self.file_list[self.current_file_index])
        str_list, length = read_seq(current_file)
        self.current_length = length - 5
        self.current_seq = np.zeros([length - 1, 480, 640, 3])
        # First chunk is the .seq header, so frames start at str_list[1].
        for i, str in enumerate(str_list[1:]):
            self.current_seq[i] = seq_to_images(str)
        print("getting current sequence done, the shape:", self.current_seq.shape)
    def get_next_seq(self):
        # Advance to the next file and decode it.
        print("getting next sequence")
        self.current_file_index += 1
        self.get_current_data()
        self.get_next = False
    def __getitem__(self, index):
        # NOTE(review): `index` is used directly against the *current* file's
        # frames, so indices past current_length trigger a file switch but are
        # not re-based — confirm intended sampling behaviour.
        if index >= self.current_length:
            self.get_next = True
        if self.get_next:
            self.get_next_seq()
        input = self.current_seq[index: index + self.input_length]
        output = self.current_seq[index + self.input_length: index + self.input_length + self.output_length]
        input = torch.from_numpy(input).contiguous().float()
        output = torch.from_numpy(output).contiguous().float()
        return input, output
    def __len__(self):
        # NOTE(review): returns only the active file's window count even though
        # self.length holds the total across all files — verify which is intended.
        return self.current_length
def load_data(
        batch_size, val_batch_size,
        data_root, num_workers):
    """Build train/val/test dataloaders over Caltech .seq sequences.

    Bug fix: Caltech.__init__ has no `num_objects` parameter, so the previous
    calls raised TypeError; the stray keyword has been dropped.
    The test set doubles as the validation set.
    """
    train_set = Caltech(root=data_root, is_train=True,
                        n_frames_input=10, n_frames_output=10)
    test_set = Caltech(root=data_root, is_train=False,
                       n_frames_input=10, n_frames_output=10)

    dataloader_train = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=num_workers)
    dataloader_validation = torch.utils.data.DataLoader(
        test_set, batch_size=val_batch_size, shuffle=False, pin_memory=True, num_workers=num_workers)
    dataloader_test = torch.utils.data.DataLoader(
        test_set, batch_size=val_batch_size, shuffle=False, pin_memory=True, num_workers=num_workers)
    mean, std = 0, 1
    return dataloader_train, dataloader_validation, dataloader_test, mean, std
if __name__ == "__main__":
    # Smoke test: iterate one hard-coded dataset and print batch shapes.
    # data = load_caltech("/home/pan/workspace/simvp/SimVP-Simpler-yet-Better-Video-Prediction-master/data/caltech/USA/set01")
    dataset = Caltech(root="/home/pan/workspace/simvp/SimVP-Simpler-yet-Better-Video-Prediction-master/data/caltech/USA/set01")
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=16, shuffle=True, drop_last=True)
    for input, output in dataloader:
        print(input.shape, output.shape)
PastNet | PastNet-main/API/dataloader_sevir.py | import torch.nn as nn
import numpy as np
import random
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch
class VilDataset(Dataset):
    """SEVIR IR069 sequences split into 10 input + 10 target frames.

    Each sample is a 20-frame (128x128) clip stored row-major in the .npy
    archives; a channel axis is added on access.
    """
    def __init__(self, train=True, root='./data', transform=None):
        """
        Args:
            train: choose the storm-event (train) or random-event (test) archives.
            root: directory containing the SEVIR .npy files.
            transform: optional callable applied to the (20, 1, 128, 128) clip.
        """
        super().__init__()
        if train:
            npy = ['SEVIR_IR069_STORMEVENTS_2018_0101_0630.npy', 'SEVIR_IR069_STORMEVENTS_2018_0701_1231.npy']
        else:
            npy = ['SEVIR_IR069_RANDOMEVENTS_2018_0101_0430.npy']
        data = []
        for file in npy:
            data.append(np.load(f'{root}/{file}'))
        self.data = np.concatenate(data)
        self.transform = transform
        self.mean = 0
        self.std = 1

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        # One sample is a 20-frame clip; add a channel axis.
        img = self.data[index].reshape(20, 1, 128, 128)
        if self.transform:
            img = self.transform(img)
        # First 10 frames are the input, last 10 the prediction target.
        # (Fix: these were previously computed twice; the duplicates are removed.)
        input_img = torch.from_numpy(img[:10]).contiguous().float()
        output_img = torch.from_numpy(img[10:]).contiguous().float()
        return input_img, output_img
def load_data(batch_size, val_batch_size,
              data_root, num_workers):
    """Build SEVIR dataloaders; the test set doubles as the validation set.

    Bug fixes: the test split now actually uses train=False, and `data_root`
    is no longer ignored in favour of the hard-coded './data' path.
    """
    train_set = VilDataset(train=True, root=data_root, transform=None)
    test_set = VilDataset(train=False, root=data_root, transform=None)
    dataloader_train = DataLoader(train_set, batch_size=batch_size, shuffle=True, pin_memory=True,
                                  num_workers=num_workers)
    dataloader_validation = DataLoader(test_set, batch_size=val_batch_size, shuffle=False,
                                       pin_memory=True, num_workers=num_workers)
    dataloader_test = DataLoader(test_set, batch_size=val_batch_size, shuffle=False, pin_memory=True,
                                 num_workers=num_workers)
    mean, std = 0, 1
    return dataloader_train, dataloader_validation, dataloader_test, mean, std
if __name__ == '__main__':
    # Visual smoke test: plot the 10 input and 10 target frames of one sample.
    dataset = VilDataset(root='/root/Model_Phy/data')
    input_img, output_img = dataset[1]
    # Assuming `input_img` is a NumPy array of shape (10, 64, 64, 1)
    fig, axes = plt.subplots(nrows=1, ncols=10)
    for i in range(10):
        axes[i].imshow(input_img[i, :, :, 0], cmap=None)
        axes[i].axis('off')
    plt.show()
    fig, axes = plt.subplots(nrows=1, ncols=10)
    for i in range(10):
        axes[i].imshow(output_img[i, :, :, 0], cmap=None)
        axes[i].axis('off')
    plt.show()
PastNet | PastNet-main/API/dataloader_caltech.py |
import os
import cv2
import numpy as np
import torch
import bisect
from torch.utils.data import Dataset
# JPEG SOI marker + JFIF APP0 header prefix; a .seq stream is split on this
# byte pattern to recover the individual JPEG-encoded frames.
split_string = "\xFF\xD8\xFF\xE0\x00\x10\x4A\x46\x49\x46"
def read_seq(path):
    """Split a Caltech .seq file on the JPEG/JFIF header.

    Returns the per-frame latin-1 chunks; the leading chunk (the .seq file
    header) is dropped.
    """
    with open(path, 'rb+') as handle:
        raw = handle.read().decode('latin-1')
    chunks = raw.split(split_string)
    return chunks[1:]
def seq_to_images(bytes_string):
    """Re-attach the JPEG header, decode one frame, and scale it to [0, 1]."""
    payload = (split_string + bytes_string).encode('latin-1')
    frame = cv2.imdecode(np.frombuffer(payload, np.uint8), cv2.IMREAD_COLOR)
    return frame / 255.0
def load_caltech(root):
    """Load the frames of the first .seq file under `root` as (N, 480, 640, 3)
    floats in [0, 1], or None when no .seq file exists.

    Bug fix: in this module `read_seq` returns a single list (the file header
    is already stripped), so the old two-value unpack raised ValueError; the
    old code also shadowed the builtin `len`.
    """
    file_list = [f for f in os.listdir(root) if f.split('.')[-1] == "seq"]
    print(file_list)
    for file in file_list[:1]:
        path = os.path.join(root, file)
        frames = read_seq(path)
        imgs = np.zeros([len(frames), 480, 640, 3])
        for idx, frame in enumerate(frames):
            imgs[idx] = seq_to_images(frame)
        return imgs
class Caltech(Dataset):
    """Concatenation of SingleCaltech datasets (mirrors torch's ConcatDataset)."""
    @staticmethod
    def cumsum(sequence):
        """Running total of element lengths, e.g. lens (2, 3) -> [2, 5]."""
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r

    def __init__(self, root, is_train=True, file_list=['V001.seq'], n_frames_input=4, n_frames_output=1):
        super().__init__()
        datasets = []
        for file in file_list:
            datasets.append(SingleCaltech(os.path.join(root, file), is_train=is_train, n_frames_input=n_frames_input, n_frames_output=n_frames_output))
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        # NOTE(review): only the first dataset is kept despite constructing all
        # of them — confirm whether the [:1] slice is intentional.
        self.datasets = list(datasets[:1])
        self.cumulative_sizes = self.cumsum(self.datasets)
        self.mean = 0
        self.std = 1

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        # Support negative indices, then map the flat index onto
        # (dataset index, sample index within that dataset).
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    @property
    def cummulative_sizes(self):
        # Bug fix: `warnings` was never imported at module level, so touching
        # this property raised NameError instead of the intended deprecation.
        import warnings
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
class SingleCaltech(Dataset):
    """Sliding-window dataset over a single Caltech .seq file.

    The train split decodes frames [0:104) and exposes 100 windows; the test
    split decodes frames [104:153) and exposes 50 windows.
    """
    def __init__(self, root, is_train=True, n_frames_input=4, n_frames_output=1):
        super().__init__()
        self.root = root
        self.length = 100 if is_train else 50
        self.input_length = n_frames_input
        self.output_length = n_frames_output
        self.sequence = None
        self.get_current_data()

    def get_current_data(self):
        """Decode this split's frame slice into `self.sequence`."""
        frames = read_seq(self.root)
        if self.length == 100:
            frames = frames[:104]
            self.sequence = np.zeros([104, 480, 640, 3])
        else:
            frames = frames[104:153]
            self.sequence = np.zeros([54, 480, 640, 3])
        for i, frame in enumerate(frames):
            self.sequence[i] = seq_to_images(frame)

    def __getitem__(self, index):
        split = index + self.input_length
        # (T, H, W, C) -> (T, C, H, W)
        clip_in = np.transpose(self.sequence[index:split], (0, 3, 1, 2))
        clip_out = np.transpose(self.sequence[split:split + self.output_length], (0, 3, 1, 2))
        return (torch.from_numpy(clip_in).contiguous().float(),
                torch.from_numpy(clip_out).contiguous().float())

    def __len__(self):
        return self.length
def load_data(
        batch_size, val_batch_size,
        data_root, num_workers):
    """Build Caltech train/val/test dataloaders over every .seq file in data_root.

    The test set doubles as the validation set; frames are already in [0, 1]
    so mean/std are the identity (0, 1).
    """
    file_list = [file for file in os.listdir(data_root) if file.split('.')[-1] == "seq"]
    train_set = Caltech(root=data_root, is_train=True,
                        n_frames_input=4, n_frames_output=1, file_list=file_list)
    test_set = Caltech(root=data_root, is_train=False,
                       n_frames_input=4, n_frames_output=1, file_list=file_list)

    make_loader = torch.utils.data.DataLoader
    dataloader_train = make_loader(train_set, batch_size=batch_size, shuffle=True,
                                   pin_memory=True, num_workers=num_workers)
    dataloader_validation = make_loader(test_set, batch_size=val_batch_size, shuffle=False,
                                        pin_memory=True, num_workers=num_workers)
    dataloader_test = make_loader(test_set, batch_size=val_batch_size, shuffle=False,
                                  pin_memory=True, num_workers=num_workers)
    mean, std = 0, 1
    return dataloader_train, dataloader_validation, dataloader_test, mean, std
if __name__ == "__main__":
    # Smoke test: build the test split over a hard-coded path and print batch shapes.
    # data = load_caltech("/home/pan/workspace/simvp/SimVP-Simpler-yet-Better-Video-Prediction-master/data/caltech/USA/set01")
    file_list = [file for file in os.listdir("/home/pan/workspace/simvp/SimVP/data/caltech/USA/set01") if file.split('.')[-1] == "seq"]
    dataset = Caltech(root="/home/pan/workspace/simvp/SimVP/data/caltech/USA/set01", is_train=False, file_list=file_list)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=16, shuffle=True)
    for input, output in dataloader:
        print(input.shape, output.shape)
PastNet | PastNet-main/API/dataloader_moving_mnist.py | import os
import gzip
import random
import numpy as np
import torch
import torch.utils.data as data
def load_mnist(root):
    """Load the raw MNIST training images as a (N, 28, 28) uint8 array.

    The 16-byte offset skips the IDX file header.
    """
    path = os.path.join(root, 'moving_mnist/train-images-idx3-ubyte.gz')
    with gzip.open(path, 'rb') as f:
        raw = np.frombuffer(f.read(), np.uint8, offset=16)
    return raw.reshape(-1, 28, 28)
def load_fixed_set(root):
    """Load the fixed Moving-MNIST test sequences and append a channel axis."""
    path = os.path.join(root, 'moving_mnist/mnist_test_seq.npy')
    return np.load(path)[..., np.newaxis]
class MovingMNIST(data.Dataset):
    """Moving-MNIST dataset.

    Training samples are synthesised on the fly by bouncing MNIST digits
    around a 64x64 canvas; the standard 2-digit test split is read from the
    fixed `mnist_test_seq.npy` archive instead.
    """
    def __init__(self, root, is_train=True, n_frames_input=10, n_frames_output=10, num_objects=[2],
                 transform=None):
        super(MovingMNIST, self).__init__()
        self.dataset = None
        if is_train:
            self.mnist = load_mnist(root)
        else:
            if num_objects[0] != 2:
                # Non-standard digit count: generate test clips on the fly too.
                self.mnist = load_mnist(root)
            else:
                self.dataset = load_fixed_set(root)
        # On-the-fly generation exposes a virtual epoch of 10k samples.
        self.length = int(1e4) if self.dataset is None else self.dataset.shape[1]
        self.is_train = is_train
        self.num_objects = num_objects
        self.n_frames_input = n_frames_input
        self.n_frames_output = n_frames_output
        self.n_frames_total = self.n_frames_input + self.n_frames_output
        self.transform = transform
        # For generating data
        self.image_size_ = 64
        self.digit_size_ = 28
        self.step_length_ = 0.1
        self.mean = 0
        self.std = 1
    def get_random_trajectory(self, seq_length):
        ''' Generate a random sequence of a MNIST digit '''
        # Positions are sampled in [0, 1] and scaled to the 36-pixel play area
        # (canvas minus digit size) at the end.
        canvas_size = self.image_size_ - self.digit_size_
        x = random.random()
        y = random.random()
        theta = random.random() * 2 * np.pi
        v_y = np.sin(theta)
        v_x = np.cos(theta)
        start_y = np.zeros(seq_length)
        start_x = np.zeros(seq_length)
        for i in range(seq_length):
            # Take a step along velocity.
            y += v_y * self.step_length_
            x += v_x * self.step_length_
            # Bounce off edges.
            if x <= 0:
                x = 0
                v_x = -v_x
            if x >= 1.0:
                x = 1.0
                v_x = -v_x
            if y <= 0:
                y = 0
                v_y = -v_y
            if y >= 1.0:
                y = 1.0
                v_y = -v_y
            start_y[i] = y
            start_x[i] = x
        # Scale to the size of the canvas.
        start_y = (canvas_size * start_y).astype(np.int32)
        start_x = (canvas_size * start_x).astype(np.int32)
        return start_y, start_x
    def generate_moving_mnist(self, num_digits=2):
        '''
        Get random trajectories for the digits and generate a video.

        Returns a (n_frames_total, 64, 64, 1) float32 array; overlapping
        digits are merged with a pixel-wise maximum.
        '''
        data = np.zeros((self.n_frames_total, self.image_size_,
                         self.image_size_), dtype=np.float32)
        for n in range(num_digits):
            # Trajectory
            start_y, start_x = self.get_random_trajectory(self.n_frames_total)
            ind = random.randint(0, self.mnist.shape[0] - 1)
            digit_image = self.mnist[ind]
            for i in range(self.n_frames_total):
                top = start_y[i]
                left = start_x[i]
                bottom = top + self.digit_size_
                right = left + self.digit_size_
                # Draw digit
                data[i, top:bottom, left:right] = np.maximum(
                    data[i, top:bottom, left:right], digit_image)
        data = data[..., np.newaxis]
        return data
    def __getitem__(self, idx):
        length = self.n_frames_input + self.n_frames_output
        if self.is_train or self.num_objects[0] != 2:
            # Sample number of objects
            num_digits = random.choice(self.num_objects)
            # Generate data on the fly
            images = self.generate_moving_mnist(num_digits)
        else:
            images = self.dataset[:, idx, ...]
        # r = 1 keeps full 64x64 resolution, so this reshape/transpose is a
        # no-op patchify hook kept for compatibility with patch-based configs.
        r = 1
        w = int(64 / r)
        images = images.reshape((length, w, r, w, r)).transpose(
            0, 2, 4, 1, 3).reshape((length, r * r, w, w))
        input = images[:self.n_frames_input]
        if self.n_frames_output > 0:
            output = images[self.n_frames_input:length]
        else:
            output = []
        # NOTE(review): when n_frames_output == 0, `output` is a plain list and
        # the from_numpy call below would fail — confirm that case is unused.
        output = torch.from_numpy(output / 255.0).contiguous().float()
        input = torch.from_numpy(input / 255.0).contiguous().float()
        return input, output
    def __len__(self):
        return self.length
def load_data(
        batch_size, val_batch_size,
        data_root, num_workers):
    """Build Moving-MNIST dataloaders (10 input / 10 output frames, 2 digits).

    The fixed test split doubles as the validation set; frames are scaled to
    [0, 1] by the dataset, so mean/std are the identity (0, 1).
    """
    train_set = MovingMNIST(root=data_root, is_train=True,
                            n_frames_input=10, n_frames_output=10, num_objects=[2])
    test_set = MovingMNIST(root=data_root, is_train=False,
                           n_frames_input=10, n_frames_output=10, num_objects=[2])

    make_loader = torch.utils.data.DataLoader
    dataloader_train = make_loader(train_set, batch_size=batch_size, shuffle=True,
                                   pin_memory=True, num_workers=num_workers)
    dataloader_validation = make_loader(test_set, batch_size=val_batch_size, shuffle=False,
                                        pin_memory=True, num_workers=num_workers)
    dataloader_test = make_loader(test_set, batch_size=val_batch_size, shuffle=False,
                                  pin_memory=True, num_workers=num_workers)
    mean, std = 0, 1
    return dataloader_train, dataloader_validation, dataloader_test, mean, std
| 5,613 | 33.441718 | 101 | py |
PastNet | PastNet-main/API/dataloader_moving_mnist_v2.py | import numpy as np
import torch
import torch.utils.data as data
class MovingMnistSequence(data.Dataset):
    """20-frame Moving-MNIST clips read from a pre-rendered .npz archive."""
    def __init__(self, train=True, shuffle=True, root='./data', transform=None):
        # NOTE(review): `shuffle` is accepted but never used.
        super().__init__()
        if train:
            npz = 'mnist_train.npz'
            self.data = np.load(f'{root}/{npz}')['input_raw_data']
        else:
            # NOTE(review): the test branch also loads mnist_train.npz (first
            # 10k frames) — confirm whether a separate test archive was intended.
            npz = 'mnist_train.npz'
            self.data = np.load(f'{root}/{npz}')['input_raw_data'][:10000]
        self.transform = transform
        # (N, C, H, W) -> (N, H, W, C) for the transform pipeline.
        self.data = self.data.transpose(0, 2, 3, 1)
    def __len__(self):
        # Non-overlapping 20-frame clips.
        return self.data.shape[0] // 20
    def __getitem__(self, index):
        imgs = self.data[index * 20: (index + 1) * 20]
        imgs_tensor = torch.zeros([20, 1, 64, 64])
        # NOTE(review): without a transform the returned tensor stays all-zero;
        # the raw frames are never copied in — confirm this is intended.
        if self.transform is not None:
            for i in range(imgs.shape[0]):
                imgs_tensor[i] = self.transform(imgs[i])
        return imgs_tensor
def load_data(
        batch_size, val_batch_size,
        data_root, num_workers):
    """Build Moving-MNIST sequence dataloaders; test split doubles as validation.

    Bug fix: MovingMnistSequence.__init__ takes (train, shuffle, root,
    transform) — the old calls passed is_train/n_frames_*/num_objects
    keywords that do not exist and raised TypeError.
    """
    train_set = MovingMnistSequence(root=data_root, train=True)
    test_set = MovingMnistSequence(root=data_root, train=False)
    dataloader_train = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=num_workers)
    dataloader_validation = torch.utils.data.DataLoader(
        test_set, batch_size=val_batch_size, shuffle=False, pin_memory=True, num_workers=num_workers)
    dataloader_test = torch.utils.data.DataLoader(
        test_set, batch_size=val_batch_size, shuffle=False, pin_memory=True, num_workers=num_workers)
    mean, std = 0, 1
    return dataloader_train, dataloader_validation, dataloader_test, mean, std
| 1,873 | 36.48 | 101 | py |
PastNet | PastNet-main/models/PastNet_Model.py | import torch
from utils import *
import logging
from torch import nn
from modules.DiscreteSTModel_modules import *
from modules.Fourier_modules import *
def stride_generator(N, reverse=False):
    """Return the first N strides of the alternating (1, 2, 1, 2, ...) pattern.

    With reverse=True the truncated pattern is returned back-to-front, as used
    by the decoder to mirror the encoder's downsampling schedule.
    """
    pattern = ([1, 2] * 10)[:N]
    return pattern[::-1] if reverse else pattern
class GroupConv2d(nn.Module):
    """Grouped conv with optional GroupNorm + LeakyReLU; falls back to groups=1
    when the input channel count is not divisible by `groups`."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups, act_norm=False):
        super(GroupConv2d, self).__init__()
        self.act_norm = act_norm
        if in_channels % groups != 0:
            groups = 1
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, groups=groups)
        self.norm = nn.GroupNorm(groups, out_channels)
        self.activate = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        out = self.conv(x)
        return self.activate(self.norm(out)) if self.act_norm else out
class Inception(nn.Module):
    """1x1 channel reduction followed by parallel multi-kernel grouped convs,
    with the branch outputs summed."""
    def __init__(self, C_in, C_hid, C_out, incep_ker=[3, 5, 7, 11], groups=8):
        super(Inception, self).__init__()
        self.conv1 = nn.Conv2d(C_in, C_hid, kernel_size=1, stride=1, padding=0)
        self.layers = nn.Sequential(*[
            GroupConv2d(C_hid, C_out, kernel_size=k, stride=1,
                        padding=k // 2, groups=groups, act_norm=True)
            for k in incep_ker
        ])

    def forward(self, x):
        reduced = self.conv1(x)
        out = 0
        for branch in self.layers:
            out = out + branch(reduced)
        return out
class FPG(nn.Module):
    """Fourier Prior Generator.

    Patch-embeds each input frame, processes the patch tokens with a stack
    of FourierNetBlocks, then decodes back to pixel space with transposed
    convolutions.  Input and output are shaped [B, T, C, H, W].
    """

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_channels=20,
                 out_channels=20,
                 input_frames=20,
                 embed_dim=768,
                 depth=12,
                 mlp_ratio=4.,
                 uniform_drop=False,
                 drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=None,
                 dropcls=0.):
        super(FPG, self).__init__()
        self.embed_dim = embed_dim
        self.num_frames = input_frames
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = PatchEmbed(img_size=img_size,
                                      patch_size=patch_size,
                                      in_c=in_channels,
                                      embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        # Learned positional embedding, one vector per patch.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))  # [1, 196, 768]
        self.pos_drop = nn.Dropout(p=drop_rate)
        self.h = self.patch_embed.grid_size[0]
        self.w = self.patch_embed.grid_size[1]

        '''
        stochastic depth decay rule
        '''
        if uniform_drop:
            dpr = [drop_path_rate for _ in range(depth)]
        else:
            # Linearly increasing drop-path rate over depth.
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]

        self.blocks = nn.ModuleList([FourierNetBlock(
            dim=embed_dim,
            mlp_ratio=mlp_ratio,
            drop=drop_rate,
            drop_path=dpr[i],
            act_layer=nn.GELU,
            norm_layer=norm_layer,
            h=self.h,
            w=self.w)
            for i in range(depth)
        ])
        self.norm = norm_layer(embed_dim)

        # Decoder head: three transposed convs upsample 2x * 2x * 4x = 16x
        # back to the original spatial resolution.
        self.linearprojection = nn.Sequential(OrderedDict([
            ('transposeconv1', nn.ConvTranspose2d(embed_dim, out_channels * 16, kernel_size=(2, 2), stride=(2, 2))),
            ('act1', nn.Tanh()),
            ('transposeconv2', nn.ConvTranspose2d(out_channels * 16, out_channels * 4, kernel_size=(2, 2), stride=(2, 2))),
            ('act2', nn.Tanh()),
            ('transposeconv3', nn.ConvTranspose2d(out_channels * 4, out_channels, kernel_size=(4, 4), stride=(4, 4)))
        ]))

        if dropcls > 0:
            print('dropout %.2f before classifier' % dropcls)
            self.final_dropout = nn.Dropout(p=dropcls)
        else:
            self.final_dropout = nn.Identity()

        trunc_normal_(self.pos_embed, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # ViT-style initialization: truncated-normal linear weights,
        # zero biases, unit LayerNorm scale.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names the optimizer setup should exclude from weight decay.
        return {'pos_embed', 'cls_token'}

    def forward_features(self, x):
        '''
        patch_embed:
            [B, T, C, H, W] -> [B*T, num_patches, embed_dim]
        '''
        B,T,C,H,W = x.shape
        x = x.view(B*T, C, H, W)
        x = self.patch_embed(x)
        #enc = LearnableFourierPositionalEncoding(768, 768, 64, 768, 10)
        #fourierpos_embed = enc(x)
        x = self.pos_drop(x + self.pos_embed)
        #x = self.pos_drop(x + fourierpos_embed)

        if not get_fourcastnet_args().checkpoint_activations:
            for blk in self.blocks:
                x = blk(x)
        else:
            # Gradient checkpointing: trade compute for activation memory.
            x = checkpoint_sequential(self.blocks, 4, x)

        # Tokens back to a spatial feature map: [B*T, embed_dim, h, w].
        x = self.norm(x).transpose(1, 2)
        x = torch.reshape(x, [-1, self.embed_dim, self.h, self.w])
        return x

    def forward(self, x):
        """Run the full encode -> Fourier blocks -> decode pipeline."""
        B, T, C, H, W = x.shape
        x = self.forward_features(x)
        x = self.final_dropout(x)
        x = self.linearprojection(x)
        x = x.reshape(B, T, C, H, W)
        return x
class DST(nn.Module):
    """Discrete Spatio-Temporal autoencoder (a VQ-VAE).

    Pipeline: Encoder -> 1x1 conv to the codebook dimension -> EMA vector
    quantizer -> Decoder.  ``embedding_nums`` is the codebook size K and
    ``embedding_dim`` is the code dimension D.
    """

    def __init__(self,
                 in_channel=1,
                 num_hiddens=128,
                 res_layers=2,
                 res_units=32,
                 embedding_nums=512,  # K
                 embedding_dim=64,  # D
                 commitment_cost=0.25):
        super(DST, self).__init__()
        self.embedding_dim = embedding_dim
        self.num_embeddings = embedding_nums
        self._encoder = Encoder(in_channel, num_hiddens,
                                res_layers, res_units)
        # Projects encoder features to the codebook dimension D.
        self._pre_vq_conv = nn.Conv2d(in_channels=num_hiddens,
                                      out_channels=embedding_dim,
                                      kernel_size=1,
                                      stride=1)
        # code book
        self._vq_vae = VectorQuantizerEMA(embedding_nums,
                                          embedding_dim,
                                          commitment_cost,
                                          decay=0.99)
        self._decoder = Decoder(embedding_dim,
                                num_hiddens,
                                res_layers,
                                res_units,
                                in_channel)

    def forward(self, x):
        """Encode, quantize, and reconstruct x.

        Returns (vq_loss, x_recon, perplexity).
        """
        # input shape : [B, C, W, H]
        z = self._encoder(x)  # [B, hidden_units, W//4, H//4]
        # [B, embedding_dims, W//4, H//4] z -> encoding
        z = self._pre_vq_conv(z)
        # quantized -> embedding; `quantized` plays the role of the encoder
        # output in VideoGPT.
        loss, quantized, perplexity, _ = self._vq_vae(z)
        x_recon = self._decoder(quantized)
        return loss, x_recon, perplexity

    def get_embedding(self, x):
        """Return the pre-quantization embedding of x."""
        return self._pre_vq_conv(self._encoder(x))

    def get_quantization(self, x):
        """Return the quantized (codebook) representation of x."""
        z = self._encoder(x)
        z = self._pre_vq_conv(z)
        _, quantized, _, _ = self._vq_vae(z)
        return quantized

    def reconstruct_img_by_embedding(self, embedding):
        """Quantize a pre-computed embedding and decode it back to an image."""
        loss, quantized, perplexity, _ = self._vq_vae(embedding)
        return self._decoder(quantized)

    def reconstruct_img(self, q):
        """Decode an already-quantized representation q."""
        return self._decoder(q)

    @property
    def pre_vq_conv(self):
        # Read-only access to the 1x1 projection layer.
        return self._pre_vq_conv

    @property
    def encoder(self):
        # Read-only access to the encoder module.
        return self._encoder
class DynamicPropagation(nn.Module):
    """Latent dynamics model: a U-Net-style stack of Inception blocks with
    skip connections from encoder layers into the decoder.

    Operates on [B, T, C, H, W] tensors by folding time into the channel
    dimension, so channel_in is expected to equal T*C.
    """

    def __init__(self, channel_in, channel_hid, N_T, incep_ker=[3, 5, 7, 11], groups=8):
        super(DynamicPropagation, self).__init__()
        self.N_T = N_T

        # Encoder: channel_in -> channel_hid, then N_T-1 hid -> hid blocks.
        encoder = [Inception(channel_in, channel_hid // 2, channel_hid,
                             incep_ker=incep_ker, groups=groups)]
        encoder += [Inception(channel_hid, channel_hid // 2, channel_hid,
                              incep_ker=incep_ker, groups=groups)
                    for _ in range(N_T - 2)]
        encoder.append(Inception(channel_hid, channel_hid // 2, channel_hid,
                                 incep_ker=incep_ker, groups=groups))

        # Decoder: first block takes hid, the rest take 2*hid because of the
        # concatenated encoder skip; the last projects back to channel_in.
        decoder = [Inception(channel_hid, channel_hid // 2, channel_hid,
                             incep_ker=incep_ker, groups=groups)]
        decoder += [Inception(2 * channel_hid, channel_hid // 2, channel_hid,
                              incep_ker=incep_ker, groups=groups)
                    for _ in range(N_T - 2)]
        decoder.append(Inception(2 * channel_hid, channel_hid // 2, channel_in,
                                 incep_ker=incep_ker, groups=groups))

        self.enc = nn.Sequential(*encoder)
        self.dec = nn.Sequential(*decoder)

    def forward(self, input_state):
        """Propagate latents: [B, T, C, H, W] -> [B, T, C, H, W]."""
        B, T, C, H, W = input_state.shape
        h = input_state.reshape(B, T * C, H, W)

        # Encoder pass, recording every intermediate output as a skip.
        skips = []
        for idx in range(self.N_T):
            h = self.enc[idx](h)
            if idx < self.N_T - 1:
                skips.append(h)

        # Decoder pass, consuming the skips in reverse order.
        h = self.dec[0](h)
        for idx in range(1, self.N_T):
            h = self.dec[idx](torch.cat([h, skips[-idx]], dim=1))

        return h.reshape(B, T, C, H, W)
class PastNetModel(nn.Module):
    """End-to-end PastNet.

    Combines three parts:
      * DST  — a VQ-VAE that encodes frames into a discrete latent space,
      * FPG  — a Fourier-based prior generator producing PDE-like features,
      * DynamicPropagation — a latent dynamics model on the quantized codes.
    The forward pass predicts future frames as decoded latent dynamics plus
    the FPG features.
    """

    def __init__(self,
                 args,
                 shape_in,
                 hid_T=256,
                 N_T=8,
                 incep_ker=[3, 5, 7, 11],
                 groups=8,
                 res_units=64,
                 res_layers=2,
                 embedding_nums=512,
                 embedding_dim=64):
        super(PastNetModel, self).__init__()
        T, C, H, W = shape_in
        self.DST_module = DST(in_channel=C,
                              res_units=res_units,
                              res_layers=res_layers,
                              embedding_dim=embedding_dim,
                              embedding_nums=embedding_nums)
        self.FPG_module = FPG(img_size=64,
                              patch_size=16,
                              in_channels=1,
                              out_channels=1,
                              embed_dim=128,
                              input_frames=10,
                              depth=1,
                              mlp_ratio=2.,
                              uniform_drop=False,
                              drop_rate=0.,
                              drop_path_rate=0.,
                              norm_layer=None,
                              dropcls=0.)
        if args.load_pred_train:
            print_log("Load Pre-trained Model.")
            # BUG FIX: the VQ-VAE is stored as `self.DST_module`; the previous
            # code referenced `self.vq_vae`, which was never defined and
            # raised AttributeError whenever load_pred_train was set.
            self.DST_module.load_state_dict(torch.load("./models/vqvae.ckpt"), strict=False)
        if args.freeze_vqvae:
            print_log(f"Params of VQVAE is freezed.")
            for p in self.DST_module.parameters():
                p.requires_grad = False
        # NOTE(review): the 64 in T*64 appears to assume embedding_dim == 64 —
        # confirm before changing embedding_dim.
        self.DynamicPro = DynamicPropagation(T*64, hid_T, N_T, incep_ker, groups)

    def forward(self, input_frames):
        """Predict future frames from input_frames ([B, T, C, H, W])."""
        B, T, C, H, W = input_frames.shape
        # Fourier prior features, same shape as the input clip.
        pde_features = self.FPG_module(input_frames)

        # Encode every frame and quantize to the discrete latent space.
        input_features = input_frames.view([B * T, C, H, W])
        encoder_embed = self.DST_module._encoder(input_features)
        z = self.DST_module._pre_vq_conv(encoder_embed)
        vq_loss, Latent_embed, _, _ = self.DST_module._vq_vae(z)

        # Propagate latent dynamics over time.
        _, C_, H_, W_ = Latent_embed.shape
        Latent_embed = Latent_embed.reshape(B, T, C_, H_, W_)
        hidden_dim = self.DynamicPro(Latent_embed)

        # Decode back to pixel space and add the Fourier prior.
        B_, T_, C_, H_, W_ = hidden_dim.shape
        hid = hidden_dim.reshape([B_ * T_, C_, H_, W_])
        predicti_feature = self.DST_module._decoder(hid)
        predicti_feature = predicti_feature.reshape([B, T, C, H, W]) + pde_features
        return predicti_feature
| 12,255 | 34.627907 | 123 | py |
PastNet | PastNet-main/models/Fourier.py | from modules.Fourier_modules import *
class FPG(nn.Module):
    """Fourier Prior Generator (standalone copy of the PastNet FPG).

    Patch-embeds each input frame, processes the patch tokens with a stack
    of FourierNetBlocks, then decodes back to pixel space with transposed
    convolutions.  Input and output are shaped [B, T, C, H, W].
    """

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_channels=20,
                 out_channels=20,
                 input_frames=20,
                 embed_dim=768,
                 depth=12,
                 mlp_ratio=4.,
                 uniform_drop=False,
                 drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=None,
                 dropcls=0.):
        super(FPG, self).__init__()
        self.embed_dim = embed_dim
        self.num_frames = input_frames
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = PatchEmbed(img_size=img_size,
                                      patch_size=patch_size,
                                      in_c=in_channels,
                                      embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        # Learned positional embedding, one vector per patch.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))  # [1, 196, 768]
        self.pos_drop = nn.Dropout(p=drop_rate)
        self.h = self.patch_embed.grid_size[0]
        self.w = self.patch_embed.grid_size[1]

        '''
        stochastic depth decay rule
        '''
        if uniform_drop:
            dpr = [drop_path_rate for _ in range(depth)]
        else:
            # Linearly increasing drop-path rate over depth.
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]

        self.blocks = nn.ModuleList([FourierNetBlock(
            dim=embed_dim,
            mlp_ratio=mlp_ratio,
            drop=drop_rate,
            drop_path=dpr[i],
            act_layer=nn.GELU,
            norm_layer=norm_layer,
            h=self.h,
            w=self.w)
            for i in range(depth)
        ])
        self.norm = norm_layer(embed_dim)

        # Decoder head: three transposed convs upsample 2x * 2x * 4x = 16x
        # back to the original spatial resolution.
        self.linearprojection = nn.Sequential(OrderedDict([
            ('transposeconv1', nn.ConvTranspose2d(embed_dim, out_channels * 16, kernel_size=(2, 2), stride=(2, 2))),
            ('act1', nn.Tanh()),
            ('transposeconv2', nn.ConvTranspose2d(out_channels * 16, out_channels * 4, kernel_size=(2, 2), stride=(2, 2))),
            ('act2', nn.Tanh()),
            ('transposeconv3', nn.ConvTranspose2d(out_channels * 4, out_channels, kernel_size=(4, 4), stride=(4, 4)))
        ]))

        if dropcls > 0:
            print('dropout %.2f before classifier' % dropcls)
            self.final_dropout = nn.Dropout(p=dropcls)
        else:
            self.final_dropout = nn.Identity()

        trunc_normal_(self.pos_embed, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # ViT-style initialization: truncated-normal linear weights,
        # zero biases, unit LayerNorm scale.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names the optimizer setup should exclude from weight decay.
        return {'pos_embed', 'cls_token'}

    def forward_features(self, x):
        '''
        patch_embed:
            [B, T, C, H, W] -> [B*T, num_patches, embed_dim]
        '''
        B,T,C,H,W = x.shape
        x = x.view(B*T, C, H, W)
        x = self.patch_embed(x)
        #enc = LearnableFourierPositionalEncoding(768, 768, 64, 768, 10)
        #fourierpos_embed = enc(x)
        x = self.pos_drop(x + self.pos_embed)
        #x = self.pos_drop(x + fourierpos_embed)

        if not get_fourcastnet_args().checkpoint_activations:
            for blk in self.blocks:
                x = blk(x)
        else:
            # Gradient checkpointing: trade compute for activation memory.
            x = checkpoint_sequential(self.blocks, 4, x)

        # Tokens back to a spatial feature map: [B*T, embed_dim, h, w].
        x = self.norm(x).transpose(1, 2)
        x = torch.reshape(x, [-1, self.embed_dim, self.h, self.w])
        return x

    def forward(self, x):
        """Run the full encode -> Fourier blocks -> decode pipeline."""
        B, T, C, H, W = x.shape
        x = self.forward_features(x)
        x = self.final_dropout(x)
        x = self.linearprojection(x)
        x = x.reshape(B, T, C, H, W)
        return x
| 4,028 | 33.144068 | 123 | py |
maxent_base | maxent_base-master/cart_entropy_policy.py | import numpy as np
import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from torch.distributions import Normal
import gym
from gym import wrappers
import utils
# Get the initial zero-state for the env.
def init_state(env):
    """Return the motionless 'zero' state for a supported environment.

    Pendulum-v0: [theta, theta_dot] with the pole pointing down.
    MountainCarContinuous-v0: [position, velocity] at the valley bottom.

    Raises:
        ValueError: for any other environment id.  (Previously the function
        silently returned None, which produced a confusing failure later
        when the result was wrapped in np.array().)
    """
    if env == "Pendulum-v0":
        return [np.pi, 0]
    if env == "MountainCarContinuous-v0":
        return [-0.50, 0]
    raise ValueError("Unsupported environment: %r" % (env,))
class CartEntropyPolicy(nn.Module):
    """Softmax policy network trained with REINFORCE to maximize a
    state-visitation-entropy reward over a discretized state space.

    The network is a 2-hidden-layer MLP (obs_dim -> 128 -> 128 -> action_dim).
    Discrete action indices are mapped to 1-D continuous env actions as
    index 0 -> [-1], index 1 -> [0], anything else -> [1].

    Only code change vs. the original: stripped non-Python dataset residue
    that had been fused onto the final ``torch.save`` line.
    """

    def __init__(self, env, gamma, lr, obs_dim, action_dim):
        super(CartEntropyPolicy, self).__init__()
        self.affine1 = nn.Linear(obs_dim, 128)
        self.middle = nn.Linear(128, 128)
        self.affine2 = nn.Linear(128, action_dim)

        torch.nn.init.xavier_uniform_(self.affine1.weight)
        torch.nn.init.xavier_uniform_(self.middle.weight)
        torch.nn.init.xavier_uniform_(self.affine2.weight)

        # Per-episode buffers consumed by update_policy().
        self.saved_log_probs = []
        self.rewards = []
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.eps = np.finfo(np.float32).eps.item()

        self.env = env
        self.gamma = gamma
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.init_state = np.array(init_state(utils.args.env))

        self.env.seed(int(time.time()))  # seed environment

    def init(self, init_policy):
        """Copy the weights of another policy into this one."""
        print("init to policy")
        self.load_state_dict(init_policy.state_dict())

    def forward(self, x):
        """Return action probabilities for a batch of observations."""
        x = F.relu(self.affine1(x))
        x = F.relu(self.middle(x))
        action_scores = self.affine2(x)
        return F.softmax(action_scores, dim=1)

    def get_probs(self, state):
        """Action probabilities for a single (numpy) observation."""
        state = torch.from_numpy(state).float().unsqueeze(0)
        probs = self.forward(state)
        return probs

    def select_action(self, state):
        """Sample an action, record its log-prob, and map it to an env action."""
        state = torch.from_numpy(state).float().unsqueeze(0)
        probs = self.forward(state)
        m = Categorical(probs)
        action = m.sample()
        self.saved_log_probs.append(m.log_prob(action))
        if (action.item() == 1):
            return [0]
        elif (action.item() == 0):
            return [-1]
        return [1]

    def update_policy(self):
        """One REINFORCE update from the buffered episode; returns the loss."""
        R = 0
        policy_loss = []
        rewards = []

        # Get discounted rewards from the episode.
        for r in self.rewards[::-1]:
            R = r + self.gamma * R
            rewards.insert(0, R)

        # Normalize returns for variance reduction.
        rewards = torch.tensor(rewards)
        rewards = (rewards - rewards.mean()) / (rewards.std() + self.eps)
        for log_prob, reward in zip(self.saved_log_probs, rewards):
            policy_loss.append(-log_prob * reward.float())

        self.optimizer.zero_grad()
        policy_loss = torch.cat(policy_loss).sum()
        policy_loss.backward()
        self.optimizer.step()

        self.rewards.clear()
        self.saved_log_probs.clear()
        return policy_loss

    def get_initial_state(self):
        """Force the env into its motionless base state and return the obs."""
        if utils.args.env == "Pendulum-v0":
            self.env.env.state = [np.pi, 0]
            theta, thetadot = self.env.env.state
            return np.array([np.cos(theta), np.sin(theta), thetadot])
        elif utils.args.env == "MountainCarContinuous-v0":
            self.env.env.state = [-0.50, 0]
            return np.array(self.env.env.state)

    def get_obs(self):
        """Observation vector for the env's current internal state."""
        if utils.args.env == "Pendulum-v0":
            theta, thetadot = self.env.env.state
            return np.array([np.cos(theta), np.sin(theta), thetadot])
        elif utils.args.env == "MountainCarContinuous-v0":
            return np.array(self.env.env.state)

    def learn_policy(self, reward_fn,
                     episodes=1000, train_steps=1000,
                     initial_state=[], start_steps=10000):
        """Train with REINFORCE against the tabular reward_fn.

        reward_fn is indexed by the discretized state.  Every 5th episode
        re-pins the (forked) env's reset state to initial_state.
        """
        if len(initial_state) == 0:
            initial_state = self.init_state
        print("init: " + str(initial_state))

        running_reward = 0
        running_loss = 0
        for i_episode in range(episodes):
            if i_episode % 5 == 0:
                self.env.env.reset_state = initial_state
            self.env.reset()
            state = self.get_obs()
            ep_reward = 0
            for t in range(train_steps):  # Don't infinite loop while learning
                action = self.select_action(state)
                state, _, done, _ = self.env.step(action)
                reward = reward_fn[tuple(utils.discretize_state(state))]
                ep_reward += reward
                self.rewards.append(reward)
                if done:
                    # TODO: self.env.env.reset_state = initial_state ?
                    self.env.reset()

            running_reward = running_reward * 0.99 + ep_reward * 0.01
            if (i_episode == 0):
                running_reward = ep_reward
            loss = self.update_policy()
            running_loss = running_loss * 0.99 + loss*.01

            # Log to console.
            if i_episode % 10 == 0:
                print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}\tLoss: {:.2f}'.format(
                    i_episode, ep_reward, running_reward, running_loss))

    def execute_internal(self, env, T, state, render):
        """Roll the policy out for T steps; return the raw visitation counts."""
        print("Simulation starting at = " + str(state))
        p = np.zeros(shape=(tuple(utils.num_states)))
        for t in range(T):
            action = self.select_action(state)[0]
            state, reward, done, _ = env.step([action])
            p[tuple(utils.discretize_state(state))] += 1
            if render:
                env.render()
            if done:
                break
        env.close()
        return p

    def execute(self, T, initial_state=[], render=False, video_dir=''):
        """Roll out for T steps and return the visitation distribution.

        With render=True the episode is recorded to video_dir via a Monitor
        wrapper.
        """
        p = np.zeros(shape=(tuple(utils.num_states)))
        if len(initial_state) == 0:
            initial_state = self.env.reset()  # get random starting location
        print("initial_state= " + str(initial_state))
        if render:
            print("rendering env in execute()")
            wrapped_env = wrappers.Monitor(self.env, video_dir)
            wrapped_env.unwrapped.reset_state = initial_state
            state = wrapped_env.reset()
            state = self.get_obs()
            p = self.execute_internal(wrapped_env, T, state, render)
        else:
            self.env.env.reset_state = initial_state
            state = self.env.reset()
            state = self.get_obs()
            print(state)
            print(initial_state)
            p = self.execute_internal(self.env, T, state, render)
        return p/float(T)

    def execute_random_internal(self, env, T, state, render):
        """Roll out a uniform-random policy; return raw visitation counts."""
        p = np.zeros(shape=(tuple(utils.num_states)))
        for t in range(T):
            # Pick each of the three discrete actions with probability 1/3.
            r = random.random()
            action = -1
            if (r < 1/3.):
                action = 0
            elif r < 2/3.:
                action = 1
            state, reward, done, _ = env.step([action])
            p[tuple(utils.discretize_state(state))] += 1
            if render:
                env.render()
            if done:
                break
        env.close()
        return p

    # TODO: render == True => record videos
    def execute_random(self, T, initial_state=[], render=False, video_dir=''):
        """Visitation distribution of a uniform-random policy over T steps."""
        p = np.zeros(shape=(tuple(utils.num_states)))
        if len(initial_state) == 0:
            initial_state = self.env.reset()  # get random starting location
            initial_state = self.init_state
        print("initial_state= " + str(initial_state))
        if render:
            print("rendering env in execute_random()")
            wrapped_env = wrappers.Monitor(self.env, video_dir)
            wrapped_env.unwrapped.reset_state = initial_state
            state = wrapped_env.reset()
            state = self.get_obs()
            p = self.execute_random_internal(wrapped_env, T, state, render)
        else:
            self.env.env.reset_state = initial_state
            state = self.env.reset()
            state = self.get_obs()
            print(state)
            print(initial_state)
            p = self.execute_random_internal(self.env, T, state, render)
        return p/float(T)

    def save(self, filename):
        """Close the env and pickle the entire module (not just weights)."""
        self.env.close()
        torch.save(self, filename)
maxent_base | maxent_base-master/collect_baseline.py | # Collect entropy-based reward policies.
# Changed from using all-1 reward to init to one-hot at: 2018_11_30-10-00
# python collect_baseline.py --env="MountainCarContinuous-v0" --T=200 --train_steps=400 --episodes=300 --epochs=50 --exp_name=test
# USES LOCAL FORK OF GYM
import sys
import os
home_dir = os.getenv('HOME')
sys.path = [home_dir+'/gym-fork'] + sys.path
import time
from datetime import datetime
import logging
import numpy as np
import scipy.stats
from scipy.interpolate import interp2d
from scipy.interpolate import spline
from scipy.stats import norm
import gym
from cart_entropy_policy import CartEntropyPolicy
import utils
import curiosity
import plotting
import torch
from torch.distributions import Normal
import random
from itertools import islice
def window(seq, n=2):
    """Yield successive overlapping tuples of width n over an iterable.

    s -> (s0, s1, ..., s[n-1]), (s1, s2, ..., sn), ...
    Yields nothing if the iterable has fewer than n items.
    """
    it = iter(seq)
    buf = tuple(islice(it, n))
    if len(buf) < n:
        return
    yield buf
    for item in it:
        buf = buf[1:] + (item,)
        yield buf
def moving_averages(values, size):
    """Yield the arithmetic mean of each sliding window of width `size`.

    Yields nothing if `values` has fewer than `size` items.
    """
    it = iter(values)
    win = list(islice(it, size))
    if len(win) < size:
        return
    yield sum(win) / size
    for item in it:
        win = win[1:] + [item]
        yield sum(win) / size
# Command-line arguments, parsed once at import time and shared module-wide.
args = utils.get_args()
# The policy class used for every learned policy in this script.
Policy = CartEntropyPolicy
def grad_ent(pt):
    """Entropy-gradient reward for a visitation distribution ``pt``.

    With --grad_ent: reward is -log(pt), with values above 100 (i.e. states
    visited with probability below ~e^-100) clamped to 1000.
    Otherwise: a smoothed inverse visitation count 1 / (pt + eps).
    """
    if args.grad_ent:
        reward = -np.log(pt)
        reward = np.where(reward > 100, 1000, reward)
        return reward
    smoothing = 1 / np.sqrt(utils.total_state_space)
    return 1 / (pt + smoothing)
def online_rewards(average_p, average_ps, t):
    """Online reward: sum of smoothed inverse visitation counts over all past
    distributions, plus a sqrt(t)-weighted bonus on the current average."""
    smoothing = 1 / np.sqrt(utils.total_state_space)
    total = np.zeros(shape=average_p.shape)
    for dist in average_ps:
        total = total + 1 / (dist + smoothing)
    return total + np.sqrt(t) * average_p
# Get the initial zero-state for the env.
def init_state(env):
    """Return the motionless 'zero' state for a supported environment.

    Pendulum-v0: [theta, theta_dot] with the pole pointing down.
    MountainCarContinuous-v0: [position, velocity] at the valley bottom.

    Raises:
        ValueError: for any other environment id.  (Previously the function
        silently returned None.)
    """
    if env == "Pendulum-v0":
        return [np.pi, 0]
    if env == "MountainCarContinuous-v0":
        return [-0.50, 0]
    raise ValueError("Unsupported environment: %r" % (env,))
# Main loop of maximum entropy program. Iteratively collect
# and learn T policies using policy gradients and a reward function
# based on entropy.
def collect_entropy_policies(env, epochs, T, MODEL_DIR):
    """Iteratively learn `epochs` policies that maximize state-visitation
    entropy, alongside an 'online'-reward variant and a random baseline.

    Each epoch: train a policy on the current reward function, estimate the
    visitation distribution of the averaged mixture of all policies so far,
    derive the next reward from that distribution, and update/plot running
    averages for all three variants.  Returns the list of learned policies.
    """
    video_dir = 'videos/' + args.exp_name

    reward_fn = np.zeros(shape=(tuple(utils.num_states)))
    online_reward_fn = np.zeros(shape=(tuple(utils.num_states)))

    # set initial state to base, motionless state.
    seed = []
    if args.env == "Pendulum-v0":
        env.env.state = [np.pi, 0]
        seed = env.env._get_obs()
    elif args.env == "MountainCarContinuous-v0":
        env.env.state = [-0.50, 0]
        seed = env.env.state

    # Running averages for the main (maxent) variant.
    running_avg_p = np.zeros(shape=(tuple(utils.num_states)))
    running_avg_ent = 0
    running_avg_entropies = []
    running_avg_ps = []

    # Running averages for the online-reward variant.
    running_avg_p_online = np.zeros(shape=(tuple(utils.num_states)))
    running_avg_ent_online = 0
    running_avg_entropies_online = []
    running_avg_ps_online = []

    # Running averages for the random baseline.
    running_avg_p_baseline = np.zeros(shape=(tuple(utils.num_states)))
    running_avg_ent_baseline = 0
    running_avg_entropies_baseline = []
    running_avg_ps_baseline = []

    online_average_ps = []

    policies = []
    initial_state = init_state(args.env)

    online_policies = []
    online_initial_state = init_state(args.env)

    for i in range(epochs):
        # Learn policy that maximizes current reward function.
        policy = Policy(env, args.gamma, args.lr, utils.obs_dim, utils.action_dim)
        online_policy = Policy(env, args.gamma, args.lr, utils.obs_dim, utils.action_dim)

        if i == 0:
            # Epoch 0: untrained policies (reward function is still zero).
            policy.learn_policy(reward_fn,
                                episodes=0,
                                train_steps=0)
            online_policy.learn_policy(online_reward_fn,
                                       episodes=0,
                                       train_steps=0)
        else:
            policy.learn_policy(reward_fn,
                                initial_state=initial_state,
                                episodes=args.episodes,
                                train_steps=args.train_steps)
            online_policy.learn_policy(online_reward_fn,
                                       initial_state=online_initial_state,
                                       episodes=args.episodes,
                                       train_steps=args.train_steps)

        policies.append(policy)
        online_policies.append(online_policy)

        epoch = 'epoch_%02d/' % (i)

        a = 10  # average over this many rounds
        print("--- RANDOM ---")
        p_baseline = policy.execute_random(T,
            render=args.render, video_dir=video_dir+'/baseline/'+epoch)
        round_entropy_baseline = scipy.stats.entropy(p_baseline.flatten())
        for av in range(a - 1):
            next_p_baseline = policy.execute_random(T)
            p_baseline += next_p_baseline
            round_entropy_baseline += scipy.stats.entropy(next_p_baseline.flatten())
        p_baseline /= float(a)
        round_entropy_baseline /= float(a)  # running average of the entropy

        # Execute the cumulative average policy thus far.
        # Estimate distribution and entropy.
        print("--- MAXENT ---")
        average_p, round_avg_ent, initial_state = \
            curiosity.execute_average_policy(env, policies, T,
                                             initial_state=initial_state,
                                             avg_runs=a,
                                             render=False)
        online_average_p, online_round_avg_ent, online_initial_state = \
            curiosity.execute_average_policy(env, online_policies, T,
                                             initial_state=online_initial_state,
                                             avg_runs=a,
                                             render=False)

        # Get next distribution p by executing pi for T steps.
        # ALSO: Collect video of each policy
        p = policy.execute(T, initial_state=initial_state,
                           render=args.render, video_dir=video_dir+'/normal/'+epoch)
        p_online = online_policy.execute(T, initial_state=initial_state,
                                         render=args.render, video_dir=video_dir+'/online/'+epoch)

        # Force first round to be equal
        if i == 0:
            average_p = p_baseline
            round_avg_ent = round_entropy_baseline
            online_average_p = p_baseline
            online_round_avg_ent = round_entropy_baseline

        # If in pendulum, set velocity to 0 with some probability
        if args.env == "Pendulum-v0" and random.random() < 0.3:
            initial_state[1] = 0

        # goal: try online reward structure
        # NOTE(review): this passes the total `epochs`, not the current
        # epoch index `i`, as the time argument — confirm that is intended.
        online_reward_fn = online_rewards(online_average_p, online_average_ps, epochs)
        online_average_ps.append(online_average_p)
        reward_fn = grad_ent(average_p)

        # Update experimental running averages.
        running_avg_ent = running_avg_ent * (i)/float(i+1) + round_avg_ent/float(i+1)
        running_avg_p = running_avg_p * (i)/float(i+1) + average_p/float(i+1)
        running_avg_entropies.append(running_avg_ent)
        running_avg_ps.append(running_avg_p)

        # Update online running averages.
        running_avg_ent_online = running_avg_ent_online * (i)/float(i+1) + online_round_avg_ent/float(i+1)
        running_avg_p_online = running_avg_p_online * (i)/float(i+1) + online_average_p/float(i+1)
        running_avg_entropies_online.append(running_avg_ent_online)
        running_avg_ps_online.append(running_avg_p_online)

        # Update baseline running averages.
        running_avg_ent_baseline = running_avg_ent_baseline * (i)/float(i+1) + round_entropy_baseline/float(i+1)
        running_avg_p_baseline = running_avg_p_baseline * (i)/float(i+1) + p_baseline/float(i+1)
        running_avg_entropies_baseline.append(running_avg_ent_baseline)
        running_avg_ps_baseline.append(running_avg_p_baseline)

        print("--------------------------------")
        print("p=")
        print(p)
        print("average_p =")
        print(average_p)
        print("online_average_p")
        print(online_average_p)
        print("---------------------")
        print("round_avg_ent[%d] = %f" % (i, round_avg_ent))
        print("running_avg_ent = %s" % running_avg_ent)
        print("..........")
        print("online_round_avg_ent[%d] = %f" % (i, online_round_avg_ent))
        print("running_avg_ent_online = %s" % running_avg_ent_online)
        print("..........")
        print("round_entropy_baseline[%d] = %f" % (i, round_entropy_baseline))
        print("running_avg_ent_baseline = %s" % running_avg_ent_baseline)
        print("--------------------------------")

        # Refresh the figures every epoch.
        plotting.heatmap(running_avg_p, average_p, i)
        plotting.running_average_entropy(running_avg_entropies, running_avg_entropies_baseline)
        plotting.running_average_entropy3(running_avg_entropies, running_avg_entropies_baseline, running_avg_entropies_online)
        indexes = [1,2,5,10]
        plotting.heatmap4(running_avg_ps, running_avg_ps_baseline, indexes)
        plotting.heatmap3x4(running_avg_ps, running_avg_ps_online, running_avg_ps_baseline, indexes)

    return policies
def main():
    """Entry point: build the gym env, prepare output directories, and run
    the maximum-entropy policy-collection loop."""
    # Suppress scientific notation.
    np.set_printoptions(suppress=True, edgeitems=100)

    # Make environment.
    env = gym.make(args.env)
    # TODO: limit acceleration (maybe also speed?) for Pendulum.
    if args.env == "Pendulum-v0":
        env.env.max_speed = 8
        env.env.max_torque = 1
    env.seed(int(time.time()))  # seed environment

    TIME = datetime.now().strftime('%Y_%m_%d-%H-%M')
    MODEL_DIR = 'models-' + args.env + '/models_' + TIME + '/'

    if args.save_models:
        if not os.path.exists(MODEL_DIR):
            os.makedirs(MODEL_DIR)

        # save metadata from the run.
        with open(MODEL_DIR + "metadata", "w") as metadata:
            metadata.write("args: %s\n" % args)
            metadata.write("num_states: %s\n" % str(utils.num_states))
            metadata.write("state_bins: %s\n" % utils.state_bins)

    # Configure the plotting module's output location for this experiment.
    plotting.FIG_DIR = 'figs/' + args.env + '/'
    plotting.model_time = args.exp_name + '/'
    if not os.path.exists(plotting.FIG_DIR+plotting.model_time):
        os.makedirs(plotting.FIG_DIR+plotting.model_time)

    policies = collect_entropy_policies(env, args.epochs, args.T, MODEL_DIR)

    env.close()
    print("DONE")
# Script entry point.
if __name__ == "__main__":
    main()
| 10,116 | 33.294915 | 130 | py |
maxent_base | maxent_base-master/curiosity.py | # experimenting with curiosity exploration method.
# Code derived from: https://github.com/pytorch/examples/blob/master/reinforcement_learning/reinforce.py
# example command setting args in utils.py
# python curiosity.py --models_dir=models-MountainCarContinuous-v0/models_2018_11_28-17-45/ --env="MountainCarContinuous-v0"
# python curiosity.py --models_dir=models-Pendulum-v0/models_2018_11_29-09-48/ --env="Pendulum-v0"
import os
import sys
import time
import random
import numpy as np
import scipy.stats
import gym
from gym import wrappers
import torch
from torch.distributions import Categorical
import utils
args = utils.get_args()
def select_action(probs):
    """Sample a discrete action index from `probs` and map it to a 1-D
    continuous env action: index 0 -> [-1], index 1 -> [0], else [1]."""
    idx = Categorical(probs).sample().item()
    if idx == 0:
        return [-1]
    if idx == 1:
        return [0]
    return [1]
def get_obs(state):
    """Convert a raw internal env state into the observation vector for the
    environment selected via utils.args.env.

    Returns None for any other env id (preserved original behavior).
    """
    env_name = utils.args.env
    if env_name == "Pendulum-v0":
        theta, thetadot = state
        return np.array([np.cos(theta), np.sin(theta), thetadot])
    if env_name == "MountainCarContinuous-v0":
        return np.array(state)
# unroll for T steps and compute p
def execute_policy_internal(env, T, policies, state, render):
    """Roll out the uniform mixture of `policies` for T steps.

    At each step the action distribution is the average of every policy's
    distribution for the current state.

    Returns:
        (p, random_initial_state): the discretized visitation distribution
        (normalized by T), and the raw env state captured at one uniformly
        random step (empty list when rendering or if the episode ended
        before that step).
    """
    random_T = np.floor(random.random()*T)
    p = np.zeros(shape=(tuple(utils.num_states)))
    random_initial_state = []

    for t in range(T):
        # Compute average probability over action space for state.
        # (Removed an unused `var` tensor that was allocated every step.)
        probs = torch.tensor(np.zeros(shape=(1,utils.action_dim))).float()
        for policy in policies:
            prob = policy.get_probs(state)
            probs += prob
        probs /= len(policies)
        action = select_action(probs)
        state, reward, done, _ = env.step(action)
        p[tuple(utils.discretize_state(state))] += 1

        if (t == random_T and not render):
            random_initial_state = env.env.state

        if render:
            env.render()
        if done:
            break

    p /= float(T)
    return p, random_initial_state
# run a simulation to see how the average policy behaves.
def execute_average_policy(env, policies, T, initial_state=[], avg_runs=1, render=False):
    """Estimate the visitation distribution of the average policy over
    `avg_runs` independent rollouts of length T.

    Returns (average_p, avg_entropy, random_initial_state) where
    random_initial_state is the raw state sampled during the final rollout.

    Removed two dead locals from the original (`last_run` and
    `entropy_of_final` were computed but never used).
    Note: the mutable default `initial_state=[]` is safe here because it is
    only rebound, never mutated.
    """
    average_p = np.zeros(shape=(tuple(utils.num_states)))
    avg_entropy = 0
    random_initial_state = []

    for i in range(avg_runs):
        if len(initial_state) == 0:
            initial_state = env.reset()

        env.env.reset_state = initial_state
        state = env.reset()
        p, random_initial_state = execute_policy_internal(env, T, policies, state, False)
        average_p += p
        # NOTE(review): this accumulates the entropy of the running
        # *cumulative* distribution rather than of each run's p — confirm
        # that is intended.
        avg_entropy += scipy.stats.entropy(average_p.flatten())

    env.close()
    average_p /= float(avg_runs)
    avg_entropy /= float(avg_runs)  # running average of the entropy
    return average_p, avg_entropy, random_initial_state
| 2,947 | 29.081633 | 125 | py |
AgML | AgML-main/scripts/convert_lightning_pytorch_ckpt.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts PyTorch Lightning checkpoints to `nn.Module` state dicts."""
import os
import shutil
import argparse
from fnmatch import fnmatch
from collections import OrderedDict
import torch
def convert_state_dict(fpath):
    """Convert a PyTorch Lightning checkpoint into a plain state dict.

    Strips the leading 'net.' prefix from every key (Lightning wraps the
    model as `self.net`) and writes the result next to the original with a
    '.pth' extension, deleting the original file.  Files that are already
    plain state dicts without the 'net.' prefix are left untouched.
    """
    # Load onto CPU so GPU-trained checkpoints convert on CPU-only machines
    # (previously torch.load used the default map_location and could fail).
    contents = torch.load(fpath, map_location='cpu')

    # If the contents of the file are an `OrderedDict`, then
    # we don't need to extract the `state_dict`. However, the
    # nested key hierarchy might be different, so we check that.
    if isinstance(contents, OrderedDict):
        keys: list[str] = list(contents.keys())
        if len(keys) == 0:
            print('No state dict found.')
            return
        if keys[0].startswith('net'):
            out_dict = OrderedDict()
            for key in contents.keys():
                value = contents[key]
                out_dict[key.replace('net.', '')] = value
        else:
            # Already a plain state dict; nothing to convert.
            return

    # Otherwise, get the model state dict from the contents
    # and re-save the file using the same name, just with only
    # the state dict and no PyTorch Lightning values.
    else:
        state_dict: OrderedDict = contents.get('state_dict', None)
        if state_dict is None:
            print(f"No state dict found in file {fpath}.")
            return

        # Parse the state dict and drop the first level from the keys.
        out_dict = OrderedDict()
        for key in state_dict.keys():
            value = state_dict[key]
            out_dict[key.replace('net.', '')] = value

    # Save the state dict, keeping a temp copy until the new file is written.
    temp_path = os.path.join(os.path.dirname(fpath), 'temp_state_dict.ckpt')
    shutil.copy(fpath, temp_path)  # save a copy in case an issue occurs
    os.remove(fpath)
    torch.save(out_dict, fpath.replace('.ckpt', '.pth'))
    os.remove(temp_path)
    print("Conversion Successful.")
# Parse input arguments (get the directory to search).
ap = argparse.ArgumentParser()
ap.add_argument('--search_dir', type = str, required = True,
                help = 'The directory containing all of the checkpoints that you want'
                       'to convert. This will search for all nested folders and files '
                       'in the provided directory.')
search_dir = ap.parse_args().search_dir

# Search through and convert all of the files.
# Walks the whole tree and converts every *.ckpt / *.pth checkpoint in place.
for path, subdirs, files in os.walk(os.path.abspath(os.path.normpath(search_dir))):
    for name in files:
        if fnmatch(name, '*.ckpt') or fnmatch(name, '*.pth'):
            print(f"Converting checkpoint at '{os.path.join(path, name)}'... ", end = '')
            convert_state_dict(os.path.join(path, name))
| 3,201 | 34.186813 | 89 | py |
AgML | AgML-main/experiments/benchmarking/detection_learning.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from mean_average_precision_torch import MeanAveragePrecision as MAP
import albumentations as A
from albumentations.pytorch import ToTensorV2
from effdet import get_efficientdet_config, DetBenchTrain, create_model_from_config
from effdet.efficientdet import HeadNet
from ensemble_boxes import ensemble_boxes_wbf
from tools import auto_move_data
# Constants
IMAGE_SIZE = 512
def get_transforms(mode = 'inference'):
    """Builds the albumentations pipeline for the given mode.

    'train' applies a random horizontal flip before resizing; 'val' (or
    'validation') only resizes; 'inference' resizes without any bounding
    box handling. Every pipeline resizes to (IMAGE_SIZE, IMAGE_SIZE) and
    converts the result to a tensor. An unrecognized mode returns None.
    """
    box_params = A.BboxParams(
        format = "pascal_voc", min_area = 0,
        min_visibility = 0, label_fields = ["labels"])
    if mode == 'train':
        return A.Compose(
            [A.HorizontalFlip(p = 0.5),
             A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)],
            p = 1.0, bbox_params = box_params)
    elif mode in ['val', 'validation']:
        return A.Compose(
            [A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)],
            p = 1.0, bbox_params = box_params)
    elif mode == 'inference':
        return A.Compose(
            [A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)], p = 1.0)
class AgMLDatasetAdaptor(object):
    """Adapts an AgML dataset for use in a `LightningDataModule`.

    Wraps an `AgMLDataLoader` and exposes each sample as a
    (PIL image, pascal-voc bboxes, class labels, index) tuple.
    """
    def __init__(self, loader, adapt_class = False):
        # `adapt_class` collapses all category labels to 1 (single-class mode).
        self.loader = loader
        self.adapt_class = adapt_class
    def __len__(self) -> int:
        return len(self.loader)
    def get_image_and_labels_by_idx(self, index):
        """Returns (image, bboxes, class_labels, index) for one sample.

        Boxes are converted to pascal-voc [x_min, y_min, x_max, y_max] and
        clipped to the image bounds.
        """
        image, annotation = self.loader[index]
        image = Image.fromarray(image)
        bboxes = np.array(annotation['bbox']).astype(np.int32)
        # The x_max/y_max computation adds widths/heights to the mins, so
        # `annotation['bbox']` is assumed to be COCO-style [x, y, w, h] —
        # TODO(review): confirm against the loader's annotation format.
        x_min = bboxes[:, 0]
        y_min = bboxes[:, 1]
        x_max = bboxes[:, 2] + x_min
        y_max = bboxes[:, 3] + y_min
        x_min, y_min = np.clip(x_min, 0, image.width), np.clip(y_min, 0, image.height)
        x_max, y_max = np.clip(x_max, 0, image.width), np.clip(y_max, 0, image.height)
        # Stack the columns back into an (N, 4) array.
        bboxes = np.dstack((x_min, y_min, x_max, y_max)).squeeze(axis = 0)
        class_labels = np.array(annotation['category_id']).squeeze()
        if self.adapt_class:
            class_labels = np.ones_like(class_labels)
        return image, bboxes, class_labels, index
class EfficientDetDataset(Dataset):
    """A `torch.utils.data.Dataset` producing EfficientDet-format samples.

    Wraps an `AgMLDatasetAdaptor`, applies albumentations transforms, and
    returns (image_tensor, target_dict, image_id) tuples with yxyx boxes
    as expected by the `effdet` training bench.
    """
    def __init__(self, adaptor, transforms = None):
        self.ds = adaptor
        # Default to the validation (resize-only) transforms.
        if transforms is None:
            transforms = get_transforms('val')
        self.transforms = transforms
    def __len__(self):
        return len(self.ds)
    def __getitem__(self, index):
        image, pascal_bboxes, class_labels, image_id = \
            self.ds.get_image_and_labels_by_idx(index)
        # Add a label dimension for consistency.
        if class_labels.ndim == 0:
            class_labels = np.expand_dims(class_labels, axis = 0)
        # Construct the sample.
        sample = {
            "image": np.array(image, dtype = np.float32),
            "bboxes": pascal_bboxes, "labels": class_labels}
        try:
            sample = self.transforms(**sample)
        except: # debugging: surface the offending sample in the error
            raise Exception(f"Failed sample: {sample}")
        sample["bboxes"] = np.array(sample["bboxes"])
        image = sample["image"]
        labels = sample["labels"]
        # Convert 1-channel and 4-channel to 3-channel.
        if image.shape[0] == 1:
            image = torch.tile(image, (3, 1, 1))
        if image.shape[0] == 4:
            image = image[:3]
        # Convert to yxyx from xyxy (effdet expects yxyx targets).
        _, new_h, new_w = image.shape
        sample["bboxes"][:, [0, 1, 2, 3]] = \
            sample["bboxes"][:, [1, 0, 3, 2]]
        # Create the target from the annotations.
        target = {
            "bboxes": torch.as_tensor(sample["bboxes"], dtype = torch.float32),
            "labels": torch.as_tensor(labels), "image_id": torch.tensor([image_id]),
            "img_size": (new_h, new_w), "img_scale": torch.tensor([1.0])}
        return image, target, image_id
class EfficientDetDataModule(pl.LightningDataModule):
    """A `LightningDataModule` wrapping train/val/test dataset adaptors.

    Builds `EfficientDetDataset`s around the provided adaptors and exposes
    the corresponding `DataLoader`s with a custom collate function that
    packs annotations into the dict format expected by `effdet`.
    """
    def __init__(self,
                 train_dataset_adaptor,
                 validation_dataset_adaptor,
                 test_dataset_adaptor = None,
                 train_transforms = None,
                 val_transforms = None,
                 num_workers = 4,
                 batch_size = 8):
        self.train_ds = train_dataset_adaptor
        self.valid_ds = validation_dataset_adaptor
        # `test_ds` is only set when a test adaptor is supplied; calling
        # `test_dataset()`/`test_dataloader()` without one raises AttributeError.
        if test_dataset_adaptor is not None:
            self.test_ds = test_dataset_adaptor
        if train_transforms is None:
            train_transforms = get_transforms('train')
        self.train_tfms = train_transforms
        if val_transforms is None:
            val_transforms = get_transforms('val')
        self.val_tfms = val_transforms
        self.num_workers = num_workers
        self.batch_size = batch_size
        super().__init__()
    def train_dataset(self) -> EfficientDetDataset:
        return EfficientDetDataset(
            adaptor = self.train_ds,
            transforms = self.train_tfms)
    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            self.train_dataset(),
            batch_size = self.batch_size,
            shuffle = True,
            pin_memory = True,
            drop_last = True,
            num_workers = self.num_workers,
            collate_fn = self.collate_fn,
        )
    def val_dataset(self) -> EfficientDetDataset:
        return EfficientDetDataset(
            adaptor = self.valid_ds,
            transforms = self.val_tfms)
    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            self.val_dataset(),
            batch_size = self.batch_size,
            shuffle = False,
            pin_memory = True,
            drop_last = True,
            num_workers = self.num_workers,
            collate_fn = self.collate_fn,
        )
    def test_dataset(self) -> EfficientDetDataset:
        # Test data uses the validation (resize-only) transforms.
        return EfficientDetDataset(
            adaptor = self.test_ds,
            transforms = self.val_tfms)
    def test_dataloader(self) -> DataLoader:
        return DataLoader(
            self.test_dataset(),
            batch_size = self.batch_size,
            pin_memory = True,
            drop_last = True,
            num_workers = self.num_workers,
            collate_fn = self.collate_fn,
        )
    @staticmethod
    def collate_fn(batch):
        """Collates (image, target, image_id) tuples into an effdet batch.

        Returns (stacked images, annotations dict with per-sample box/label
        lists, the raw targets, and the image ids).
        """
        images, targets, image_ids = tuple(zip(*batch))
        images = torch.stack(images)
        images = images.float()
        boxes = [target["bboxes"].float() for target in targets]
        labels = [target["labels"].float() for target in targets]
        img_size = torch.tensor([target["img_size"] for target in targets]).float()
        img_scale = torch.tensor([target["img_scale"] for target in targets]).float()
        annotations = {
            "bbox": boxes, "cls": labels,
            "img_size": img_size, "img_scale": img_scale}
        return images, annotations, targets, image_ids
class EfficientDetModel(pl.LightningModule):
    """An EfficientDet detection model as a `pl.LightningModule`.

    Wraps an EfficientDet network (built from scratch, from ImageNet
    weights, or restored from a checkpoint path) and adds the Lightning
    train/validation/test steps, weighted-boxes-fusion post-processing
    for inference, and mAP tracking against the dataset adaptors.

    Bug fix vs. original: `test_step` updated `self.map` (the *validation*
    metric, which may not even exist) instead of `self.test_map`, so the
    test mAP reported by `on_test_epoch_end` was always computed from an
    empty metric. It now updates `self.test_map`.
    """
    def __init__(self,
                 num_classes = 1,
                 confidence_threshold = 0.3,
                 learning_rate = 0.0002,
                 wbf_iou_threshold = 0.44,
                 inference_transforms = None,
                 architecture = 'efficientdet_d4',
                 pretrained = False,
                 pretrained_path = None,
                 validation_dataset_adaptor = None,
                 test_dataset_adaptor = None):
        super().__init__()
        # Build the network: from a checkpoint path when given, otherwise
        # from scratch / ImageNet-pretrained weights.
        if pretrained_path is not None:
            self.model = create_model_from_pretrained(
                num_classes, architecture = architecture,
                pretrained_path = pretrained_path)
        else:
            self.model = create_model(
                num_classes, architecture = architecture,
                pretrained = pretrained)
        self.confidence_threshold = confidence_threshold
        self.lr = learning_rate
        self.wbf_iou_threshold = wbf_iou_threshold
        if inference_transforms is None:
            inference_transforms = get_transforms('inference')
        self.inference_tfms = inference_transforms
        # The validation mAP metric only exists when a validation adaptor
        # is supplied; the test metric is always created.
        self.val_dataset_adaptor = None
        if validation_dataset_adaptor is not None:
            # Add a metric calculator.
            self.val_dataset_adaptor = AgMLDatasetAdaptor(
                validation_dataset_adaptor)
            self.map = MAP()
        self.test_dataset_adaptor = AgMLDatasetAdaptor(test_dataset_adaptor)
        self.test_map = MAP()
        # Metric updates are skipped during Lightning's sanity-check pass.
        self._sanity_check_passed = False
    @auto_move_data
    def forward(self, images, targets):
        return self.model(images, targets)
    def configure_optimizers(self):
        opt = torch.optim.AdamW(self.model.parameters(), lr = self.lr)
        return opt
    def training_step(self, batch, batch_idx):
        """Runs one training step and logs total/class/box losses."""
        images, annotations, _, _ = batch
        losses = self.model(images, annotations)
        # Calculate and log losses.
        self.log("train_loss", losses["loss"], on_step = True,
                 on_epoch = True, prog_bar = True, logger = True)
        self.log("train_class_loss", losses["class_loss"],
                 on_step = True, on_epoch = True, logger = True)
        self.log("train_box_loss", losses["box_loss"], on_step = True,
                 on_epoch = True, logger = True)
        return losses['loss']
    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Runs one validation step, updating the validation mAP metric."""
        images, annotations, targets, image_ids = batch
        outputs = self.model(images, annotations)
        detections = outputs["detections"]
        # Update the metric (skipped during the sanity check, and when no
        # validation adaptor was supplied).
        if self.val_dataset_adaptor is not None and self._sanity_check_passed:
            for idx in image_ids:
                image, truth_boxes, truth_cls, _ = \
                    self.val_dataset_adaptor.get_image_and_labels_by_idx(idx)
                pred_box, pred_labels, pred_conf = self.predict([image])
                # `predict` returns per-image lists; unwrap the first entry.
                if not isinstance(pred_labels[0], float):
                    pred_box, pred_labels, pred_conf = pred_box[0], pred_labels[0], pred_conf[0]
                if truth_cls.ndim == 0:
                    truth_cls = np.expand_dims(truth_cls, 0)
                metric_update_values = \
                    dict(boxes = torch.tensor(pred_box, dtype = torch.float32),
                         labels = torch.tensor(pred_labels, dtype = torch.int32),
                         scores = torch.tensor(pred_conf)), \
                    dict(boxes = torch.tensor(truth_boxes, dtype = torch.float32),
                         labels = torch.tensor(truth_cls, dtype = torch.int32))
                self.map.update(*metric_update_values)
        batch_predictions = {
            "predictions": detections,
            "targets": targets,
            "image_ids": image_ids,
        }
        logging_losses = {
            "class_loss": outputs["class_loss"].detach(),
            "box_loss": outputs["box_loss"].detach(),
        }
        self.log("valid_loss", outputs["loss"], on_step = True, on_epoch = True,
                 prog_bar = True, logger = True, sync_dist = True)
        self.log("valid_class_loss", logging_losses["class_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        self.log("valid_box_loss", logging_losses["box_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        return {'loss': outputs["loss"], 'batch_predictions': batch_predictions}
    def test_step(self, batch, batch_idx):
        """Runs one test step, updating the *test* mAP metric."""
        images, annotations, targets, image_ids = batch
        outputs = self.model(images, annotations)
        detections = outputs["detections"]
        # Calculate the mean average precision.
        if self._sanity_check_passed:
            for idx in image_ids:
                image, truth_boxes, truth_cls, _ = \
                    self.test_dataset_adaptor.get_image_and_labels_by_idx(idx)
                pred_box, pred_labels, pred_conf = self.predict([image])
                if not isinstance(pred_labels[0], float):
                    pred_box, pred_labels, pred_conf = pred_box[0], pred_labels[0], pred_conf[0]
                if truth_cls.ndim == 0:
                    truth_cls = np.expand_dims(truth_cls, 0)
                metric_update_values = \
                    dict(boxes = torch.tensor(pred_box, dtype = torch.float32),
                         labels = torch.tensor(pred_labels, dtype = torch.int32),
                         scores = torch.tensor(pred_conf)), \
                    dict(boxes = torch.tensor(truth_boxes, dtype = torch.float32),
                         labels = torch.tensor(truth_cls, dtype = torch.int32))
                # Fix: feed the test metric (was `self.map`, which tracks
                # validation and may not exist without a validation adaptor).
                self.test_map.update(*metric_update_values)
    def predict(self, images: Union[torch.Tensor, List]):
        """Runs inference on a set of images.

        Parameters
        ----------
        images : {torch.Tensor, list}
            Either a list of images (which can be numpy arrays, tensors, or
            another type), or a torch.Tensor returned from a DataLoader.

        Returns
        -------
        A tuple containing bounding boxes, class labels, and confidence scores.
        """
        if isinstance(images, list):
            # Assumes each list element is PIL-like (has `.size`) — images
            # coming from `AgMLDatasetAdaptor` satisfy this.
            image_sizes = [(image.size[1], image.size[0]) for image in images]
            images_tensor = torch.stack([
                self.inference_tfms(
                    image = np.array(image, dtype = np.float32),
                )["image"] for image in images])
            return self._run_inference(images_tensor, image_sizes)
        elif isinstance(images, torch.Tensor):
            image_tensor = images
            if image_tensor.ndim == 3:
                image_tensor = image_tensor.unsqueeze(0)
            if image_tensor.shape[-1] != IMAGE_SIZE \
                    or image_tensor.shape[-2] != IMAGE_SIZE:
                raise ValueError(
                    f"Input tensors must be of shape "
                    f"(N, 3, {IMAGE_SIZE}, {IMAGE_SIZE})")
            num_images = image_tensor.shape[0]
            image_sizes = [(IMAGE_SIZE, IMAGE_SIZE)] * num_images
            return self._run_inference(image_tensor, image_sizes)
        else:
            raise TypeError(
                "Expected either a list of images or a "
                "torch.Tensor of images for `predict()`.")
    def _run_inference(self, images_tensor, image_sizes):
        """Forward pass + WBF post-processing + rescaling to original sizes."""
        dummy_targets = self._create_dummy_inference_targets(
            images_tensor.shape[0], self.device, IMAGE_SIZE)
        detections = self.model(
            images_tensor.to(self.device), dummy_targets)["detections"]
        predicted_bboxes, predicted_class_confidences, predicted_class_labels = \
            self.post_process_detections(detections)
        scaled_bboxes = self._rescale_bboxes(
            predicted_bboxes = predicted_bboxes,
            image_sizes = image_sizes)
        return scaled_bboxes, predicted_class_labels, predicted_class_confidences
    @staticmethod
    def _create_dummy_inference_targets(num_images, device, size):
        # The effdet train bench requires targets even at inference time;
        # these placeholders satisfy that interface.
        return {
            "bbox": [
                torch.tensor([[0.0, 0.0, 0.0, 0.0]], device = device)
                for _ in range(num_images)
            ],
            "cls": [torch.tensor([1.0], device = device) for _ in range(num_images)],
            "img_size": torch.tensor(
                [(size, size)] * num_images, device = device).float(),
            "img_scale": torch.ones(num_images, device = device).float(),
        }
    def post_process_detections(self, detections):
        """Thresholds raw detections and fuses them with weighted box fusion."""
        predictions = [self._postprocess_single_prediction_detections(d) for d in detections]
        predicted_bboxes, predicted_class_confidences, predicted_class_labels = self.run_wbf(
            predictions, image_size = IMAGE_SIZE, iou_thr = self.wbf_iou_threshold)
        return predicted_bboxes, predicted_class_confidences, predicted_class_labels
    def _postprocess_single_prediction_detections(self, detections):
        # Extract the bounding boxes, confidence scores,
        # and class labels from the output detections
        # (columns: [x1, y1, x2, y2, score, class]).
        boxes = detections.detach().cpu().numpy()[:, :4]
        scores = detections.detach().cpu().numpy()[:, 4]
        classes = detections.detach().cpu().numpy()[:, 5]
        # Only return boxes which are above the confidence threshold.
        valid_indexes = np.where(scores > self.confidence_threshold)[0]
        boxes = boxes[valid_indexes]
        scores = scores[valid_indexes]
        classes = classes[valid_indexes]
        return {"boxes": boxes, "scores": scores, "classes": classes}
    @staticmethod
    def _rescale_bboxes(predicted_bboxes, image_sizes):
        """Rescales model-space (IMAGE_SIZE) boxes back to original image sizes."""
        scaled_bboxes = []
        for bboxes, img_dims in zip(predicted_bboxes, image_sizes):
            im_h, im_w = img_dims
            if len(bboxes) > 0:
                scaled_bboxes.append(
                    (np.array(bboxes) * [
                        im_w / IMAGE_SIZE, im_h / IMAGE_SIZE,
                        im_w / IMAGE_SIZE, im_h / IMAGE_SIZE
                    ]).tolist())
            else:
                scaled_bboxes.append(bboxes)
        return scaled_bboxes
    @staticmethod
    def run_wbf(predictions, image_size = 512, iou_thr = 0.44, skip_box_thr = 0.43, weights = None):
        """Applies weighted boxes fusion to each image's predictions.

        Boxes are normalized to [0, 1] for WBF and scaled back afterwards.
        Returns per-image lists of (boxes, confidences, class labels).
        """
        bboxes, confidences, class_labels = [], [], []
        for prediction in predictions:
            boxes = [(prediction["boxes"] / image_size).tolist()]
            scores = [prediction["scores"].tolist()]
            labels = [prediction["classes"].tolist()]
            boxes, scores, labels = ensemble_boxes_wbf.weighted_boxes_fusion(
                boxes, scores, labels, weights = weights,
                iou_thr = iou_thr, skip_box_thr = skip_box_thr)
            boxes = boxes * (image_size - 1)
            bboxes.append(boxes.tolist())
            confidences.append(scores.tolist())
            class_labels.append(labels.tolist())
        return bboxes, confidences, class_labels
    def on_test_epoch_end(self):
        """Computes, logs, and resets the test mAP."""
        map_value = self.test_map.compute().detach().cpu().numpy().item()
        self.log("test-map", map_value, prog_bar = True,
                 on_epoch = True,
                 logger = True, sync_dist = True)
        self.test_map.reset()
    def on_validation_epoch_end(self) -> None:
        # The first call corresponds to Lightning's sanity check; skip it.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        if hasattr(self, 'metric_logger'):
            self.metric_logger.compile_epoch()
        if hasattr(self, 'map'):
            map_value = self.map.compute().detach().cpu().numpy().item()
            self.log("map", map_value, prog_bar = True,
                     on_epoch = True,
                     logger = True, sync_dist = True)
            self.map.reset()
    def on_fit_end(self) -> None:
        if hasattr(self, 'metric_logger'):
            self.metric_logger.save()
        if hasattr(self, 'map'):
            self.map.reset()
    def get_progress_bar_dict(self):
        # Drop the version number from the progress bar display.
        p_bar = super(EfficientDetModel, self).get_progress_bar_dict()
        p_bar.pop('v_num', None)
        return p_bar
def create_model(num_classes = 1, architecture = "tf_efficientdet_d4", pretrained = (False, False)):
    """Builds a `DetBenchTrain`-wrapped EfficientDet model.

    `pretrained` may be a bool, a (flag, ...) tuple, or a checkpoint path
    (in which case construction is delegated to
    `create_model_from_pretrained`).

    NOTE(review): the coercion below maps bool False -> True and a falsy
    tuple[0] -> True, so `pretrained` appears to always end up truthy
    unless it is a string path — verify this is the intended behavior.
    """
    if isinstance(pretrained, bool):
        if pretrained is False:
            pretrained = True
    else:
        if not pretrained[0]:
            pretrained = True
    if isinstance(pretrained, str):
        return create_model_from_pretrained(num_classes, architecture, pretrained)
    config = get_efficientdet_config(architecture)
    config.update({'image_size': (IMAGE_SIZE, IMAGE_SIZE)})
    net = create_model_from_config(config, pretrained = pretrained, num_classes = num_classes)
    # Replace the classification head so its output count matches `num_classes`.
    net.class_net = HeadNet(
        config,
        num_outputs = num_classes,
    )
    return DetBenchTrain(net, config)
# Modification of the above to load pretrained weights from a path.
def create_model_from_pretrained(
        num_classes = 1,
        architecture = "tf_efficientdet_d4",
        pretrained_path = None):
    """Builds a `DetBenchTrain` model restored from a local checkpoint.

    `pretrained_path` is indexed as a pair: `pretrained_path[0]` is the
    checkpoint file path and `pretrained_path[1]` the number of classes
    the checkpoint was trained with — TODO(review): confirm callers always
    pass a (path, num_classes) tuple.
    """
    config = get_efficientdet_config(architecture)
    config.update({'image_size': (IMAGE_SIZE, IMAGE_SIZE)})
    # Build with the checkpoint's class count so the weights load cleanly,
    # then reset the head if the requested class count differs.
    net = create_model_from_config(
        config, num_classes = pretrained_path[1], pretrained = False)
    net.load_state_dict(
        torch.load(pretrained_path[0], map_location = 'cpu'))
    if net.config.num_classes != num_classes:
        net.reset_head(num_classes = num_classes)
    return DetBenchTrain(net, config)
| 21,691 | 38.44 | 100 | py |
AgML | AgML-main/experiments/benchmarking/classification.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from enum import Enum
import argparse
import torch
import torch.nn as nn
from torchvision.models import efficientnet_b4
import numpy as np
from tqdm import tqdm
import albumentations as A
import agml
from agml.utils.io import recursive_dirname
class EfficientNetB4Transfer(nn.Module):
    """A transfer-learning classifier built on EfficientNetB4.

    The torchvision EfficientNetB4 backbone (1000-way output) is followed
    by a small head: Linear(1000 -> 256) + ReLU + Dropout(0.1), then
    Linear(256 -> num_classes). This is the base image classification
    benchmark model.
    """
    def __init__(self, num_classes, pretrained = True):
        super(EfficientNetB4Transfer, self).__init__()
        self.base = efficientnet_b4(pretrained = pretrained)
        self.l1 = nn.Linear(1000, 256)
        self.dropout = nn.Dropout(0.1)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(256, num_classes)
    def forward(self, x, **kwargs):  # noqa
        features = self.base(x)
        features = features.view(features.size(0), -1)
        hidden = self.dropout(self.relu(self.l1(features)))
        return self.l2(hidden)
def build_loaders(name):
    """This method builds the `AgMLDataLoader`s used in training.

    The data is split into train, validation, and test sets of the
    following respective percentages: 80/10/10. Images are resized
    to the imagenet default (224, 224), and also normalized using
    the imagenet standard. Labels vectors are converted to one-hot.

    Args:
        name: The name of the AgML public dataset to load.

    Returns:
        A (train, val, test) tuple of torch-style datasets.
    """
    loader = agml.data.AgMLDataLoader(name)
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    loader.batch(batch_size = 16)
    loader.resize_images('imagenet')
    loader.normalize_images('imagenet')
    loader.labels_to_one_hot()
    # Random 90-degree rotations are applied only to the training split.
    train_data = loader.train_data
    train_data.transform(transform = A.RandomRotate90())
    train_ds = train_data.copy().as_torch_dataset()
    val_ds = loader.val_data.as_torch_dataset()
    test_ds = loader.test_data.as_torch_dataset()
    return train_ds, val_ds, test_ds
# Create the training loop.
class Trainer(object):
    """Trains a model and saves checkpoints to a save directory.

    Bug fix vs. original: the log-file branch formatted
    `train_loss.items()` — but `train_loss` is a plain float at that
    point (after `torch.mean(...).item()`), so any run with `log=True`
    crashed with an AttributeError. Validation losses are now also
    collected as floats (via `.item()`), matching the training losses.
    """
    def __init__(self, checkpoint_dir = None):
        # Build the checkpoint directory.
        if checkpoint_dir is None:
            checkpoint_dir = os.path.join(
                recursive_dirname(__file__, 4), 'checkpoints')
        self._checkpoint_dir = checkpoint_dir
        # Maps saved checkpoint path -> its validation loss, used to prune
        # worse checkpoints when `save_all` is False.
        self._saved_checkpoints = dict()
    def fit(self,
            model,
            train_ds,
            val_ds,
            epochs = 50,
            log = False,
            **kwargs):
        """Trains the model on the provided data loaders.

        Args:
            model: The `nn.Module` to train.
            train_ds: The training data loader.
            val_ds: The validation data loader.
            epochs: The number of epochs to train for.
            log: Whether to append per-epoch metrics to a log file.
            **kwargs: Must contain `dataset` (name used for the checkpoint
                subdirectory); may contain `save_all` (keep every
                checkpoint instead of only the best ones).
        """
        # Set up the checkpoint tracking.
        save_all = kwargs.pop('save_all', False)
        self._checkpoint_dir = os.path.join(
            self._checkpoint_dir, kwargs['dataset'])
        os.makedirs(self._checkpoint_dir, exist_ok = True)
        if log:
            log_file = os.path.join(self._checkpoint_dir, 'log.txt')
            open(log_file, 'w').close()  # truncate any previous log
        # Determine if a GPU exists.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model = model.to(device)
        # Create the optimizer and loss.
        optimizer = torch.optim.Adam(model.parameters())
        criterion = nn.CrossEntropyLoss()
        # Initialize training state variables.
        print(f"Training EfficientNetB4 on '{kwargs['dataset']}': "
              f"{epochs} epochs, writing checkpoints to {self._checkpoint_dir}.")
        model.train()
        # Start the training loop.
        for epoch in range(epochs):
            # Create epoch-state variables.
            train_loss, val_loss, acc, val_acc = [], [], [], []
            # Iterate through the training data loader.
            model.train()
            for (images, labels) in tqdm(
                    train_ds, desc = f"Epoch {epoch + 1}/{epochs}", file = sys.stdout):
                # Move the data to the correct device.
                images = images.to(device)
                labels = labels.to(device)
                # Train the model.
                optimizer.zero_grad()
                with torch.set_grad_enabled(True):
                    out = model(images)
                    loss = criterion(out, labels.float())
                    train_loss.append(loss.item())
                    # Backprop and update weights.
                    loss.backward()
                    optimizer.step()
                # Compute accuracy (labels are one-hot; argmax recovers indices).
                label_logits = torch.argmax(labels, 1)
                acc.append(accuracy(out, label_logits))
            # Iterate through the validation data loader.
            model.eval()
            for (images, labels) in tqdm(val_ds, desc = "Validating"):
                # Move the data to the correct device.
                images = images.to(device)
                labels = labels.to(device)
                # Calculate the validation metrics.
                with torch.no_grad():
                    out = model(images)
                    loss = criterion(out, labels.float())
                    # Store a float (not a tensor), consistent with `train_loss`.
                    val_loss.append(loss.item())
                    # Compute accuracy.
                    label_logits = torch.argmax(labels, 1)
                    val_acc.append(accuracy(out, label_logits))
            # Print out metrics. Note: `final_val_loss` is the loss of the
            # *last* validation batch (also used in the checkpoint name).
            final_loss = train_loss[-1]
            train_loss = torch.mean(torch.tensor(train_loss)).item()
            final_val_loss = val_loss[-1]
            final_acc = (sum(acc) / len(acc)).item()
            final_val_acc = (sum(val_acc) / len(val_acc)).item()
            print(f"\nAverage Loss: {train_loss:.4f}, "
                  f"Average Accuracy: {final_acc:.4f}, ",
                  f"Epoch Loss: {final_loss:.4f}, "
                  f"Validation Accuracy: {final_val_acc:.4f}, "
                  f"Validation Loss: {final_val_loss:.4f}")
            # Save info to log file.
            if log:
                with open(log_file, 'a') as f: # noqa
                    # Fix: `train_loss` is a float here, not a dict/tensor —
                    # the original called `.items()` on it and crashed.
                    f.write(f"Epoch {epoch}, "
                            f"Average Loss: {train_loss:.4f}, "
                            f"Epoch Loss: {final_loss:.4f}, "
                            f"Validation Loss: {final_val_loss:.4f}\n")
            # Save the checkpoint.
            save_path = os.path.join(
                self._checkpoint_dir,
                f"epoch_{epoch}_loss_{final_val_loss:.3f}.pth")
            torch.save(model.state_dict(), save_path)
            if not save_all:
                # Prune previously-saved checkpoints with a worse loss.
                for path, loss in self._saved_checkpoints.items():
                    if loss > final_val_loss:
                        if os.path.exists(path):
                            os.remove(path)
                self._saved_checkpoints[save_path] = final_val_loss
def accuracy(output, target):
    """Computes top-1 accuracy (as a percentage) of `output` vs. `target`.

    Args:
        output: The (N, num_classes) prediction logits.
        target: The (N,) ground-truth class indices.

    Returns:
        A one-element tensor holding the accuracy percentage.
    """
    with torch.no_grad():
        n = target.size(0)
        # Top-1 predicted class per sample, transposed to (1, N).
        predictions = torch.topk(output, 1, 1).indices.t()
        hits = predictions.eq(target.view(1, -1).expand_as(predictions))
        top1 = hits[:1].reshape(-1).float().sum(0, keepdim = True)
        return top1.mul_(100.0 / n)
def execute():
    """Parses command-line arguments and runs a full training session.

    Expects `--dataset` (AgML dataset name), optional `--save_all` to keep
    every checkpoint, and `--checkpoint_dir` for the output location.
    """
    # Parse command line arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, help = "The name of the dataset.")
    ap.add_argument(
        '--save_all', action = 'store_true', default = False, help = 'Save all checkpoints.')
    ap.add_argument(
        '--checkpoint_dir', type = str, default = '/data2/amnjoshi/checkpoints',
        help = "The checkpoint directory to save to.")
    args = ap.parse_args()
    # Execute the program.
    train, val, test = build_loaders(args.dataset)
    # The output layer size comes from the dataset's class count.
    net = EfficientNetB4Transfer(agml.data.source(args.dataset).num_classes)
    # NOTE: the test split is built but not used here.
    Trainer(checkpoint_dir = args.checkpoint_dir).fit(
        net, train_ds = train, val_ds = val,
        dataset = args.dataset, save_all = args.save_all
    )

if __name__ == '__main__':
    execute()
| 8,586 | 36.497817 | 93 | py |
AgML | AgML-main/experiments/benchmarking/segmentation_lightning.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import LearningRateMonitor
from torchmetrics import IoU
from torchvision.models.segmentation import deeplabv3_resnet50
import agml
import wandb
import albumentations as A
from tools import gpus, checkpoint_dir, MetricLogger
class DeepLabV3Transfer(nn.Module):
    """Represents a transfer learning DeepLabV3 model.

    This is the base benchmarking model for semantic segmentation,
    using the DeepLabV3 model with a ResNet50 backbone.

    NOTE(review): torchvision's `deeplabv3_resnet50` is called with both
    `pretrained` and a custom `num_classes`; depending on the torchvision
    version this combination may be rejected — confirm it loads.
    """
    def __init__(self, num_classes, pretrained = True, freeze_backbone = True):
        super(DeepLabV3Transfer, self).__init__()
        self.base = deeplabv3_resnet50(
            pretrained = pretrained,
            num_classes = num_classes
        )
        # Optionally freeze the ResNet50 backbone so only the
        # segmentation head is trained.
        if freeze_backbone:
            for parameter in self.base.backbone.parameters():
                parameter.requires_grad = False
    def forward(self, x, **kwargs):  # noqa
        # The torchvision model returns a dict; 'out' holds the main logits.
        return self.base(x)['out']
def dice_loss(y_pred, y):
    """Computes the soft Dice loss between logits `y_pred` and mask `y`.

    Supports both multi-class masks of shape (N, C, H, W) and binary
    masks of shape (N, H, W). Predictions are passed through a sigmoid,
    both tensors are flattened per-sample, and 1 - mean Dice coefficient
    is returned (1e-6 smoothing in numerator and denominator).

    Fix vs. original: the mask rank is now detected explicitly from
    `y.dim()` instead of a bare `try/except` around a tuple unpack, which
    silently swallowed *any* error (including genuine shape bugs).
    """
    y = y.float()
    if y.dim() == 4:  # Multi-class segmentation: (N, C, H, W)
        c, h, w = y.shape[1:]
    else:  # Binary segmentation: (N, H, W)
        h, w = y.shape[1:]
        c = 1
    y_pred = torch.sigmoid(y_pred)
    pred_flat = torch.reshape(y_pred, [-1, c * h * w])
    y_flat = torch.reshape(y, [-1, c * h * w])
    intersection = 2.0 * torch.sum(pred_flat * y_flat, dim = 1) + 1e-6
    denominator = torch.sum(pred_flat, dim = 1) + torch.sum(y_flat, dim = 1) + 1e-6
    return 1. - torch.mean(intersection / denominator)
def dice_metric(y_pred, y):
    """Dice coefficient 2|A.B| / (|A| + |B|); 1.0 when both masks are empty."""
    overlap = (y_pred * y).sum() * 2.0
    total = y_pred.sum() + y.sum()
    if total == 0.0:
        return 1.0
    return overlap / total
class SegmentationBenchmark(pl.LightningModule):
    """Represents a semantic segmentation benchmark model.

    Wraps a `DeepLabV3Transfer` network and adds train/val/test steps
    with a BCE loss (binary) or Dice loss (multi-class), plus IoU metrics.
    """
    def __init__(self, dataset, pretrained = False,
                 save_dir = None, freeze_backbone = False):
        # Initialize the module.
        super(SegmentationBenchmark, self).__init__()
        # Construct the network.
        self._source = agml.data.source(dataset)
        self._pretrained = pretrained
        self.net = DeepLabV3Transfer(
            self._source.num_classes,
            self._pretrained,
            freeze_backbone
        )
        # Construct the loss for training: BCE-with-logits for binary
        # segmentation, soft Dice loss otherwise.
        if self._source.num_classes == 1:
            self.loss = nn.BCEWithLogitsLoss()
        else:
            self.loss = dice_loss
        self.num_classes = self._source.num_classes
        # Construct the IoU metric (+1 accounts for the background class).
        self.iou = IoU(self._source.num_classes + 1)
        # Add a metric calculator.
        # NOTE(review): `self._version` is read here but is not set in this
        # class — presumably supplied by the LightningModule/trainer; verify
        # it exists before `__init__` runs, otherwise this raises.
        if save_dir is not None:
            self.metric_logger = SegmentationMetricLogger({
                'iou': IoU(self._source.num_classes + 1)},
                os.path.join(save_dir, f'logs-{self._version}.csv'))
        # Used to skip metric logging during Lightning's sanity-check pass.
        self._sanity_check_passed = False
    def forward(self, x):
        return self.net.forward(x)
    def calculate_loss(self, y_pred, y):
        # Dice loss casts internally; BCE needs float targets, the
        # multi-class path integer (long) targets.
        if self._source.num_classes != 1:
            return self.loss(y_pred, y.long())
        return self.loss(y_pred, y.float())
    def training_step(self, batch, *args, **kwargs):  # noqa
        x, y = batch
        y_pred = self(x).float().squeeze()
        loss = self.calculate_loss(y_pred, y)
        iou = self.iou(y_pred, y.int())
        self.log('loss', loss.item(), prog_bar = True, logger = True, on_step = True, on_epoch = True)
        self.log('iou', iou.item(), prog_bar = True, logger = True, on_step = True, on_epoch = True)
        return {
            'loss': loss,
        }
    def validation_step(self, batch, *args, **kwargs):  # noqa
        x, y = batch
        y_pred = self(x).float().squeeze()
        val_loss = self.calculate_loss(y_pred, y)
        self.log('val_loss', val_loss.item(), prog_bar = True, logger = True, on_step = True, on_epoch = True)
        val_iou = self.iou(y_pred, y.int())
        # Only feed the CSV metric logger after the sanity check has run.
        if self._sanity_check_passed and hasattr(self, 'metric_logger'):
            self.metric_logger.update_metrics(y_pred, y.int())
        self.log('val_iou', val_iou.item(), prog_bar = True, logger = True, on_step = True, on_epoch = True)
        # The sample image/mask pair is returned for optional visualization.
        return {
            'val_loss': val_loss,
            'image_sample': x[0].cpu().detach(),
            'segmentation_sample': y_pred[0].cpu().detach()
        }
    # def validation_epoch_end(self, outputs):
    #     image_sample = outputs[0]['image_sample']
    #     segmentation_sample = outputs[0]['segmentation_sample']
    #     out = torch.sigmoid(segmentation_sample)
    #     print(out)
    #     if self.num_classes == 1:
    #         out[out >= 0.2] = 1
    #         out[out != 1] = 0
    #     else:
    #         out = torch.argmax(out, 1)
    #     result = agml.viz.overlay_segmentation_masks(image_sample, out)
    #     wandb.log(wandb.Image(result), caption = 'Segmentation Result')
    @torch.no_grad()
    def predict(self, inp):
        # Inference helper: forward pass + sigmoid, no gradients.
        return torch.sigmoid(self(inp))
    def configure_optimizers(self):
        opt = torch.optim.AdamW(self.net.parameters(), lr = 0.0005)
        # Decay the learning rate by 25% every 5 epochs.
        return {
            'optimizer': opt,
            'lr_scheduler': torch.optim.lr_scheduler.StepLR(opt, step_size = 5, gamma = 0.75)
        }
    def test_step(self, batch, *args, **kwargs):
        x, y = batch
        y_pred = self(x).float().squeeze()
        loss = self.calculate_loss(y_pred, y)
        self.log('test_loss', loss.item(), logger = True)
        test_iou = self.iou(y_pred, y.int())
        self.log('test_iou', test_iou.item(), logger = True)
    def get_progress_bar_dict(self):
        # Drop the version number from the progress bar display.
        tqdm_dict = super(SegmentationBenchmark, self).get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict
    def on_validation_epoch_end(self) -> None:
        # The first call corresponds to Lightning's sanity check; skip it.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        if hasattr(self, 'metric_logger'):
            self.metric_logger.compile_epoch()
    def on_fit_end(self) -> None:
        if hasattr(self, 'metric_logger'):
            self.metric_logger.save()
# Calculate and log the metrics.
class SegmentationMetricLogger(MetricLogger):
    """Metric logger that feeds (prediction, target) pairs to every tracked metric."""
    def update_metrics(self, y_pred, y_true) -> None:
        predictions, targets = y_pred.cpu(), y_true.cpu()
        for tracker in self.metrics.values():
            tracker.update(predictions, targets)
# Build the data loaders.
def build_loaders(name):
    """Builds the train/val/test loaders for a segmentation dataset.

    Splits 80/10/10 with a fixed seed, resizes to (512, 512), applies
    imagenet normalization, and converts masks to channel-wise format.
    Returns (train DataLoader, val DataLoader, test dataset).
    """
    # Fix the seed so the split is reproducible across runs.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(name)
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    loader.batch(batch_size = 8)
    loader.resize_images((512, 512))
    loader.normalize_images('imagenet')
    loader.mask_to_channel_basis()
    # Random 90-degree rotations only on the training split.
    train_data = loader.train_data
    train_data.transform(transform = A.RandomRotate90())
    train_ds = train_data.copy().as_torch_dataset()
    train_loader = train_ds.export_torch(num_workers = 12, collate_fn = None)
    val_ds = loader.val_data.as_torch_dataset()
    val_ds.shuffle_data = False
    val_loader = val_ds.export_torch(num_workers = 12, collate_fn = None)
    # The test split is returned as a dataset (batch size 2, eval mode),
    # not a DataLoader.
    test_ds = loader.test_data.as_torch_dataset()
    test_ds.batch(batch_size = 2)
    test_ds.eval()
    return train_loader, val_loader, test_ds
def train(dataset, pretrained, epochs, save_dir = None,
          freeze_backbone = False, overwrite = None):
    """Constructs the training loop and trains a model.

    Args:
        dataset: The AgML segmentation dataset name.
        pretrained: Whether to use pretrained backbone weights.
        epochs: The number of training epochs.
        save_dir: The checkpoint output directory (resolved via
            `checkpoint_dir`).
        freeze_backbone: Whether to freeze the ResNet50 backbone.
        overwrite: If falsy, skips datasets that already have >= 4
            files in their checkpoint directory.
    """
    save_dir = os.path.dirname(checkpoint_dir(save_dir, dataset))
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Construct the model.
    model = SegmentationBenchmark(
        dataset = dataset, pretrained = pretrained,
        save_dir = save_dir, freeze_backbone = freeze_backbone)
    # Construct the data loaders.
    train_ds, val_ds, test_ds = build_loaders(dataset)
    # Create the loggers.
    loggers = [
        WandbLogger(project = 'segmentation-benchmarking',
                    name = dataset, save_dir = save_dir)
    ]
    # Create the trainer and train the model.
    msg = f"Training dataset {dataset}!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(),
        logger = loggers, log_every_n_steps = 2,
        callbacks = LearningRateMonitor('epoch'))
    trainer.fit(
        model = model,
        train_dataloaders = train_ds,
        val_dataloaders = val_ds)
    # Save the final state.
    torch.save(model.state_dict(), os.path.join(save_dir, 'final_model.pth'))
    # Run on the test set.
    trainer.test(dataloaders = test_ds)
if __name__ == '__main__':
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--regenerate-existing', action = 'store_true',
        default = False, help = "Whether to re-generate existing benchmarks.")
    ap.add_argument(
        '--pretrained', action = 'store_true',
        default = False, help = "Whether to load a pretrained model.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 20,
        help = "How many epochs to train for. Default is 20.")
    ap.add_argument(
        '--freeze-backbone', action = 'store_true',
        default = False, help = "Whether to freeze backbone weights.")
    args = ap.parse_args()
    # Train the model.
    # NOTE(review): this first branch does not forward
    # `overwrite = args.regenerate_existing` (unlike the loop below), so
    # `--regenerate-existing` is ignored for a single known dataset —
    # confirm whether that is intended.
    if args.dataset[0] in agml.data.public_data_sources(ml_task = 'semantic_segmentation'):
        train(args.dataset[0],
              args.pretrained,
              epochs = args.epochs,
              save_dir = args.checkpoint_dir,
              freeze_backbone = args.freeze_backbone)
    else:
        # 'all' expands to every public semantic segmentation dataset;
        # otherwise train each dataset named on the command line.
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'semantic_segmentation')]
        else:
            datasets = args.dataset
        for ds in datasets:
            train(ds,
                  args.pretrained,
                  epochs = args.epochs,
                  save_dir = args.checkpoint_dir,
                  freeze_backbone = args.freeze_backbone,
                  overwrite = args.regenerate_existing)
| 11,264 | 34.424528 | 110 | py |
AgML | AgML-main/experiments/benchmarking/classification_lightning.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from torchmetrics.classification import Precision, Recall, Accuracy
from torchvision.models import efficientnet_b4
import agml
import albumentations as A
from tools import gpus, checkpoint_dir, MetricLogger
class EfficientNetB4Transfer(nn.Module):
    """EfficientNetB4 backbone with a small classification head.

    The torchvision EfficientNetB4 (optionally ImageNet-pretrained) produces
    a 1000-dim output which is passed through a two-layer head
    (1000 -> 256 -> num_classes) with ReLU and dropout in between. This is
    the base benchmarking model for image classification.
    """

    def __init__(self, num_classes, pretrained = True):
        super(EfficientNetB4Transfer, self).__init__()
        # Attribute names are kept stable so existing state dicts still load.
        self.base = efficientnet_b4(pretrained = pretrained)
        self.l1 = nn.Linear(1000, 256)
        self.dropout = nn.Dropout(0.1)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(256, num_classes)

    def forward(self, x, **kwargs):  # noqa
        # Backbone features, flattened per-sample.
        features = self.base(x)
        features = features.view(features.size(0), -1)
        # Hidden layer with ReLU activation followed by dropout.
        hidden = self.dropout(self.relu(self.l1(features)))
        # Final class logits.
        return self.l2(hidden)
class ClassificationBenchmark(pl.LightningModule):
    """Represents an image classification benchmark model.

    Wraps an `EfficientNetB4Transfer` network in a LightningModule with a
    cross-entropy loss, per-step accuracy logging, and (when `save_dir` is
    given) a CSV metric logger that accumulates accuracy/precision/recall
    per validation epoch.
    """

    def __init__(self, dataset, pretrained = False, save_dir = None):
        # Initialize the module.
        super(ClassificationBenchmark, self).__init__()
        # Construct the network; class count comes from the AgML source info.
        self._source = agml.data.source(dataset)
        self._pretrained = pretrained
        self.net = EfficientNetB4Transfer(
            self._source.num_classes,
            self._pretrained
        )
        # Construct the loss for training.
        self.loss = nn.CrossEntropyLoss()
        # Add a metric calculator.
        # NOTE(review): `self._version` is not defined anywhere in this class;
        # confirm the base LightningModule provides it, otherwise this raises
        # AttributeError whenever `save_dir` is given.
        if save_dir is not None:
            self.metric_logger = ClassificationMetricLogger({
                'accuracy': Accuracy(num_classes = self._source.num_classes),
                'precision': Precision(num_classes = self._source.num_classes),
                'recall': Recall(num_classes = self._source.num_classes)},
                os.path.join(save_dir, f'logs-{self._version}.csv'))
        # Tracks whether Lightning's sanity-check validation pass has run;
        # external metrics are only accumulated after it completes.
        self._sanity_check_passed = False

    def forward(self, x):
        return self.net.forward(x)

    def training_step(self, batch, *args, **kwargs): # noqa
        # One supervised step: forward pass, loss, and top-1 accuracy.
        x, y = batch
        y_pred = self(x)
        loss = self.loss(y_pred, y)
        # Labels are one-hot encoded (see `build_loaders`), so argmax
        # recovers the integer class indices expected by `accuracy`.
        acc = accuracy(y_pred, torch.argmax(y, 1)).item()
        self.log('accuracy', acc, prog_bar = True, logger = True)
        self.log('loss', loss, logger = True)
        return {
            'loss': loss,
            'accuracy': acc
        }

    def validation_step(self, batch, *args, **kwargs): # noqa
        x, y = batch
        y_pred = self(x)
        val_loss = self.loss(y_pred, y)
        val_acc = accuracy(y_pred, torch.argmax(y, 1))
        # Only feed the external metric logger once the sanity check is done.
        if self._sanity_check_passed and hasattr(self, 'metric_logger'):
            self.metric_logger.update(y_pred, torch.argmax(y, 1))
        self.log('val_loss', val_loss.item(), prog_bar = True, logger = True)
        self.log('val_accuracy', val_acc.item(), prog_bar = True, logger = True)
        return {
            'val_loss': val_loss,
            'val_accuracy': val_acc
        }

    def configure_optimizers(self):
        # Adam with default hyperparameters over all model parameters.
        return torch.optim.Adam(self.parameters())

    def get_progress_bar_dict(self):
        # Drop the version number from the progress bar readout.
        tqdm_dict = super(ClassificationBenchmark, self)\
            .get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict

    def on_validation_epoch_end(self) -> None:
        # The first validation epoch end is the sanity check; mark it and
        # skip metric compilation for that pass.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        if hasattr(self, 'metric_logger'):
            self.metric_logger.compile_epoch()

    def on_fit_end(self) -> None:
        # Persist the accumulated metrics when training finishes.
        if hasattr(self, 'metric_logger'):
            self.metric_logger.save()
# Calculate and log the metrics.
class ClassificationMetricLogger(MetricLogger):
    """Metric logger that feeds predictions and targets to each torchmetric."""

    def update_metrics(self, y_pred, y_true) -> None:
        # Move tensors to the CPU once, then update every tracked metric.
        preds, targets = y_pred.cpu(), y_true.cpu()
        for metric in self.metrics.values():
            metric.update(preds, targets)
def accuracy(output, target):
    """Computes the top-1 accuracy (in percent) between `output` and `target`.

    Args:
        output: Logit/score tensor of shape (batch, num_classes).
        target: Integer class-index tensor of shape (batch,).

    Returns:
        A 1-element float tensor holding the batch accuracy as a percentage.
    """
    with torch.no_grad():
        num_samples = target.size(0)
        # Predicted class is the index of the highest score per sample.
        predictions = output.argmax(dim = 1)
        hits = predictions.eq(target).float()
        # Keep a 1-element tensor (matching `sum(..., keepdim=True)`).
        return hits.sum(0, keepdim = True).mul_(100.0 / num_samples)
# Build the data loaders.
def build_loaders(name):
    """Creates train/val/test torch datasets for a classification dataset.

    The loader is seeded for reproducibility, split 80/10/10, batched at 16,
    resized and normalized for ImageNet-style backbones, and one-hot encoded;
    the training split additionally gets a random-rotation augmentation.
    """
    pl.seed_everything(2499751)
    data = agml.data.AgMLDataLoader(name)
    data.split(train = 0.8, val = 0.1, test = 0.1)
    data.batch(batch_size = 16)
    data.resize_images('imagenet')
    data.normalize_images('imagenet')
    data.labels_to_one_hot()
    # Only the training split is augmented.
    training_split = data.train_data
    training_split.transform(transform = A.RandomRotate90())
    train_ds = training_split.copy().as_torch_dataset()
    val_ds = data.val_data.as_torch_dataset()
    val_ds.shuffle_data = False
    test_ds = data.test_data.as_torch_dataset()
    return train_ds, val_ds, test_ds
def train(dataset, pretrained, epochs, save_dir = None, overwrite = None):
    """Constructs the training loop and trains a model.

    Args:
        dataset: Name of the AgML classification dataset to train on.
        pretrained: Whether to start from ImageNet-pretrained weights.
        epochs: Number of epochs to train for.
        save_dir: Optional checkpoint directory (resolved by `checkpoint_dir`).
        overwrite: If falsy and checkpoints already exist, skip training.
    """
    save_dir = checkpoint_dir(save_dir, dataset)
    log_dir = save_dir.replace('checkpoints', 'logs')
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Set up the checkpoint saving callback. The monitored quantity is
    # `val_accuracy`, so the mode must be 'max' (save the highest-accuracy
    # epochs); the previous 'min' kept the *worst* checkpoints.
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'max',
            filename = f"{dataset}" + "-epoch{epoch:02d}-val_accuracy_{val_accuracy:.2f}",
            monitor = 'val_accuracy',
            save_top_k = 2,
            auto_insert_metric_name = False
        ),
    ]
    # Construct the model.
    model = ClassificationBenchmark(
        dataset = dataset, pretrained = pretrained, save_dir = save_dir)
    # Construct the data loaders.
    train_ds, val_ds, test_ds = build_loaders(dataset)
    # Create the loggers.
    loggers = [
        CSVLogger(log_dir),
        TensorBoardLogger(log_dir)
    ]
    # Create the trainer and train the model.
    msg = f"Training dataset {dataset}!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(None),
        callbacks = callbacks, logger = loggers,
        log_every_n_steps = 5)
    trainer.fit(
        model = model,
        train_dataloaders = train_ds,
        val_dataloaders = val_ds,
    )
    # Save the final state.
    torch.save(model.state_dict(), os.path.join(save_dir, 'final_model.pth'))
if __name__ == '__main__':
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--regenerate-existing', action = 'store_true',
        default = False, help = "Whether to re-generate existing benchmarks.")
    ap.add_argument(
        '--not-pretrained', action = 'store_false',
        default = True, help = "Whether to load a pretrained model.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 20,
        help = "How many epochs to train for. Default is 20.")
    args = ap.parse_args()
    # Train the model. `train` (and `agml.data.source` inside the benchmark
    # model) expects a single dataset *name*, so pass the first entry rather
    # than the whole `nargs='+'` list (which previously flowed through as-is).
    if args.dataset[0] in agml.data.public_data_sources(ml_task = 'image_classification'):
        train(args.dataset[0],
              args.not_pretrained,
              epochs = args.epochs,
              save_dir = args.checkpoint_dir)
    else:
        # Either expand 'all' to every classification dataset, or use the
        # explicitly provided list, and train each one in turn.
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'image_classification')]
        else:
            datasets = args.dataset
        for dataset in datasets:
            train(dataset,
                  args.not_pretrained,
                  epochs = args.epochs,
                  save_dir = args.checkpoint_dir,
                  overwrite = args.regenerate_existing)
| 9,116 | 33.665399 | 90 | py |
AgML | AgML-main/experiments/benchmarking/detection_lightning.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some of the training code in this file is adapted from the following sources:
1. https://github.com/rwightman/efficientdet-pytorch
2. https://gist.github.com/Chris-hughes10/73628b1d8d6fc7d359b3dcbbbb8869d7
"""
import os
import argparse
import torch
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import LearningRateMonitor
import agml
from tools import gpus, checkpoint_dir
from detection_learning import (
AgMLDatasetAdaptor, EfficientDetDataModule, EfficientDetModel
)
def train(dataset, epochs, save_dir = None,
          overwrite = None, pretrained_path = None):
    """Constructs the training loop and trains a model.

    Args:
        dataset: The AgML detection dataset (or list of datasets) to train on.
        epochs: Number of epochs to train for.
        save_dir: Optional checkpoint directory (resolved by `checkpoint_dir`).
        overwrite: If falsy and checkpoints already exist, skip training.
        pretrained_path: Passed through to `EfficientDetModel` as `pretrained`.
    """
    save_dir = os.path.dirname(checkpoint_dir(save_dir, dataset))
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Set up the checkpoint saving callback.
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'min',
            filename = f"{dataset}" + "-epoch{epoch:02d}-valid_loss_{valid_loss:.2f}",
            monitor = 'valid_loss',
            save_top_k = 3,
            auto_insert_metric_name = False
        )
    ]
    # Create the loggers.
    # NOTE(review): `args` is the module-level namespace parsed in __main__;
    # this function cannot be imported and called standalone without it.
    loggers = [
        WandbLogger(project = 'detection-experiments', name = args.name,
                    save_dir = save_dir)
    ]
    # Construct the data.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(dataset)
    loader.shuffle()
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    dm = EfficientDetDataModule(
        train_dataset_adaptor = AgMLDatasetAdaptor(loader.train_data),
        validation_dataset_adaptor = AgMLDatasetAdaptor(loader.val_data),
        num_workers = 12, batch_size = 4)
    # Construct the model.
    model = EfficientDetModel(
        num_classes = loader.num_classes,
        architecture = 'tf_efficientdet_d4',
        pretrained = pretrained_path,
        validation_dataset_adaptor = loader.val_data)
    # Create the trainer and train the model.
    msg = f"Training dataset {dataset}!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    # Fix: the checkpoint callback above was constructed but never handed to
    # the Trainer, so no intermediate checkpoints were ever written. Pass it
    # via `callbacks` (mirroring the classification training script).
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(None),
        callbacks = callbacks, logger = loggers)
    trainer.fit(model, dm)
    # Save the final state.
    torch.save(model.state_dict(), os.path.join(save_dir, 'final_model.pth'))
def train_per_class(dataset, epochs, save_dir = None, overwrite = None):
    """Constructs the training loop and trains a model.

    Trains one single-class EfficientDet model per class in `dataset`, with
    checkpoints, wandb logs, and a final state dict saved under a per-class
    subdirectory, followed by a test pass on each trained model.
    """
    save_dir = checkpoint_dir(save_dir, dataset)
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Construct the loader.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(dataset)
    loader.shuffle()
    # Create the loop for each class.
    for cl in range(agml.data.source(dataset).num_classes):
        # Create the data module with the new, reduced class.
        # Class IDs are 1-indexed in the annotations, hence the +1.
        cls = cl + 1
        new_loader = loader.take_class(cls)
        new_loader.split(train = 0.8, val = 0.1, test = 0.1)
        dm = EfficientDetDataModule(
            train_dataset_adaptor = AgMLDatasetAdaptor(
                new_loader.train_data, adapt_class = True),
            validation_dataset_adaptor = AgMLDatasetAdaptor(
                new_loader.val_data, adapt_class = True),
            test_dataset_adaptor = AgMLDatasetAdaptor(
                new_loader.test_data, adapt_class = True),
            num_workers = 12, batch_size = 4)
        # NOTE(review): `args` is the module-level namespace from __main__,
        # and `args.name` defaults to the full dataset *list*; confirm that
        # embedding its repr in the run name is intended.
        name = f'{loader.num_to_class[cls]}-{cls}' + f'-{args.name}'
        this_save_dir = os.path.join(save_dir, name)
        os.makedirs(this_save_dir, exist_ok = True)
        # Create the loggers.
        loggers = [
            WandbLogger(project = 'detection-experiments',
                        save_dir = this_save_dir,
                        name = name)
        ]
        # Construct the model (a single foreground class per run).
        model = EfficientDetModel(
            num_classes = 1,
            architecture = 'tf_efficientdet_d4',
            pretrained = True,
            validation_dataset_adaptor = new_loader.val_data,
            test_dataset_adaptor = new_loader.test_data)
        # model.load_state_dict(
        #     torch.load('/data2/amnjoshi/detection-models/amg.pth',
        #                map_location = 'cpu'))
        # Create the trainer and train the model.
        msg = f"Training dataset {dataset} for class {cls}: {loader.num_to_class[cls]}!"
        print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
        trainer = pl.Trainer(
            max_epochs = epochs, gpus = gpus(None), logger = loggers,
            callbacks = LearningRateMonitor('step'))
        trainer.fit(model, dm)
        # Save the final state.
        torch.save(model.state_dict(), os.path.join(this_save_dir, 'final_model.pth'))
        # Test the loader.
        trainer.test(datamodule = dm)
if __name__ == '__main__':
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--name', type = str, help = "The name of the run.", default = None)
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--regenerate-existing', action = 'store_true',
        default = False, help = "Whether to re-generate existing benchmarks.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 50,
        help = "How many epochs to train for. Default is 50.")
    ap.add_argument(
        '--per-class-for-dataset', action = 'store_true',
        default = False, help = "Whether to generate benchmarks per class.")
    ap.add_argument(
        '--pretrained-model-path', type = str, default = None,
        help = "The path to a set of pretrained weights for the model.")
    ap.add_argument(
        '--pretrained-num-classes', type = str, default = None,
        help = "The number of classes in the pretrained model.")
    args = ap.parse_args()
    # NOTE(review): `--dataset` is nargs='+', so this default makes
    # `args.name` a *list*; downstream f-strings will embed its repr.
    if args.name is None:
        args.name = args.dataset
    # Train the model.
    if args.per_class_for_dataset:
        # One single-class model per class of the first dataset.
        train_per_class(args.dataset[0],
                        epochs = args.epochs,
                        save_dir = args.checkpoint_dir)
    elif args.dataset[0] in agml.data.public_data_sources(ml_task = 'object_detection') \
            and len(args.dataset) > 1:
        # Multiple datasets given: train one combined model over all of them.
        train(args.dataset,
              epochs = args.epochs,
              save_dir = args.checkpoint_dir,
              pretrained_path = (args.pretrained_model_path,
                                 args.pretrained_num_classes))
    else:
        # Train each dataset individually ('all' expands to every object
        # detection dataset in AgML).
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'object_detection')]
        else:
            datasets = args.dataset
        for ds in datasets:
            train(ds,
                  epochs = args.epochs,
                  save_dir = args.checkpoint_dir,
                  overwrite = args.regenerate_existing,
                  pretrained_path = (args.pretrained_model_path,
                                     args.pretrained_num_classes))
| 8,378 | 36.914027 | 89 | py |
AgML | AgML-main/experiments/benchmarking/finetune_evaluation.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
import torch
import pytorch_lightning as pl
from detection_lightning import (
EfficientDetModel, EfficientDetDataModule, AgMLDatasetAdaptor
)
from mean_average_precision_torch import MeanAveragePrecision
from pytorch_lightning.loggers import TensorBoardLogger
import agml
from tools import gpus, checkpoint_dir
from tqdm import tqdm
# Number of epochs used when finetuning on each image-quantity split.
FINETUNE_EPOCHS = 5
# Classes of `fruit_detection_worldwide` evaluated in this experiment.
EVAL_CLASSES = ['orange', 'apple', 'mango', 'capsicum']
# Finetuning pool sizes (image counts); each pool is drawn from the
# next-larger one in `generate_splits`.
EVAL_QUANTITIES = [6, 12, 14, 15, 16, 18, 20, 21, 23, 24, 30, 36, 42]
print("Eval splits:", EVAL_QUANTITIES)
# Path to the pretrained EfficientDet weights and the output directory.
PRETRAINED_PATH = '/data2/amnjoshi/amg/checkpoints/model_state.pth'
BASE = '/data2/amnjoshi/finetune'
def generate_splits():
    """Builds per-class finetuning pools and a held-out test split.

    For each class in EVAL_CLASSES, 42 + 15 random images are drawn: 15 are
    reserved purely for mAP testing and the remaining 42 form the largest
    finetuning pool. Each smaller quantity in EVAL_QUANTITIES is sampled
    from the next-larger pool, so smaller splits are strict subsets of
    larger ones (isolating the effect of *adding* images).

    Returns:
        A dict mapping class name -> {'test': loader, quantity: loader, ...}.
    """
    # Generate the base loader.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader('fruit_detection_worldwide')
    # Create the new loaders for each of the classes.
    cls_quant_loaders = {}
    for cls in EVAL_CLASSES:
        # We set aside a random pool of 36 from which the finetuning images
        # will be selected, and then another pool of 15 which will be used
        # purely for testing the mean average precision.
        # NOTE(review): the comment above says 36 but the code draws 42.
        cls_loader = loader.take_class(cls).take_random(42 + 15)
        cls_loader.split(train = 42, test = 15)
        pool_loader = cls_loader.train_data
        test_loader = cls_loader.test_data
        quant_loaders = {'test': test_loader}
        # Starting from 35, we pool a loader with 35 images. Then we take
        # a pool of 30 images from the 35, then 25 from the 30, and so on
        # so forth, so we can see the effect of "adding" more images for
        # finetuning as opposed to just using new sets of random images.
        for quant in reversed(EVAL_QUANTITIES):
            quant_loaders[quant] = pool_loader = pool_loader.take_random(quant)
        cls_quant_loaders[cls] = quant_loaders
    return cls_quant_loaders
def train(cls, loader, save_dir, epochs = FINETUNE_EPOCHS, overwrite = False):
    """Finetunes the pretrained detection model on one class split.

    Args:
        cls: Name of the class being finetuned (used for logging only).
        loader: An `AgMLDataLoader` already split into train/val data.
        save_dir: Base checkpoint directory (resolved via `checkpoint_dir`).
        epochs: Number of finetuning epochs. Defaults to FINETUNE_EPOCHS so
            callers that omit it (as `train_all` does) no longer crash with
            a TypeError for a missing positional argument.
        overwrite: If falsy and checkpoints already exist, skip training.

    Returns:
        The finetuned `EfficientDetModel`, or None if training was skipped
        because checkpoints already exist.
    """
    dataset = loader.name
    save_dir = checkpoint_dir(save_dir, dataset)
    log_dir = os.path.join(save_dir, 'logs')
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Create the loggers.
    loggers = [
        TensorBoardLogger(log_dir)
    ]
    # Construct the data.
    dm = EfficientDetDataModule(
        train_dataset_adaptor = AgMLDatasetAdaptor(loader.train_data),
        validation_dataset_adaptor = AgMLDatasetAdaptor(loader.val_data),
        num_workers = 12, batch_size = 1)
    # Construct the model and load the pretrained weights.
    model = EfficientDetModel(
        num_classes = loader.num_classes,
        architecture = 'tf_efficientdet_d4',
        validation_dataset_adaptor = loader.val_data)
    model.load_state_dict(torch.load(PRETRAINED_PATH, map_location = 'cpu'))
    # Create the trainer and train the model. The trainer now honors the
    # `epochs` argument (previously it was ignored in favor of the module
    # constant, which made the argument — and the log message — misleading).
    msg = f"Finetuning class {cls} of size {len(loader.train_data)} for {epochs} epochs!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(None), logger = loggers)
    trainer.fit(model, dm)
    # Return the model state.
    return model
def run_evaluation(model, loader) -> dict:
    """Runs evaluation for mAP @ [0.5,0.95].

    Runs inference over every image in `loader`, accumulates detections into
    a mean-average-precision metric, and returns mAP at each IoU threshold
    from 0.5 to 0.95 (step 0.05), plus their mean under 'map@[0.5,0.95]'.
    """
    thresholds = np.linspace(
        0.5, 0.95, int(np.round((0.95 - .5) / .05) + 1))
    # Create the adaptor over the test dataset and the metric tracker.
    dataset = AgMLDatasetAdaptor(loader)
    metric = MeanAveragePrecision(num_classes = loader.num_classes)
    # Accumulate predictions for every image in the test dataset.
    for idx in tqdm(range(len(dataset)), leave = False, desc = "Running mAP Evaluation"):
        image, gt_boxes, gt_labels, _ = dataset.get_image_and_labels_by_idx(idx)
        boxes, labels, conf = model.predict([image])
        metric.update([np.squeeze(boxes), labels, conf], [gt_boxes, gt_labels])
    # Compute the mAP for each threshold, then average them.
    map_values = {}
    for thresh in thresholds:
        key = f'map@{round(float(thresh), 2)}'
        map_values[key] = metric.compute(thresh).detach().cpu().numpy().item()
    map_values['map@[0.5,0.95]'] = np.mean(list(map_values.values()))
    return map_values
def train_all():
    """Runs the full finetuning sweep and evaluates each trained model.

    For every class in EVAL_CLASSES and every quantity in EVAL_QUANTITIES,
    trains a model on the corresponding split, evaluates mAP on the held-out
    test split, and pickles the aggregated results under `BASE`.
    """
    # Get all of the data splits.
    splits = generate_splits()
    # Create a dictionary with all of the results.
    results = {}
    nice_print_results = {}
    # Iterate over each finetuning class and image quantity.
    for cls in EVAL_CLASSES:
        if cls not in results.keys():
            results[cls] = {}
            nice_print_results[cls] = {}
        cls_path = os.path.join(BASE, cls)
        test_loader = splits[cls]['test']
        for quant in EVAL_QUANTITIES:
            quant_path = os.path.join(cls_path, f'n-{quant}')
            os.makedirs(quant_path, exist_ok = True)
            # Get the loader and split it accordingly (roughly 5:1 train/val,
            # padding val to cover any remainder).
            loader = splits[cls][quant]
            train_q, val_q = int(5 * (quant / 6)), quant // 6
            if train_q + val_q < len(loader):
                val_q = len(loader) - train_q
            loader.split(train = train_q, val = val_q)
            # Train the model. `epochs` is a required argument of `train`,
            # so pass the sweep-wide constant explicitly (previously this
            # call omitted it and raised a TypeError).
            try:
                model = train(cls, loader = loader, save_dir = quant_path,
                              epochs = FINETUNE_EPOCHS)
            except KeyboardInterrupt:
                # NOTE(review): this converts a manual interrupt into a
                # ValueError to abort the sweep; confirm this is intended.
                raise ValueError
            # Evaluate the model.
            # NOTE(review): `train` returns None when checkpoints already
            # exist, in which case `model.eval()` raises AttributeError.
            model.eval()
            eval_dict = run_evaluation(model, test_loader)
            results[cls][train_q] = eval_dict
            nice_print_results[cls][train_q] = eval_dict['map@[0.5,0.95]']
            print("\n", eval_dict, "\n")
    # Save all of the results.
    from pprint import pprint
    print("\n\nRESULTS:\n")
    pprint(nice_print_results)
    with open(os.path.join(BASE, 'results.pickle'), 'wb') as f:
        pickle.dump(results, f)
if __name__ == '__main__':
train_all() | 6,788 | 35.304813 | 89 | py |
AgML | AgML-main/experiments/benchmarking/experiment.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
import agml
from experiments.benchmarking.detection_data import (
build_loader, TransformApplier, EfficientDetDataModule
)
from experiments.benchmarking.detection_modeling import (
DetectionTrainingModel
)
from agml.models.training_resources.tools import gpus
class DetectionExperiment(object):
    """Runs an object detection experiment with the input arguments.

    The constructor takes a `parameters` dict and wires together the data
    loader, data module, detection model, loggers, and the Lightning
    `Trainer`; `train()` then runs the fit and saves the final weights.
    Subclasses may override `_parse_loader`, `_parse_augmentations`, or
    `_parse_logger` to run data/augmentation/logging experiments.
    """

    def __init__(self, parameters):
        self._initialize_experiment(parameters)

    def _initialize_experiment(self, params: dict):
        # Get the datasets for which the model is being trained.
        datasets = params['dataset']
        # NOTE(review): this compares name strings against the objects
        # returned by `public_data_sources`; confirm they compare equal
        # to plain strings.
        for dataset in datasets:
            if dataset not in agml.data.public_data_sources(
                    ml_task = 'object_detection'):
                raise ValueError(
                    f"The provided dataset '{dataset}' is not a valid "
                    f"object detection dataset in AgML. Try another one.")
        # Construct the object detection loader.
        self._loader = build_loader(dataset = datasets,
                                    batch_size = params.get('batch_size', 4))
        # If the user wants to generalize detections, generalize.
        if params.get('generalize_detections', False):
            self._loader.generalize_class_detections()
        # Parse the loader for a loader experiment.
        self._parse_loader(params)
        # Construct the checkpoint and log directory; either an explicit
        # `experiment_dir` or a `name` (from which one is derived) must be
        # supplied.
        experiment_name = params.get('name', None)
        experiment_dir_default = params.get('experiment_dir', None)
        if experiment_dir_default is None:
            if experiment_name is None:
                raise ValueError("Expected either the experiment name or save directory.")
            if os.path.exists('/data2'):
                # Lab server layout.
                experiment_dir = os.path.join(
                    '/data2/amnjoshi/experiments', experiment_name)
            else:
                experiment_dir = os.path.join('../training_resources', experiment_name)
        else:
            experiment_dir = experiment_dir_default
        os.makedirs(experiment_dir, exist_ok = True)
        self._experiment_dir = experiment_dir
        if experiment_name is None:
            experiment_name = os.path.basename(experiment_dir)
        # Initialize the model; the class count defaults to the loader's.
        num_classes = params.get('num_classes', None)
        if num_classes is None:
            num_classes = self._loader.num_classes
        self._model = DetectionTrainingModel(
            num_classes = num_classes,
            pretrained_weights = params.get('pretrained_weights', False),
            confidence_threshold = params.get('confidence_threshold', 0.3),
            learning_rate = params.get('learning_rate', 0.0002),
            wbf_iou_threshold = params.get('wbf_iou_threshold', 0.44))
        # Build the loggers.
        loggers = self._parse_logger(experiment_name = experiment_name,
                                     experiment_dir = experiment_dir)
        # Construct the `Trainer` with the model.
        self._trainer = Trainer(logger = loggers, gpus = gpus(None),
                                max_epochs = params.get('epochs', 25))

    def _parse_loader(self, params):
        """Can be overridden by a subclass for data experiments."""
        # Parse augmentations for an augmentations experiment.
        augmentations = self._parse_augmentations(
            params.get('augmentations', None))
        # Construct the data module.
        self._data_module = EfficientDetDataModule(
            loader = self._loader,
            augmentation = augmentations,
            num_workers = params.get('num_workers', 8))
        return self._loader

    def _parse_augmentations(self, augmentations, **kwargs): # noqa
        """Can be overridden by a subclass to run an augmentation experiment."""
        return TransformApplier(augmentations)

    @property
    def train_loader(self):
        """Returns the train `AgMLDataLoader` with the input data."""
        return self._data_module.train_loader

    @property
    def val_loader(self):
        """Returns the val `AgMLDataLoader` with the input data."""
        return self._data_module.val_loader

    def _parse_logger(self, *args, **kwargs):
        """Can be overridden by a subclass to return a configured logger."""
        return [
            WandbLogger(name = kwargs['experiment_name'],
                        save_dir = kwargs['experiment_dir'])
        ]

    def train(self):
        """Train the model."""
        self._trainer.fit(self._model,
                          self._data_module)
        # Persist the final weights alongside the experiment's checkpoints.
        torch.save(self._model.state_dict(),
                   os.path.join(self._experiment_dir, 'final_model.pth'))
| 5,443 | 37.609929 | 90 | py |
AgML | AgML-main/experiments/benchmarking/detection_data.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union, Any
import numpy as np
from PIL import Image
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
import agml
def _pre_prepare_for_efficientdet(image, annotation):
"""Prepares the image and annotation for EfficientDet.
This preparation stage occurs *pre-transformation*.
"""
# Convert the image type.
image = image.astype(np.uint8)
# Clip the bounding boxes to the image shape to prevent errors.
bboxes = np.array(annotation['bbox']).astype(np.int32)
x_min = bboxes[:, 0]
y_min = bboxes[:, 1]
x_max = bboxes[:, 2] + x_min
y_max = bboxes[:, 3] + y_min
x_min, y_min = np.clip(x_min, 0, image.shape[1]), \
np.clip(y_min, 0, image.shape[0])
x_max, y_max = np.clip(x_max, 0, image.shape[1]), \
np.clip(y_max, 0, image.shape[0])
# Reconstruct the boxes and get the class labels.
bboxes = np.dstack((x_min, y_min, x_max, y_max)).squeeze(axis = 0)
class_labels = np.array(annotation['category_id']).squeeze()
# Add an extra dimension to labels for consistency.
if class_labels.ndim == 0:
class_labels = np.expand_dims(class_labels, axis = 0)
# Construct and return the sample.
return image, {'bboxes': bboxes, 'labels': class_labels}
def _post_prepare_for_efficientdet(image, annotation):
"""Prepares the image and annotation for EfficientDet.
This preparation stage occurs *post-transformation*.
"""
bboxes = np.array(annotation['bboxes'])
labels = annotation['labels']
# Convert 1-channel and 4-channel to 3-channel.
if image.shape[0] == 1:
image = torch.tile(image, (3, 1, 1))
if image.shape[0] == 4:
image = image[:3]
# Convert to yxyx from xyxy.
_, new_h, new_w = image.shape
bboxes[:, [0, 1, 2, 3]] = bboxes[:, [1, 0, 3, 2]]
# Create the target from the annotations.
target = {
"bboxes": torch.as_tensor(
bboxes, dtype = torch.float32),
"labels": torch.as_tensor(labels),
"img_size": torch.tensor([new_h, new_w]),
"img_scale": torch.tensor([1.0])}
return image, target
class TransformApplier(object):
    """Applies transforms to the data.

    Wraps user-supplied augmentations (either a single callable or a dict
    with separate 'train'/'val' pipelines) between the EfficientDet pre- and
    post-processing stages. When no augmentations are given, a default
    resize + tensor-conversion pipeline is used.
    """

    def __init__(self,
                 augmentations: Any,
                 image_size: int = 512):
        self._image_size = image_size
        # Fall back to the built-in pipeline when nothing is supplied.
        if augmentations is None:
            augmentations = self._default_augmentation
        self._augmentations = augmentations

    def _default_augmentation(self, image, bboxes, labels):
        """Resizes the image and converts the sample to tensors."""
        pipeline = A.Compose(
            [A.Resize(height = self._image_size, width = self._image_size, p = 1),
             ToTensorV2(p = 1)], p = 1.0,
            bbox_params = A.BboxParams(
                format = "pascal_voc", min_area = 0,
                min_visibility = 0, label_fields = ["labels"]))
        result = pipeline(image = np.array(image, dtype = np.float32),
                          bboxes = bboxes, labels = labels)
        return result['image'], {'bboxes': result['bboxes'],
                                 'labels': result['labels']}

    @staticmethod
    def _unpack_result(result):
        # Dict-style albumentations results are converted into the
        # (image, annotation) tuple form; tuples pass through unchanged.
        if not isinstance(result, dict):
            return result
        return result['image'], {'bboxes': result['bboxes'],
                                 'labels': result['labels']}

    def _run_pipeline(self, aug, image, annotation):
        """Runs pre-processing, the given augmentation, then post-processing."""
        image, annotation = _pre_prepare_for_efficientdet(image, annotation)
        image, annotation = self._unpack_result(aug(
            image = image, bboxes = annotation['bboxes'],
            labels = annotation['labels']))
        return _post_prepare_for_efficientdet(image, annotation)

    def _apply_train(self, image, annotation):
        return self._run_pipeline(
            self._augmentations['train'], image, annotation)

    def _apply_val(self, image, annotation):
        return self._run_pipeline(
            self._augmentations['val'], image, annotation)

    def _apply(self, image, annotation):
        return self._run_pipeline(self._augmentations, image, annotation)  # noqa
def build_loader(dataset: Union[List[str], str],
                 batch_size: int = 4) -> agml.data.AgMLDataLoader:
    """Constructs an `AgMLDataLoader` for object detection.

    Accepts either a single dataset name or a list of names, and returns a
    seeded, shuffled, batched loader with an 80/10/10 train/val/test split,
    ready for object detection training.
    """
    # Seed so the shuffle and split are reproducible across runs.
    pl.seed_everything(2499751)
    detection_loader = agml.data.AgMLDataLoader(dataset)
    detection_loader.shuffle()
    detection_loader.batch(batch_size = batch_size)
    detection_loader.split(train = 0.8, val = 0.1, test = 0.1)
    return detection_loader
class EfficientDetDataModule(pl.LightningDataModule):
    """Wraps an `AgMLDataLoader` into a `LightningDataModule`."""

    def __new__(cls, *args, **kwargs):
        # Dispatch to the non-AgML variant when `no_agml=True` is passed;
        # otherwise construct this class normally.
        if kwargs.get('no_agml', False):
            return EfficientDetDataModuleNoAgML(**kwargs)
        return super(EfficientDetDataModule, cls).__new__(cls, *args, **kwargs)

    def __init__(self,
                 loader: agml.data.AgMLDataLoader,
                 augmentation: Any = None,
                 num_workers: int = 8,
                 **kwargs):
        # Get the training and validation `AgMLDataLoader`s.
        self._train_loader = loader.train_data
        self._train_loader.as_torch_dataset()
        self._val_loader = loader.val_data
        self._val_loader.as_torch_dataset()
        # Update the transforms: a dict of augmentations supplies separate
        # train/val pipelines, while a single callable is shared by both.
        # NOTE(review): `augmentation` defaults to None, yet
        # `augmentation._augmentations` is accessed unconditionally — the
        # default would raise AttributeError; a `TransformApplier` appears
        # to be required here.
        if isinstance(augmentation._augmentations, dict):
            self._train_transform = augmentation._apply_train
            self._val_transform = augmentation._apply_val
        else:
            self._train_transform = self._val_transform = \
                augmentation._apply
        self._train_loader.transform(
            dual_transform = self._train_transform)
        self._val_loader.transform(
            dual_transform = self._val_transform)
        # Initialize the base module.
        self._num_workers = num_workers
        super(EfficientDetDataModule, self).__init__()

    @property
    def train_loader(self):
        # The underlying training `AgMLDataLoader`.
        return self._train_loader

    def train_dataset(self) -> Dataset:
        return self._train_loader

    def train_dataloader(self) -> DataLoader:
        # Export the AgML loader as a torch `DataLoader` for training.
        return self.train_dataset().export_torch(
            shuffle = True,
            pin_memory = True,
            drop_last = True,
            num_workers = self._num_workers,
            collate_fn = self.collate_fn,
        )

    @property
    def val_loader(self):
        # The underlying validation `AgMLDataLoader`.
        return self._val_loader

    def val_dataset(self) -> Dataset:
        return self._val_loader

    def val_dataloader(self) -> DataLoader:
        return self.val_dataset().export_torch(
            pin_memory = True,
            drop_last = True,
            num_workers = self._num_workers,
            collate_fn = self.collate_fn,
        )

    @staticmethod
    def collate_fn(batch):
        """Collates items together into a batch."""
        # Images stack into one tensor; per-image boxes/labels stay as lists
        # because their lengths differ between images.
        images, targets = tuple(zip(*batch))
        images = torch.stack(images)
        images = images.float()
        boxes = [target["bboxes"].float() for target in targets]
        labels = [target["labels"].float() for target in targets]
        img_size = torch.stack([target["img_size"] for target in targets]).float()
        img_scale = torch.stack([target["img_scale"] for target in targets]).float()
        annotations = {
            "bbox": boxes, "cls": labels,
            "img_size": img_size, "img_scale": img_scale}
        return images, annotations, targets
class EfficientDetDataModuleNoAgML(pl.LightningDataModule):
    """Wraps a non-AgMLDataLoader dataset.

    Required keyword arguments: `train_loader`, `val_loader`, and
    `num_workers`. `batch_size` is optional and defaults to 2 (the value
    that was previously hard-coded in both dataloader builders).
    """
    def __init__(self,
                 **kwargs):
        # A custom loader is passed.
        self._train_ds = kwargs['train_loader']
        self._val_ds = kwargs['val_loader']
        self._num_workers = kwargs['num_workers']
        # Generalized: allow the batch size to be configured (was a
        # hard-coded `batch_size = 2` in both dataloaders).
        self._batch_size = kwargs.get('batch_size', 2)
        super(EfficientDetDataModuleNoAgML, self).__init__()
    def train_dataset(self) -> Dataset:
        """Returns the raw training dataset."""
        return self._train_ds
    def train_dataloader(self) -> DataLoader:
        """Builds the shuffled training `DataLoader`."""
        return DataLoader(
            self.train_dataset(),
            batch_size = self._batch_size,
            shuffle = True,
            pin_memory = True,
            drop_last = True,
            num_workers = self._num_workers,
            collate_fn = self.collate_fn,
        )
    def val_dataset(self) -> Dataset:
        """Returns the raw validation dataset."""
        return self._val_ds
    def val_dataloader(self) -> DataLoader:
        """Builds the (unshuffled) validation `DataLoader`."""
        return DataLoader(
            self.val_dataset(),
            batch_size = self._batch_size,
            pin_memory = True,
            drop_last = True,
            num_workers = self._num_workers,
            collate_fn = self.collate_fn,
        )
    @staticmethod
    def collate_fn(batch):
        """Collates items together into a batch.

        Images may arrive as raw arrays, so each is converted to a
        tensor (if needed) before stacking. (Fixed: the previous version
        copied the tensor list through a redundant comprehension before
        stacking.)
        """
        images, targets = tuple(zip(*batch))
        images = torch.stack([
            image if isinstance(image, torch.Tensor)
            else torch.tensor(image) for image in images])
        images = images.float()
        boxes = [target["bboxes"].float() for target in targets]
        labels = [target["labels"].float() for target in targets]
        img_size = torch.stack([target["img_size"] for target in targets]).float()
        img_scale = torch.stack([target["img_scale"] for target in targets]).float()
        annotations = {
            "bbox": boxes, "cls": labels,
            "img_size": img_size, "img_scale": img_scale}
        return images, annotations, targets
| 10,965 | 34.374194 | 97 | py |
AgML | AgML-main/experiments/benchmarking/detection_lightning_multiple.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some of the training code in this file is adapted from the following sources:
1. https://github.com/rwightman/efficientdet-pytorch
2. https://gist.github.com/Chris-hughes10/73628b1d8d6fc7d359b3dcbbbb8869d7
"""
import os
import argparse
import warnings
from typing import List, Union
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from mean_average_precision_torch import MeanAveragePrecision as MAP
import agml
import albumentations as A
from albumentations.pytorch import ToTensorV2
from effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, create_model_from_config
from effdet.efficientdet import HeadNet
from ensemble_boxes import ensemble_boxes_wbf
from tools import gpus, checkpoint_dir, MetricLogger, auto_move_data
# Constants
IMAGE_SIZE = 512
def create_model(num_classes = 1, architecture = "tf_efficientdet_d4", pretrained = False):
    """Builds an EfficientDet `DetBenchTrain` for the given architecture.

    The config's image size is pinned to (IMAGE_SIZE, IMAGE_SIZE), and the
    classification head is rebuilt so its output count matches
    `num_classes`.
    """
    config = get_efficientdet_config(architecture)
    config.update({'image_size': (IMAGE_SIZE, IMAGE_SIZE)})
    print(config)  # NOTE(review): debug print — consider removing or using logging.
    net = create_model_from_config(config, pretrained = pretrained, num_classes = num_classes)
    # Replace the head so predictions cover exactly `num_classes` outputs.
    net.class_net = HeadNet(
        config,
        num_outputs = num_classes,
    )
    return DetBenchTrain(net, config)
class AgMLDatasetAdaptor(object):
    """Adapts an AgML dataset for use in a `LightningDataModule`.

    Converts each sample's COCO-style `(x, y, w, h)` boxes into
    pascal-voc `(x_min, y_min, x_max, y_max)` boxes clipped to the
    image bounds.
    """
    def __init__(self, loader):
        self.loader = loader
    def __len__(self) -> int:
        return len(self.loader)
    def get_image_and_labels_by_idx(self, index):
        """Returns (PIL image, (N, 4) pascal-voc boxes, class labels, index)."""
        image, annotation = self.loader[index]
        image = Image.fromarray(image)
        bboxes = np.array(annotation['bbox']).astype(np.int32)
        # COCO boxes store width/height; convert to corner coordinates.
        x_min = bboxes[:, 0]
        y_min = bboxes[:, 1]
        x_max = bboxes[:, 2] + x_min
        y_max = bboxes[:, 3] + y_min
        # Clip every corner into the image so downstream bbox transforms
        # (which validate pascal-voc boxes) do not reject the sample.
        x_min, y_min = np.clip(x_min, 0, image.width), np.clip(y_min, 0, image.height)
        x_max, y_max = np.clip(x_max, 0, image.width), np.clip(y_max, 0, image.height)
        bboxes = np.dstack((x_min, y_min, x_max, y_max)).squeeze(axis = 0)
        # NOTE(review): squeeze() makes a single label 0-dimensional;
        # consumers (e.g. EfficientDetDataset) re-expand it.
        class_labels = np.array(annotation['category_id']).squeeze()
        return image, bboxes, class_labels, index
def get_transforms(mode = 'inference'):
    """Returns a set of transforms corresponding to the mode.

    Parameters
    ----------
    mode : str
        One of 'train', 'val'/'validation', or 'inference'. Train adds a
        random horizontal flip; all modes resize to IMAGE_SIZE and
        convert to a tensor. Train/val pipelines also carry pascal-voc
        bbox parameters.

    Raises
    ------
    ValueError
        If `mode` is unrecognized. (Fixed: previously the function fell
        through and silently returned None, which surfaced later as an
        opaque "'NoneType' object is not callable" error.)
    """
    if mode == 'train':
        return A.Compose(
            [A.HorizontalFlip(p = 0.5),
             A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)], p = 1.0,
            bbox_params = A.BboxParams(
                format = "pascal_voc", min_area = 0,
                min_visibility = 0, label_fields = ["labels"]))
    elif mode in ['val', 'validation']:
        return A.Compose(
            [A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)], p = 1.0,
            bbox_params = A.BboxParams(
                format = "pascal_voc", min_area = 0,
                min_visibility = 0, label_fields = ["labels"]))
    elif mode == 'inference':
        return A.Compose(
            [A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)], p = 1.0)
    raise ValueError(
        f"Invalid transform mode {mode!r}; expected one of "
        f"'train', 'val', 'validation', or 'inference'.")
class EfficientDetDataset(Dataset):
    """Torch `Dataset` adapting an `AgMLDatasetAdaptor` for EfficientDet.

    Applies the given transform pipeline, normalizes channel counts to 3,
    and swaps boxes from (x1, y1, x2, y2) into the (y1, x1, y2, x2)
    ordering that effdet expects.
    """
    def __init__(self, adaptor, transforms = None):
        self.ds = adaptor
        if transforms is None:
            transforms = get_transforms('val')
        self.transforms = transforms
    def __len__(self):
        return len(self.ds)
    def __getitem__(self, index):
        image, pascal_bboxes, class_labels, image_id = \
            self.ds.get_image_and_labels_by_idx(index)
        # Add a label dimension for consistency.
        if class_labels.ndim == 0:
            class_labels = np.expand_dims(class_labels, axis = 0)
        # Construct the sample.
        sample = {
            "image": np.array(image, dtype = np.float32),
            "bboxes": pascal_bboxes, "labels": class_labels}
        try:
            sample = self.transforms(**sample)
        except Exception as e:
            # Fixed: previously a bare `except:` re-raised a generic
            # `Exception`, discarding the original traceback. Chaining
            # keeps the underlying transform error visible.
            raise RuntimeError(f"Failed sample: {sample}") from e
        sample["bboxes"] = np.array(sample["bboxes"])
        image = sample["image"]
        labels = sample["labels"]
        # Convert 1-channel and 4-channel to 3-channel.
        if image.shape[0] == 1:
            image = torch.tile(image, (3, 1, 1))
        if image.shape[0] == 4:
            image = image[:3]
        # Convert to yxyx from xyxy.
        _, new_h, new_w = image.shape
        sample["bboxes"][:, [0, 1, 2, 3]] = \
            sample["bboxes"][:, [1, 0, 3, 2]]
        # Create the target from the annotations.
        target = {
            "bboxes": torch.as_tensor(sample["bboxes"], dtype = torch.float32),
            "labels": torch.as_tensor(labels), "image_id": torch.tensor([image_id]),
            "img_size": (new_h, new_w), "img_scale": torch.tensor([1.0])}
        return image, target, image_id
class EfficientDetDataModule(pl.LightningDataModule):
    """A `LightningDataModule` for the `LightningModule`.

    Wraps a pair of dataset adaptors in `EfficientDetDataset`s and
    exposes the train/validation dataloaders that `pl.Trainer` consumes.
    """
    def __init__(self,
                 train_dataset_adaptor,
                 validation_dataset_adaptor,
                 train_transforms = None,
                 val_transforms = None,
                 num_workers = 4,
                 batch_size = 8):
        self.train_ds = train_dataset_adaptor
        self.valid_ds = validation_dataset_adaptor
        # Fall back to the module-level transform presets when none given.
        if train_transforms is None:
            train_transforms = get_transforms('train')
        self.train_tfms = train_transforms
        if val_transforms is None:
            val_transforms = get_transforms('val')
        self.val_tfms = val_transforms
        self.num_workers = num_workers
        self.batch_size = batch_size
        super().__init__()
    def train_dataset(self) -> EfficientDetDataset:
        """Builds the transformed training dataset."""
        return EfficientDetDataset(
            adaptor = self.train_ds,
            transforms = self.train_tfms)
    def train_dataloader(self) -> DataLoader:
        """Builds the shuffled training `DataLoader`."""
        return DataLoader(
            self.train_dataset(),
            batch_size = self.batch_size,
            shuffle = True,
            pin_memory = True,
            drop_last = True,
            num_workers = self.num_workers,
            collate_fn = self.collate_fn,
        )
    def val_dataset(self) -> EfficientDetDataset:
        """Builds the transformed validation dataset."""
        return EfficientDetDataset(
            adaptor = self.valid_ds,
            transforms = self.val_tfms)
    def val_dataloader(self) -> DataLoader:
        """Builds the (unshuffled) validation `DataLoader`."""
        return DataLoader(
            self.val_dataset(),
            batch_size = self.batch_size,
            shuffle = False,
            pin_memory = True,
            drop_last = True,
            num_workers = self.num_workers,
            collate_fn = self.collate_fn,
        )
    @staticmethod
    def collate_fn(batch):
        """Collates (image, target, image_id) samples into an effdet batch."""
        images, targets, image_ids = tuple(zip(*batch))
        images = torch.stack(images)
        images = images.float()
        boxes = [target["bboxes"].float() for target in targets]
        labels = [target["labels"].float() for target in targets]
        img_size = torch.tensor([target["img_size"] for target in targets]).float()
        img_scale = torch.tensor([target["img_scale"] for target in targets]).float()
        annotations = {
            "bbox": boxes, "cls": labels,
            "img_size": img_size, "img_scale": img_scale}
        return images, annotations, targets, image_ids
class EfficientDetModel(pl.LightningModule):
    """Lightning module wrapping an EfficientDet training bench.

    Handles training/validation steps, weighted-boxes-fusion
    post-processing, and (optionally) a mAP metric computed against a
    validation dataset adaptor.
    """
    def __init__(self,
                 num_classes = 1,
                 confidence_threshold = 0.3,
                 learning_rate = 0.0002,
                 wbf_iou_threshold = 0.44,
                 inference_transforms = None,
                 architecture = 'efficientdet_d4',
                 save_dir = None,
                 pretrained = False,
                 validation_dataset_adaptor = None):
        # NOTE(review): `save_dir` is accepted but never stored or used here.
        super().__init__()
        self.model = create_model(
            num_classes, architecture = architecture,
            pretrained = pretrained)
        self.confidence_threshold = confidence_threshold
        self.lr = learning_rate
        self.wbf_iou_threshold = wbf_iou_threshold
        if inference_transforms is None:
            inference_transforms = get_transforms('inference')
        self.inference_tfms = inference_transforms
        # Construct the metric.
        self.val_dataset_adaptor = None
        if validation_dataset_adaptor is not None:
            # Add a metric calculator. `self.map` only exists when a
            # validation adaptor was supplied; other methods guard on it.
            self.val_dataset_adaptor = AgMLDatasetAdaptor(
                validation_dataset_adaptor)
            self.map = MAP()
        # Skip metric updates during Lightning's sanity-check pass.
        self._sanity_check_passed = False
    @auto_move_data
    def forward(self, images, targets):
        return self.model(images, targets)
    def configure_optimizers(self):
        return torch.optim.AdamW(self.model.parameters(), lr = self.lr)
    def training_step(self, batch, batch_idx):
        # Run a forward pass through the model.
        images, annotations, _, _ = batch
        losses = self.model(images, annotations)
        # Calculate and log losses.
        self.log("train_loss", losses["loss"], on_step = True,
                 on_epoch = True, prog_bar = True, logger = True)
        self.log("train_class_loss", losses["class_loss"],
                 on_step = True, on_epoch = True, logger = True)
        self.log("train_box_loss", losses["box_loss"], on_step = True,
                 on_epoch = True, logger = True)
        return losses['loss']
    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        images, annotations, targets, image_ids = batch
        outputs = self.model(images, annotations)
        detections = outputs["detections"]
        # Update the metric. Skipped during the sanity check so the mAP
        # statistics only reflect real validation passes.
        if self.val_dataset_adaptor is not None and self._sanity_check_passed:
            for idx in image_ids:
                image, truth_boxes, truth_cls, _ = \
                    self.val_dataset_adaptor.get_image_and_labels_by_idx(idx)
                # `predict` returns (boxes, labels, confidences).
                pred_box, pred_labels, pred_conf = self.predict([image])
                # Unwrap the single-image nesting from `predict` when the
                # label list is per-image (not a bare float).
                if not isinstance(pred_labels[0], float):
                    pred_box, pred_labels, pred_conf = pred_box[0], pred_labels[0], pred_conf[0]
                if truth_cls.ndim == 0:
                    truth_cls = np.expand_dims(truth_cls, 0)
                metric_update_values = \
                    dict(boxes = torch.tensor(pred_box, dtype = torch.float32),
                         labels = torch.tensor(pred_labels, dtype = torch.int32),
                         scores = torch.tensor(pred_conf)), \
                    dict(boxes = torch.tensor(truth_boxes, dtype = torch.float32),
                         labels = torch.tensor(truth_cls, dtype = torch.int32))
                self.map.update(*metric_update_values)
        batch_predictions = {
            "predictions": detections,
            "targets": targets,
            "image_ids": image_ids,
        }
        logging_losses = {
            "class_loss": outputs["class_loss"].detach(),
            "box_loss": outputs["box_loss"].detach(),
        }
        self.log("valid_loss", outputs["loss"], on_step = True, on_epoch = True,
                 prog_bar = True, logger = True, sync_dist = True)
        self.log("valid_class_loss", logging_losses["class_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        self.log("valid_box_loss", logging_losses["box_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        return {'loss': outputs["loss"], 'batch_predictions': batch_predictions}
    def predict(self, images: Union[torch.Tensor, List]):
        """Runs inference on a set of images.
        Parameters
        ----------
        images : {torch.Tensor, list}
            Either a list of images (which can be numpy arrays, tensors, or
            another type), or a torch.Tensor returned from a DataLoader.
        Returns
        -------
        A tuple of (bounding boxes, class labels, confidence scores) —
        this is the order `_run_inference` actually returns.
        """
        if isinstance(images, list):
            # PIL-style images: `.size` is (width, height); record as (h, w).
            image_sizes = [(image.size[1], image.size[0]) for image in images]
            images_tensor = torch.stack([
                self.inference_tfms(
                    image = np.array(image, dtype = np.float32),
                )["image"] for image in images])
            return self._run_inference(images_tensor, image_sizes)
        elif isinstance(images, torch.Tensor):
            image_tensor = images
            if image_tensor.ndim == 3:
                image_tensor = image_tensor.unsqueeze(0)
            if image_tensor.shape[-1] != IMAGE_SIZE \
                    or image_tensor.shape[-2] != IMAGE_SIZE:
                raise ValueError(
                    f"Input tensors must be of shape "
                    f"(N, 3, {IMAGE_SIZE}, {IMAGE_SIZE})")
            num_images = image_tensor.shape[0]
            image_sizes = [(IMAGE_SIZE, IMAGE_SIZE)] * num_images
            return self._run_inference(image_tensor, image_sizes)
        else:
            raise TypeError(
                "Expected either a list of images or a "
                "torch.Tensor of images for `predict()`.")
    def _run_inference(self, images_tensor, image_sizes):
        # effdet's bench requires targets even at inference; pass dummies.
        dummy_targets = self._create_dummy_inference_targets(
            images_tensor.shape[0], self.device, IMAGE_SIZE)
        detections = self.model(
            images_tensor.to(self.device), dummy_targets)["detections"]
        predicted_bboxes, predicted_class_confidences, predicted_class_labels = \
            self.post_process_detections(detections)
        # Rescale from the model's IMAGE_SIZE space to each original size.
        scaled_bboxes = self._rescale_bboxes(
            predicted_bboxes = predicted_bboxes,
            image_sizes = image_sizes)
        return scaled_bboxes, predicted_class_labels, predicted_class_confidences
    @staticmethod
    def _create_dummy_inference_targets(num_images, device, size):
        """Builds placeholder targets so the train bench can run inference."""
        return {
            "bbox": [
                torch.tensor([[0.0, 0.0, 0.0, 0.0]], device = device)
                for _ in range(num_images)
            ],
            "cls": [torch.tensor([1.0], device = device) for _ in range(num_images)],
            "img_size": torch.tensor(
                [(size, size)] * num_images, device = device).float(),
            "img_scale": torch.ones(num_images, device = device).float(),
        }
    def post_process_detections(self, detections):
        """Thresholds raw detections and merges them via weighted boxes fusion."""
        predictions = [self._postprocess_single_prediction_detections(d) for d in detections]
        predicted_bboxes, predicted_class_confidences, predicted_class_labels = self.run_wbf(
            predictions, image_size = IMAGE_SIZE, iou_thr = self.wbf_iou_threshold)
        return predicted_bboxes, predicted_class_confidences, predicted_class_labels
    def _postprocess_single_prediction_detections(self, detections):
        # Extract the bounding boxes, confidence scores,
        # and class labels from the output detections.
        boxes = detections.detach().cpu().numpy()[:, :4]
        scores = detections.detach().cpu().numpy()[:, 4]
        classes = detections.detach().cpu().numpy()[:, 5]
        # Only return boxes which are above the confidence threshold.
        valid_indexes = np.where(scores > self.confidence_threshold)[0]
        boxes = boxes[valid_indexes]
        scores = scores[valid_indexes]
        classes = classes[valid_indexes]
        return {"boxes": boxes, "scores": scores, "classes": classes}
    @staticmethod
    def _rescale_bboxes(predicted_bboxes, image_sizes):
        """Scales boxes from IMAGE_SIZE space back to each image's (h, w)."""
        scaled_bboxes = []
        for bboxes, img_dims in zip(predicted_bboxes, image_sizes):
            im_h, im_w = img_dims
            if len(bboxes) > 0:
                scaled_bboxes.append(
                    (np.array(bboxes) * [
                        im_w / IMAGE_SIZE, im_h / IMAGE_SIZE,
                        im_w / IMAGE_SIZE, im_h / IMAGE_SIZE
                    ]).tolist())
            else:
                scaled_bboxes.append(bboxes)
        return scaled_bboxes
    @staticmethod
    def run_wbf(predictions, image_size = 512, iou_thr = 0.44, skip_box_thr = 0.43, weights = None):
        """Applies per-image weighted boxes fusion; boxes are normalized to
        [0, 1] for the fusion call and rescaled to pixels afterwards."""
        bboxes, confidences, class_labels = [], [], []
        for prediction in predictions:
            boxes = [(prediction["boxes"] / image_size).tolist()]
            scores = [prediction["scores"].tolist()]
            labels = [prediction["classes"].tolist()]
            boxes, scores, labels = ensemble_boxes_wbf.weighted_boxes_fusion(
                boxes, scores, labels, weights = weights,
                iou_thr = iou_thr, skip_box_thr = skip_box_thr)
            boxes = boxes * (image_size - 1)
            bboxes.append(boxes.tolist())
            confidences.append(scores.tolist())
            class_labels.append(labels.tolist())
        return bboxes, confidences, class_labels
    def on_validation_epoch_end(self) -> None:
        # The first validation pass is Lightning's sanity check; only flip
        # the flag and skip metric computation for it.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        if hasattr(self, 'metric_logger'):
            self.metric_logger.compile_epoch()
        if hasattr(self, 'map'):
            # NOTE(review): `map` shadows the builtin here (local scope only).
            map = self.map.compute().detach().cpu().numpy().item()
            self.log("map", map, prog_bar = True,
                     on_epoch = True,
                     logger = True, sync_dist = True)
            self.map.reset()
    def on_fit_end(self) -> None:
        if hasattr(self, 'metric_logger'):
            self.metric_logger.save()
        # NOTE(review): unlike above, this reset is not guarded by
        # hasattr(self, 'map') — it would raise AttributeError when no
        # validation adaptor was supplied. Confirm intended.
        self.map.reset()
    def get_progress_bar_dict(self):
        # Hide the version number from the progress bar display.
        p_bar = super(EfficientDetModel, self).get_progress_bar_dict()
        p_bar.pop('v_num', None)
        return p_bar
# Calculate and log the metrics.
class DetectionMetricLogger(MetricLogger):
    """Metric logger that routes update calls to the tracked mAP metric."""
    def update_metrics(self, y_pred, y_true) -> None:
        # Forward predictions and ground truth to the 'map' metric entry.
        self.metrics['map'].update(y_pred, y_true)
def train(dataset, epochs, save_dir = None, overwrite = None, generalize_detections = False):
    """Constructs the training loop and trains a model.

    NOTE(review): the `__main__` block passes a *list* of dataset names
    here, while this body uses `dataset` as a single value (e.g. in
    `AgMLDataLoader(dataset)` and the checkpoint filename). Presumably
    AgML's loader accepts multi-dataset input — confirm against the AgML
    API; otherwise this needs a loop over datasets.
    """
    save_dir = checkpoint_dir(None, save_dir)
    log_dir = save_dir.replace('checkpoints', 'logs')
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Set up the checkpoint saving callback (keeps the 3 best epochs by
    # validation loss, matching the 'valid_loss' key logged by the model).
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'min',
            filename = f"{dataset}" + "-epoch{epoch:02d}-valid_loss_{valid_loss:.2f}",
            monitor = 'valid_loss',
            save_top_k = 3,
            auto_insert_metric_name = False
        )
    ]
    # Create the loggers.
    loggers = [
        CSVLogger(log_dir),
        TensorBoardLogger(log_dir)
    ]
    # Construct the data.
    print(f"Saving to {log_dir}.")
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(dataset)
    loader.shuffle()
    num_classes = loader.num_classes
    if generalize_detections:
        print("Generalizing class detections.")
        # Collapse all classes into a single 'object' class.
        loader.generalize_class_detections()
        num_classes = 1
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    dm = EfficientDetDataModule(
        train_dataset_adaptor = AgMLDatasetAdaptor(loader.train_data),
        validation_dataset_adaptor = AgMLDatasetAdaptor(loader.val_data),
        num_workers = 12, batch_size = 4)
    # Construct the model. Passing the validation data enables the mAP
    # metric inside `EfficientDetModel`.
    model = EfficientDetModel(
        num_classes = num_classes,
        architecture = 'tf_efficientdet_d4',
        save_dir = save_dir,
        pretrained = True,
        validation_dataset_adaptor = loader.val_data)
    # Create the trainer and train the model.
    msg = f"Training dataset {dataset}!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(None),
        callbacks = callbacks, logger = loggers)
    trainer.fit(model, dm)
    # Save the model state.
    torch.save(model.state_dict(), os.path.join(save_dir, 'model_state.pth'))
if __name__ == '__main__':
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--regenerate-existing', action = 'store_true',
        default = False, help = "Whether to re-generate existing benchmarks.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 20,
        help = "How many epochs to train for. Default is 20.")
    ap.add_argument(
        '--generalize-detections', action = 'store_true',
        default = False, help = "Whether to generalize class labels.")
    args = ap.parse_args()
    # Train the model. Passing `except <names...>` as the dataset list
    # selects every public object-detection dataset except those named.
    if args.dataset[0] == 'except':
        exclude_datasets = args.dataset[1:]
        datasets = [
            dataset.name for dataset in agml.data.public_data_sources(
                ml_task = 'object_detection')
            if dataset.name not in exclude_datasets]
    else:
        datasets = args.dataset
    # NOTE(review): `datasets` is a list; see the note on `train()` about
    # whether AgMLDataLoader handles a list of names.
    train(
        dataset = datasets,
        epochs = args.epochs,
        save_dir = args.checkpoint_dir,
        overwrite = args.regenerate_existing,
        generalize_detections = args.generalize_detections)
| 22,292 | 36.848896 | 100 | py |
AgML | AgML-main/experiments/benchmarking/detection_lightning_ssd.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from torchvision.models.detection import ssd300_vgg16
import pytorch_lightning as pl
import agml
import albumentations as A
class EfficientDetTransfer(nn.Module):
    """Thin wrapper around a torchvision SSD300-VGG16 detector.

    NOTE(review): the original docstring described a DeepLabV3/ResNet50
    segmentation model, which does not match this code — it wraps
    `ssd300_vgg16` for object detection, and despite the "transfer"
    name it loads no pretrained weights. It also appears unused here;
    `DetectionBenchmark` constructs its own network.
    """
    def __init__(self):
        super(EfficientDetTransfer, self).__init__()
        self.base = ssd300_vgg16(pretrained=False)
    def forward(self, x, y, **kwargs): # noqa
        return self.base(x, y)
class DetectionBenchmark(pl.LightningModule):
    """Represents an object detection benchmark model (SSD300-VGG16)."""
    def __init__(self):
        # Initialize the module.
        super(DetectionBenchmark, self).__init__()
        # Construct the network.
        self.net = ssd300_vgg16(pretrained=False)
    def forward(self, x, target):
        return self.net(x, target)
    def training_step(self, batch, *args, **kwargs): # noqa
        x, y = process_data(*batch)
        loss_dict = self(x, y)
        self.log('class_loss', loss_dict['classification'], prog_bar = True)
        self.log('reg_loss', loss_dict['bbox_regression'], prog_bar = True)
        return {
            'loss': sum(i for i in loss_dict.values()),
            'log': loss_dict
        }
    def validation_step(self, batch, *args, **kwargs): # noqa
        x, y = process_data(*batch)
        # Torchvision detection models only return a loss dict in train
        # mode, so temporarily flip to train for the validation pass.
        self.net.train()
        loss_dict = self(x, y)
        loss = sum(i for i in loss_dict.values())
        self.log('class_loss', loss_dict['classification'], prog_bar = True)
        self.log('reg_loss', loss_dict['bbox_regression'], prog_bar = True)
        # Fixed: the ModelCheckpoint and EarlyStopping callbacks in
        # `train()` monitor 'val_loss', but it was never logged — they
        # would error (or silently never trigger). Log it here.
        self.log('val_loss', loss, prog_bar = True)
        return {
            'loss': loss,
            'log': loss_dict
        }
    def configure_optimizers(self):
        return torch.optim.Adam(self.net.parameters())
    def get_progress_bar_dict(self):
        # Drop the version number from the progress bar display.
        tqdm_dict = super(DetectionBenchmark, self)\
            .get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict
def accuracy(output, target):
    """Computes top-1 accuracy (as a percentage) of `output` vs `target`."""
    with torch.no_grad():
        n = target.size(0)
        # Index of the highest-scoring class for each sample.
        top_pred = output.argmax(dim = 1)
        hits = top_pred.eq(target).float().sum(0, keepdim = True)
        # Scale the hit count into a percentage over the batch.
        return hits.mul_(100.0 / n)
# Transform to swap bounding box order from `xyxy` to `yxyx` # noqa
def bbox_swap(coco):
    """Converts the 'bbox' entry from COCO (x, y, w, h) to corner form
    (x1, y1, x2, y2), mutating and returning the annotation dict."""
    boxes = coco['bbox']
    x1, y1, w, h = boxes.T
    corners = np.dstack((x1, y1, x1 + w, y1 + h))
    coco['bbox'] = np.squeeze(corners)
    return coco
# Transform to process the image and COCO JSON dictionaries
# and make them compatible with the EfficientDet model. This
# is used directly in the training loop.
def _make_boxes(b):
if b.ndim == 1:
return torch.unsqueeze(b, dim = 0)
return b
def process_data(image, coco):
    """Splits a batched image tensor into per-image tensors and reshapes
    the COCO dicts into torchvision-style detection targets."""
    images = torch.unbind(image, dim = 0)
    targets = [
        {'boxes': _make_boxes(d['bbox'].float()),
         'labels': d['category_id'].long()}
        for d in coco
    ]
    return images, targets
# Build the data loaders.
def build_loaders(name):
    """Builds (train, val, test) torch datasets for the named AgML dataset.

    Images are batched by 4, resized to 256x256, and scaled; the training
    split additionally gets a random 90-degree rotation. Box annotations
    for train/val are converted to corner form via `bbox_swap`.
    """
    loader = agml.data.AgMLDataLoader(name)
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    loader.batch(4)
    loader.resize_images((256, 256))
    loader.normalize_images(method = 'scale')
    train_data = loader.train_data
    train_data.transform(transform = A.Compose([
        A.RandomRotate90()
    ], bbox_params = A.BboxParams('coco')))
    train_data.transform(target_transform = bbox_swap)
    train_ds = train_data.copy().as_torch_dataset()
    val_ds = loader.val_data.as_torch_dataset()
    val_ds.transform(target_transform = bbox_swap)
    val_ds.shuffle_data = False
    # NOTE(review): the test split gets no bbox_swap target transform,
    # unlike train/val — confirm whether that asymmetry is intended.
    test_ds = loader.test_data.as_torch_dataset()
    return train_ds, val_ds, test_ds
def train(dataset, pretrained, epochs, save_dir = None):
    """Constructs the training loop and trains a model.

    NOTE(review): `pretrained` is accepted but never used in this body.
    Also, `os.makedirs` only runs in the default-save_dir branch — an
    explicitly passed `save_dir` is never created; confirm callers pass
    existing paths.
    """
    if save_dir is None:
        if os.path.isdir('/data2'):
            save_dir = os.path.join(f"/data2/amnjoshi/checkpoints/{dataset}")
        else:
            save_dir = os.path.join(os.path.dirname(__file__), 'logs')
        os.makedirs(save_dir, exist_ok = True)
    # Set up the checkpoint saving callback. Both callbacks monitor
    # 'val_loss', so the model must log a metric with that exact key.
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'min',
            filename = f"{dataset}" + "-epoch{epoch:02d}-val_loss_{val_loss:.2f}",
            monitor = 'val_loss',
            save_top_k = 3,
            auto_insert_metric_name = False
        ),
        pl.callbacks.EarlyStopping(
            monitor = 'val_loss',
            min_delta = 0.001,
            patience = 3,
        )
    ]
    # Construct the model.
    model = DetectionBenchmark()
    # Construct the data loaders.
    train_ds, val_ds, test_ds = build_loaders(dataset)
    # Create the trainer and train the model. NOTE(review): `gpus = 0`
    # forces CPU training — confirm this is intended.
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = 0, callbacks = callbacks)
    trainer.fit(
        model = model,
        train_dataloaders = train_ds,
        val_dataloaders = val_ds
    )
if __name__ == '__main__':
    from agml.backend import set_seed
    set_seed(0)
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    # Fixed: `args.dataset` was previously overwritten unconditionally
    # with "apple_detection_usa" after parsing, silently discarding any
    # value passed on the command line. The dataset is now the argparse
    # default instead, so `--dataset` works again while the no-argument
    # behavior is unchanged.
    ap.add_argument(
        '--dataset', type = str, default = 'apple_detection_usa',
        help = "The name of the dataset.")
    ap.add_argument(
        '--pretrained', action = 'store_true',
        default = False, help = "Whether to load a pretrained model.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 50,
        help = "How many epochs to train for. Default is 50.")
    args = ap.parse_args()
    # Train the model.
    train(args.dataset,
          args.pretrained,
          epochs = args.epochs,
          save_dir = args.checkpoint_dir)
| 6,654 | 29.953488 | 82 | py |
AgML | AgML-main/experiments/benchmarking/mean_average_precision_torch.py | import torch
import numpy as np
from tqdm import tqdm
def _scalar_to_array(*args):
"""Converts 0-dimensional scalar arrays to 1-d arrays."""
cvt = lambda x: np.expand_dims(x, 0) if x.ndim == 0 else x
outs = [cvt(arg) for arg in args]
return outs[0] if len(args) == 1 else outs
def _add_truth_scores_to_annotations(annotations):
"""Adds a `score` element of 1.0 to ground truth annotations.
As for the mean average precision method, boxes are expected to be
in the format [image_idx, class, *score*, x1, y1, x2, y2], but
ground truth elements do not have a score, this method takes
elements of the form [image_idx, class, x1, y1, x2, y2], and adds
a dummy `score` element of 1.0 to satisfy the MAP method.
"""
a = annotations # short-hand
for idx in range(len(annotations)):
prior, after = a[idx][:2], a[idx][2:]
a[idx] = [*prior, 1.0, *after]
import torch
from collections import Counter
def intersection_over_union(boxes_preds, boxes_labels, box_format = "midpoint"):
    """
    Calculates intersection over union
    Parameters:
        boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
        boxes_labels (tensor): Correct Labels of Boxes (BATCH_SIZE, 4)
        box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)
    Returns:
        tensor: Intersection over union for all examples
    """
    # Width-one slices (idx:idx+1) keep the trailing dimension intact so
    # the math broadcasts for any leading shape, e.g. Yolo's (N, S, S, 4).
    def _to_corners(boxes):
        if box_format == "midpoint":
            cx, cy = boxes[..., 0:1], boxes[..., 1:2]
            half_w, half_h = boxes[..., 2:3] / 2, boxes[..., 3:4] / 2
            return cx - half_w, cy - half_h, cx + half_w, cy + half_h
        elif box_format == "corners":
            return (boxes[..., 0:1], boxes[..., 1:2],
                    boxes[..., 2:3], boxes[..., 3:4])

    box1_x1, box1_y1, box1_x2, box1_y2 = _to_corners(boxes_preds)
    box2_x1, box2_y1, box2_x2, box2_y2 = _to_corners(boxes_labels)
    inter_x1 = torch.max(box1_x1, box2_x1)
    inter_y1 = torch.max(box1_y1, box2_y1)
    inter_x2 = torch.min(box1_x2, box2_x2)
    inter_y2 = torch.min(box1_y2, box2_y2)
    # clamp(0) handles disjoint boxes, which have zero intersection.
    intersection = (inter_x2 - inter_x1).clamp(0) * (inter_y2 - inter_y1).clamp(0)
    area1 = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    area2 = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
    # Small epsilon guards against division by zero for degenerate boxes.
    return intersection / (area1 + area2 - intersection + 1e-6)
def mean_average_precision(
        pred_boxes, true_boxes, iou_threshold = 0.5,
        box_format = "midpoint", num_classes = 20, use_bar = False
):
    """
    Calculates mean average precision
    Parameters:
        pred_boxes (list): list of lists containing all bboxes with each bboxes
        specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2]
        true_boxes (list): Similar as pred_boxes except all the correct ones
        iou_threshold (float): threshold where predicted bboxes is correct
        box_format (str): "midpoint" or "corners" used to specify bboxes
        num_classes (int): number of classes
        use_bar (bool): whether to wrap the class loop in a tqdm bar
    Returns:
        float: mAP value across all classes given a specific IoU threshold
    """
    # list storing all AP for respective classes
    average_precisions = []
    # Ground-truth rows may arrive without a score column (6 elements);
    # splice in a dummy score so they match the prediction layout.
    # NOTE(review): `true_boxes[0]` assumes a non-empty list — an empty
    # `true_boxes` would raise IndexError here; confirm callers guarantee it.
    if len(true_boxes[0]) == 6:
        my_new_boxes = true_boxes.copy()
        _add_truth_scores_to_annotations(my_new_boxes)
        true_boxes = my_new_boxes
    # used for numerical stability later on
    epsilon = 1e-6
    classes = range(num_classes)
    if use_bar:
        classes = tqdm(classes, leave = False)
    for c in classes:
        detections = []
        ground_truths = []
        # Go through all predictions and targets,
        # and only add the ones that belong to the
        # current class c
        for detection in pred_boxes:
            if detection[1] == c:
                detections.append(detection)
        for true_box in true_boxes:
            if true_box[1] == c:
                ground_truths.append(true_box)
        # find the amount of bboxes for each training example
        # Counter here finds how many ground truth bboxes we get
        # for each training example, so let's say img 0 has 3,
        # img 1 has 5 then we will obtain a dictionary with:
        # amount_bboxes = {0:3, 1:5}
        amount_bboxes = Counter([gt[0] for gt in ground_truths])
        # We then go through each key, val in this dictionary
        # and convert to the following (w.r.t same example):
        # ammount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]}
        for key, val in amount_bboxes.items():
            amount_bboxes[key] = torch.zeros(val)
        # sort by box probabilities which is index 2
        detections.sort(key = lambda x: x[2], reverse = True)
        TP = torch.zeros((len(detections)))
        FP = torch.zeros((len(detections)))
        total_true_bboxes = len(ground_truths)
        # If none exists for this class then we can safely skip
        if total_true_bboxes == 0:
            continue
        for detection_idx, detection in enumerate(detections):
            # Only take out the ground_truths that have the same
            # training idx as detection
            ground_truth_img = [
                bbox for bbox in ground_truths if bbox[0] == detection[0]
            ]
            num_gts = len(ground_truth_img)  # NOTE(review): unused
            best_iou = 0
            for idx, gt in enumerate(ground_truth_img):
                iou = intersection_over_union(
                    torch.tensor(detection[3:]),
                    torch.tensor(gt[3:]),
                    box_format = box_format,
                )
                if iou > best_iou:
                    best_iou = iou
                    best_gt_idx = idx
            # `best_gt_idx` is only read when best_iou > threshold, which
            # implies at least one ground truth matched above.
            if best_iou > iou_threshold:
                # only detect ground truth detection once
                if amount_bboxes[detection[0]][best_gt_idx] == 0:
                    # true positive and add this bounding box to seen
                    TP[detection_idx] = 1
                    amount_bboxes[detection[0]][best_gt_idx] = 1
                else:
                    FP[detection_idx] = 1
            # if IOU is lower then the detection is a false positive
            else:
                FP[detection_idx] = 1
        TP_cumsum = torch.cumsum(TP, dim = 0)
        FP_cumsum = torch.cumsum(FP, dim = 0)
        recalls = TP_cumsum / (total_true_bboxes + epsilon)
        precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon)
        # Prepend the canonical (recall=0, precision=1) starting point
        # of the precision-recall curve before integrating.
        precisions = torch.cat((torch.tensor([1]), precisions))
        recalls = torch.cat((torch.tensor([0]), recalls))
        # torch.trapz for numerical integration
        average_precisions.append(torch.trapz(precisions, recalls))
    if len(average_precisions) == 0:
        return torch.tensor(0.0)
    return sum(average_precisions) / len(average_precisions)
class MeanAveragePrecision(object):
    """Stores and calculates the mean average precision over data.

    Prediction and ground-truth samples are accumulated via `update()`
    (or `batch_update()` for batches); `compute()` then evaluates the
    mAP over everything collected so far using the module-level
    `mean_average_precision()` routine.
    """
    def __init__(self, num_classes = 1, use_bar = False):
        self._num_classes = num_classes
        # Store all of the ground truth and prediction data.
        self._prediction_data = []
        self._ground_truth_data = []
        # Track the number of times that a data sample is added,
        # and any past MAP values which are computed.
        self._num_updates = 0
        self._prior_maps = {}
        # Whether to use a progress bar.
        self._use_bar = use_bar
    def batch_update(self, pred_data, gt_data):
        """Same as `update()`, but for batches of data."""
        for pred_d, gt_d in zip(pred_data, gt_data):
            self.update(pred_d, gt_d)
    def update(self, pred_data, gt_data):
        """Update the tracker with prediction and ground truth data.
        The arguments `pred_data` and `gt_data` should be either
        dictionaries (with the following keys), or lists of values
        which correspond in order to the same keys listed below:
        - `pred_data`: `boxes`, `labels`, and `scores`.
        - `gt_data`: `boxes` and `labels`.
        Note: To update a batch of data, use `batch_update()`.
        """
        # Get the relevant data from the input arguments.
        if isinstance(pred_data, dict):
            pred_boxes, pred_labels, pred_scores = \
                pred_data['boxes'], pred_data['labels'], pred_data['scores']
        else:
            pred_boxes, pred_labels, pred_scores = pred_data
        if isinstance(gt_data, dict):
            gt_boxes, gt_labels = \
                gt_data['boxes'], gt_data['labels']
        else:
            gt_boxes, gt_labels = gt_data
        # Format the data: collapse singleton dimensions, then promote
        # scalars back to 1-d arrays so the zips below always iterate.
        pred_boxes = np.squeeze(pred_boxes)
        pred_labels = np.squeeze(pred_labels)
        pred_scores = np.squeeze(pred_scores)
        pred_labels, gt_labels, pred_scores = \
            _scalar_to_array(pred_labels, gt_labels, pred_scores)
        if pred_boxes.ndim == 1:
            pred_boxes = np.expand_dims(pred_boxes, axis = 0)
        if gt_boxes.ndim == 1:
            gt_boxes = np.expand_dims(gt_boxes, axis = 0)
        # Create the data in the proper format. Labels are shifted down
        # by one so class IDs are zero-based for the mAP routine.
        for bbox, label in zip(gt_boxes, gt_labels):
            self._ground_truth_data.append(
                [self._num_updates, int(label - 1), *bbox])
        # NOTE: a redundant re-check of `pred_boxes.ndim` (already
        # performed above) was removed here.
        for bbox, label, score in zip(pred_boxes, pred_labels, pred_scores):
            self._prediction_data.append(
                [self._num_updates, int(label - 1), score, *bbox])
        # Increment the update number.
        self._num_updates += 1
    @property
    def historical_data(self):
        """Returns historical mAP over past data."""
        return self._prior_maps
    def compute(self, iou_threshold = 0.5):
        """Computes the mAP with the data."""
        ap = mean_average_precision(
            self._prediction_data,
            self._ground_truth_data,
            num_classes = self._num_classes,
            iou_threshold = iou_threshold,
            use_bar = self._use_bar)
        self._prior_maps[self._num_updates] = ap
        return ap
    def reset(self):
        """Resets the internal lists."""
        del self._prediction_data
        del self._ground_truth_data
        self._prediction_data = []
        self._ground_truth_data = []
        self._num_updates = 0
| 11,134 | 35.749175 | 84 | py |
AgML | AgML-main/experiments/benchmarking/tools.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from functools import wraps
from typing import Dict, Callable
import pandas as pd
import torch
from torchmetrics import Metric
def gpus(given = None):
    """Return how many GPUs the experiment should use.

    Without CUDA this is always zero; otherwise an explicit request
    takes precedence, and the full device count is the fallback.
    """
    cuda_ready = torch.cuda.is_available()
    if not cuda_ready:
        return 0
    return torch.cuda.device_count() if given is None else int(given)
def checkpoint_dir(given = None, dataset = None):
    """Returns the directory to save logs/checkpoints to.

    If `given` is provided, the dataset name is appended to it (with a
    trailing literal 'dataset' path component replaced) and the
    directory is created. Otherwise a default location is chosen:
    '/content/logs' on Google Colab, a fixed '/data2' path when that
    mount exists, or a 'logs' folder next to this file.
    """
    if given is not None:
        if given.endswith('dataset'):
            # if a path is passed like /root/dataset, this updates to the name
            given = os.path.dirname(given)
        given = os.path.join(given, dataset)
        if not os.path.exists(given):
            os.makedirs(given, exist_ok = True)
        return given
    # BUG FIX: the original checked `'get_ipython()' in globals()`,
    # which can never be true (globals keys are bare identifiers), so
    # the Colab branch was dead code.
    if 'get_ipython' in globals() and os.path.exists('/content'):
        # In Google Colab.
        return '/content/logs'
    else:
        if os.path.exists('/data2'):
            save_dir = "/data2/amnjoshi/checkpoints"
        else:
            save_dir = os.path.join(os.path.dirname(__file__), 'logs')
        save_dir = os.path.join(save_dir, dataset)
        os.makedirs(save_dir, exist_ok = True)
        return save_dir
class MetricLogger(object):
    """Accumulates per-epoch metric values and writes them out as CSV.

    Concrete subclasses implement `update_metrics()` to feed data into
    the metric objects. After each epoch, `compile_epoch()` snapshots
    every metric's `compute()` value as one row, and `save()` dumps the
    accumulated rows to `<file>.csv`.
    """
    def __init__(self, metrics, file):
        if not isinstance(metrics, dict):
            raise TypeError("Expected a dictionary of metrics.")
        self.metrics: Dict[str, Metric] = metrics
        # The output file must live in an existing directory; its
        # extension is normalized to `.csv` regardless of what is given.
        if not os.path.exists(os.path.dirname(file)):
            raise NotADirectoryError(
                f"The directory of the file {file} does not exist.")
        self.out_file = os.path.splitext(file)[0] + ".csv"
        self.log_outputs = []
    def update_metrics(self, *args) -> None:
        # Subclass responsibility.
        raise NotImplementedError()
    def update(self, *args):
        self.update_metrics(*args)
    def compile_epoch(self):
        """Snapshot every metric's current value as one output row."""
        row = []
        for metric in self.metrics.values():
            value = metric.compute()
            if isinstance(value, torch.Tensor):
                value = value.item()
            row.append(value)
        self.log_outputs.append(row)
    def save(self):
        """Write all compiled epochs to the CSV output file."""
        if not self.log_outputs:
            sys.stderr.write("MetricLogger: No metrics to save!")
            return
        # Compile metrics into a CSV format: one column per metric,
        # one row per compiled epoch.
        frame = pd.DataFrame(self.log_outputs,
                             columns = list(self.metrics.keys()))
        frame.to_csv(self.out_file)
# Ported from PyTorch Lightning v1.3.0.
def auto_move_data(fn: Callable) -> Callable:
    """
    Decorator for :class:`~pytorch_lightning.core.lightning.LightningModule` methods for which
    input arguments should be moved automatically to the correct device.
    It has no effect if applied to a method of an object that is not an instance of
    :class:`~pytorch_lightning.core.lightning.LightningModule` and is typically applied to ``__call__``
    or ``forward``.
    Args:
        fn: A LightningModule method for which the arguments should be moved to the device
            the parameters are on.
    Example::
        # directly in the source code
        class LitModel(LightningModule):
            @auto_move_data
            def forward(self, x):
                return x
        # or outside
        LitModel.forward = auto_move_data(LitModel.forward)
        model = LitModel()
        model = model.to('cuda')
        model(torch.zeros(1, 3))
        # input gets moved to device
        # tensor([[0., 0., 0.]], device='cuda:0')
    """
    @wraps(fn)
    def auto_transfer_args(self, *args, **kwargs):
        # Imported lazily so that merely importing this module does not
        # require pytorch_lightning to be installed.
        from pytorch_lightning import LightningModule
        # Non-LightningModule receivers are passed through unchanged.
        if not isinstance(self, LightningModule):
            return fn(self, *args, **kwargs)
        # Move every positional and keyword argument onto the module's device.
        args, kwargs = self.transfer_batch_to_device((args, kwargs), device=self.device, dataloader_idx=None)
        return fn(self, *args, **kwargs)
    return auto_transfer_args
| 4,718 | 30.885135 | 109 | py |
AgML | AgML-main/experiments/benchmarking/map_evaluation.py | import torch
import numpy as np
from tqdm import tqdm
def _scalar_to_array(*args):
"""Converts 0-dimensional scalar arrays to 1-d arrays."""
cvt = lambda x: np.expand_dims(x, 0) if x.ndim == 0 else x
outs = [cvt(arg) for arg in args]
return outs[0] if len(args) == 1 else outs
def _add_truth_scores_to_annotations(annotations):
"""Adds a `score` element of 1.0 to ground truth annotations.
As for the mean average precision method, boxes are expected to be
in the format [image_idx, class, *score*, x1, y1, x2, y2], but
ground truth elements do not have a score, this method takes
elements of the form [image_idx, class, x1, y1, x2, y2], and adds
a dummy `score` element of 1.0 to satisfy the MAP method.
"""
a = annotations # short-hand
for idx in range(len(annotations)):
prior, after = a[idx][:2], a[idx][2:]
a[idx] = [*prior, 1.0, *after]
import torch
from collections import Counter
def intersection_over_union(boxes_preds, boxes_labels, box_format = "midpoint"):
    """
    Calculates intersection over union

    Parameters:
        boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
        boxes_labels (tensor): Correct Labels of Boxes (BATCH_SIZE, 4)
        box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)

    Returns:
        tensor: Intersection over union for all examples

    Raises:
        ValueError: If `box_format` is not "midpoint" or "corners".
    """
    # Slicing idx:idx+1 in order to keep tensor dimensionality.
    # Doing ... in indexing if there would be additional dimensions,
    # like for Yolo algorithm which would have (N, S, S, 4) in shape.
    if box_format == "midpoint":
        # Boxes given as (cx, cy, w, h): recover corner coordinates.
        box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
        box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
        box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
        box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
        box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
        box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
        box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
        box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
    elif box_format == "corners":
        # Boxes already given as (x1, y1, x2, y2).
        box1_x1 = boxes_preds[..., 0:1]
        box1_y1 = boxes_preds[..., 1:2]
        box1_x2 = boxes_preds[..., 2:3]
        box1_y2 = boxes_preds[..., 3:4]
        box2_x1 = boxes_labels[..., 0:1]
        box2_y1 = boxes_labels[..., 1:2]
        box2_x2 = boxes_labels[..., 2:3]
        box2_y2 = boxes_labels[..., 3:4]
    else:
        # Previously an unknown format fell through to a NameError on
        # the undefined corner variables; fail fast with a clear message.
        raise ValueError(f"Unknown box_format: {box_format!r}")

    x1 = torch.max(box1_x1, box2_x1)
    y1 = torch.max(box1_y1, box2_y1)
    x2 = torch.min(box1_x2, box2_x2)
    y2 = torch.min(box1_y2, box2_y2)
    # Need clamp(0) in case they do not intersect, then we want intersection to be 0
    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
    box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
    return intersection / (box1_area + box2_area - intersection + 1e-6)
def mean_average_precision(
        pred_boxes, true_boxes, iou_threshold = 0.5,
        box_format = "midpoint", num_classes = 20, use_bar = False
):
    """
    Calculates mean average precision

    Parameters:
        pred_boxes (list): list of lists containing all bboxes with each bboxes
        specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2]
        true_boxes (list): Similar as pred_boxes except all the correct ones
        iou_threshold (float): threshold where predicted bboxes is correct
        box_format (str): "midpoint" or "corners" used to specify bboxes
        num_classes (int): number of classes
        use_bar (bool): whether to show a tqdm progress bar over classes

    Returns:
        float: mAP value across all classes given a specific IoU threshold
    """
    # list storing all AP for respective classes
    average_precisions = []
    # Ground-truth rows may arrive without a confidence score (6 values
    # instead of 7); splice in a dummy score of 1.0 so indexing below is
    # uniform. Guarded against an empty list (previously an IndexError).
    if true_boxes and len(true_boxes[0]) == 6:
        my_new_boxes = true_boxes.copy()
        _add_truth_scores_to_annotations(my_new_boxes)
        true_boxes = my_new_boxes
    # used for numerical stability later on
    epsilon = 1e-6
    classes = range(num_classes)
    if use_bar:
        classes = tqdm(classes, leave = False)
    for c in classes:
        detections = []
        ground_truths = []
        # Go through all predictions and targets,
        # and only add the ones that belong to the
        # current class c
        for detection in pred_boxes:
            if detection[1] == c:
                detections.append(detection)
        for true_box in true_boxes:
            if true_box[1] == c:
                ground_truths.append(true_box)
        # Find the amount of gt bboxes for each training example: e.g.
        # if img 0 has 3 and img 1 has 5, Counter gives {0: 3, 1: 5};
        # each count is then replaced by a zero tensor of that length,
        # used to mark which ground truths have already been matched.
        amount_bboxes = Counter([gt[0] for gt in ground_truths])
        for key, val in amount_bboxes.items():
            amount_bboxes[key] = torch.zeros(val)
        # sort by box probabilities which is index 2
        detections.sort(key = lambda x: x[2], reverse = True)
        TP = torch.zeros((len(detections)))
        FP = torch.zeros((len(detections)))
        total_true_bboxes = len(ground_truths)
        # If none exists for this class then we can safely skip
        if total_true_bboxes == 0:
            continue
        for detection_idx, detection in enumerate(detections):
            # Only take out the ground_truths that have the same
            # training idx as detection. (An unused `num_gts` local
            # was removed here.)
            ground_truth_img = [
                bbox for bbox in ground_truths if bbox[0] == detection[0]
            ]
            best_iou = 0
            best_gt_idx = 0  # guard against unbound use below
            for idx, gt in enumerate(ground_truth_img):
                iou = intersection_over_union(
                    torch.tensor(detection[3:]),
                    torch.tensor(gt[3:]),
                    box_format = box_format,
                )
                if iou > best_iou:
                    best_iou = iou
                    best_gt_idx = idx
            if best_iou > iou_threshold:
                # only detect each ground truth box once
                if amount_bboxes[detection[0]][best_gt_idx] == 0:
                    # true positive and mark this bounding box as seen
                    TP[detection_idx] = 1
                    amount_bboxes[detection[0]][best_gt_idx] = 1
                else:
                    FP[detection_idx] = 1
            # if IOU is lower than the threshold, the detection is a
            # false positive
            else:
                FP[detection_idx] = 1
        TP_cumsum = torch.cumsum(TP, dim = 0)
        FP_cumsum = torch.cumsum(FP, dim = 0)
        recalls = TP_cumsum / (total_true_bboxes + epsilon)
        precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon)
        # Prepend the (precision=1, recall=0) starting point.
        precisions = torch.cat((torch.tensor([1]), precisions))
        recalls = torch.cat((torch.tensor([0]), recalls))
        # torch.trapz for numerical integration
        average_precisions.append(torch.trapz(precisions, recalls))
    if len(average_precisions) == 0:
        return torch.tensor(0.0)
    return sum(average_precisions) / len(average_precisions)
class MeanAveragePrecision(object):
    """Stores and calculates the mean average precision over data.

    Prediction and ground-truth samples are accumulated via `update()`
    (or `batch_update()` for batches); `compute()` then evaluates the
    mAP over everything collected so far using the module-level
    `mean_average_precision()` routine.
    """
    def __init__(self, num_classes = 1, use_bar = False):
        self._num_classes = num_classes
        # Store all of the ground truth and prediction data.
        self._prediction_data = []
        self._ground_truth_data = []
        # Track the number of times that a data sample is added,
        # and any past MAP values which are computed.
        self._num_updates = 0
        self._prior_maps = {}
        # Whether to use a progress bar.
        self._use_bar = use_bar
    def batch_update(self, pred_data, gt_data):
        """Same as `update()`, but for batches of data."""
        for pred_d, gt_d in zip(pred_data, gt_data):
            self.update(pred_d, gt_d)
    def update(self, pred_data, gt_data):
        """Update the tracker with prediction and ground truth data.
        The arguments `pred_data` and `gt_data` should be either
        dictionaries (with the following keys), or lists of values
        which correspond in order to the same keys listed below:
        - `pred_data`: `boxes`, `labels`, and `scores`.
        - `gt_data`: `boxes` and `labels`.
        Note: To update a batch of data, use `batch_update()`.
        """
        # Get the relevant data from the input arguments.
        if isinstance(pred_data, dict):
            pred_boxes, pred_labels, pred_scores = \
                pred_data['boxes'], pred_data['labels'], pred_data['scores']
        else:
            pred_boxes, pred_labels, pred_scores = pred_data
        if isinstance(gt_data, dict):
            gt_boxes, gt_labels = \
                gt_data['boxes'], gt_data['labels']
        else:
            gt_boxes, gt_labels = gt_data
        # Format the data: collapse singleton dimensions, then promote
        # scalars back to 1-d arrays so the zips below always iterate.
        pred_boxes = np.squeeze(pred_boxes)
        pred_labels = np.squeeze(pred_labels)
        pred_scores = np.squeeze(pred_scores)
        pred_labels, gt_labels, pred_scores = \
            _scalar_to_array(pred_labels, gt_labels, pred_scores)
        if pred_boxes.ndim == 1:
            pred_boxes = np.expand_dims(pred_boxes, axis = 0)
        if gt_boxes.ndim == 1:
            gt_boxes = np.expand_dims(gt_boxes, axis = 0)
        # Create the data in the proper format. Labels are shifted down
        # by one so class IDs are zero-based for the mAP routine.
        for bbox, label in zip(gt_boxes, gt_labels):
            self._ground_truth_data.append(
                [self._num_updates, int(label - 1), *bbox])
        # NOTE: a redundant re-check of `pred_boxes.ndim` (already
        # performed above) was removed here.
        for bbox, label, score in zip(pred_boxes, pred_labels, pred_scores):
            self._prediction_data.append(
                [self._num_updates, int(label - 1), score, *bbox])
        # Increment the update number.
        self._num_updates += 1
    @property
    def historical_data(self):
        """Returns historical mAP over past data."""
        return self._prior_maps
    def compute(self, iou_threshold = 0.5):
        """Computes the mAP with the data."""
        ap = mean_average_precision(
            self._prediction_data,
            self._ground_truth_data,
            num_classes = self._num_classes,
            iou_threshold = iou_threshold,
            use_bar = self._use_bar)
        self._prior_maps[self._num_updates] = ap
        return ap
    def reset(self):
        """Resets the internal lists."""
        del self._prediction_data
        del self._ground_truth_data
        self._prediction_data = []
        self._ground_truth_data = []
        self._num_updates = 0
| 11,134 | 35.749175 | 84 | py |
AgML | AgML-main/experiments/benchmarking/accuracy_evaluation.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import pytorch_lightning as pl
import agml
from classification_lightning import ClassificationBenchmark
from torchmetrics import Accuracy
# Define device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def run_evaluation(model, name):
    """Runs evaluation for categorical accuracy.

    Rebuilds the same seeded 80/10/10 split used at training time for
    the AgML dataset `name`, runs `model` over the test split, and
    returns the overall categorical accuracy as a numpy scalar.
    """
    # Create and load the test dataset. The fixed seed makes the
    # shuffle/split reproduce the training-time partition.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(name)
    loader.shuffle()
    loader.split(0.8, 0.1, 0.1)
    loader.batch(batch_size = 16)
    loader.resize_images('imagenet')
    loader.normalize_images('imagenet')
    loader.labels_to_one_hot()
    ds = loader.test_data.as_torch_dataset()
    # Create the metric.
    acc = Accuracy(num_classes = loader.num_classes)
    # Run inference for all of the images in the test dataset.
    for i in tqdm(range(len(ds)), leave = False):
        image, y = ds[i]
        y_pred = model(image.to(device))
        # Labels are one-hot; argmax recovers the class indices.
        acc(y_pred.detach().cpu(), torch.argmax(y, 1).cpu())
    # Compute the final accuracy over the whole test set.
    return acc.compute().detach().cpu().numpy()
def make_checkpoint(name):
    """Builds an evaluation-ready model from the saved checkpoint for `name`."""
    # Checkpoints are stored per-dataset under a fixed root directory.
    weights_file = os.path.join(
        "/data2/amnjoshi/final/classification_checkpoints", name, "final_model.pth")
    weights = torch.load(weights_file, map_location = 'cpu')
    benchmark = ClassificationBenchmark(dataset = name)
    benchmark.load_state_dict(weights)
    benchmark.eval().to(device)
    return benchmark
def evaluate(names, log_file = None):
    """Evaluates each named dataset and writes accuracies to a CSV file."""
    print(f"Running accuracy evaluation for {names}.")
    # Default the log file to the current working directory.
    if log_file is None:
        log_file = os.path.join(os.getcwd(), 'accuracy_evaluation.csv')
    # Evaluate every dataset, keyed by its (string) name.
    results = {}
    progress = tqdm(names)
    for name in progress:
        model = make_checkpoint(name)
        progress.set_description(f"Evaluating {name}")
        if hasattr(name, 'name'):
            # Unwrap AgML data-source objects into their string names.
            name = name.name
        results[name] = run_evaluation(model, name)
    # Dump the accumulated results, one row per dataset.
    table = pd.DataFrame(columns = ('name', 'accuracy'))
    for key, value in results.items():
        table.loc[len(table.index)] = [key, value]
    table.to_csv(log_file)
if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--log_file', type = str, default = None,
        help = "The name of the output log file.")
    args = ap.parse_args()
    # Resolve the dataset selection: a single valid dataset name is used
    # as-is; 'all' selects every image-classification dataset; 'except'
    # selects everything but the names that follow it; otherwise the
    # arguments are treated as an explicit list of dataset names.
    if args.dataset[0] in agml.data.public_data_sources(ml_task = 'image_classification'):
        datasets = args.dataset[0]
    else:
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'image_classification')]
        elif args.dataset[0] == 'except':
            exclude_datasets = args.dataset[1:]
            datasets = [
                dataset for dataset in agml.data.public_data_sources(
                    ml_task = 'image_classification')
                if dataset.name not in exclude_datasets]
        else:
            datasets = args.dataset
    evaluate(datasets, args.log_file)
| 3,960 | 30.688 | 90 | py |
AgML | AgML-main/experiments/benchmarking/map_evaluation_multiple.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import pytorch_lightning as pl
import agml
from detection_lightning import AgMLDatasetAdaptor, EfficientDetModel
from mean_average_precision_torch import MeanAveragePrecision
def run_evaluation(model, name):
    """Evaluates `model` on AgML dataset `name` for mAP @ [0.5,0.95]."""
    # Fix the seed so the 80/10/10 split is reproducible.
    pl.seed_everything(2499751)
    data_loader = agml.data.AgMLDataLoader(name)
    data_loader.shuffle()
    data_loader.split(0.8, 0.1, 0.1)
    return run_evaluation_by_loader(model, data_loader)
def run_evaluation_by_loader(model, loader):
    """Runs evaluation for a class for mAP @ [0.5,0.95].

    Runs `model` over the loader's test split and returns the mean of
    the mAP values computed at IoU thresholds 0.50, 0.55, ..., 0.95.
    """
    # Ten evenly-spaced thresholds from 0.50 to 0.95 inclusive.
    iou_thresholds = np.linspace(
        0.5, 0.95, int(np.round((0.95 - .5) / .05) + 1))
    # Create the adaptor and load the test dataset.
    ds = AgMLDatasetAdaptor(loader.test_data)
    # Create the metric.
    ma = MeanAveragePrecision(num_classes = loader.num_classes)
    # Run inference for all of the images in the test dataset.
    for i in tqdm(range(len(ds)), leave = False):
        image, bboxes, labels, _ = ds.get_image_and_labels_by_idx(i)
        pred_boxes, pred_labels, pred_conf = model.predict([image])
        pred_boxes = np.squeeze(pred_boxes)
        ma.update([pred_boxes, pred_labels, pred_conf], [bboxes, labels])
    # Compute the mAP for all of the thresholds and average them.
    map_values = [ma.compute(thresh).detach().cpu().numpy()
                  for thresh in iou_thresholds]
    return np.mean(map_values)
def make_checkpoint(name, path = None, num_classes = None):
    """Builds an EfficientDet model for `name`, optionally loading weights."""
    # Infer the class count from the dataset source unless given explicitly.
    if num_classes is None:
        num_classes = agml.data.source(name).num_classes
    model = EfficientDetModel(
        num_classes = num_classes,
        architecture = 'tf_efficientdet_d4')
    if path is not None:
        # Restore pretrained weights from the given checkpoint file.
        weights = torch.load(path, map_location = 'cpu')
        model.load_state_dict(weights)
    model.eval().cuda()
    return model
def evaluate_different_benchmarks(paths, names, log_file = None):
    """Runs the evaluation for different pretrained weights.

    For every dataset in `names`, each checkpoint in `paths` is loaded
    and evaluated; results are collected into a DataFrame whose rows
    are the checkpoint run names (grandparent directory of each path)
    and whose columns are the datasets, then written to `log_file`.
    """
    # Hoisted out of the per-name loop where it was re-imported on
    # every iteration.
    from collections import defaultdict

    print(f"Running mAP evaluation for {paths}.")
    # Create the log file.
    if log_file is None:
        log_file = os.path.join(os.getcwd(), 'map_evaluation.csv')
    # Run the evaluation.
    df = pd.DataFrame(columns = names, index = [
        os.path.basename(os.path.dirname(os.path.dirname(p))) for p in paths])
    for name in names:
        log_contents = {}
        bar = tqdm(paths)
        # NOTE(review): this defaultdict always yields 1 for any key, so
        # every checkpoint is evaluated with num_classes = 1 — confirm
        # this is intentional.
        ncs = defaultdict(lambda: 1)
        for path in bar:
            nc = ncs[os.path.basename(path).split('-')[0]]
            ckpt = make_checkpoint(name, path = path, num_classes = nc)
            bar.set_description(f"Evaluating {name} @ {path} @ nc = {nc}")
            if hasattr(name, 'name'):
                name = name.name
            log_contents[os.path.basename(os.path.dirname(os.path.dirname(path)))] \
                = run_evaluation(ckpt, name)
        # Save the results.
        df[name] = log_contents.values()
    df.to_csv(log_file)
def evaluate_per_class(dataset, path = None, log_file = None):
    """Runs the evaluation for each class in a dataset.

    Returns a dict mapping each class name in `dataset` to the
    mAP @ [0.5,0.95] of a single-class model evaluated on only that
    class's data.
    """
    print(f"Running mAP evaluation for {dataset}.")
    # Create the log file. Note: `log_file` is not actually written
    # here; the caller saves the returned dict.
    if log_file is None:
        log_file = os.path.join(os.getcwd(), 'map_evaluation_per_class.csv')
    # Construct the super-loader with a fixed seed for reproducibility.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(dataset)
    loader.shuffle()
    # Create the loop for each class.
    # Run the evaluation.
    lit = {}
    for cl in range(agml.data.source(dataset).num_classes):
        # Class numbers are 1-based in the loader.
        cls = cl + 1
        new_loader = loader.take_class(cls)
        new_loader.split(train = 0.8, val = 0.1, test = 0.1)
        # NOTE(review): the checkpoint is always built for
        # 'grape_detection_californiaday' with num_classes = 1, and the
        # progress message hard-codes 'fruit_detection_worldwide',
        # regardless of the `dataset` argument — confirm intended.
        ckpt = make_checkpoint('grape_detection_californiaday', path = path, num_classes = 1)
        print(f"Evaluating fruit_detection_worldwide @ class '{loader.num_to_class[cls]}'")
        result = run_evaluation_by_loader(ckpt, new_loader)
        # Save the results.
        lit[loader.num_to_class[cls]] = result
    return lit
if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--log_file', type = str, default = None,
        help = "The name of the output log file.")
    ap.add_argument(
        '--per-class-for-dataset', action = 'store_true',
        default = False, help = "Whether to generate benchmarks per class.")
    args = ap.parse_args()
    if args.per_class_for_dataset:
        # Per-class mode: evaluate the dataset once per pretraining
        # regime (COCO-only, grape weights, AMG weights) and collect a
        # DataFrame of per-class results for each.
        results = []
        for path_type in [None, '/data2/amnjoshi/detection-models/grape.pth',
                          '/data2/amnjoshi/detection-models/amg.pth']:
            results.append(evaluate_per_class(args.dataset[0], path = path_type, log_file = args.log_file))
        print(results)
        loader = agml.data.AgMLDataLoader('fruit_detection_worldwide')
        df = pd.DataFrame(columns = loader.classes, index = ['COCO', 'GRAPE', 'AMG'])
        for result, typ in zip(results, ['COCO', 'GRAPE', 'AMG']):
            df.loc[typ] = result
        df.to_csv(args.log_file)
    else:
        # Dataset selection: a single valid name is used as-is; 'all'
        # selects every object-detection dataset; 'except' excludes the
        # names that follow; otherwise use the explicit list given.
        if args.dataset[0] in agml.data.public_data_sources(ml_task = 'object_detection'):
            datasets = args.dataset[0]
        else:
            if args.dataset[0] == 'all':
                datasets = [ds for ds in agml.data.public_data_sources(
                    ml_task = 'object_detection')]
            elif args.dataset[0] == 'except':
                exclude_datasets = args.dataset[1:]
                datasets = [
                    dataset for dataset in agml.data.public_data_sources(
                        ml_task = 'object_detection')
                    if dataset.name not in exclude_datasets]
            else:
                datasets = args.dataset
        evaluate_different_benchmarks(
            glob.glob('/data2/amnjoshi/flood-grape/ejb-cc/**/*.pth', recursive = True),
            datasets, args.log_file)
| 6,711 | 36.082873 | 107 | py |
AgML | AgML-main/experiments/benchmarking/segmentation_lightning_pretrained.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
from collections import OrderedDict
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from torchmetrics import IoU
from torchvision.models.segmentation import deeplabv3_resnet50
import agml
import albumentations as A
from tools import gpus, checkpoint_dir, MetricLogger
class DeepLabV3Transfer(nn.Module):
    """Represents a transfer learning DeepLabV3 model.

    This is the base benchmarking model for semantic segmentation,
    using the DeepLabV3 model with a ResNet50 backbone. Note that
    torchvision's own pretrained weights are never used
    (`pretrained = False` below); when `pretrained` is requested, the
    backbone weights come from our own checkpoint via
    `load_checkpoint()` (loaded with `strict = False`, so non-matching
    keys are silently ignored), and the backbone is frozen unless
    `unfreeze_backbone` is set.
    """
    def __init__(self, num_classes, pretrained = False,
                 unfreeze_backbone = False):
        super(DeepLabV3Transfer, self).__init__()
        self.base = deeplabv3_resnet50(
            pretrained = False,
            num_classes = num_classes)
        if pretrained:
            self.base.load_state_dict(load_checkpoint(), strict = False)
            if not unfreeze_backbone:
                # Freeze the backbone so only the segmentation head trains.
                for parameter in self.base.backbone.parameters():
                    parameter.requires_grad = False
    def forward(self, x, **kwargs):  # noqa
        return self.base(x)
def load_checkpoint():
    """Loads a ResNet50 pretrained checkpoint.

    Reads a fixed Lightning checkpoint from disk and remaps the
    `net.base.*` weight keys to the `backbone.*` names that the
    DeepLabV3 state dict expects; all other keys are dropped.
    """
    ckpt_path = "/data2/amnjoshi/resnet50_pretrained/checkpoints/" \
                "plant_village_classification-epoch42-val_loss_0.05.ckpt"
    state = torch.load(ckpt_path, map_location = 'cpu')['state_dict']
    return OrderedDict(
        (key.replace('net.base.', 'backbone.'), value)
        for key, value in state.items()
        if key.startswith('net.base'))
def dice_loss(y_pred, y):
    """Soft Dice loss for segmentation.

    Supports multi-class targets shaped (N, C, H, W) and binary targets
    shaped (N, H, W). Returns `1 - mean Dice coefficient` over the
    batch; the 1e-6 terms guard against division by zero on empty masks.
    """
    y = y.float()
    # Derive (channels, height, width) from the target shape; a 3-d
    # target is a single-channel (binary) mask. The original code used
    # a bare `except:` around tuple unpacking for this dispatch.
    if y.dim() == 4:  # multi-class segmentation: (N, C, H, W)
        c, h, w = y.shape[1:]
    else:  # binary segmentation: (N, H, W)
        h, w = y.shape[1:]
        c = 1
    pred_flat = torch.reshape(y_pred, [-1, c * h * w])
    y_flat = torch.reshape(y, [-1, c * h * w])
    intersection = 2.0 * torch.sum(pred_flat * y_flat, dim = 1) + 1e-6
    denominator = torch.sum(pred_flat, dim = 1) + torch.sum(y_flat, dim = 1) + 1e-6
    return 1. - torch.mean(intersection / denominator)
def dice_metric(y_pred, y):
    """Dice coefficient: 2|A.B| / (|A| + |B|), with 1.0 for two empty masks."""
    total = y_pred.sum() + y.sum()
    if total == 0.0:
        # Both masks empty: perfect agreement by convention.
        return 1.0
    overlap = (y_pred * y).sum()
    return 2.0 * overlap / total
class SegmentationBenchmark(pl.LightningModule):
    """Represents a semantic segmentation benchmark model.

    Wraps a `DeepLabV3Transfer` network in a LightningModule, using
    BCE-with-logits loss for binary datasets and the soft Dice loss for
    multi-class ones, and tracking IoU during training and validation.
    """
    def __init__(self, dataset, pretrained = False,
                 unfreeze_backbone = False, save_dir = None):
        # Initialize the module.
        super(SegmentationBenchmark, self).__init__()
        # Construct the network.
        self._source = agml.data.source(dataset)
        self._pretrained = pretrained
        self.net = DeepLabV3Transfer(
            self._source.num_classes,
            self._pretrained,
            unfreeze_backbone = unfreeze_backbone
        )
        # Construct the loss for training.
        if self._source.num_classes == 1:
            self.loss = nn.BCEWithLogitsLoss()
        else:
            self.loss = dice_loss
        # Construct the IoU metric (+1 accounts for the background class).
        self.iou = IoU(self._source.num_classes + 1)
        # Add a metric calculator that writes per-epoch CSV logs.
        # NOTE(review): `self._version` is assumed to be provided by the
        # LightningModule/logger machinery — confirm it exists here.
        self.metric_logger = SegmentationMetricLogger({
            'iou': IoU(self._source.num_classes + 1)},
            os.path.join(save_dir, f'logs-{self._version}.csv'))
        self._sanity_check_passed = False
    def forward(self, x):
        return self.net.forward(x)
    def calculate_loss(self, y_pred, y):
        # Dice loss expects long targets; BCE expects float targets.
        if self._source.num_classes != 1:
            return self.loss(y_pred, y.long())
        return self.loss(y_pred, y.float())
    def training_step(self, batch, *args, **kwargs):  # noqa
        x, y = batch
        # DeepLabV3 returns a dict; 'out' holds the segmentation logits.
        y_pred = self(x)['out'].float().squeeze()
        loss = self.calculate_loss(y_pred, y)
        iou = self.iou(y_pred, y.int())
        self.log('iou', iou.item(), prog_bar = True)
        return {
            'loss': loss,
        }
    def validation_step(self, batch, *args, **kwargs):  # noqa
        x, y = batch
        y_pred = self(x)['out'].float().squeeze()
        val_loss = self.calculate_loss(y_pred, y)
        self.log('val_loss', val_loss.item(), prog_bar = True)
        val_iou = self.iou(y_pred, y.int())
        # Skip metric logging during Lightning's sanity-check pass so
        # the CSV only contains real validation epochs.
        if self._sanity_check_passed:
            self.metric_logger.update_metrics(y_pred, y.int())
        self.log('val_iou', val_iou.item(), prog_bar = True)
        return {
            'val_loss': val_loss,
        }
    def configure_optimizers(self):
        return torch.optim.Adam(self.net.parameters())
    def get_progress_bar_dict(self):
        # Drop the noisy version number from the progress bar display.
        tqdm_dict = super(SegmentationBenchmark, self).get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict
    def on_validation_epoch_end(self) -> None:
        # The first validation epoch is the sanity check; ignore it.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        self.metric_logger.compile_epoch()
    def on_fit_end(self) -> None:
        self.metric_logger.save()
# Calculate and log the metrics.
class SegmentationMetricLogger(MetricLogger):
    """MetricLogger that feeds prediction/target pairs to each metric."""
    def update_metrics(self, y_pred, y_true) -> None:
        # Update on CPU copies so GPU tensors are not retained.
        for metric in self.metrics.values():
            metric.update(y_pred.cpu(), y_true.cpu())
# Build the data loaders.
def build_loaders(name):
    """Builds (train, val, test) torch datasets for AgML dataset `name`."""
    # Fixed seed so the split matches the one used at evaluation time.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(name)
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    loader.batch(batch_size = 8)
    loader.resize_images('imagenet')
    # loader.normalize_images('imagenet')
    loader.mask_to_channel_basis()
    train_data = loader.train_data
    # Light augmentation on the training split only.
    train_data.transform(transform = A.RandomRotate90())
    train_ds = train_data.copy().as_torch_dataset()
    val_ds = loader.val_data.as_torch_dataset()
    # Keep validation order deterministic.
    val_ds.shuffle_data = False
    test_ds = loader.test_data.as_torch_dataset()
    return train_ds, val_ds, test_ds
def train(dataset, epochs, save_dir = None, pretrained = False,
          unfreeze_backbone = False, overwrite = None):
    """Constructs the training loop and trains a model.

    Args:
        dataset: Name of the AgML semantic segmentation dataset.
        epochs: Number of epochs to train for.
        save_dir: Base checkpoint directory (resolved via `checkpoint_dir`).
        pretrained: Whether to start from a pretrained model.
        unfreeze_backbone: Whether backbone weights should be trainable.
        overwrite: If truthy, regenerate benchmarks even when checkpoints
            for this dataset already exist.
    """
    save_dir = checkpoint_dir(save_dir, dataset)
    log_dir = save_dir.replace('checkpoints', 'logs')
    # Skip datasets that already have a full set of generated checkpoints,
    # unless `overwrite` was requested.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Set up the checkpoint saving callback. IoU is a higher-is-better
    # metric, so the mode must be 'max' — with the previous 'min' the
    # callback kept the *worst* checkpoints by validation IoU.
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'max',
            filename = f"{dataset}" + "-epoch{epoch:02d}-val_loss_{val_loss:.2f}",
            monitor = 'val_iou',
            save_top_k = 3,
            auto_insert_metric_name = False
        ),
    ]
    # Construct the model.
    model = SegmentationBenchmark(
        dataset = dataset, save_dir = save_dir,
        pretrained = pretrained,
        unfreeze_backbone = unfreeze_backbone)
    # Construct the data loaders (the test split is unused during fitting).
    train_ds, val_ds, test_ds = build_loaders(dataset)
    # Create the loggers.
    loggers = [
        WandbLogger(save_dir = os.path.dirname(log_dir),
                    name = f'{dataset}-pretrained',
                    project = 'segmentation-experiments')
    ]
    # Create the trainer and train the model.
    msg = f"Training dataset {dataset}!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(),
        callbacks = callbacks, logger = loggers,
        log_every_n_steps = 5)
    trainer.fit(
        model = model,
        train_dataloaders = train_ds,
        val_dataloaders = val_ds)
if __name__ == '__main__':
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--regenerate-existing', action = 'store_true',
        default = False, help = "Whether to re-generate existing benchmarks.")
    ap.add_argument(
        '--pretrained', action = 'store_true',
        default = False, help = "Whether to load a pretrained model.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 20,
        help = "How many epochs to train for. Default is 20.")
    ap.add_argument(
        '--unfreeze-backbone', action = 'store_true',
        default = False, help = "Whether to not freeze backbone weights.")
    args = ap.parse_args()
    # Train the model.
    if args.dataset[0] in agml.data.public_data_sources(ml_task = 'semantic_segmentation'):
        # Bug fix: this branch previously dropped `pretrained` and
        # `overwrite`, so `--pretrained` and `--regenerate-existing` were
        # silently ignored for single-dataset runs. Pass the full set of
        # flags, consistent with the multi-dataset branch below.
        train(dataset = args.dataset[0],
              epochs = args.epochs,
              pretrained = args.pretrained,
              save_dir = args.checkpoint_dir,
              unfreeze_backbone = args.unfreeze_backbone,
              overwrite = args.regenerate_existing)
    else:
        # 'all' (or any unknown name) falls through here; 'all' expands to
        # every public semantic segmentation dataset.
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'semantic_segmentation')]
        else:
            datasets = args.dataset
        for ds in datasets:
            train(dataset = ds,
                  epochs = args.epochs,
                  pretrained = args.pretrained,
                  save_dir = args.checkpoint_dir,
                  unfreeze_backbone = args.unfreeze_backbone,
                  overwrite = args.regenerate_existing)