repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/auxilary/multi_head_att.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
    """Scaled dot-product attention.

    Computes softmax(q @ k^T / sqrt(d_k)) @ v, optionally masking out
    positions where ``mask == 0`` and applying dropout to the attention
    weights.
    """
    # Similarity of every query against every key, scaled by sqrt(d_k).
    attn_logits = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Broadcast the mask over the head dimension; masked slots receive a
        # large negative logit so softmax assigns them ~zero weight.
        attn_logits = attn_logits.masked_fill(mask.unsqueeze(1) == 0, -1e9)
    weights = F.softmax(attn_logits, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, v)
class MultiHeadAttention(nn.Module):
    """Multi-head attention layer.

    Projects q/k/v into ``heads`` subspaces of size ``d_model // heads``,
    runs scaled dot-product attention in each subspace, then recombines
    the heads through a final linear layer.
    """

    def __init__(self, heads, d_model, dropout=0.1):
        super().__init__()

        self.d_model = d_model
        self.d_k = d_model // heads
        self.h = heads

        # NOTE: module creation order (q, v, k) is preserved so that
        # parameter initialization and state_dict layout stay identical.
        self.q_linear = nn.Linear(d_model, d_model)
        self.v_linear = nn.Linear(d_model, d_model)
        self.k_linear = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(d_model, d_model)

    def forward(self, q, k, v, mask=None):
        bs = q.size(0)

        def split_heads(proj, x):
            # (bs, seq, d_model) -> (bs, heads, seq, d_k)
            return proj(x).view(bs, -1, self.h, self.d_k).transpose(1, 2)

        q = split_heads(self.q_linear, q)
        k = split_heads(self.k_linear, k)
        v = split_heads(self.v_linear, v)

        # Per-head scaled dot-product attention (with dropout on weights).
        attended = attention(q, k, v, self.d_k, mask, self.dropout)

        # Merge heads back: (bs, heads, seq, d_k) -> (bs, seq, d_model).
        merged = attended.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
        return self.out(merged)
| 1,719 | 28.152542 | 78 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.scene_parser.rcnn.structures.boxlist_ops import cat_boxlist
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_nms
from lib.scene_parser.rcnn.structures.boxlist_ops import remove_small_boxes
from ..utils import cat
from .utils import permute_and_flatten
class RPNPostProcessor(torch.nn.Module):
    """
    Performs post-processing on the outputs of the RPN boxes, before feeding the
    proposals to the heads
    """

    def __init__(
        self,
        pre_nms_top_n,
        post_nms_top_n,
        nms_thresh,
        min_size,
        box_coder=None,
        fpn_post_nms_top_n=None,
        fpn_post_nms_per_batch=True,
    ):
        """
        Arguments:
            pre_nms_top_n (int): top-scoring anchors kept per feature level
                before NMS
            post_nms_top_n (int): max proposals kept per level by NMS
            nms_thresh (float): IoU threshold used by NMS
            min_size (int): proposals with a side smaller than this are dropped
            box_coder (BoxCoder): decodes regression deltas relative to
                anchors; defaults to unit weights
            fpn_post_nms_top_n (int): proposals kept after merging all FPN
                levels; defaults to post_nms_top_n
        """
        super(RPNPostProcessor, self).__init__()
        self.pre_nms_top_n = pre_nms_top_n
        self.post_nms_top_n = post_nms_top_n
        self.nms_thresh = nms_thresh
        self.min_size = min_size

        if box_coder is None:
            box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
        self.box_coder = box_coder

        if fpn_post_nms_top_n is None:
            fpn_post_nms_top_n = post_nms_top_n
        self.fpn_post_nms_top_n = fpn_post_nms_top_n
        self.fpn_post_nms_per_batch = fpn_post_nms_per_batch

    def add_gt_proposals(self, proposals, targets):
        """
        Append the ground-truth boxes to the proposal list (training only).

        Arguments:
            proposals: list[BoxList]
            targets: list[BoxList]
        """
        # Get the device we're operating on
        device = proposals[0].bbox.device

        # keep only the boxes from the targets, dropping all extra fields
        gt_boxes = [target.copy_with_fields([]) for target in targets]

        # later cat of bbox requires all fields to be present for all bbox
        # so we need to add a dummy for objectness that's missing
        for gt_box in gt_boxes:
            gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))

        proposals = [
            cat_boxlist((proposal, gt_box))
            for proposal, gt_box in zip(proposals, gt_boxes)
        ]

        return proposals

    def forward_for_single_feature_map(self, anchors, objectness, box_regression):
        """
        Decode, score and filter proposals for one feature level.

        Arguments:
            anchors: list[BoxList], one per image in the batch
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors: (N, A*H*W) probabilities
        objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        # keep only the top pre_nms_top_n anchors per image before NMS
        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        # gather the anchors matching the selected top-k predictions
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        # decode regression deltas into absolute xyxy proposals
        proposals = self.box_coder.decode(
            box_regression.view(-1, 4), concat_anchors.view(-1, 4)
        )

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            # clamp to the image, drop tiny boxes, then NMS down to
            # post_nms_top_n proposals
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result

    def forward(self, anchors, objectness, box_regression, targets=None):
        """
        Arguments:
            anchors: list[list[BoxList]] indexed as anchors[image][level]
            objectness: list[tensor], one entry per feature level
            box_regression: list[tensor], one entry per feature level
        Returns:
            boxlists (list[BoxList]): the post-processed anchors, after
            applying box decoding and NMS
        """
        sampled_boxes = []
        num_levels = len(objectness)
        # transpose to anchors[level][image] to match the per-level tensors
        anchors = list(zip(*anchors))
        for a, o, b in zip(anchors, objectness, box_regression):
            sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))

        # regroup per image and concatenate the per-level proposals
        boxlists = list(zip(*sampled_boxes))
        boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]

        # with FPN, apply a second top-N selection across all levels
        if num_levels > 1:
            boxlists = self.select_over_all_levels(boxlists)

        # append ground-truth bboxes to proposals
        if self.training and targets is not None:
            boxlists = self.add_gt_proposals(boxlists, targets)

        return boxlists

    def select_over_all_levels(self, boxlists):
        """Keep only fpn_post_nms_top_n proposals across all FPN levels."""
        num_images = len(boxlists)
        # different behavior during training and during testing:
        # during training, post_nms_top_n is over *all* the proposals combined, while
        # during testing, it is over the proposals for each image
        # NOTE: it should be per image, and not per batch. However, to be consistent
        # with Detectron, the default is per batch (see Issue #672)
        if self.training and self.fpn_post_nms_per_batch:
            objectness = torch.cat(
                [boxlist.get_field("objectness") for boxlist in boxlists], dim=0
            )
            box_sizes = [len(boxlist) for boxlist in boxlists]
            post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
            _, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True)
            # build one batch-wide keep mask, then split it back per image
            # NOTE(review): uint8 masks for indexing are deprecated in newer
            # PyTorch in favor of bool — confirm against the pinned version.
            inds_mask = torch.zeros_like(objectness, dtype=torch.uint8)
            inds_mask[inds_sorted] = 1
            inds_mask = inds_mask.split(box_sizes)
            for i in range(num_images):
                boxlists[i] = boxlists[i][inds_mask[i]]
        else:
            # per-image selection (testing, or per-image training mode)
            for i in range(num_images):
                objectness = boxlists[i].get_field("objectness")
                post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
                _, inds_sorted = torch.topk(
                    objectness, post_nms_top_n, dim=0, sorted=True
                )
                boxlists[i] = boxlists[i][inds_sorted]
        return boxlists
def make_rpn_postprocessor(config, rpn_box_coder, is_train):
    """Build an RPNPostProcessor from the config.

    Train and test differ only in their top-N thresholds; the NMS
    threshold, minimum size and per-batch flag are shared.
    """
    if is_train:
        fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN
        pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TRAIN
        post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TRAIN
    else:
        fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST
        pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST
        post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST

    return RPNPostProcessor(
        pre_nms_top_n=pre_nms_top_n,
        post_nms_top_n=post_nms_top_n,
        nms_thresh=config.MODEL.RPN.NMS_THRESH,
        min_size=config.MODEL.RPN.MIN_SIZE,
        box_coder=rpn_box_coder,
        fpn_post_nms_top_n=fpn_post_nms_top_n,
        fpn_post_nms_per_batch=config.MODEL.RPN.FPN_POST_NMS_PER_BATCH,
    )
| 7,773 | 36.555556 | 87 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/anchor_generator.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
import torch
from torch import nn
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
class BufferList(nn.Module):
    """Ordered container of buffers — the buffer analogue of
    nn.ParameterList."""

    def __init__(self, buffers=None):
        super(BufferList, self).__init__()
        if buffers is not None:
            self.extend(buffers)

    def extend(self, buffers):
        start = len(self)
        for idx, buf in enumerate(buffers):
            # Register each buffer under a numeric name so it follows the
            # module across devices and appears in state_dict.
            self.register_buffer(str(start + idx), buf)
        return self

    def __len__(self):
        return len(self._buffers)

    def __iter__(self):
        return iter(self._buffers.values())
class AnchorGenerator(nn.Module):
    """
    For a set of image sizes and feature maps, computes a set
    of anchors
    """

    def __init__(
        self,
        sizes=(128, 256, 512),
        aspect_ratios=(0.5, 1.0, 2.0),
        anchor_strides=(8, 16, 32),
        straddle_thresh=0,
    ):
        super(AnchorGenerator, self).__init__()

        if len(anchor_strides) == 1:
            # single feature map (non-FPN): all sizes share one stride
            anchor_stride = anchor_strides[0]
            cell_anchors = [
                generate_anchors(anchor_stride, sizes, aspect_ratios).float()
            ]
        else:
            # FPN: one set of base anchors per (stride, size) pair
            if len(anchor_strides) != len(sizes):
                raise RuntimeError("FPN should have #anchor_strides == #sizes")

            cell_anchors = [
                generate_anchors(
                    anchor_stride,
                    size if isinstance(size, (tuple, list)) else (size,),
                    aspect_ratios
                ).float()
                for anchor_stride, size in zip(anchor_strides, sizes)
            ]
        self.strides = anchor_strides
        # stored as buffers so base anchors follow the module across devices
        self.cell_anchors = BufferList(cell_anchors)
        self.straddle_thresh = straddle_thresh

    def num_anchors_per_location(self):
        # number of base anchors placed at each feature-map cell, per level
        return [len(cell_anchors) for cell_anchors in self.cell_anchors]

    def grid_anchors(self, grid_sizes):
        """Tile the per-cell base anchors over every feature-map position."""
        anchors = []
        for size, stride, base_anchors in zip(
            grid_sizes, self.strides, self.cell_anchors
        ):
            grid_height, grid_width = size
            device = base_anchors.device
            # x/y offsets of each feature-map cell in input-image coordinates
            shifts_x = torch.arange(
                0, grid_width * stride, step=stride, dtype=torch.float32, device=device
            )
            shifts_y = torch.arange(
                0, grid_height * stride, step=stride, dtype=torch.float32, device=device
            )
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)
            # shift both corners of each base anchor by the cell offset
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)

            anchors.append(
                (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
            )

        return anchors

    def add_visibility_to(self, boxlist):
        """Mark anchors lying (mostly) inside the image via a 'visibility' field."""
        image_width, image_height = boxlist.size
        anchors = boxlist.bbox
        if self.straddle_thresh >= 0:
            # an anchor may straddle the border by at most straddle_thresh px
            inds_inside = (
                (anchors[..., 0] >= -self.straddle_thresh)
                & (anchors[..., 1] >= -self.straddle_thresh)
                & (anchors[..., 2] < image_width + self.straddle_thresh)
                & (anchors[..., 3] < image_height + self.straddle_thresh)
            )
        else:
            # negative threshold disables the check: everything is visible
            device = anchors.device
            inds_inside = torch.ones(anchors.shape[0], dtype=torch.uint8, device=device)
        boxlist.add_field("visibility", inds_inside)

    def forward(self, image_list, feature_maps):
        """Return anchors as a list (per image) of lists (per feature level)."""
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
        anchors = []
        for i, (image_height, image_width) in enumerate(image_list.image_sizes):
            anchors_in_image = []
            for anchors_per_feature_map in anchors_over_all_feature_maps:
                boxlist = BoxList(
                    anchors_per_feature_map, (image_width, image_height), mode="xyxy"
                )
                self.add_visibility_to(boxlist)
                anchors_in_image.append(boxlist)
            anchors.append(anchors_in_image)
        return anchors
def make_anchor_generator(config):
    """Create an AnchorGenerator from the RPN section of the config."""
    anchor_sizes = config.MODEL.RPN.ANCHOR_SIZES
    aspect_ratios = config.MODEL.RPN.ASPECT_RATIOS
    anchor_stride = config.MODEL.RPN.ANCHOR_STRIDE
    straddle_thresh = config.MODEL.RPN.STRADDLE_THRESH

    if config.MODEL.RPN.USE_FPN:
        # FPN expects one stride per anchor size (one per pyramid level)
        assert len(anchor_stride) == len(
            anchor_sizes
        ), "FPN should have len(ANCHOR_STRIDE) == len(ANCHOR_SIZES)"
    else:
        assert len(anchor_stride) == 1, "Non-FPN should have a single ANCHOR_STRIDE"

    return AnchorGenerator(
        anchor_sizes, aspect_ratios, anchor_stride, straddle_thresh
    )
def make_anchor_generator_retinanet(config):
    """Create an AnchorGenerator for RetinaNet.

    Each base size is expanded into `scales_per_octave` sizes spaced by
    powers of `octave` (e.g. octave=2, scales_per_octave=3 gives scale
    factors 2^0, 2^(1/3), 2^(2/3)).
    """
    aspect_ratios = config.MODEL.RETINANET.ASPECT_RATIOS
    anchor_strides = config.MODEL.RETINANET.ANCHOR_STRIDES
    straddle_thresh = config.MODEL.RETINANET.STRADDLE_THRESH
    octave = config.MODEL.RETINANET.OCTAVE
    scales_per_octave = config.MODEL.RETINANET.SCALES_PER_OCTAVE
    anchor_sizes = config.MODEL.RETINANET.ANCHOR_SIZES
    assert len(anchor_strides) == len(anchor_sizes), "Only support FPN now"

    new_anchor_sizes = [
        tuple(
            octave ** (i / float(scales_per_octave)) * size
            for i in range(scales_per_octave)
        )
        for size in anchor_sizes
    ]

    return AnchorGenerator(
        tuple(new_anchor_sizes), aspect_ratios, anchor_strides, straddle_thresh
    )
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_anchors(
    stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
    """Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
    are centered on stride / 2, have (approximate) sqrt areas of the specified
    sizes, and aspect ratios as given.
    """
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the documented drop-in replacement (same float64 dtype),
    # so this no longer raises AttributeError on modern NumPy.
    return _generate_anchors(
        stride,
        np.array(sizes, dtype=float) / stride,
        np.array(aspect_ratios, dtype=float),
    )
def _generate_anchors(base_size, scales, aspect_ratios):
    """Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
    """
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; use the
    # builtin float (same 64-bit dtype) so this works on modern NumPy.
    anchor = np.array([1, 1, base_size, base_size], dtype=float) - 1
    # first vary aspect ratio at constant area, then scale each result
    anchors = _ratio_enum(anchor, aspect_ratios)
    anchors = np.vstack(
        [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]
    )
    return torch.from_numpy(anchors)
def _whctrs(anchor):
"""Return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack(
(
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
)
)
return anchors
def _ratio_enum(anchor, ratios):
    """Enumerate a set of anchors for each aspect ratio wrt an anchor."""
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    # hold the area (approximately) constant while varying aspect ratio
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """Enumerate a set of anchors for each scale wrt an anchor."""
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    # grow width and height by each scale factor around the same center
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
| 9,951 | 33.317241 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains functions specific to computing the RPN losses
"""
import torch
from torch.nn import functional as F
from .utils import concat_box_prediction_layers
from ..balanced_positive_negative_sampler import BalancedPositiveNegativeSampler
from ..utils import cat
from lib.scene_parser.rcnn.layers import smooth_l1_loss
from lib.scene_parser.rcnn.modeling.matcher import Matcher
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_iou
from lib.scene_parser.rcnn.structures.boxlist_ops import cat_boxlist
class RPNLossComputation(object):
    """
    This class computes the RPN loss.
    """

    def __init__(self, proposal_matcher, fg_bg_sampler, box_coder,
                 generate_labels_func):
        """
        Arguments:
            proposal_matcher (Matcher): assigns a GT index (or a negative
                sentinel) to every anchor based on IoU
            fg_bg_sampler (BalancedPositiveNegativeSampler): subsamples a
                fixed-size batch of positive/negative anchors for the loss
            box_coder (BoxCoder): encodes GT boxes as regression targets
                relative to anchors
            generate_labels_func (callable): maps matched targets to
                per-anchor binary labels
        """
        # self.target_preparator = target_preparator
        self.proposal_matcher = proposal_matcher
        self.fg_bg_sampler = fg_bg_sampler
        self.box_coder = box_coder
        # RPN needs no fields copied from the targets, only boxes
        self.copied_fields = []
        self.generate_labels_func = generate_labels_func
        # anchors in these categories get label -1 (ignored by the sampler)
        self.discard_cases = ['not_visibility', 'between_thresholds']

    def match_targets_to_anchors(self, anchor, target, copied_fields=[]):
        """Return, for each anchor, the GT box it matched (with matched_idxs)."""
        match_quality_matrix = boxlist_iou(target, anchor)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # RPN doesn't need any fields from target
        # for creating the labels, so clear them all
        target = target.copy_with_fields(copied_fields)
        # get the targets corresponding GT for each anchor
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets

    def prepare_targets(self, anchors, targets):
        """Compute per-anchor labels (1 fg / 0 bg / -1 ignore) and
        regression targets for every image."""
        labels = []
        regression_targets = []
        for anchors_per_image, targets_per_image in zip(anchors, targets):
            matched_targets = self.match_targets_to_anchors(
                anchors_per_image, targets_per_image, self.copied_fields
            )

            matched_idxs = matched_targets.get_field("matched_idxs")
            labels_per_image = self.generate_labels_func(matched_targets)
            labels_per_image = labels_per_image.to(dtype=torch.float32)

            # Background (negative examples)
            bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[bg_indices] = 0

            # discard anchors that go out of the boundaries of the image
            if "not_visibility" in self.discard_cases:
                labels_per_image[~anchors_per_image.get_field("visibility")] = -1

            # discard indices that are between thresholds
            if "between_thresholds" in self.discard_cases:
                inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
                labels_per_image[inds_to_discard] = -1

            # compute regression targets
            regression_targets_per_image = self.box_coder.encode(
                matched_targets.bbox, anchors_per_image.bbox
            )

            labels.append(labels_per_image)
            regression_targets.append(regression_targets_per_image)

        return labels, regression_targets

    def __call__(self, anchors, objectness, box_regression, targets):
        """
        Arguments:
            anchors (list[BoxList])
            objectness (list[Tensor])
            box_regression (list[Tensor])
            targets (list[BoxList])
        Returns:
            objectness_loss (Tensor)
            box_loss (Tensor)
        """
        # merge the per-level anchors of each image into one BoxList
        anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
        labels, regression_targets = self.prepare_targets(anchors, targets)
        # subsample a fixed-size batch of positive/negative anchors
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)

        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

        objectness, box_regression = \
            concat_box_prediction_layers(objectness, box_regression)

        objectness = objectness.squeeze()

        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)

        # box loss is computed on positives only, but normalized by the
        # total number of sampled anchors
        box_loss = smooth_l1_loss(
            box_regression[sampled_pos_inds],
            regression_targets[sampled_pos_inds],
            beta=1.0 / 9,
            size_average=False,
        ) / (sampled_inds.numel())

        objectness_loss = F.binary_cross_entropy_with_logits(
            objectness[sampled_inds], labels[sampled_inds]
        )

        return objectness_loss, box_loss
# This function should be overwritten in RetinaNet
def generate_rpn_labels(matched_targets):
    """Default RPN labeling: an anchor is positive iff it was matched to a
    ground-truth box (matched_idxs >= 0). Overridden for RetinaNet."""
    return matched_targets.get_field("matched_idxs") >= 0
def make_rpn_loss_evaluator(cfg, box_coder):
    """Build the RPN loss evaluator from config thresholds and sampling
    parameters."""
    # allow_low_quality_matches guarantees every GT box gets at least one
    # matched anchor even if no anchor clears the FG IoU threshold
    matcher = Matcher(
        cfg.MODEL.RPN.FG_IOU_THRESHOLD,
        cfg.MODEL.RPN.BG_IOU_THRESHOLD,
        allow_low_quality_matches=True,
    )

    fg_bg_sampler = BalancedPositiveNegativeSampler(
        cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, cfg.MODEL.RPN.POSITIVE_FRACTION
    )

    return RPNLossComputation(
        matcher, fg_bg_sampler, box_coder, generate_rpn_labels
    )
| 5,780 | 35.588608 | 87 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Utility functions for manipulating the prediction layers
"""
from ..utils import cat
import torch
def permute_and_flatten(layer, N, A, C, H, W):
    """Reshape a (N, A*C, H, W) prediction map into (N, H*W*A, C) so that
    per-anchor predictions line up with the flattened anchor ordering."""
    return (
        layer.view(N, -1, C, H, W)
        .permute(0, 3, 4, 1, 2)
        .reshape(N, -1, C)
    )
def concat_box_prediction_layers(box_cls, box_regression):
    """Flatten the per-level objectness and regression maps and concatenate
    them into two 2-D tensors, (-1, C) and (-1, 4), matching the layout in
    which the anchor labels were built."""
    cls_per_level = []
    reg_per_level = []
    # for each feature level, permute the outputs to make them be in the
    # same format as the labels. Note that the labels are computed for
    # all feature levels concatenated, so we keep the same representation
    # for the objectness and the box_regression
    for cls_map, reg_map in zip(box_cls, box_regression):
        N, AxC, H, W = cls_map.shape
        A = reg_map.shape[1] // 4
        C = AxC // A
        cls_per_level.append(permute_and_flatten(cls_map, N, A, C, H, W))
        reg_per_level.append(permute_and_flatten(reg_map, N, A, 4, H, W))
    # concatenate on the first dimension (representing the feature levels),
    # to take into account the way the labels were generated (with all
    # feature maps being concatenated as well)
    box_cls = cat(cls_per_level, dim=1).reshape(-1, C)
    box_regression = cat(reg_per_level, dim=1).reshape(-1, 4)
    return box_cls, box_regression
| 1,679 | 35.521739 | 80 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/rpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from lib.scene_parser.rcnn.modeling import registry
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
from lib.scene_parser.rcnn.modeling.rpn.retinanet.retinanet import build_retinanet
from .loss import make_rpn_loss_evaluator
from .anchor_generator import make_anchor_generator
from .inference import make_rpn_postprocessor
class RPNHeadConvRegressor(nn.Module):
    """
    A simple RPN Head for classification and bbox regression
    """

    def __init__(self, cfg, in_channels, num_anchors):
        """
        Arguments:
            cfg : config
            in_channels (int): number of channels of the input feature
            num_anchors (int): number of anchors to be predicted
        """
        super(RPNHeadConvRegressor, self).__init__()
        # 1x1 convs: one objectness score and 4 box deltas per anchor
        self.cls_logits = nn.Conv2d(
            in_channels, num_anchors, kernel_size=1, stride=1
        )
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=1, stride=1
        )

        for layer in (self.cls_logits, self.bbox_pred):
            torch.nn.init.normal_(layer.weight, std=0.01)
            torch.nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        assert isinstance(x, (list, tuple))
        logits = []
        bbox_reg = []
        for feature in x:
            logits.append(self.cls_logits(feature))
            bbox_reg.append(self.bbox_pred(feature))
        return logits, bbox_reg
class RPNHeadFeatureSingleConv(nn.Module):
    """
    Adds a simple RPN Head with one conv to extract the feature
    """

    def __init__(self, cfg, in_channels):
        """
        Arguments:
            cfg : config
            in_channels (int): number of channels of the input feature
        """
        super(RPNHeadFeatureSingleConv, self).__init__()
        # 3x3 conv that preserves spatial size and channel count
        self.conv = nn.Conv2d(
            in_channels, in_channels, kernel_size=3, stride=1, padding=1
        )

        torch.nn.init.normal_(self.conv.weight, std=0.01)
        torch.nn.init.constant_(self.conv.bias, 0)

        self.out_channels = in_channels

    def forward(self, x):
        assert isinstance(x, (list, tuple))
        return [F.relu(self.conv(feature)) for feature in x]
@registry.RPN_HEADS.register("SingleConvRPNHead")
class RPNHead(nn.Module):
    """
    Adds a simple RPN Head with classification and regression heads
    """

    def __init__(self, cfg, in_channels, num_anchors):
        """
        Arguments:
            cfg : config
            in_channels (int): number of channels of the input feature
            num_anchors (int): number of anchors to be predicted
        """
        super(RPNHead, self).__init__()
        # shared 3x3 conv, then 1x1 heads for objectness and box deltas
        self.conv = nn.Conv2d(
            in_channels, in_channels, kernel_size=3, stride=1, padding=1
        )
        self.cls_logits = nn.Conv2d(
            in_channels, num_anchors, kernel_size=1, stride=1
        )
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=1, stride=1
        )

        for layer in (self.conv, self.cls_logits, self.bbox_pred):
            torch.nn.init.normal_(layer.weight, std=0.01)
            torch.nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        logits = []
        bbox_reg = []
        for feature in x:
            shared = F.relu(self.conv(feature))
            logits.append(self.cls_logits(shared))
            bbox_reg.append(self.bbox_pred(shared))
        return logits, bbox_reg
class RPNModule(torch.nn.Module):
    """
    Module for RPN computation. Takes feature maps from the backbone and outputs
    RPN proposals and losses. Works for both FPN and non-FPN.
    """

    def __init__(self, cfg, in_channels):
        super(RPNModule, self).__init__()

        self.cfg = cfg.clone()

        anchor_generator = make_anchor_generator(cfg)

        # the head type is configurable through the RPN_HEADS registry
        rpn_head = registry.RPN_HEADS[cfg.MODEL.RPN.RPN_HEAD]
        head = rpn_head(
            cfg, in_channels, anchor_generator.num_anchors_per_location()[0]
        )

        rpn_box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))

        # separate selectors: train and test differ in their top-N settings
        box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True)
        box_selector_test = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=False)

        loss_evaluator = make_rpn_loss_evaluator(cfg, rpn_box_coder)

        self.anchor_generator = anchor_generator
        self.head = head
        self.box_selector_train = box_selector_train
        self.box_selector_test = box_selector_test
        self.loss_evaluator = loss_evaluator

    def forward(self, images, features, targets=None):
        """
        Arguments:
            images (ImageList): images for which we want to compute the predictions
            features (list[Tensor]): features computed from the images that are
                used for computing the predictions. Each tensor in the list
                correspond to different feature levels
            targets (list[BoxList): ground-truth boxes present in the image (optional)
        Returns:
            boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
                image.
            losses (dict[Tensor]): the losses for the model during training. During
                testing, it is an empty dict.
        """
        objectness, rpn_box_regression = self.head(features)
        anchors = self.anchor_generator(images, features)

        if self.training:
            return self._forward_train(anchors, objectness, rpn_box_regression, targets)
        else:
            return self._forward_test(anchors, objectness, rpn_box_regression)

    def _forward_train(self, anchors, objectness, rpn_box_regression, targets):
        """Return (boxes, losses) for a training step."""
        if self.cfg.MODEL.RPN_ONLY:
            # When training an RPN-only model, the loss is determined by the
            # predicted objectness and rpn_box_regression values and there is
            # no need to transform the anchors into predicted boxes; this is an
            # optimization that avoids the unnecessary transformation.
            boxes = anchors
        else:
            # For end-to-end models, anchors must be transformed into boxes and
            # sampled into a training batch.
            # no_grad: proposal selection is not part of the gradient path
            with torch.no_grad():
                boxes = self.box_selector_train(
                    anchors, objectness, rpn_box_regression, targets
                )
        loss_objectness, loss_rpn_box_reg = self.loss_evaluator(
            anchors, objectness, rpn_box_regression, targets
        )
        losses = {
            "loss_objectness": loss_objectness,
            "loss_rpn_box_reg": loss_rpn_box_reg,
        }
        return boxes, losses

    def _forward_test(self, anchors, objectness, rpn_box_regression):
        """Return (boxes, {}) for inference; losses are not computed."""
        boxes = self.box_selector_test(anchors, objectness, rpn_box_regression)
        if self.cfg.MODEL.RPN_ONLY:
            # For end-to-end models, the RPN proposals are an intermediate state
            # and don't bother to sort them in decreasing score order. For RPN-only
            # models, the proposals are the final output and we return them in
            # high-to-low confidence order.
            inds = [
                box.get_field("objectness").sort(descending=True)[1] for box in boxes
            ]
            boxes = [box[ind] for box, ind in zip(boxes, inds)]
        return boxes, {}
def build_rpn(cfg, in_channels):
    """
    This gives the gist of it. Not super important because it doesn't change as much
    """
    # RetinaNet replaces the standard RPN module entirely
    if cfg.MODEL.RETINANET_ON:
        return build_retinanet(cfg, in_channels)

    return RPNModule(cfg, in_channels)
| 7,624 | 35.658654 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/retinanet/inference.py | import torch
from ..inference import RPNPostProcessor
from ..utils import permute_and_flatten
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
from lib.scene_parser.rcnn.modeling.utils import cat
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.scene_parser.rcnn.structures.boxlist_ops import cat_boxlist
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_nms
from lib.scene_parser.rcnn.structures.boxlist_ops import remove_small_boxes
class RetinaNetPostProcessor(RPNPostProcessor):
    """
    Performs post-processing on the outputs of the RetinaNet boxes.
    This is only used in the testing.
    """

    def __init__(
        self,
        pre_nms_thresh,
        pre_nms_top_n,
        nms_thresh,
        fpn_post_nms_top_n,
        min_size,
        num_classes,
        box_coder=None,
    ):
        """
        Arguments:
            pre_nms_thresh (float): score threshold applied before NMS
            pre_nms_top_n (int): max candidates kept per level before NMS
            nms_thresh (float): IoU threshold for per-class NMS
            fpn_post_nms_top_n (int): max detections kept per image overall
            min_size (int): minimum box side length
            num_classes (int): number of classes including background
            box_coder (BoxCoder): decodes regression deltas; defaults to
                the standard (10, 10, 5, 5) weights
        """
        # NOTE(review): the parent positional slots are
        # (pre_nms_top_n, post_nms_top_n, nms_thresh, min_size); passing
        # pre_nms_thresh into the first slot looks accidental but is
        # harmless because every attribute is re-assigned below.
        super(RetinaNetPostProcessor, self).__init__(
            pre_nms_thresh, 0, nms_thresh, min_size
        )
        self.pre_nms_thresh = pre_nms_thresh
        self.pre_nms_top_n = pre_nms_top_n
        self.nms_thresh = nms_thresh
        self.fpn_post_nms_top_n = fpn_post_nms_top_n
        self.min_size = min_size
        self.num_classes = num_classes

        if box_coder is None:
            box_coder = BoxCoder(weights=(10., 10., 5., 5.))
        self.box_coder = box_coder

    def add_gt_proposals(self, proposals, targets):
        """
        This function is not used in RetinaNet
        """
        pass

    def forward_for_single_feature_map(
            self, anchors, box_cls, box_regression):
        """
        Decode and filter detections for one feature level.

        Arguments:
            anchors: list[BoxList], one per image
            box_cls: tensor of size N, A * C, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = box_cls.device
        N, _, H, W = box_cls.shape
        A = box_regression.size(1) // 4
        C = box_cls.size(1) // A

        # put in the same format as anchors: (N, A*H*W, C) probabilities
        box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
        box_cls = box_cls.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)
        box_regression = box_regression.reshape(N, -1, 4)

        num_anchors = A * H * W

        # keep only (anchor, class) pairs above the score threshold,
        # capped at pre_nms_top_n per image
        candidate_inds = box_cls > self.pre_nms_thresh

        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

        results = []
        for per_box_cls, per_box_regression, per_pre_nms_top_n, \
        per_candidate_inds, per_anchors in zip(
            box_cls,
            box_regression,
            pre_nms_top_n,
            candidate_inds,
            anchors):

            # Sort and select TopN
            # TODO most of this can be made out of the loop for
            # all images.
            # TODO:Yang: Not easy to do. Because the numbers of detections are
            # different in each image. Therefore, this part needs to be done
            # per image.
            per_box_cls = per_box_cls[per_candidate_inds]

            per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n, sorted=False)

            # recover (anchor index, class index) for each kept candidate
            per_candidate_nonzeros = \
                    per_candidate_inds.nonzero()[top_k_indices, :]

            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1]
            # +1 shifts class ids past the implicit background class
            per_class += 1

            detections = self.box_coder.decode(
                per_box_regression[per_box_loc, :].view(-1, 4),
                per_anchors.bbox[per_box_loc, :].view(-1, 4)
            )

            boxlist = BoxList(detections, per_anchors.size, mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results

    # TODO very similar to filter_results from PostProcessor
    # but filter_results is per image
    # TODO Yang: solve this issue in the future. No good solution
    # right now.
    def select_over_all_levels(self, boxlists):
        """Run per-class NMS and cap detections per image across levels."""
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []
            # skip the background
            for j in range(1, self.num_classes):
                inds = (labels == j).nonzero().view(-1)

                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                # NMS within this class only
                boxlist_for_class = boxlist_nms(
                    boxlist_for_class, self.nms_thresh,
                    score_field="scores"
                )
                num_labels = len(boxlist_for_class)
                boxlist_for_class.add_field(
                    "labels", torch.full((num_labels,), j,
                                         dtype=torch.int64,
                                         device=scores.device)
                )
                result.append(boxlist_for_class)

            result = cat_boxlist(result)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                # kthvalue finds the score cutoff that keeps exactly
                # fpn_post_nms_top_n detections
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1
                )
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
def make_retinanet_postprocessor(config, rpn_box_coder, is_train):
    """Build the RetinaNet test-time post-processor from the config.

    `is_train` is accepted for interface parity with the other factory
    functions but does not influence construction.
    """
    retina_cfg = config.MODEL.RETINANET
    return RetinaNetPostProcessor(
        pre_nms_thresh=retina_cfg.INFERENCE_TH,
        pre_nms_top_n=retina_cfg.PRE_NMS_TOP_N,
        nms_thresh=retina_cfg.NMS_TH,
        fpn_post_nms_top_n=config.TEST.DETECTIONS_PER_IMG,
        min_size=0,
        num_classes=retina_cfg.NUM_CLASSES,
        box_coder=rpn_box_coder,
    )
| 6,937 | 34.579487 | 79 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/retinanet/loss.py | """
This file contains specific functions for computing losses on the RetinaNet
file
"""
import torch
from torch.nn import functional as F
from ..utils import concat_box_prediction_layers
from lib.scene_parser.rcnn.layers import smooth_l1_loss
from lib.scene_parser.rcnn.layers import SigmoidFocalLoss
from lib.scene_parser.rcnn.modeling.matcher import Matcher
from lib.scene_parser.rcnn.modeling.utils import cat
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_iou
from lib.scene_parser.rcnn.structures.boxlist_ops import cat_boxlist
from lib.scene_parser.rcnn.modeling.rpn.loss import RPNLossComputation
class RetinaNetLossComputation(RPNLossComputation):
    """
    This class computes the RetinaNet loss: sigmoid focal loss for
    classification plus smooth-L1 for box regression on positive anchors.
    """
    def __init__(self, proposal_matcher, box_coder,
                 generate_labels_func,
                 sigmoid_focal_loss,
                 bbox_reg_beta=0.11,
                 regress_norm=1.0):
        """
        Arguments:
            proposal_matcher (Matcher): anchor-to-GT matcher
            box_coder (BoxCoder): encodes regression targets
            generate_labels_func (callable): extracts labels from matched targets
            sigmoid_focal_loss (SigmoidFocalLoss): classification loss
            bbox_reg_beta (float): smooth-L1 transition point
            regress_norm (float): scaling on the regression normalizer
        """
        # NOTE(review): RPNLossComputation.__init__ is deliberately not
        # called; all state used by the inherited helpers (presumably
        # prepare_targets) is assigned here directly — confirm against
        # rpn/loss.py.
        self.proposal_matcher = proposal_matcher
        self.box_coder = box_coder
        self.box_cls_loss_func = sigmoid_focal_loss
        self.bbox_reg_beta = bbox_reg_beta
        self.copied_fields = ['labels']
        self.generate_labels_func = generate_labels_func
        self.discard_cases = ['between_thresholds']
        self.regress_norm = regress_norm
    def __call__(self, anchors, box_cls, box_regression, targets):
        """
        Arguments:
            anchors (list[BoxList]): per-image, per-level anchors
            box_cls (list[Tensor]): per-level classification logits
            box_regression (list[Tensor]): per-level regression deltas
            targets (list[BoxList]): ground-truth boxes
        Returns:
            retinanet_cls_loss (Tensor)
            retinanet_regression_loss (Tensor)
        """
        # Merge each image's per-level anchors into a single BoxList so they
        # line up with the flattened predictions.
        anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
        labels, regression_targets = self.prepare_targets(anchors, targets)
        N = len(labels)
        box_cls, box_regression = \
            concat_box_prediction_layers(box_cls, box_regression)
        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)
        # Only positive anchors (label > 0) contribute to the box loss.
        pos_inds = torch.nonzero(labels > 0).squeeze(1)
        retinanet_regression_loss = smooth_l1_loss(
            box_regression[pos_inds],
            regression_targets[pos_inds],
            beta=self.bbox_reg_beta,
            size_average=False,
        ) / (max(1, pos_inds.numel() * self.regress_norm))
        labels = labels.int()
        # Normalize by (#positives + batch size) to avoid division by zero
        # when an image has no positive anchors.
        retinanet_cls_loss = self.box_cls_loss_func(
            box_cls,
            labels
        ) / (pos_inds.numel() + N)
        return retinanet_cls_loss, retinanet_regression_loss
def generate_retinanet_labels(matched_targets):
    """Return the per-anchor classification labels stored on *matched_targets*.

    RetinaNet uses the matched ground-truth "labels" field directly, with
    no additional transformation.
    """
    return matched_targets.get_field("labels")
def make_retinanet_loss_evaluator(cfg, box_coder):
    """Assemble the RetinaNet loss evaluator from matcher and focal-loss
    settings found under cfg.MODEL.RETINANET."""
    retina_cfg = cfg.MODEL.RETINANET
    anchor_matcher = Matcher(
        retina_cfg.FG_IOU_THRESHOLD,
        retina_cfg.BG_IOU_THRESHOLD,
        allow_low_quality_matches=True,
    )
    focal_loss = SigmoidFocalLoss(
        retina_cfg.LOSS_GAMMA,
        retina_cfg.LOSS_ALPHA
    )
    return RetinaNetLossComputation(
        anchor_matcher,
        box_coder,
        generate_retinanet_labels,
        focal_loss,
        bbox_reg_beta=retina_cfg.BBOX_REG_BETA,
        regress_norm=retina_cfg.BBOX_REG_WEIGHT,
    )
| 3,505 | 31.462963 | 83 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/rpn/retinanet/retinanet.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from .inference import make_retinanet_postprocessor
from .loss import make_retinanet_loss_evaluator
from ..anchor_generator import make_anchor_generator_retinanet
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
class RetinaNetHead(torch.nn.Module):
    """
    Adds a RetinaNet head with classification and regression conv towers.
    """
    def __init__(self, cfg, in_channels):
        """
        Arguments:
            cfg: config node providing the MODEL.RETINANET options
            in_channels (int): number of channels of the input feature
        """
        super(RetinaNetHead, self).__init__()
        # TODO: Implement the sigmoid version first.
        # One sigmoid output per foreground class; background is implicit.
        num_classes = cfg.MODEL.RETINANET.NUM_CLASSES - 1
        num_anchors = len(cfg.MODEL.RETINANET.ASPECT_RATIOS) \
                        * cfg.MODEL.RETINANET.SCALES_PER_OCTAVE
        # Two parallel towers of NUM_CONVS (3x3 conv + ReLU) blocks that
        # preserve the channel count.
        cls_tower = []
        bbox_tower = []
        for i in range(cfg.MODEL.RETINANET.NUM_CONVS):
            cls_tower.append(
                nn.Conv2d(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            cls_tower.append(nn.ReLU())
            bbox_tower.append(
                nn.Conv2d(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            bbox_tower.append(nn.ReLU())
        self.add_module('cls_tower', nn.Sequential(*cls_tower))
        self.add_module('bbox_tower', nn.Sequential(*bbox_tower))
        self.cls_logits = nn.Conv2d(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1,
            padding=1
        )
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=3, stride=1,
            padding=1
        )
        # Initialization
        for modules in [self.cls_tower, self.bbox_tower, self.cls_logits,
                        self.bbox_pred]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)
        # retinanet_bias_init: set the classification bias so that
        # sigmoid(cls_logits) ~= PRIOR_PROB at initialization.
        prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        torch.nn.init.constant_(self.cls_logits.bias, bias_value)
    def forward(self, x):
        # x: list of feature maps; returns parallel lists of per-level
        # classification logits and box regression maps.
        logits = []
        bbox_reg = []
        for feature in x:
            logits.append(self.cls_logits(self.cls_tower(feature)))
            bbox_reg.append(self.bbox_pred(self.bbox_tower(feature)))
        return logits, bbox_reg
class RetinaNetModule(torch.nn.Module):
    """
    Module for RetinaNet computation. Takes feature maps from the backbone and
    produces RetinaNet outputs and losses. Only tested on FPN so far.
    """
    def __init__(self, cfg, in_channels):
        super(RetinaNetModule, self).__init__()
        self.cfg = cfg.clone()
        anchor_generator = make_anchor_generator_retinanet(cfg)
        head = RetinaNetHead(cfg, in_channels)
        # Fixed regression weights shared by loss encoding and test-time
        # decoding so the two stay consistent.
        box_coder = BoxCoder(weights=(10., 10., 5., 5.))
        box_selector_test = make_retinanet_postprocessor(cfg, box_coder, is_train=False)
        loss_evaluator = make_retinanet_loss_evaluator(cfg, box_coder)
        self.anchor_generator = anchor_generator
        self.head = head
        self.box_selector_test = box_selector_test
        self.loss_evaluator = loss_evaluator
    def forward(self, images, features, targets=None):
        """
        Arguments:
            images (ImageList): images for which we want to compute the predictions
            features (list[Tensor]): features computed from the images that are
                used for computing the predictions. Each tensor in the list
                correspond to different feature levels
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
        Returns:
            boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
                image.
            losses (dict[Tensor]): the losses for the model during training. During
                testing, it is an empty dict.
        """
        box_cls, box_regression = self.head(features)
        anchors = self.anchor_generator(images, features)
        if self.training:
            return self._forward_train(anchors, box_cls, box_regression, targets)
        else:
            return self._forward_test(anchors, box_cls, box_regression)
    def _forward_train(self, anchors, box_cls, box_regression, targets):
        # Training path: raw anchors pass through; only losses are computed.
        loss_box_cls, loss_box_reg = self.loss_evaluator(
            anchors, box_cls, box_regression, targets
        )
        losses = {
            "loss_retina_cls": loss_box_cls,
            "loss_retina_reg": loss_box_reg,
        }
        return anchors, losses
    def _forward_test(self, anchors, box_cls, box_regression):
        # Inference path: decode predictions and apply NMS; no losses.
        boxes = self.box_selector_test(anchors, box_cls, box_regression)
        return boxes, {}
def build_retinanet(cfg, in_channels):
    """Factory returning a configured :class:`RetinaNetModule`."""
    module = RetinaNetModule(cfg, in_channels)
    return module
| 5,303 | 33.666667 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/roi_heads/roi_heads.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .box_head.box_head import build_roi_box_head
class CombinedROIHeads(torch.nn.ModuleDict):
    """
    Combines a set of individual heads (for box prediction or masks) into a single
    head. Heads are registered by name via the ModuleDict constructor, so
    e.g. the ("box", head) pair becomes self.box.
    """
    def __init__(self, cfg, heads):
        super(CombinedROIHeads, self).__init__(heads)
        self.cfg = cfg.clone()
        # Share the box head's feature extractor with the mask/keypoint
        # heads when configured.
        # NOTE(review): build_roi_heads in this file only ever registers a
        # "box" head, so these branches assume mask/keypoint heads were
        # added elsewhere — self.mask / self.keypoint would otherwise raise
        # AttributeError. Confirm against the full model builder.
        if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            self.mask.feature_extractor = self.box.feature_extractor
        if cfg.MODEL.KEYPOINT_ON and cfg.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            self.keypoint.feature_extractor = self.box.feature_extractor
    def forward(self, features, proposals, targets=None):
        # TODO rename x to roi_box_features, if it doesn't increase memory consumption
        x, detections, loss_box = self.box(features, proposals, targets)
        return x, detections, loss_box
def build_roi_heads(cfg, in_channels):
    """Create the ROI heads for this model.

    Returns an empty list when no ROI heads apply (RetinaNet or RPN-only
    models); otherwise a :class:`CombinedROIHeads` wrapping the individual
    named heads.
    """
    if cfg.MODEL.RETINANET_ON:
        # RetinaNet is single-stage: it has no ROI heads at all.
        return []
    named_heads = []
    if not cfg.MODEL.RPN_ONLY:
        named_heads.append(("box", build_roi_box_head(cfg, in_channels)))
    if not named_heads:
        return named_heads
    # combine the individual heads into a single module
    return CombinedROIHeads(cfg, named_heads)
| 1,436 | 33.214286 | 93 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/roi_heads/box_head/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_nms
from lib.scene_parser.rcnn.structures.boxlist_ops import cat_boxlist
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
class PostProcessor(nn.Module):
    """
    From a set of classification scores, box regression and proposals,
    computes the post-processed boxes, and applies NMS to obtain the
    final results
    """
    def __init__(
        self,
        score_thresh=0.05,
        nms=0.5,
        detections_per_img=100,
        min_detections_per_img=0,
        box_coder=None,
        cls_agnostic_bbox_reg=False,
        bbox_aug_enabled=False,
        relation_on=False
    ):
        """
        Arguments:
            score_thresh (float): minimum class score kept in filtering
            nms (float): IoU threshold for per-class NMS
            detections_per_img (int): cap on detections per image
            min_detections_per_img (int): lower bound enforced (relation
                mode only) by retrying with smaller score thresholds
            box_coder (BoxCoder): decoder for regression deltas; defaults
                to weights (10., 10., 5., 5.)
            cls_agnostic_bbox_reg (bool): a single regressed box per
                proposal instead of one per class
            bbox_aug_enabled (bool): skip filtering here; bbox augmentation
                post-processes later
            relation_on (bool): use Neural-Motif-style filtering required
                by the relation head
        """
        super(PostProcessor, self).__init__()
        self.score_thresh = score_thresh
        self.nms = nms
        self.detections_per_img = detections_per_img
        self.min_detections_per_img = min_detections_per_img
        if box_coder is None:
            box_coder = BoxCoder(weights=(10., 10., 5., 5.))
        self.box_coder = box_coder
        self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
        self.bbox_aug_enabled = bbox_aug_enabled
        self.relation_on = relation_on
    def forward(self, x, boxes, skip_nms=False):
        """
        Arguments:
            x (tuple[tensor, tensor]): x contains the class logits
                and the box_regression from the model.
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image; must carry a "features"
                field (added by the box head).
            skip_nms (bool): keep the input boxes as-is (no decoding or
                filtering), only attaching score/label/feature fields.
        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra fields labels and scores
        """
        class_logit, box_regression = x
        class_prob = F.softmax(class_logit, -1)
        # TODO think about a representation of batch of boxes
        image_shapes = [box.size for box in boxes]
        boxes_per_image = [len(box) for box in boxes]
        features = [box.get_field("features") for box in boxes]
        concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
        if not skip_nms:
            if self.cls_agnostic_bbox_reg:
                # Only the last 4 columns (the single foreground box) are used.
                box_regression = box_regression[:, -4:]
            proposals = self.box_coder.decode(
                box_regression.view(sum(boxes_per_image), -1), concat_boxes
            )
            if self.cls_agnostic_bbox_reg:
                # Replicate the single box across classes so downstream
                # per-class indexing still works.
                proposals = proposals.repeat(1, class_prob.shape[1])
            proposals = proposals.split(boxes_per_image, dim=0)
        else:
            proposals = concat_boxes.split(boxes_per_image, dim=0)
        num_classes = class_prob.shape[1]
        class_prob = class_prob.split(boxes_per_image, dim=0)
        class_logit = class_logit.split(boxes_per_image, dim=0)
        results = []; idx = 0
        for prob, logit, boxes_per_img, features_per_img, image_shape in zip(
            class_prob, class_logit, proposals, features, image_shapes
        ):
            if not self.bbox_aug_enabled and not skip_nms:  # If bbox aug is enabled, we will do it later
                boxlist = self.prepare_boxlist(boxes_per_img, features_per_img, prob, logit, image_shape)
                boxlist = boxlist.clip_to_image(remove_empty=False)
                if not self.relation_on:
                    boxlist_filtered = self.filter_results(boxlist, num_classes)
                else:
                    # boxlist_pre = self.filter_results(boxlist, num_classes)
                    boxlist_filtered = self.filter_results_nm(boxlist, num_classes)
                    # to enforce minimum number of detections per image
                    # we will do a binary search on the confidence threshold
                    score_thresh = 0.05
                    while len(boxlist_filtered) < self.min_detections_per_img:
                        score_thresh /= 2.0
                        print(("\nNumber of proposals {} is too small, "
                               "retrying filter_results with score thresh"
                               " = {}").format(len(boxlist_filtered), score_thresh))
                        boxlist_filtered = self.filter_results_nm(boxlist, num_classes, thresh=score_thresh)
            else:
                # Pass-through path: wrap the raw boxes and copy fields from
                # the idx-th input BoxList.
                boxlist = BoxList(boxes_per_img, image_shape, mode="xyxy")
                boxlist.add_field("scores", prob[:,1:].max(1)[0])
                boxlist.add_field("logits", logit)
                boxlist.add_field("features", features_per_img)
                boxlist.add_field("labels", boxes[idx].get_field("labels"))
                boxlist.add_field("regression_targets", boxes[idx].bbox.clone().fill_(0.0))
                boxlist_filtered = boxlist
            idx += 1
            if len(boxlist) == 0:
                raise ValueError("boxlist shoud not be empty!")
            results.append(boxlist_filtered)
        return results
    def prepare_boxlist(self, boxes, features, scores, logits, image_shape):
        """
        Returns BoxList from `boxes` and adds probability scores information
        as an extra field
        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.
        `scores` has shape (#detection, #classes), where each row represents a list
        of object detection confidence scores for each of the object classes in the
        dataset (including the background class). `scores[i, j]`` corresponds to the
        box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        # Flatten to one (box, score) row per (proposal, class) pair.
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        boxlist.add_field("logits", logits)
        boxlist.add_field("features", features)
        return boxlist
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)
        logits = boxlist.get_field("logits").reshape(-1, num_classes)
        features = boxlist.get_field("features")
        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            features_j = features[inds]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class.add_field("features", features_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)
        result = cat_boxlist(result)
        number_of_detections = len(result)
        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            # kthvalue finds the weakest surviving score; keeping >= it
            # retains exactly detections_per_img boxes (modulo ties).
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
    def filter_results_nm(self, boxlist, num_classes, thresh=0.05):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS). Similar to Neural-Motif Network:
        each proposal keeps at most one box/label (its best surviving class),
        so per-proposal features stay aligned for the relation head.
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)
        logits = boxlist.get_field("logits").reshape(-1, num_classes)
        features = boxlist.get_field("features")
        # Classes whose best score anywhere exceeds thresh (+1 skips background).
        valid_cls = (scores[:, 1:].max(0)[0] > thresh).nonzero() + 1
        # nms_mask[i, j] = 1 iff proposal i survived NMS for class j.
        nms_mask = scores.clone()
        nms_mask.zero_()
        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in valid_cls.view(-1).cpu():
            scores_j = scores[:, j]
            boxes_j = boxes[:, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            # "idxs" remembers original proposal indices through NMS.
            boxlist_for_class.add_field("idxs", torch.arange(0, scores.shape[0]).long())
            # boxlist_for_class = boxlist_nms(
            #     boxlist_for_class, self.nms
            # )
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, 0.3
            )
            nms_mask[:, j][boxlist_for_class.get_field("idxs")] = 1
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)
        # Zero out scores suppressed by NMS, then keep each proposal's best
        # remaining class.
        dists_all = nms_mask * scores
        # filter duplicate boxes
        scores_pre, labels_pre = dists_all.max(1)
        inds_all = scores_pre.nonzero()
        assert inds_all.dim() != 0
        inds_all = inds_all.squeeze(1)
        labels_all = labels_pre[inds_all]
        scores_all = scores_pre[inds_all]
        features_all = features[inds_all]
        logits_all = logits[inds_all]
        # Flat index into the (proposal, class) box table.
        box_inds_all = inds_all * scores.shape[1] + labels_all
        result = BoxList(boxlist.bbox.view(-1, 4)[box_inds_all], boxlist.size, mode="xyxy")
        result.add_field("labels", labels_all)
        result.add_field("scores", scores_all)
        result.add_field("logits", logits_all)
        result.add_field("features", features_all)
        number_of_detections = len(result)
        # Keep the top detections_per_img boxes above thresh, ordered by score.
        vs, idx = torch.sort(scores_all, dim=0, descending=True)
        idx = idx[vs > thresh]
        if self.detections_per_img < idx.size(0):
            idx = idx[:self.detections_per_img]
        result = result[idx]
        return result
def make_roi_box_post_processor(cfg):
    """Build a :class:`PostProcessor` from the ROI-heads section of *cfg*.

    Fix: drop the unused ``use_fpn`` local (``cfg.MODEL.ROI_HEADS.USE_FPN``
    was read but never used); construction is otherwise unchanged.

    Arguments:
        cfg: config node providing MODEL.ROI_HEADS, MODEL.CLS_AGNOSTIC_BBOX_REG,
            TEST.BBOX_AUG.ENABLED and MODEL.RELATION_ON.

    Returns:
        PostProcessor: configured test-time box post-processor.
    """
    heads_cfg = cfg.MODEL.ROI_HEADS
    # Box decoder shared with the loss side so deltas decode consistently.
    box_coder = BoxCoder(weights=heads_cfg.BBOX_REG_WEIGHTS)
    postprocessor = PostProcessor(
        heads_cfg.SCORE_THRESH,
        heads_cfg.NMS,
        heads_cfg.DETECTIONS_PER_IMG,
        heads_cfg.MIN_DETECTIONS_PER_IMG,
        box_coder,
        cfg.MODEL.CLS_AGNOSTIC_BBOX_REG,
        cfg.TEST.BBOX_AUG.ENABLED,
        relation_on=cfg.MODEL.RELATION_ON
    )
    return postprocessor
| 12,133 | 41.575439 | 108 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/roi_heads/box_head/roi_box_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from lib.scene_parser.rcnn.modeling import registry
from lib.scene_parser.rcnn.modeling.backbone import resnet
from lib.scene_parser.rcnn.modeling.poolers import Pooler
from lib.scene_parser.rcnn.modeling.make_layers import group_norm
from lib.scene_parser.rcnn.modeling.make_layers import make_fc
@registry.ROI_BOX_FEATURE_EXTRACTORS.register("ResNet50Conv5ROIFeatureExtractor")
class ResNet50Conv5ROIFeatureExtractor(nn.Module):
    """ROI feature extractor that pools region features and runs them
    through a freshly built ResNet stage-4 (conv5) head."""
    def __init__(self, config, in_channels):
        super(ResNet50Conv5ROIFeatureExtractor, self).__init__()
        resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        # Only the last ResNet stage (index=4, 3 blocks) is built here; it
        # consumes pooled ROI features rather than full feature maps.
        stage = resnet.StageSpec(index=4, block_count=3, return_features=False)
        head = resnet.ResNetHead(
            block_module=config.MODEL.RESNETS.TRANS_FUNC,
            stages=(stage,),
            num_groups=config.MODEL.RESNETS.NUM_GROUPS,
            width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP,
            stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1,
            stride_init=None,
            res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS,
            dilation=config.MODEL.RESNETS.RES5_DILATION
        )
        self.pooler = pooler
        self.head = head
        # Exposed so the predictor can size its input layer.
        self.out_channels = head.out_channels
    def forward(self, x, proposals):
        # x: list of backbone feature maps; proposals: list[BoxList].
        x = self.pooler(x, proposals)
        x = self.head(x)
        return x
@registry.ROI_BOX_FEATURE_EXTRACTORS.register("FPN2MLPFeatureExtractor")
class FPN2MLPFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification: ROI pooling followed by two
    fully-connected layers (fc6/fc7).
    """
    def __init__(self, cfg, in_channels):
        super(FPN2MLPFeatureExtractor, self).__init__()
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        # Flattened pooled feature size feeding the first FC layer.
        input_size = in_channels * resolution ** 2
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN
        self.pooler = pooler
        self.fc6 = make_fc(input_size, representation_size, use_gn)
        self.fc7 = make_fc(representation_size, representation_size, use_gn)
        self.out_channels = representation_size
    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        # Flatten per-ROI maps before the MLP.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        return x
@registry.ROI_BOX_FEATURE_EXTRACTORS.register("FPNXconv1fcFeatureExtractor")
class FPNXconv1fcFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification: ROI pooling, a stack of 3x3 convs
    (optionally with GroupNorm), then a single fully-connected layer.
    """
    def __init__(self, cfg, in_channels):
        super(FPNXconv1fcFeatureExtractor, self).__init__()
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        self.pooler = pooler
        use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN
        conv_head_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM
        num_stacked_convs = cfg.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS
        dilation = cfg.MODEL.ROI_BOX_HEAD.DILATION
        xconvs = []
        for ix in range(num_stacked_convs):
            xconvs.append(
                nn.Conv2d(
                    in_channels,
                    conv_head_dim,
                    kernel_size=3,
                    stride=1,
                    padding=dilation,
                    dilation=dilation,
                    # GroupNorm supplies the affine shift, so drop conv bias.
                    bias=False if use_gn else True
                )
            )
            in_channels = conv_head_dim
            if use_gn:
                xconvs.append(group_norm(in_channels))
            xconvs.append(nn.ReLU(inplace=True))
        self.add_module("xconvs", nn.Sequential(*xconvs))
        # Initialize conv weights; bias only exists when GN is off.
        for modules in [self.xconvs,]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    if not use_gn:
                        torch.nn.init.constant_(l.bias, 0)
        input_size = conv_head_dim * resolution ** 2
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        self.fc6 = make_fc(input_size, representation_size, use_gn=False)
        self.out_channels = representation_size
    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        x = self.xconvs(x)
        # Flatten per-ROI maps before the FC layer.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc6(x))
        return x
def make_roi_box_feature_extractor(cfg, in_channels):
    """Look up the configured feature-extractor class in the registry and
    instantiate it."""
    extractor_cls = registry.ROI_BOX_FEATURE_EXTRACTORS[
        cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR
    ]
    return extractor_cls(cfg, in_channels)
| 5,419 | 34.657895 | 81 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/roi_heads/box_head/box_head.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
class ROIBoxHead(torch.nn.Module):
    """
    Generic Box Head class: pools ROI features, classifies/regresses them,
    and attaches per-proposal pooled features for downstream heads.
    """
    def __init__(self, cfg, in_channels):
        super(ROIBoxHead, self).__init__()
        self.cfg = cfg
        # Pools each proposal's feature map to a single vector, stored on
        # the proposal as the "features" field.
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.feature_extractor = make_roi_box_feature_extractor(cfg, in_channels)
        self.predictor = make_roi_box_predictor(
            cfg, self.feature_extractor.out_channels)
        self.post_processor = make_roi_box_post_processor(cfg)
        self.loss_evaluator = make_roi_box_loss_evaluator(cfg)
    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
            targets (list[BoxList], optional): the ground-truth targets.
        Returns:
            x (Tensor): the result of the feature extractor
            proposals (list[BoxList]): during training, the subsampled proposals
                are returned. During testing, the predicted boxlists are returned
            losses (dict[Tensor]): During training, returns the losses for the
                head. During testing, returns an empty dict.
        """
        if self.training: # or not self.cfg.inference:
            # Faster R-CNN subsamples during training the proposals with a fixed
            # positive / negative ratio
            with torch.no_grad():
                proposals = self.loss_evaluator.subsample(proposals, targets)
        # extract features that will be fed to the final classifier. The
        # feature_extractor generally corresponds to the pooler + heads
        x = self.feature_extractor(features, proposals)
        # final classifier that converts the features into predictions
        class_logits, box_regression = self.predictor(x)
        # Attach each proposal's pooled feature vector for later heads
        # (e.g. the relation head).
        boxes_per_image = [len(proposal) for proposal in proposals]
        features = x.split(boxes_per_image, dim=0)
        for proposal, feature in zip(proposals, features):
            proposal.add_field("features", self.avgpool(feature))
        if not self.training:
            # if self.cfg.inference:
            result = self.post_processor((class_logits, box_regression), proposals)
            # When targets are available at test time, attach ground-truth
            # label/regression fields to the detections for evaluation.
            if targets:
                result = self.loss_evaluator.prepare_labels(result, targets)
            return x, result, {}
            # else:
            #     return x, proposals, {}
        loss_classifier, loss_box_reg = self.loss_evaluator(
            [class_logits], [box_regression]
        )
        # Expose per-proposal logits to downstream training consumers.
        class_logits = class_logits.split(boxes_per_image, dim=0)
        for proposal, class_logit in zip(proposals, class_logits):
            proposal.add_field("logits", class_logit)
        return (
            x,
            proposals,
            dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg),
        )
def build_roi_box_head(cfg, in_channels):
    """Construct the box head used by the ROI heads.

    By default this is :class:`ROIBoxHead`; if that ever proves
    insufficient, register a new class and expose the choice through the
    config.
    """
    head = ROIBoxHead(cfg, in_channels)
    return head
| 3,495 | 39.651163 | 96 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/roi_heads/box_head/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from lib.scene_parser.rcnn.layers import smooth_l1_loss
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
from lib.scene_parser.rcnn.modeling.matcher import Matcher
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_iou
from lib.scene_parser.rcnn.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from lib.scene_parser.rcnn.modeling.utils import cat
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
    def __init__(
        self,
        proposal_matcher,
        fg_bg_sampler,
        box_coder,
        cls_agnostic_bbox_reg=False
    ):
        """
        Arguments:
            proposal_matcher (Matcher): matches proposals to ground truth
            fg_bg_sampler (BalancedPositiveNegativeSampler): picks a fixed
                fg/bg ratio of proposals per image
            box_coder (BoxCoder): encodes regression targets
            cls_agnostic_bbox_reg (bool): single regressed box per proposal
                instead of one per class
        """
        self.proposal_matcher = proposal_matcher
        self.fg_bg_sampler = fg_bg_sampler
        self.box_coder = box_coder
        self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
    def match_targets_to_proposals(self, proposal, target):
        """Match each proposal to its best-overlapping ground-truth box.

        Returns a BoxList of matched targets (one per proposal) carrying the
        copied "labels" field plus a "matched_idxs" field with the raw match
        indices (negative values mark below-threshold / between-threshold
        matches).
        """
        match_quality_matrix = boxlist_iou(target, proposal)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Fast RCNN only need "labels" field for selecting the targets
        target = target.copy_with_fields("labels")
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets
    def prepare_targets(self, proposals, targets):
        """Compute per-image classification labels and box-regression targets.

        Returns:
            labels (list[Tensor]): int64 class labels per proposal
                (0 = background, -1 = ignore).
            regression_targets (list[Tensor]): encoded box deltas per proposal.
        """
        labels = []
        regression_targets = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            matched_targets = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image
            )
            matched_idxs = matched_targets.get_field("matched_idxs")
            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)
            # Label background (below the low threshold)
            bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[bg_inds] = 0
            # Label ignore proposals (between low and high thresholds)
            ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
            labels_per_image[ignore_inds] = -1  # -1 is ignored by sampler
            # compute regression targets
            regression_targets_per_image = self.box_coder.encode(
                matched_targets.bbox, proposals_per_image.bbox
            )
            labels.append(labels_per_image)
            regression_targets.append(regression_targets_per_image)
        return labels, regression_targets
    def subsample(self, proposals, targets):
        """
        This method performs the positive/negative sampling, and return
        the sampled proposals.
        Note: this function keeps a state (self._proposals), which __call__
        reads when computing the loss.
        Arguments:
            proposals (list[BoxList])
            targets (list[BoxList])
        """
        labels, regression_targets = self.prepare_targets(proposals, targets)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        proposals = list(proposals)
        # add corresponding label and regression_targets information to the bounding boxes
        for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
            labels, regression_targets, proposals
        ):
            proposals_per_image.add_field("labels", labels_per_image)
            proposals_per_image.add_field(
                "regression_targets", regression_targets_per_image
            )
        # distributed sampled proposals, that were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
        for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
            zip(sampled_pos_inds, sampled_neg_inds)
        ):
            # Keep only the proposals sampled as positive or negative.
            img_sampled_inds = torch.nonzero(pos_inds_img.view(-1) | neg_inds_img.view(-1)).squeeze(1)
            proposals_per_image = proposals[img_idx][img_sampled_inds]
            proposals[img_idx] = proposals_per_image
        self._proposals = proposals
        return proposals
def prepare_labels(self, proposals, targets):
"""
This method prepares the ground-truth labels for each bounding box, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
labels, regression_targets = self.prepare_targets(proposals, targets)
proposals = list(proposals)
# add corresponding label and regression_targets information to the bounding boxes
for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
labels, regression_targets, proposals
):
proposals_per_image.add_field("labels", labels_per_image)
proposals_per_image.add_field(
"regression_targets", regression_targets_per_image
)
return proposals
    def __call__(self, class_logits, box_regression):
        """
        Computes the loss for Faster R-CNN.
        This requires that the subsample method has been called beforehand.
        Arguments:
            class_logits (list[Tensor])
            box_regression (list[Tensor])
        Returns:
            classification_loss (Tensor)
            box_loss (Tensor)
        """
        # flatten the per-feature-level predictions into single tensors
        class_logits = cat(class_logits, dim=0)
        box_regression = cat(box_regression, dim=0)
        device = class_logits.device
        if not hasattr(self, "_proposals"):
            raise RuntimeError("subsample needs to be called before")
        proposals = self._proposals
        # labels / regression targets were attached to the proposals by subsample()
        labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
        regression_targets = cat(
            [proposal.get_field("regression_targets") for proposal in proposals], dim=0
        )
        classification_loss = F.cross_entropy(class_logits, labels)
        # get indices that correspond to the regression targets for
        # the corresponding ground truth labels, to be used with
        # advanced indexing
        sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
        labels_pos = labels[sampled_pos_inds_subset]
        if self.cls_agnostic_bbox_reg:
            # a single foreground box is regressed; columns 0-3 belong to background
            map_inds = torch.tensor([4, 5, 6, 7], device=device)
        else:
            # box_regression stores 4 values per class: pick the 4 columns of
            # each positive sample's ground-truth class
            map_inds = 4 * labels_pos[:, None] + torch.tensor(
                [0, 1, 2, 3], device=device)
        # regression loss is evaluated on foreground samples only
        box_loss = smooth_l1_loss(
            box_regression[sampled_pos_inds_subset[:, None], map_inds],
            regression_targets[sampled_pos_inds_subset],
            size_average=False,
            beta=1,
        )
        # normalize by the total number of sampled proposals (positives + negatives)
        box_loss = box_loss / labels.numel()
        return classification_loss, box_loss
def make_roi_box_loss_evaluator(cfg):
    """Build a FastRCNNLossComputation configured from ``cfg``."""
    matcher = Matcher(
        cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
        cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    box_coder = BoxCoder(weights=cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS)
    sampler = BalancedPositiveNegativeSampler(
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
        cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION,
    )
    return FastRCNNLossComputation(
        matcher,
        sampler,
        box_coder,
        cfg.MODEL.CLS_AGNOSTIC_BBOX_REG,
    )
| 8,001 | 35.538813 | 102 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/roi_heads/box_head/roi_box_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from lib.scene_parser.rcnn.modeling import registry
from torch import nn
@registry.ROI_BOX_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
    """Classification + box-regression head applied to pooled ROI feature maps."""

    def __init__(self, config, in_channels):
        super(FastRCNNPredictor, self).__init__()
        assert in_channels is not None
        num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        # class-agnostic regression predicts one fg box (plus bg) per ROI
        num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_bbox_reg_classes * 4)
        # weight initialization follows the Fast R-CNN paper
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
        nn.init.constant_(self.bbox_pred.bias, 0)

    def forward(self, x):
        pooled = self.avgpool(x)
        pooled = pooled.view(pooled.size(0), -1)
        return self.cls_score(pooled), self.bbox_pred(pooled)
@registry.ROI_BOX_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
    """Predicts class scores and box deltas from a flattened FPN ROI representation."""

    def __init__(self, cfg, in_channels):
        super(FPNPredictor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        # class-agnostic regression predicts one fg box (plus bg) per ROI
        num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_bbox_reg_classes * 4)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for layer in (self.cls_score, self.bbox_pred):
            nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        # the feature extractor may hand over N x C x 1 x 1 maps; flatten them
        if x.ndimension() == 4:
            assert list(x.shape[2:]) == [1, 1]
            x = x.view(x.size(0), -1)
        return self.cls_score(x), self.bbox_pred(x)
def make_roi_box_predictor(cfg, in_channels):
    """Instantiate the ROI box predictor class selected by the config."""
    predictor_cls = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
    return predictor_cls(cfg, in_channels)
| 2,298 | 35.492063 | 87 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/structures/image_list.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import torch
class ImageList(object):
    """
    Holds a batch of images (possibly of different original sizes) as one
    padded tensor, keeping the original size of every image alongside it.
    """

    def __init__(self, tensors, image_sizes):
        """
        Arguments:
            tensors (tensor): the (padded) batched image tensor
            image_sizes (list[tuple[int, int]]): original per-image sizes
        """
        self.tensors = tensors
        self.image_sizes = image_sizes

    def to(self, *args, **kwargs):
        # delegate to Tensor.to and wrap the result; image_sizes are shared
        return ImageList(self.tensors.to(*args, **kwargs), self.image_sizes)
def to_image_list(tensors, size_divisible=0):
    """
    Convert ``tensors`` into an :class:`ImageList`.

    ``tensors`` may already be an ImageList, a single torch.Tensor, or an
    iterable of Tensors (numpy arrays are not supported). An iterable of
    Tensors is zero-padded on the bottom/right so all images share one shape.
    """
    # a lone tensor must take the padding branch when the output has to be
    # divisible by `size_divisible`
    if isinstance(tensors, torch.Tensor) and size_divisible > 0:
        tensors = [tensors]
    if isinstance(tensors, ImageList):
        return tensors
    if isinstance(tensors, torch.Tensor):
        # a single CHW image gets a batch dimension; shape is used as-is
        if tensors.dim() == 3:
            tensors = tensors[None]
        assert tensors.dim() == 4
        sizes = [t.shape[-2:] for t in tensors]
        return ImageList(tensors, sizes)
    if isinstance(tensors, (tuple, list)):
        # per-dimension maximum over all images, expected layout (C, H, W)
        max_size = tuple(max(dims) for dims in zip(*[img.shape for img in tensors]))
        if size_divisible > 0:
            import math

            stride = size_divisible
            # round H and W up to the nearest multiple of the stride
            max_size = (
                max_size[0],
                int(math.ceil(max_size[1] / stride) * stride),
                int(math.ceil(max_size[2] / stride) * stride),
            )
        batched = tensors[0].new(len(tensors), *max_size).zero_()
        for src, dst in zip(tensors, batched):
            dst[: src.shape[0], : src.shape[1], : src.shape[2]].copy_(src)
        sizes = [img.shape[-2:] for img in tensors]
        return ImageList(batched, sizes)
    raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
| 2,485 | 33.054795 | 87 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/structures/segmentation_mask.py | import cv2
import copy
import torch
import numpy as np
from maskrcnn_benchmark.layers.misc import interpolate
from maskrcnn_benchmark.utils import cv2_util
import pycocotools.mask as mask_utils
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
""" ABSTRACT
Segmentations come in either:
1) Binary masks
2) Polygons
Binary masks can be represented in a contiguous array
and operations can be carried out more efficiently,
therefore BinaryMaskList handles them together.
Polygons are handled separately for each instance,
by PolygonInstance and instances are handled by
PolygonList.
SegmentationList is supposed to represent both,
therefore it wraps the functions of BinaryMaskList
and PolygonList to make it transparent.
"""
class BinaryMaskList(object):
    """
    This class handles binary masks for all objects in the image.

    Masks are stored as one tensor of shape [num_instances, H, W];
    ``size`` is the absolute image size, width first.
    """

    def __init__(self, masks, size):
        """
        Arguments:
            masks: Either torch.tensor of [num_instances, H, W]
                or list of torch.tensors of [H, W] with num_instances elems,
                or RLE (Run Length Encoding) - interpreted as list of dicts,
                or BinaryMaskList.
            size: absolute image size, width first

        After initialization, a hard copy will be made, to leave the
        initializing source data intact.
        """
        assert isinstance(size, (list, tuple))
        assert len(size) == 2
        if isinstance(masks, torch.Tensor):
            # The raw data representation is passed as argument
            masks = masks.clone()
        elif isinstance(masks, (list, tuple)):
            if len(masks) == 0:
                masks = torch.empty([0, size[1], size[0]])  # num_instances = 0!
            elif isinstance(masks[0], torch.Tensor):
                # stack the [H, W] masks along a new leading dim -> [n, H, W].
                # (the previous dim=2 produced [H, W, n], which violated the
                # shape assertions below for any non-square image)
                masks = torch.stack(masks, dim=0).clone()
            elif isinstance(masks[0], dict) and "counts" in masks[0]:
                # RLE interpretation
                rle_sizes = [tuple(inst["size"]) for inst in masks]
                masks = mask_utils.decode(masks)  # [h, w, n]
                masks = torch.tensor(masks).permute(2, 0, 1)  # [n, h, w]
                assert rle_sizes.count(rle_sizes[0]) == len(rle_sizes), (
                    "All the sizes must be the same size: %s" % rle_sizes
                )
                # in RLE, height come first in "size"
                rle_height, rle_width = rle_sizes[0]
                assert masks.shape[1] == rle_height
                assert masks.shape[2] == rle_width
                width, height = size
                if width != rle_width or height != rle_height:
                    # rescale the decoded masks to the requested image size
                    masks = interpolate(
                        input=masks[None].float(),
                        size=(height, width),
                        mode="bilinear",
                        align_corners=False,
                    )[0].type_as(masks)
            else:
                # previously this error object was constructed but never raised
                raise RuntimeError(
                    "Type of `masks[0]` could not be interpreted: %s"
                    % type(masks)
                )
        elif isinstance(masks, BinaryMaskList):
            # just hard copy the BinaryMaskList instance's underlying data
            masks = masks.masks.clone()
        else:
            # previously this error object was constructed but never raised
            raise RuntimeError(
                "Type of `masks` argument could not be interpreted:%s"
                % type(masks)
            )
        if len(masks.shape) == 2:
            # if only a single instance mask is passed
            masks = masks[None]
        assert len(masks.shape) == 3
        assert masks.shape[1] == size[1], "%s != %s" % (masks.shape[1], size[1])
        assert masks.shape[2] == size[0], "%s != %s" % (masks.shape[2], size[0])
        self.masks = masks
        self.size = tuple(size)

    def transpose(self, method):
        # flip vertically for FLIP_TOP_BOTTOM (dim 1 = H), otherwise horizontally
        dim = 1 if method == FLIP_TOP_BOTTOM else 2
        flipped_masks = self.masks.flip(dim)
        return BinaryMaskList(flipped_masks, self.size)

    def crop(self, box):
        assert isinstance(box, (list, tuple, torch.Tensor)), str(type(box))
        # box is assumed to be xyxy
        current_width, current_height = self.size
        xmin, ymin, xmax, ymax = [round(float(b)) for b in box]
        assert xmin <= xmax and ymin <= ymax, str(box)
        # clamp the box to the image and enforce a minimum 1-pixel extent
        xmin = min(max(xmin, 0), current_width - 1)
        ymin = min(max(ymin, 0), current_height - 1)
        xmax = min(max(xmax, 0), current_width)
        ymax = min(max(ymax, 0), current_height)
        xmax = max(xmax, xmin + 1)
        ymax = max(ymax, ymin + 1)
        width, height = xmax - xmin, ymax - ymin
        cropped_masks = self.masks[:, ymin:ymax, xmin:xmax]
        cropped_size = width, height
        return BinaryMaskList(cropped_masks, cropped_size)

    def resize(self, size):
        # accept a scalar size and expand it to (size, size)
        try:
            iter(size)
        except TypeError:
            assert isinstance(size, (int, float))
            size = size, size
        width, height = map(int, size)
        assert width > 0
        assert height > 0
        # Height comes first here!
        resized_masks = interpolate(
            input=self.masks[None].float(),
            size=(height, width),
            mode="bilinear",
            align_corners=False,
        )[0].type_as(self.masks)
        resized_size = width, height
        return BinaryMaskList(resized_masks, resized_size)

    def convert_to_polygon(self):
        # trace mask contours with OpenCV and wrap them as polygon instances
        if self.masks.numel() == 0:
            return PolygonList([], self.size)
        contours = self._findContours()
        return PolygonList(contours, self.size)

    def to(self, *args, **kwargs):
        # masks are kept on CPU; device moves are a deliberate no-op
        return self

    def _findContours(self):
        contours = []
        masks = self.masks.detach().numpy()
        for mask in masks:
            mask = cv2.UMat(mask)
            contour, hierarchy = cv2_util.findContours(
                mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
            )
            reshaped_contour = []
            for entity in contour:
                assert len(entity.shape) == 3
                assert (
                    entity.shape[1] == 1
                ), "Hierarchical contours are not allowed"
                reshaped_contour.append(entity.reshape(-1).tolist())
            contours.append(reshaped_contour)
        return contours

    def __len__(self):
        return len(self.masks)

    def __getitem__(self, index):
        if self.masks.numel() == 0:
            raise RuntimeError("Indexing empty BinaryMaskList")
        return BinaryMaskList(self.masks[index], self.size)

    def __iter__(self):
        return iter(self.masks)

    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_instances={}, ".format(len(self.masks))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={})".format(self.size[1])
        return s
class PolygonInstance(object):
    """
    This class holds a set of polygons that represents a single instance
    of an object mask. The object can be represented as a set of
    polygons.
    """

    def __init__(self, polygons, size):
        """
        Arguments:
            polygons: a list of lists of numbers. The first level refers to
                all the polygons that compose the object, and the second
                level to the polygon coordinates (x0, y0, x1, y1, ...).
                May also be another PolygonInstance, which is shallow-copied.
            size: absolute image size, width first.
        """
        if isinstance(polygons, (list, tuple)):
            valid_polygons = []
            for p in polygons:
                p = torch.as_tensor(p, dtype=torch.float32)
                if len(p) >= 6:  # 3 * 2 coordinates -- drop degenerate polygons
                    valid_polygons.append(p)
            polygons = valid_polygons
        elif isinstance(polygons, PolygonInstance):
            polygons = copy.copy(polygons.polygons)
        else:
            # previously this error object was constructed but never raised
            raise RuntimeError(
                "Type of argument `polygons` is not allowed:%s"
                % (type(polygons))
            )
        # NOTE: per-coordinate range validation was removed upstream because
        # it "crashes the training way too many times".
        self.polygons = polygons
        self.size = tuple(size)

    def transpose(self, method):
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )
        flipped_polygons = []
        width, height = self.size
        # flip x coordinates (idx 0) for horizontal, y (idx 1) for vertical
        if method == FLIP_LEFT_RIGHT:
            dim = width
            idx = 0
        elif method == FLIP_TOP_BOTTOM:
            dim = height
            idx = 1
        for poly in self.polygons:
            p = poly.clone()
            TO_REMOVE = 1  # coordinates are inclusive of the last pixel
            p[idx::2] = dim - poly[idx::2] - TO_REMOVE
            flipped_polygons.append(p)
        return PolygonInstance(flipped_polygons, size=self.size)

    def crop(self, box):
        assert isinstance(box, (list, tuple, torch.Tensor)), str(type(box))
        # box is assumed to be xyxy
        current_width, current_height = self.size
        xmin, ymin, xmax, ymax = map(float, box)
        assert xmin <= xmax and ymin <= ymax, str(box)
        # clamp the box to the image and enforce a minimum 1-pixel extent
        xmin = min(max(xmin, 0), current_width - 1)
        ymin = min(max(ymin, 0), current_height - 1)
        xmax = min(max(xmax, 0), current_width)
        ymax = min(max(ymax, 0), current_height)
        xmax = max(xmax, xmin + 1)
        ymax = max(ymax, ymin + 1)
        w, h = xmax - xmin, ymax - ymin
        cropped_polygons = []
        for poly in self.polygons:
            p = poly.clone()
            # translate into the crop's coordinate frame (no clamping applied)
            p[0::2] = p[0::2] - xmin  # .clamp(min=0, max=w)
            p[1::2] = p[1::2] - ymin  # .clamp(min=0, max=h)
            cropped_polygons.append(p)
        return PolygonInstance(cropped_polygons, size=(w, h))

    def resize(self, size):
        # accept a scalar size and expand it to (size, size)
        try:
            iter(size)
        except TypeError:
            assert isinstance(size, (int, float))
            size = size, size
        ratios = tuple(
            float(s) / float(s_orig) for s, s_orig in zip(size, self.size)
        )
        if ratios[0] == ratios[1]:
            # isotropic scaling: one multiply handles both coordinates
            ratio = ratios[0]
            scaled_polys = [p * ratio for p in self.polygons]
            return PolygonInstance(scaled_polys, size)
        ratio_w, ratio_h = ratios
        scaled_polygons = []
        for poly in self.polygons:
            p = poly.clone()
            p[0::2] *= ratio_w
            p[1::2] *= ratio_h
            scaled_polygons.append(p)
        return PolygonInstance(scaled_polygons, size=size)

    def convert_to_binarymask(self):
        width, height = self.size
        # formatting for COCO PythonAPI
        polygons = [p.numpy() for p in self.polygons]
        rles = mask_utils.frPyObjects(polygons, height, width)
        rle = mask_utils.merge(rles)
        mask = mask_utils.decode(rle)
        mask = torch.from_numpy(mask)
        return mask

    def __len__(self):
        return len(self.polygons)

    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_groups={}, ".format(len(self.polygons))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={})".format(self.size[1])
        return s
class PolygonList(object):
    """
    This class handles PolygonInstances for all objects in the image.
    """

    def __init__(self, polygons, size):
        """
        Arguments:
            polygons:
                a list of list of lists of numbers. The first
                level of the list correspond to individual instances,
                the second level to all the polygons that compose the
                object, and the third level to the polygon coordinates.
                OR
                a list of PolygonInstances.
                OR
                a PolygonList
            size: absolute image size
        """
        if isinstance(polygons, (list, tuple)):
            if len(polygons) == 0:
                # normalize the empty case to one instance with an empty
                # polygon; it gets dropped below because len(instance) == 0
                polygons = [[[]]]
            if isinstance(polygons[0], (list, tuple)):
                assert isinstance(polygons[0][0], (list, tuple)), str(
                    type(polygons[0][0])
                )
            else:
                assert isinstance(polygons[0], PolygonInstance), str(
                    type(polygons[0])
                )
        elif isinstance(polygons, PolygonList):
            size = polygons.size
            polygons = polygons.polygons
        else:
            # previously this error object was constructed but never raised
            raise RuntimeError(
                "Type of argument `polygons` is not allowed:%s"
                % (type(polygons))
            )
        assert isinstance(size, (list, tuple)), str(type(size))
        # keep only non-degenerate instances
        self.polygons = []
        for p in polygons:
            p = PolygonInstance(p, size)
            if len(p) > 0:
                self.polygons.append(p)
        self.size = tuple(size)

    def transpose(self, method):
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )
        flipped_polygons = []
        for polygon in self.polygons:
            flipped_polygons.append(polygon.transpose(method))
        return PolygonList(flipped_polygons, size=self.size)

    def crop(self, box):
        w, h = box[2] - box[0], box[3] - box[1]
        cropped_polygons = []
        for polygon in self.polygons:
            cropped_polygons.append(polygon.crop(box))
        cropped_size = w, h
        return PolygonList(cropped_polygons, cropped_size)

    def resize(self, size):
        resized_polygons = []
        for polygon in self.polygons:
            resized_polygons.append(polygon.resize(size))
        resized_size = size
        return PolygonList(resized_polygons, resized_size)

    def to(self, *args, **kwargs):
        # polygons are kept on CPU; device moves are a deliberate no-op
        return self

    def convert_to_binarymask(self):
        if len(self) > 0:
            masks = torch.stack(
                [p.convert_to_binarymask() for p in self.polygons]
            )
        else:
            size = self.size
            masks = torch.empty([0, size[1], size[0]], dtype=torch.uint8)
        return BinaryMaskList(masks, size=self.size)

    def __len__(self):
        return len(self.polygons)

    def __getitem__(self, item):
        if isinstance(item, int):
            selected_polygons = [self.polygons[item]]
        elif isinstance(item, slice):
            selected_polygons = self.polygons[item]
        else:
            # advanced indexing on a single dimension
            selected_polygons = []
            # accept both legacy uint8 masks and the bool masks produced by
            # newer PyTorch versions; a bool mask falling through to
            # `item.tolist()` would be misread as a list of 0/1 indices
            if isinstance(item, torch.Tensor) and item.dtype in (
                torch.uint8,
                torch.bool,
            ):
                item = item.nonzero()
                item = item.squeeze(1) if item.numel() > 0 else item
                item = item.tolist()
            for i in item:
                selected_polygons.append(self.polygons[i])
        return PolygonList(selected_polygons, size=self.size)

    def __iter__(self):
        return iter(self.polygons)

    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_instances={}, ".format(len(self.polygons))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={})".format(self.size[1])
        return s
class SegmentationMask(object):
    """
    This class stores the segmentations for all objects in the image.
    It wraps BinaryMaskList and PolygonList conveniently.
    """
    def __init__(self, instances, size, mode="poly"):
        """
        Arguments:
            instances: two types
                (1) polygon
                (2) binary mask
            size: (width, height)
            mode: 'poly', 'mask'. if mode is 'mask', convert mask of any format to binary mask
        """
        assert isinstance(size, (list, tuple))
        assert len(size) == 2
        # sizes may arrive as 0-dim tensors; unwrap them to plain numbers
        if isinstance(size[0], torch.Tensor):
            assert isinstance(size[1], torch.Tensor)
            size = size[0].item(), size[1].item()
        assert isinstance(size[0], (int, float))
        assert isinstance(size[1], (int, float))
        # delegate storage to the representation-specific container
        if mode == "poly":
            self.instances = PolygonList(instances, size)
        elif mode == "mask":
            self.instances = BinaryMaskList(instances, size)
        else:
            raise NotImplementedError("Unknown mode: %s" % str(mode))
        self.mode = mode
        self.size = tuple(size)
    def transpose(self, method):
        # flip every instance; representation mode is preserved
        flipped_instances = self.instances.transpose(method)
        return SegmentationMask(flipped_instances, self.size, self.mode)
    def crop(self, box):
        # the wrapped container computes the (clamped) cropped size itself
        cropped_instances = self.instances.crop(box)
        cropped_size = cropped_instances.size
        return SegmentationMask(cropped_instances, cropped_size, self.mode)
    def resize(self, size, *args, **kwargs):
        # extra args are accepted for interface parity but not forwarded
        resized_instances = self.instances.resize(size)
        resized_size = size
        return SegmentationMask(resized_instances, resized_size, self.mode)
    def to(self, *args, **kwargs):
        # device moves are a no-op for CPU-backed segmentation storage
        return self
    def convert(self, mode):
        # convert between 'poly' and 'mask' representations (no-op if equal)
        if mode == self.mode:
            return self
        if mode == "poly":
            converted_instances = self.instances.convert_to_polygon()
        elif mode == "mask":
            converted_instances = self.instances.convert_to_binarymask()
        else:
            raise NotImplementedError("Unknown mode: %s" % str(mode))
        return SegmentationMask(converted_instances, self.size, mode)
    def get_mask_tensor(self):
        # materialize a binary mask tensor regardless of the stored mode
        instances = self.instances
        if self.mode == "poly":
            instances = instances.convert_to_binarymask()
        # If there is only 1 instance
        return instances.masks.squeeze(0)
    def __len__(self):
        return len(self.instances)
    def __getitem__(self, item):
        selected_instances = self.instances.__getitem__(item)
        return SegmentationMask(selected_instances, self.size, self.mode)
    def __iter__(self):
        # NOTE(review): iteration state lives on self (iter_idx), so nested
        # or concurrent iteration over the same object is not safe.
        self.iter_idx = 0
        return self
    def __next__(self):
        if self.iter_idx < self.__len__():
            next_segmentation = self.__getitem__(self.iter_idx)
            self.iter_idx += 1
            return next_segmentation
        raise StopIteration()
    next = __next__  # Python 2 compatibility
    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_instances={}, ".format(len(self.instances))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={}, ".format(self.size[1])
        s += "mode={})".format(self.mode)
        return s
| 18,637 | 31.357639 | 94 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/structures/bounding_box.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
    """
    This class represents a set of bounding boxes.
    The bounding boxes are represented as a Nx4 Tensor.
    In order to uniquely determine the bounding boxes with respect
    to an image, we also store the corresponding image dimensions.
    They can contain extra information that is specific to each bounding box, such as
    labels.
    """
    def __init__(self, bbox, image_size, mode="xyxy"):
        # keep the incoming tensor's device; plain sequences default to CPU
        device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
        bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
        if bbox.ndimension() != 2:
            raise ValueError(
                "bbox should have 2 dimensions, got {}".format(bbox.ndimension())
            )
        if bbox.size(-1) != 4:
            raise ValueError(
                "last dimension of bbox should have a "
                "size of 4, got {}".format(bbox.size(-1))
            )
        if mode not in ("xyxy", "xywh"):
            raise ValueError("mode should be 'xyxy' or 'xywh'")
        self.bbox = bbox
        self.size = image_size  # (image_width, image_height)
        self.mode = mode
        # arbitrary named per-box annotations (labels, scores, masks, ...)
        self.extra_fields = {}
    def add_field(self, field, field_data):
        # attach (or overwrite) a named annotation for the boxes
        self.extra_fields[field] = field_data
    def get_field(self, field):
        # raises KeyError if the field was never added
        return self.extra_fields[field]
    def has_field(self, field):
        return field in self.extra_fields
    def fields(self):
        # names of all attached annotations
        return list(self.extra_fields.keys())
    def _copy_extra_fields(self, bbox):
        # copy all annotations from another BoxList into this one
        for k, v in bbox.extra_fields.items():
            self.extra_fields[k] = v
    def convert(self, mode):
        # return a BoxList with coordinates re-encoded in `mode`
        if mode not in ("xyxy", "xywh"):
            raise ValueError("mode should be 'xyxy' or 'xywh'")
        if mode == self.mode:
            return self
        # we only have two modes, so don't need to check
        # self.mode
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if mode == "xyxy":
            bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
            bbox = BoxList(bbox, self.size, mode=mode)
        else:
            TO_REMOVE = 1  # boxes use inclusive right/bottom pixel coordinates
            bbox = torch.cat(
                (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
            )
            bbox = BoxList(bbox, self.size, mode=mode)
        bbox._copy_extra_fields(self)
        return bbox
    def _split_into_xyxy(self):
        # return (xmin, ymin, xmax, ymax) as Nx1 tensors regardless of mode
        if self.mode == "xyxy":
            xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
            return xmin, ymin, xmax, ymax
        elif self.mode == "xywh":
            TO_REMOVE = 1
            xmin, ymin, w, h = self.bbox.split(1, dim=-1)
            return (
                xmin,
                ymin,
                xmin + (w - TO_REMOVE).clamp(min=0),
                ymin + (h - TO_REMOVE).clamp(min=0),
            )
        else:
            raise RuntimeError("Should not be here")
    def resize(self, size, *args, **kwargs):
        """
        Returns a resized copy of this bounding box
        :param size: The requested size in pixels, as a 2-tuple:
            (width, height).
        """
        ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
        if ratios[0] == ratios[1]:
            # isotropic scaling: scale all four coordinates at once
            ratio = ratios[0]
            scaled_box = self.bbox * ratio
            bbox = BoxList(scaled_box, size, mode=self.mode)
            # bbox._copy_extra_fields(self)
            for k, v in self.extra_fields.items():
                # non-tensor fields (masks, keypoints, ...) resize themselves
                if not isinstance(v, torch.Tensor):
                    v = v.resize(size, *args, **kwargs)
                bbox.add_field(k, v)
            return bbox
        # anisotropic scaling: split into corners and scale per axis
        ratio_width, ratio_height = ratios
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        scaled_xmin = xmin * ratio_width
        scaled_xmax = xmax * ratio_width
        scaled_ymin = ymin * ratio_height
        scaled_ymax = ymax * ratio_height
        scaled_box = torch.cat(
            (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
        )
        bbox = BoxList(scaled_box, size, mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.resize(size, *args, **kwargs)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)
    def transpose(self, method):
        """
        Transpose bounding box (flip or rotate in 90 degree steps)
        :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
          :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
          :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
          :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
        """
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )
        image_width, image_height = self.size
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if method == FLIP_LEFT_RIGHT:
            TO_REMOVE = 1
            transposed_xmin = image_width - xmax - TO_REMOVE
            transposed_xmax = image_width - xmin - TO_REMOVE
            transposed_ymin = ymin
            transposed_ymax = ymax
        elif method == FLIP_TOP_BOTTOM:
            # NOTE(review): no TO_REMOVE offset here, unlike the horizontal
            # branch above -- mirrors upstream behaviour; confirm intended.
            transposed_xmin = xmin
            transposed_xmax = xmax
            transposed_ymin = image_height - ymax
            transposed_ymax = image_height - ymin
        transposed_boxes = torch.cat(
            (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
        )
        bbox = BoxList(transposed_boxes, self.size, mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.transpose(method)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)
    def crop(self, box):
        """
        Crops a rectangular region from this bounding box. The box is a
        4-tuple defining the left, upper, right, and lower pixel
        coordinate.
        """
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        w, h = box[2] - box[0], box[3] - box[1]
        # shift boxes into the crop's coordinate frame and clamp to its extent
        cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
        cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
        cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
        cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
        # TODO should I filter empty boxes here?
        if False:
            # dead branch kept from upstream; empty boxes are not filtered
            is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
        cropped_box = torch.cat(
            (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
        )
        bbox = BoxList(cropped_box, (w, h), mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.crop(box)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)
    # Tensor-like methods
    def to(self, device):
        # move the box tensor (and any movable fields) to `device`
        bbox = BoxList(self.bbox.to(device), self.size, self.mode)
        for k, v in self.extra_fields.items():
            if hasattr(v, "to"):
                v = v.to(device)
            bbox.add_field(k, v)
        return bbox
    def __getitem__(self, item):
        # index the boxes and every extra field with the same item
        bbox = BoxList(self.bbox[item], self.size, self.mode)
        for k, v in self.extra_fields.items():
            bbox.add_field(k, v[item])
        return bbox
    def __len__(self):
        return self.bbox.shape[0]
    def clip_to_image(self, remove_empty=True):
        # NOTE(review): clamps coordinates as xyxy corners in place; confirm
        # callers convert to "xyxy" mode before clipping.
        TO_REMOVE = 1
        self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
        self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
        self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
        self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
        if remove_empty:
            box = self.bbox
            keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
            return self[keep]
        return self
    def area(self):
        # pixel area of every box (inclusive-coordinate convention for xyxy)
        box = self.bbox
        if self.mode == "xyxy":
            TO_REMOVE = 1
            area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
        elif self.mode == "xywh":
            area = box[:, 2] * box[:, 3]
        else:
            raise RuntimeError("Should not be here")
        return area
    def copy_with_fields(self, fields, skip_missing=False):
        # shallow-copy the boxes, carrying over only the requested fields
        bbox = BoxList(self.bbox, self.size, self.mode)
        if not isinstance(fields, (list, tuple)):
            fields = [fields]
        for field in fields:
            if self.has_field(field):
                bbox.add_field(field, self.get_field(field))
            elif not skip_missing:
                raise KeyError("Field '{}' not found in {}".format(field, self))
        return bbox
    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_boxes={}, ".format(len(self))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={}, ".format(self.size[1])
        s += "mode={})".format(self.mode)
        return s
if __name__ == "__main__":
    # quick manual smoke test: resize and horizontal flip of two boxes
    boxes = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
    halved = boxes.resize((5, 5))
    print(halved)
    print(halved.bbox)
    flipped = boxes.transpose(0)
    print(flipped)
    print(flipped.bbox)
| 9,645 | 35.127341 | 92 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/structures/bounding_box_pair.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .bounding_box import BoxList
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxPairList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box, such as
labels.
"""
def __init__(self, bbox_pair, image_size, mode="xyxy"):
device = bbox_pair.device if isinstance(bbox_pair, torch.Tensor) else torch.device("cpu")
bbox_pair = torch.as_tensor(bbox_pair, dtype=torch.float32, device=device)
if bbox_pair.ndimension() != 2:
raise ValueError(
"bbox should have 2 dimensions, got {}".format(bbox_pair.ndimension())
)
if bbox_pair.size(-1) != 8:
raise ValueError(
"last dimension of bbox should have a "
"size of 8, got {}".format(bbox_pair.size(-1))
)
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox_pair
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
    def convert(self, mode):
        # Convert between "xyxy" and "xywh" encodings, preserving extra fields.
        # NOTE(review): relies on _split_into_xyxy, which unpacks 4 values
        # from an Nx8 split and therefore looks broken for pair boxes --
        # confirm whether this path is ever exercised.
        if mode not in ("xyxy", "xywh"):
            raise ValueError("mode should be 'xyxy' or 'xywh'")
        if mode == self.mode:
            return self
        # we only have two modes, so don't need to check
        # self.mode
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if mode == "xyxy":
            bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
            bbox = BoxPairList(bbox, self.size, mode=mode)
        else:
            TO_REMOVE = 1  # boxes use inclusive right/bottom pixel coordinates
            bbox = torch.cat(
                (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
            )
            bbox = BoxPairList(bbox, self.size, mode=mode)
        bbox._copy_extra_fields(self)
        return bbox
# def convert_from_boxlist(self, boxes):
# # input:
# # boxes: boxlist
    def _split_into_xyxy(self):
        # NOTE(review): self.bbox rows have 8 columns (a pair of boxes), but
        # split(1, dim=-1) yields 8 single-column chunks that are unpacked
        # into only 4 names below. This mirrors BoxList (Nx4) verbatim and
        # looks broken for pair boxes -- confirm this path is never taken.
        if self.mode == "xyxy":
            xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
            return xmin, ymin, xmax, ymax
        elif self.mode == "xywh":
            TO_REMOVE = 1  # boxes use inclusive right/bottom pixel coordinates
            xmin, ymin, w, h = self.bbox.split(1, dim=-1)
            return (
                xmin,
                ymin,
                xmin + (w - TO_REMOVE).clamp(min=0),
                ymin + (h - TO_REMOVE).clamp(min=0),
            )
        else:
            raise RuntimeError("Should not be here")
    def resize(self, size, *args, **kwargs):
        """
        Returns a resized copy of this bounding box

        :param size: The requested size in pixels, as a 2-tuple:
            (width, height).

        NOTE(review): the non-uniform-ratio branch goes through
        _split_into_xyxy(), which appears to assume Nx4 boxes -- only the
        uniform-ratio fast path is safe for Nx8 pairs; confirm callers.
        """
        ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
        if ratios[0] == ratios[1]:
            # uniform scale: every coordinate scales by the same factor
            ratio = ratios[0]
            scaled_box = self.bbox * ratio
            bbox = BoxPairList(scaled_box, size, mode=self.mode)
            # bbox._copy_extra_fields(self)
            for k, v in self.extra_fields.items():
                if not isinstance(v, torch.Tensor):
                    v = v.resize(size, *args, **kwargs)
                bbox.add_field(k, v)
            return bbox
        # non-uniform scale: x and y are scaled independently
        ratio_width, ratio_height = ratios
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        scaled_xmin = xmin * ratio_width
        scaled_xmax = xmax * ratio_width
        scaled_ymin = ymin * ratio_height
        scaled_ymax = ymax * ratio_height
        scaled_box = torch.cat(
            (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
        )
        bbox = BoxPairList(scaled_box, size, mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.resize(size, *args, **kwargs)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)
    def transpose(self, method):
        """
        Transpose bounding box (flip or rotate in 90 degree steps)

        :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
            :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
            :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
            :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.

        NOTE(review): relies on _split_into_xyxy(), which looks Nx4-specific
        (see note there); confirm this path is exercised for pair boxes.
        """
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )
        image_width, image_height = self.size
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if method == FLIP_LEFT_RIGHT:
            # mirror x about the image width (inclusive-pixel convention)
            TO_REMOVE = 1
            transposed_xmin = image_width - xmax - TO_REMOVE
            transposed_xmax = image_width - xmin - TO_REMOVE
            transposed_ymin = ymin
            transposed_ymax = ymax
        elif method == FLIP_TOP_BOTTOM:
            transposed_xmin = xmin
            transposed_xmax = xmax
            transposed_ymin = image_height - ymax
            transposed_ymax = image_height - ymin
        transposed_boxes = torch.cat(
            (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
        )
        bbox = BoxPairList(transposed_boxes, self.size, mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.transpose(method)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)
    def crop(self, box):
        """
        Crops a rectangular region from this bounding box. The box is a
        4-tuple defining the left, upper, right, and lower pixel
        coordinate.

        NOTE(review): relies on _split_into_xyxy() (see Nx4 note there).
        """
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        w, h = box[2] - box[0], box[3] - box[1]
        # translate into the crop's frame and clamp to its extent
        cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
        cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
        cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
        cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
        # TODO should I filter empty boxes here?
        if False:
            is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
        cropped_box = torch.cat(
            (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
        )
        bbox = BoxPairList(cropped_box, (w, h), mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.crop(box)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device):
bbox = BoxPairList(self.bbox.to(device), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
v = v.to(device)
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
bbox = BoxPairList(self.bbox[item], self.size, self.mode)
for k, v in self.extra_fields.items():
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 1
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
return self[keep]
return self
    def area(self):
        """Per-pair box area (inclusive-pixel convention in "xyxy" mode).

        NOTE(review): only columns 0-3 are used, i.e. this returns the area
        of the *subject* box of each pair -- confirm that is intended.
        """
        box = self.bbox
        if self.mode == "xyxy":
            TO_REMOVE = 1
            area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
        elif self.mode == "xywh":
            area = box[:, 2] * box[:, 3]
        else:
            raise RuntimeError("Should not be here")
        return area
def copy_with_fields(self, fields, skip_missing=False):
bbox = BoxPairList(self.bbox, self.size, self.mode)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if self.has_field(field):
bbox.add_field(field, self.get_field(field))
elif not skip_missing:
raise KeyError("Field '{}' not found in {}".format(field, self))
return bbox
def copy_with_subject(self):
bbox = BoxList(self.bbox[:, :4], self.size, self.mode)
return bbox
def copy_with_object(self):
bbox = BoxList(self.bbox[:, 4:], self.size, self.mode)
return bbox
def copy_with_union(self):
x1 = self.bbox[:, 0::4].min(1)[0].view(-1, 1) # x1
y1 = self.bbox[:, 1::4].min(1)[0].view(-1, 1) # y1
x2 = self.bbox[:, 2::4].max(1)[0].view(-1, 1) # x2
y2 = self.bbox[:, 3::4].max(1)[0].view(-1, 1) # y2
bbox = BoxList(torch.cat((x1, y1, x2, y2), 1), self.size, self.mode)
return bbox
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_boxes={}, ".format(len(self))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
if __name__ == "__main__":
    # Smoke test: resize and flip a tiny pair list.
    # NOTE(review): these 4-column boxes would fail the Nx8 check in
    # BoxPairList.__init__ -- this looks like a leftover from the BoxList
    # version of this file; verify before relying on it.
    bbox = BoxPairList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
    s_bbox = bbox.resize((5, 5))
    print(s_bbox)
    print(s_bbox.bbox)
    t_bbox = bbox.transpose(0)
    print(t_bbox)
    print(t_bbox.bbox)
| 10,466 | 35.34375 | 97 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/structures/boxlist_ops.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .bounding_box import BoxList
from ..layers import nms as _box_nms
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
    """
    Performs non-maximum suppression on a boxlist, with scores specified
    in a boxlist field via score_field.

    Arguments:
        boxlist(BoxList)
        nms_thresh (float)
        max_proposals (int): if > 0, then only the top max_proposals are kept
            after non-maximum suppression
        score_field (str)
    """
    if nms_thresh <= 0:
        return boxlist
    original_mode = boxlist.mode
    # NMS kernel expects xyxy coordinates
    xyxy_list = boxlist.convert("xyxy")
    keep = _box_nms(xyxy_list.bbox, xyxy_list.get_field(score_field), nms_thresh)
    if max_proposals > 0:
        keep = keep[:max_proposals]
    return xyxy_list[keep].convert(original_mode)
def remove_small_boxes(boxlist, min_size):
    """
    Only keep boxes with both sides >= min_size

    Arguments:
        boxlist (Boxlist)
        min_size (int)
    """
    # widths/heights are directly available in xywh layout
    xywh = boxlist.convert("xywh").bbox
    widths = xywh[:, 2]
    heights = xywh[:, 3]
    keep = ((widths >= min_size) & (heights >= min_size)).nonzero().squeeze(1)
    return boxlist[keep]
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def boxlist_iou(boxlist1, boxlist2):
    """Compute the intersection over union of two set of boxes.
    The box order must be (xmin, ymin, xmax, ymax).

    Arguments:
      box1: (BoxList) bounding boxes, sized [N,4].
      box2: (BoxList) bounding boxes, sized [M,4].

    Returns:
      (tensor) iou, sized [N,M].

    Reference:
      https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
    """
    if boxlist1.size != boxlist2.size:
        raise RuntimeError(
                "boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2))
    boxlist1 = boxlist1.convert("xyxy")
    boxlist2 = boxlist2.convert("xyxy")
    N = len(boxlist1)
    M = len(boxlist2)
    area1 = boxlist1.area()
    area2 = boxlist2.area()
    box1, box2 = boxlist1.bbox, boxlist2.bbox
    # broadcast to all N*M pairs: top-left and bottom-right of intersections
    lt = torch.max(box1[:, None, :2], box2[:, :2])  # [N,M,2]
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])  # [N,M,2]
    # +1 because coordinates are inclusive pixel indices
    TO_REMOVE = 1
    wh = (rb - lt + TO_REMOVE).clamp(min=0)  # [N,M,2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
    iou = inter / (area1[:, None] + area2 - inter)
    return iou
# TODO redundant, remove
def _cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def cat_boxlist(bboxes):
    """
    Concatenates a list of BoxList (having the same image size) into a
    single BoxList

    Arguments:
        bboxes (list[BoxList])
    """
    assert isinstance(bboxes, (list, tuple))
    assert all(isinstance(bbox, BoxList) for bbox in bboxes)
    reference = bboxes[0]
    # all inputs must agree on image size, mode and the set of extra fields
    assert all(bbox.size == reference.size for bbox in bboxes)
    assert all(bbox.mode == reference.mode for bbox in bboxes)
    shared_fields = set(reference.fields())
    assert all(set(bbox.fields()) == shared_fields for bbox in bboxes)
    merged = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), reference.size, reference.mode)
    for name in shared_fields:
        merged.add_field(name, _cat([bbox.get_field(name) for bbox in bboxes], dim=0))
    return merged
| 3,703 | 27.492308 | 97 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/structures/keypoint.py | import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Keypoints(object):
    """Per-instance keypoints tied to an image size.

    Stores an (num_instances, num_keypoints, 3) float tensor whose last axis
    is (x, y, visibility).  Subclasses are expected to define FLIP_INDS
    (see PersonKeypoints) for transpose() to work.
    """
    def __init__(self, keypoints, size, mode=None):
        # FIXME remove check once we have better integration with device
        # in my version this would consistently return a CPU tensor
        device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device('cpu')
        keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
        num_keypoints = keypoints.shape[0]
        if num_keypoints:
            # reshape flat (x, y, v) triples into [num_instances, num_kps, 3]
            keypoints = keypoints.view(num_keypoints, -1, 3)
        # TODO should I split them?
        # self.visibility = keypoints[..., 2]
        self.keypoints = keypoints# [..., :2]
        self.size = size
        self.mode = mode
        self.extra_fields = {}
    def crop(self, box):
        raise NotImplementedError()
    def resize(self, size, *args, **kwargs):
        """Return a copy with x/y scaled independently to the new image size."""
        ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
        ratio_w, ratio_h = ratios
        resized_data = self.keypoints.clone()
        resized_data[..., 0] *= ratio_w
        resized_data[..., 1] *= ratio_h
        keypoints = type(self)(resized_data, size, self.mode)
        for k, v in self.extra_fields.items():
            keypoints.add_field(k, v)
        return keypoints
    def transpose(self, method):
        """Return a horizontally-flipped copy (left/right keypoints swapped)."""
        if method not in (FLIP_LEFT_RIGHT,):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT implemented")
        # permute keypoints so e.g. left_eye takes right_eye's slot
        flip_inds = type(self).FLIP_INDS
        flipped_data = self.keypoints[:, flip_inds]
        width = self.size[0]
        TO_REMOVE = 1
        # Flip x coordinates
        flipped_data[..., 0] = width - flipped_data[..., 0] - TO_REMOVE
        # Maintain COCO convention that if visibility == 0, then x, y = 0
        inds = flipped_data[..., 2] == 0
        flipped_data[inds] = 0
        keypoints = type(self)(flipped_data, self.size, self.mode)
        for k, v in self.extra_fields.items():
            keypoints.add_field(k, v)
        return keypoints
    def to(self, *args, **kwargs):
        """Return a copy with tensors moved/cast via Tensor.to semantics."""
        keypoints = type(self)(self.keypoints.to(*args, **kwargs), self.size, self.mode)
        for k, v in self.extra_fields.items():
            if hasattr(v, "to"):
                v = v.to(*args, **kwargs)
            keypoints.add_field(k, v)
        return keypoints
    def __getitem__(self, item):
        keypoints = type(self)(self.keypoints[item], self.size, self.mode)
        for k, v in self.extra_fields.items():
            keypoints.add_field(k, v[item])
        return keypoints
    def add_field(self, field, field_data):
        self.extra_fields[field] = field_data
    def get_field(self, field):
        return self.extra_fields[field]
    def __repr__(self):
        s = self.__class__.__name__ + '('
        s += 'num_instances={}, '.format(len(self.keypoints))
        s += 'image_width={}, '.format(self.size[0])
        s += 'image_height={})'.format(self.size[1])
        return s
def _create_flip_indices(names, flip_map):
full_flip_map = flip_map.copy()
full_flip_map.update({v: k for k, v in flip_map.items()})
flipped_names = [i if i not in full_flip_map else full_flip_map[i] for i in names]
flip_indices = [names.index(i) for i in flipped_names]
return torch.tensor(flip_indices)
class PersonKeypoints(Keypoints):
    """Keypoints specialized to the 17-point COCO person skeleton."""
    # COCO keypoint names, in dataset order
    NAMES = [
        'nose',
        'left_eye',
        'right_eye',
        'left_ear',
        'right_ear',
        'left_shoulder',
        'right_shoulder',
        'left_elbow',
        'right_elbow',
        'left_wrist',
        'right_wrist',
        'left_hip',
        'right_hip',
        'left_knee',
        'right_knee',
        'left_ankle',
        'right_ankle'
    ]
    # left<->right mirror pairs used when horizontally flipping an image
    FLIP_MAP = {
        'left_eye': 'right_eye',
        'left_ear': 'right_ear',
        'left_shoulder': 'right_shoulder',
        'left_elbow': 'right_elbow',
        'left_wrist': 'right_wrist',
        'left_hip': 'right_hip',
        'left_knee': 'right_knee',
        'left_ankle': 'right_ankle'
    }
# TODO this doesn't look great
# Precompute, once at import time, the index permutation that mirrors a
# COCO keypoint vector left<->right (consumed by Keypoints.transpose).
PersonKeypoints.FLIP_INDS = _create_flip_indices(PersonKeypoints.NAMES, PersonKeypoints.FLIP_MAP)
def kp_connections(keypoints):
    """Return the person-skeleton edges as pairs of indices into *keypoints*."""
    idx = keypoints.index
    edges = [
        ('left_eye', 'right_eye'),
        ('left_eye', 'nose'),
        ('right_eye', 'nose'),
        ('right_eye', 'right_ear'),
        ('left_eye', 'left_ear'),
        ('right_shoulder', 'right_elbow'),
        ('right_elbow', 'right_wrist'),
        ('left_shoulder', 'left_elbow'),
        ('left_elbow', 'left_wrist'),
        ('right_hip', 'right_knee'),
        ('right_knee', 'right_ankle'),
        ('left_hip', 'left_knee'),
        ('left_knee', 'left_ankle'),
        ('right_shoulder', 'left_shoulder'),
        ('right_hip', 'left_hip'),
    ]
    return [[idx(a), idx(b)] for a, b in edges]
PersonKeypoints.CONNECTIONS = kp_connections(PersonKeypoints.NAMES)
# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def keypoints_to_heat_map(keypoints, rois, heatmap_size):
    """Quantize keypoints into per-ROI heatmap bin indices.

    :param keypoints: [num_rois, num_kps, 3] tensor of (x, y, visibility)
    :param rois: [num_rois, 4] boxes in xyxy coordinates
    :param heatmap_size: side length of the (square) heatmap
    :return: (heatmaps, valid) -- linearized bin index per keypoint, and a
        0/1 mask of keypoints that are visible and fall inside their ROI.
    """
    if rois.numel() == 0:
        return rois.new().long(), rois.new().long()
    offset_x = rois[:, 0]
    offset_y = rois[:, 1]
    scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
    scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
    # add a trailing axis so the per-roi values broadcast over keypoints
    offset_x = offset_x[:, None]
    offset_y = offset_y[:, None]
    scale_x = scale_x[:, None]
    scale_y = scale_y[:, None]
    x = keypoints[..., 0]
    y = keypoints[..., 1]
    # points exactly on the right/bottom ROI edge belong to the last bin
    x_boundary_inds = x == rois[:, 2][:, None]
    y_boundary_inds = y == rois[:, 3][:, None]
    x = (x - offset_x) * scale_x
    x = x.floor().long()
    y = (y - offset_y) * scale_y
    y = y.floor().long()
    x[x_boundary_inds] = heatmap_size - 1
    y[y_boundary_inds] = heatmap_size - 1
    valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
    vis = keypoints[..., 2] > 0
    valid = (valid_loc & vis).long()
    # row-major linearization; invalid keypoints are zeroed out
    lin_ind = y * heatmap_size + x
    heatmaps = lin_ind * valid
    return heatmaps, valid
| 6,555 | 33.687831 | 97 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/utils/box.py | import numpy as np
import torch
def bbox_overlaps(anchors, gt_boxes):
    """
    anchors: (N, 4) ndarray of float
    gt_boxes: (K, 4) ndarray of float
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = anchors.size(0)
    K = gt_boxes.size(0)
    # +1 everywhere: coordinates are inclusive pixel indices
    gt_areas = ((gt_boxes[:, 2] - gt_boxes[:, 0] + 1) *
                (gt_boxes[:, 3] - gt_boxes[:, 1] + 1)).view(1, K)
    anchor_areas = ((anchors[:, 2] - anchors[:, 0] + 1) *
                    (anchors[:, 3] - anchors[:, 1] + 1)).view(N, 1)
    # broadcast every anchor against every gt box
    an = anchors.view(N, 1, 4).expand(N, K, 4)
    gt = gt_boxes.view(1, K, 4).expand(N, K, 4)
    inter_w = (torch.min(an[:, :, 2], gt[:, :, 2]) -
               torch.max(an[:, :, 0], gt[:, :, 0]) + 1).clamp(min=0)
    inter_h = (torch.min(an[:, :, 3], gt[:, :, 3]) -
               torch.max(an[:, :, 1], gt[:, :, 1]) + 1).clamp(min=0)
    inter = inter_w * inter_h
    union = anchor_areas + gt_areas - inter
    return inter / union
| 998 | 28.382353 | 69 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/utils/pytorch_misc.py | """
Miscellaneous functions that might be useful for pytorch
"""
import h5py
import numpy as np
import torch
from torch.autograd import Variable
import os
import dill as pkl
from itertools import tee
from torch import nn
def optimistic_restore(network, state_dict):
    """Best-effort checkpoint load: copy tensors whose name and shape match.

    Prints a diagnostic for every unexpected, shape-mismatched or missing
    entry.  Returns True iff the checkpoint matched the network exactly.
    """
    own_state = network.state_dict()
    clean = True
    for name, param in state_dict.items():
        target = own_state.get(name)
        if target is None:
            print("Unexpected key {} in state_dict with size {}".format(name, param.size()))
            clean = False
        elif target.size() != param.size():
            print("Network has {} with size {}, ckpt has {}".format(name,
                                                                    target.size(),
                                                                    param.size()))
            clean = False
        else:
            target.copy_(param)
    missing = set(own_state.keys()) - set(state_dict.keys())
    if missing:
        print("We couldn't find {}".format(','.join(missing)))
        clean = False
    return clean
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    left, right = tee(iterable)
    next(right, None)  # advance the second iterator by one
    return zip(left, right)
def get_ranking(predictions, labels, num_guesses=5):
    """
    Given a matrix of predictions and labels for the correct ones, get the number of guesses
    required to get the prediction right per example.
    :param predictions: [batch_size, range_size] predictions
    :param labels: [batch_size] array of labels
    :param num_guesses: Number of guesses to return
    :return: (gt_ranks, guesses) -- 0-based rank of each ground-truth label
        among the sorted predictions, and the top num_guesses class indices
    """
    assert labels.size(0) == predictions.size(0)
    assert labels.dim() == 1
    assert predictions.dim() == 2
    # classes sorted by score, best first
    values, full_guesses = predictions.topk(predictions.size(1), dim=1)
    # rank of each class within the sorted guesses
    _, ranking = full_guesses.topk(full_guesses.size(1), dim=1, largest=False)
    # NOTE(review): .data is Variable-era API; on plain tensors it is a view
    gt_ranks = torch.gather(ranking.data, 1, labels.data[:, None]).squeeze()
    guesses = full_guesses[:, :num_guesses]
    return gt_ranks, guesses
def cache(f):
    """
    Caches a computation on disk (pickled with dill).

    The wrapped callable takes the cache filename as its *first* argument:
    cached_f(fn, *args) loads the pickle if `fn` exists, otherwise computes
    f(*args), writes the result to `fn`, and returns it.
    """
    def cache_wrapper(fn, *args, **kwargs):
        if os.path.exists(fn):
            with open(fn, 'rb') as file:
                data = pkl.load(file)
        else:
            print("file {} not found, so rebuilding".format(fn))
            data = f(*args, **kwargs)
            with open(fn, 'wb') as file:
                pkl.dump(data, file)
        return data
    return cache_wrapper
class Flattener(nn.Module):
    """Module that collapses every dimension after the batch dimension."""

    def __init__(self):
        super(Flattener, self).__init__()

    def forward(self, x):
        # (B, d1, d2, ...) -> (B, d1*d2*...)
        return x.view(x.size(0), -1)
def to_variable(f):
    """
    Decorator that pushes all the outputs to a variable
    :param f:
    :return:
    """
    # NOTE(review): torch.autograd.Variable is a no-op wrapper on modern
    # PyTorch; kept for compatibility with the Variable-era API.
    def variable_wrapper(*args, **kwargs):
        rez = f(*args, **kwargs)
        if isinstance(rez, tuple):
            return tuple([Variable(x) for x in rez])
        return Variable(rez)
    return variable_wrapper
def arange(base_tensor, n=None):
    """LongTensor [0..count) allocated like *base_tensor* (same device/type family).

    :param n: length of the range; defaults to base_tensor.size(0)
    """
    count = base_tensor.size(0) if n is None else n
    result = base_tensor.new(count).long()
    torch.arange(0, count, out=result)
    return result
def to_onehot(vec, num_classes, fill=1000):
    """
    Creates a [size, num_classes] torch FloatTensor where
    one_hot[i, vec[i]] = fill and every other entry is -fill.

    :param vec: 1d torch tensor of class indices
    :param num_classes: int
    :param fill: value that we want + and - things to be.
    :return: the one-hot FloatTensor
    """
    batch = vec.size(0)
    result = vec.new(batch, num_classes).float().fill_(-fill)
    rows = vec.new(batch).long()
    torch.arange(0, batch, out=rows)
    # scatter via flat (row-major) indexing
    result.view(-1)[vec + num_classes * rows] = fill
    return result
def save_net(fname, net):
    """Dump every entry of net.state_dict() into an HDF5 file at *fname*."""
    h5f = h5py.File(fname, mode='w')
    for k, v in list(net.state_dict().items()):
        h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
    """Load an HDF5 checkpoint written by save_net; size mismatches are
    reported and skipped rather than raising."""
    h5f = h5py.File(fname, mode='r')
    for k, v in list(net.state_dict().items()):
        param = torch.from_numpy(np.asarray(h5f[k]))
        if v.size() != param.size():
            print("On k={} desired size is {} but supplied {}".format(k, v.size(), param.size()))
        else:
            v.copy_(param)
def batch_index_iterator(len_l, batch_size, skip_end=True):
    """
    Provides indices that iterate over a list

    :param len_l: int representing size of thing that we will
        iterate over
    :param batch_size: size of each batch
    :param skip_end: if true, don't iterate over the last (partial) batch
    :return: A generator that returns (start, end) tuples
        as it goes through all batches
    """
    limit = (len_l // batch_size) * batch_size if skip_end else len_l
    for start in range(0, limit, batch_size):
        yield (start, min(start + batch_size, len_l))
def batch_map(f, a, batch_size):
    """
    Maps f over the array a in chunks of batch_size.

    :param f: function to be applied. Must take in a block of
        (batch_size, dim_a) and map it to (batch_size, something).
    :param a: Array to be applied over of shape (num_rows, dim_a).
    :param batch_size: size of each array
    :return: Array of size (num_rows, something).
    """
    outputs = []
    for start, end in batch_index_iterator(a.size(0), batch_size, skip_end=False):
        print("Calling on {}".format(a[start:end].size()))
        outputs.append(f(a[start:end]))
    return torch.cat(outputs)
def const_row(fill, l, volatile=False):
    """Length-`l` LongTensor filled with `fill` (moved to CUDA if available).

    NOTE(review): `volatile` is a Variable-era flag ignored by modern PyTorch.
    """
    input_tok = Variable(torch.LongTensor([fill] * l),volatile=volatile)
    if torch.cuda.is_available():
        input_tok = input_tok.cuda()
    return input_tok
def print_para(model):
    """
    Render a table of the model's parameter shapes/counts, largest first.

    Bias and batch-norm parameters are excluded from the listing.
    :param model: an nn.Module
    :return: the formatted report string
    """
    rows = {}
    total_params = 0
    for name, param in model.named_parameters():
        leaf = name.split('.')[-1]
        if 'bias' in leaf or 'bn' in leaf:
            continue
        rows[name] = ([str(dim) for dim in param.size()], np.prod(param.size()), param.requires_grad)
        total_params += np.prod(param.size())
    lines = []
    # largest parameter tensors first
    for name, (shape, count, requires_grad) in sorted(rows.items(), key=lambda item: -item[1][1]):
        lines.append("{:<50s}: {:<16s}({:8d}) ({})".format(
            name, '[{}]'.format(','.join(shape)), count, 'grad' if requires_grad else ' '
        ))
    return '\n {:.1f}M total parameters \n ----- \n \n{}'.format(total_params / 1000000.0, '\n'.join(lines))
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k

    :param output: [batch_size, num_classes] scores
    :param target: [batch_size] ground-truth class indices
    :param topk: tuple of k values to report
    :return: list of precision@k tensors (percentages), one per k
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape, not view: `correct` comes from a transposed (non-contiguous)
        # tensor, and .view(-1) raises on modern PyTorch in that case
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def nonintersecting_2d_inds(x):
    """
    Returns np.array([(a,b) for a in range(x) for b in range(x) if a != b]) efficiently
    :param x: Size
    :return: a x*(x-1) array that is [(0,1), (0,2)... (0, x-1), (1,0), (1,2), ..., (x-1, x-2)]
    """
    # 1 everywhere except the diagonal; np.where walks it in row-major order
    off_diagonal = np.ones((x, x), dtype=np.int32) - np.eye(x, dtype=np.int32)
    return np.column_stack(np.where(off_diagonal))
def intersect_2d(x1, x2):
    """
    Given two arrays [m1, n], [m2,n], returns a [m1, m2] array where each entry is True if those
    rows match.
    :param x1: [m1, n] numpy array
    :param x2: [m2, n] numpy array
    :return: [m1, m2] bool array of the intersections
    """
    if x1.shape[1] != x2.shape[1]:
        raise ValueError("Input arrays must have same #columns")
    # broadcast [m1, 1, n] against [1, m2, n]; rows match when every column agrees
    return np.all(x1[:, None, :] == x2[None, :, :], axis=2)
def np_to_variable(x, is_cuda=True, dtype=torch.FloatTensor):
    """Wrap a numpy array as a torch Variable of `dtype` (CUDA by default)."""
    v = Variable(torch.from_numpy(x).type(dtype))
    if is_cuda:
        v = v.cuda()
    return v
def gather_nd(x, index):
    """
    :param x: n dimensional tensor [x0, x1, x2, ... x{n-1}, dim]
    :param index: [num, n-1] where each row contains the indices we'll use
    :return: [num, dim]
    """
    nd = x.dim() - 1
    assert nd > 0
    assert index.dim() == 2
    assert index.size(1) == nd
    dim = x.size(-1)
    # fold each multi-index row into a single row-major linear offset
    flat_index = index[:, nd - 1].clone()
    stride = x.size(nd - 1)
    for col in range(nd - 2, -1, -1):  # [n-2, n-3, ..., 1, 0]
        flat_index += index[:, col] * stride
        stride *= x.size(col)
    return x.view(-1, dim)[flat_index]
def enumerate_by_image(im_inds):
    """Yield (image_index, start, end) for each run of equal values in a
    sorted per-box image-index tensor."""
    values = im_inds.cpu().numpy()
    run_value = int(values[0])
    run_start = 0
    for position, value in enumerate(values):
        if value != run_value:
            yield run_value, run_start, position
            run_value = int(value)
            run_start = position
    yield run_value, run_start, len(values)
def diagonal_inds(tensor):
    """
    Returns the indices required to go along first 2 dims of tensor in diag fashion
    :param tensor: thing
    :return: LongTensor of linear indices [0, n+1, 2(n+1), ...]
    """
    assert tensor.dim() >= 2
    assert tensor.size(0) == tensor.size(1)
    n = tensor.size(0)
    steps = tensor.new(n).long()
    torch.arange(0, n, out=steps)
    # stepping n+1 positions in row-major order moves one step down the diagonal
    return (n + 1) * steps
def enumerate_imsize(im_sizes):
    """Yield (i, start, end, h, w, scale, num_anchors) with running anchor offsets."""
    start = 0
    for i, (h, w, scale, num_anchors) in enumerate(im_sizes):
        count = int(num_anchors)
        yield i, start, start + count, h, w, scale, count
        start += count
def argsort_desc(scores):
    """
    Returns the indices that sort scores descending in a smart way
    :param scores: Numpy array of arbitrary size
    :return: an array of size [numel(scores), dim(scores)] where each row is the index you'd
        need to get the score.
    """
    flat_order = np.argsort(-scores.ravel())
    return np.column_stack(np.unravel_index(flat_order, scores.shape))
def unravel_index(index, dims):
    """Convert flat indices into multi-dim coordinates (like np.unravel_index).

    :param index: 1-d LongTensor of flat (row-major) indices
    :param dims: sequence of dimension sizes
    :return: [len(index), len(dims)] LongTensor of coordinates
    """
    unraveled = []
    remaining = index.clone()
    for d in dims[::-1]:
        unraveled.append(remaining % d)
        # floor division: the original `index_cp /= d` is true division on
        # modern PyTorch and errors on integer tensors
        remaining = remaining // d
    return torch.cat([coord[:, None] for coord in unraveled[::-1]], 1)
def de_chunkize(tensor, chunks):
    """Yield consecutive slices of *tensor* with the lengths listed in *chunks*."""
    start = 0
    for length in chunks:
        yield tensor[start:start + length]
        start += length
def random_choose(tensor, num):
    "randomly choose indices"
    num_choose = min(tensor.size(0), num)
    if num_choose == tensor.size(0):
        return tensor
    # past the early return tensor.size(0) > num, so size=num is correct here
    # Gotta do this in numpy because of https://github.com/pytorch/pytorch/issues/1868
    rand_idx = np.random.choice(tensor.size(0), size=num, replace=False)
    rand_idx = torch.LongTensor(rand_idx).cuda(tensor.get_device())
    chosen = tensor[rand_idx].contiguous()
    # rand_values = tensor.new(tensor.size(0)).float().normal_()
    # _, idx = torch.sort(rand_values)
    #
    # chosen = tensor[idx[:num]].contiguous()
    return chosen
def transpose_packed_sequence_inds(lengths):
    """
    Goes from a TxB packed sequence to a BxT or vice versa. Assumes that nothing is a variable

    :param lengths: per-sequence lengths, sorted descending
    :return: (permutation index array, new per-step lengths)
    """
    new_inds = []
    new_lens = []
    offsets = np.cumsum([0] + lengths)
    max_len = lengths[0]
    active = len(lengths) - 1  # index of the last sequence still running
    for step in range(max_len):
        # drop sequences that have already ended at this timestep
        while active > 0 and lengths[active] <= step:
            active -= 1
        new_inds.append(offsets[:active + 1].copy())
        offsets[:active + 1] += 1
        new_lens.append(active + 1)
    return np.concatenate(new_inds, 0), new_lens
def right_shift_packed_sequence_inds(lengths):
    """
    :param lengths: e.g. [2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1]
    :return: perm indices for the old stuff (TxB) to shift it right 1 slot so as to accomodate
        BOS toks

    visual example: of lengths = [4,3,1,1]
    before:

        a (0)  b (4)  c (7) d (8)
        a (1)  b (5)
        a (2)  b (6)
        a (3)

    after:

        bos a (0)  b (4)  c (7)
        bos a (1)
        bos a (2)
        bos
    """
    inds = []
    offset = 0
    for previous_len, current_len in zip(lengths[:-1], lengths[1:]):
        inds.extend(range(offset, offset + current_len))
        offset += previous_len
    return inds
def clip_grad_norm(named_parameters, max_norm, clip=False, verbose=False):
    r"""Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place
    (only when ``clip`` is True).

    Arguments:
        named_parameters (Iterable[(str, Variable)]): named parameters whose
            gradients enter the norm
        max_norm (float or int): max norm of the gradients
        clip (bool): if True, rescale gradients so the total norm is at most
            ``max_norm``
        verbose (bool): if True, print a per-parameter norm report

    Returns:
        Total norm of the parameters (viewed as a single vector).
    """
    # Bug fix: materialize the iterable. Callers typically pass
    # model.named_parameters(), a generator that would be exhausted after the
    # norm pass, silently turning the clipping loop below into a no-op.
    named_parameters = list(named_parameters)
    max_norm = float(max_norm)

    total_norm = 0
    param_to_norm = {}
    param_to_shape = {}
    for n, p in named_parameters:
        if p.grad is not None:
            param_norm = p.grad.data.norm(2)
            total_norm += param_norm ** 2
            param_to_norm[n] = param_norm
            param_to_shape[n] = p.size()
    total_norm = total_norm ** (1. / 2)

    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1 and clip:
        for _, p in named_parameters:
            if p.grad is not None:
                p.grad.data.mul_(clip_coef)

    if verbose:
        print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
        for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
            print("{:<50s}: {:.3f}, ({})".format(name, norm, param_to_shape[name]))
        print('-------------------------------', flush=True)

    return total_norm
def update_lr(optimizer, lr=1e-4):
    """Set the learning rate of every param group of *optimizer* to *lr*."""
    print("------ Learning rate -> {}".format(lr))
    for group in optimizer.param_groups:
        group['lr'] = lr
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/vg_hdf5.py | import os
from collections import defaultdict
import numpy as np
import copy
import pickle
import scipy.sparse
from PIL import Image
import h5py, json
import torch
from pycocotools.coco import COCO
from torch.utils.data import Dataset
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.utils.box import bbox_overlaps
class vg_hdf5(Dataset):
    def __init__(self, cfg, split="train", transforms=None, num_im=-1, num_val_im=5000,
                 filter_duplicate_rels=True, filter_non_overlap=True, filter_empty_rels=True):
        """Visual Genome scene-graph dataset backed by the VG-SGG HDF5 files.

        :param cfg: config node; only cfg.DATASET.PATH is read here
        :param split: "train" or "test"
        :param transforms: callable applied as transforms(img, target)
        :param num_im: number of images to load (-1 for all)
        :param num_val_im: number of train images reserved for validation
        :param filter_duplicate_rels: drop duplicate relations (train only)
        :param filter_non_overlap: drop relations between non-overlapping boxes
        :param filter_empty_rels: drop images without relations
        """
        assert split == "train" or split == "test", "split must be one of [train, val, test]"
        assert num_im >= -1, "the number of samples must be >= 0"
        # split = 'train' if split == 'test' else 'test'
        self.data_dir = cfg.DATASET.PATH
        self.transforms = transforms
        self.split = split
        self.filter_non_overlap = filter_non_overlap
        self.filter_duplicate_rels = filter_duplicate_rels and self.split == 'train'
        self.roidb_file = os.path.join(self.data_dir, "VG-SGG.h5")
        self.image_file = os.path.join(self.data_dir, "imdb_1024.h5")
        # read in dataset from a h5 file and a dict (json) file
        assert os.path.exists(self.data_dir), \
            "cannot find folder {}, please download the visual genome data into this folder".format(self.data_dir)
        self.im_h5 = h5py.File(self.image_file, 'r')
        self.info = json.load(open(os.path.join(self.data_dir, "VG-SGG-dicts.json"), 'r'))
        self.im_refs = self.im_h5['images'] # image data reference
        im_scale = self.im_refs.shape[2]
        # add background class
        self.info['label_to_idx']['__background__'] = 0
        self.class_to_ind = self.info['label_to_idx']
        # class names ordered by their contiguous index
        self.ind_to_classes = sorted(self.class_to_ind, key=lambda k:
                                     self.class_to_ind[k])
        # cfg.ind_to_class = self.ind_to_classes
        self.predicate_to_ind = self.info['predicate_to_idx']
        self.predicate_to_ind['__background__'] = 0
        self.ind_to_predicates = sorted(self.predicate_to_ind, key=lambda k:
                                        self.predicate_to_ind[k])
        # cfg.ind_to_predicate = self.ind_to_predicates
        self.split_mask, self.image_index, self.im_sizes, self.gt_boxes, self.gt_classes, self.relationships = load_graphs(
            self.roidb_file, self.image_file,
            self.split, num_im, num_val_im=num_val_im,
            filter_empty_rels=filter_empty_rels,
            filter_non_overlap=filter_non_overlap and split == "train",
        )
        self.json_category_id_to_contiguous_id = self.class_to_ind
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }
    @property
    def coco(self):
        """
        :return: a Coco-like object that we can use to evaluate detection!

        Builds an in-memory pycocotools COCO index from the ground-truth
        boxes/classes (xyxy converted to xywh with the inclusive +1
        convention), so standard COCO detection metrics can be run.
        """
        anns = []
        for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
            for cls, box in zip(cls_array.tolist(), box_array.tolist()):
                anns.append({
                    'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                    'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
                    'category_id': cls,
                    'id': len(anns),
                    'image_id': i,
                    'iscrowd': 0,
                })
        fauxcoco = COCO()
        fauxcoco.dataset = {
            'info': {'description': 'ayy lmao'},
            'images': [{'id': i} for i in range(self.__len__())],
            'categories': [{'supercategory': 'person',
                            'id': i, 'name': name} for i, name in enumerate(self.ind_to_classes) if name != '__background__'],
            'annotations': anns,
        }
        fauxcoco.createIndex()
        return fauxcoco
    def _im_getter(self, idx):
        """Load image `idx` from the HDF5 blob as an HxWxC array (padding cropped)."""
        w, h = self.im_sizes[idx, :]
        ridx = self.image_index[idx]
        im = self.im_refs[ridx]
        im = im[:, :h, :w] # crop out
        im = im.transpose((1,2,0)) # c h w -> h w c
        return im
    def __len__(self):
        # number of images in this split
        return len(self.image_index)
    def __getitem__(self, index):
        """
        Return one training sample: (transformed image, BoxList target, index).

        The target carries three extra fields:
          - "labels": per-box class indices,
          - "pred_labels": dense [N, N] predicate matrix (0 = no relation),
          - "relation_labels": [R, 3] (subj_idx, obj_idx, predicate) triplets.
        """
        # get image (HWC array -> PIL)
        img = Image.fromarray(self._im_getter(index)); width, height = img.size
        # get object bounding boxes, labels and relations (copies: arrays are shared)
        obj_boxes = self.gt_boxes[index].copy()
        obj_labels = self.gt_classes[index].copy()
        obj_relation_triplets = self.relationships[index].copy()
        if self.filter_duplicate_rels:
            # Collapse duplicate (subj, obj) pairs by sampling one predicate
            # at random — training-only augmentation, hence the assert.
            assert self.split == 'train'
            old_size = obj_relation_triplets.shape[0]
            all_rel_sets = defaultdict(list)
            for (o0, o1, r) in obj_relation_triplets:
                all_rel_sets[(o0, o1)].append(r)
            obj_relation_triplets = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
            obj_relation_triplets = np.array(obj_relation_triplets)
        # Densify triplets into an N x N predicate adjacency matrix.
        obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))
        for i in range(obj_relation_triplets.shape[0]):
            subj_id = obj_relation_triplets[i][0]
            obj_id = obj_relation_triplets[i][1]
            pred = obj_relation_triplets[i][2]
            obj_relations[subj_id, obj_id] = pred
        target_raw = BoxList(obj_boxes, (width, height), mode="xyxy")
        # transforms resize/flip both image and boxes consistently
        img, target = self.transforms(img, target_raw)
        target.add_field("labels", torch.from_numpy(obj_labels))
        target.add_field("pred_labels", torch.from_numpy(obj_relations))
        target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
        target = target.clip_to_image(remove_empty=False)
        return img, target, index
    def get_groundtruth(self, index):
        """
        Return the ground-truth BoxList for image `index` at its original
        resolution (no transforms), with the same extra fields as __getitem__
        plus an all-zero "difficult" field for VOC-style evaluation code.
        """
        width, height = self.im_sizes[index, :]
        # get object bounding boxes, labels and relations (copies: arrays are shared)
        obj_boxes = self.gt_boxes[index].copy()
        obj_labels = self.gt_classes[index].copy()
        obj_relation_triplets = self.relationships[index].copy()
        if self.filter_duplicate_rels:
            # Collapse duplicate (subj, obj) pairs by sampling one predicate
            # at random — mirrors __getitem__; train-only.
            assert self.split == 'train'
            old_size = obj_relation_triplets.shape[0]
            all_rel_sets = defaultdict(list)
            for (o0, o1, r) in obj_relation_triplets:
                all_rel_sets[(o0, o1)].append(r)
            obj_relation_triplets = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
            obj_relation_triplets = np.array(obj_relation_triplets)
        # Densify triplets into an N x N predicate adjacency matrix.
        obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))
        for i in range(obj_relation_triplets.shape[0]):
            subj_id = obj_relation_triplets[i][0]
            obj_id = obj_relation_triplets[i][1]
            pred = obj_relation_triplets[i][2]
            obj_relations[subj_id, obj_id] = pred
        target = BoxList(obj_boxes, (width, height), mode="xyxy")
        target.add_field("labels", torch.from_numpy(obj_labels))
        target.add_field("pred_labels", torch.from_numpy(obj_relations))
        target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
        target.add_field("difficult", torch.from_numpy(obj_labels).clone().fill_(0))
        return target
def get_img_info(self, img_id):
w, h = self.im_sizes[img_id, :]
return {"height": h, "width": w}
def map_class_id_to_class_name(self, class_id):
return self.ind_to_classes[class_id]
def load_graphs(graphs_file, images_file, mode='train', num_im=-1, num_val_im=0, filter_empty_rels=True,
                filter_non_overlap=False):
    """
    Load the file containing the GT boxes and relations, as well as the dataset split
    :param graphs_file: HDF5 with boxes/labels/relations and per-image index ranges
    :param images_file: HDF5 with per-image widths/heights (and pixel data elsewhere)
    :param mode: (train, val, or test)
    :param num_im: Number of images we want (-1 = all)
    :param num_val_im: Number of validation images carved out of the train split
    :param filter_empty_rels: drop images with no relations
    :param filter_non_overlap: If training, filter images whose relation pairs dont overlap.
    :return: split_mask: boolean mask over ALL images selecting the kept ones
             image_index: numpy array corresponding to the index of images we're using
             im_sizes: [num_im, 2] array of (width, height)
             boxes: List where each element is a [num_gt, 4] array of ground
                    truth boxes (x1, y1, x2, y2)
             gt_classes: List where each element is a [num_gt] array of classes
             relationships: List where each element is a [num_r, 3] array of
                    (box_ind_1, box_ind_2, predicate) relationships
    """
    if mode not in ('train', 'val', 'test'):
        raise ValueError('{} invalid'.format(mode))
    roi_h5 = h5py.File(graphs_file, 'r')
    im_h5 = h5py.File(images_file, 'r')
    data_split = roi_h5['split'][:]
    # split id 0 covers both train and val; val is carved out below via num_val_im
    split = 2 if mode == 'test' else 0
    split_mask = data_split == split
    # Filter out images without bounding boxes
    split_mask &= roi_h5['img_to_first_box'][:] >= 0
    if filter_empty_rels:
        split_mask &= roi_h5['img_to_first_rel'][:] >= 0
    image_index = np.where(split_mask)[0]
    if num_im > -1:
        image_index = image_index[:num_im]
    if num_val_im > 0:
        # first num_val_im images of the train split become the val split
        if mode == 'val':
            image_index = image_index[:num_val_im]
        elif mode == 'train':
            image_index = image_index[num_val_im:]
    # Rebuild the mask from the (possibly truncated) index list.
    split_mask = np.zeros_like(data_split).astype(bool)
    split_mask[image_index] = True
    # Get box information
    all_labels = roi_h5['labels'][:, 0]
    all_boxes = roi_h5['boxes_{}'.format(1024)][:]  # boxes at the 1024-px scale; indexed per image below
    assert np.all(all_boxes[:, :2] >= 0)  # sanity check
    assert np.all(all_boxes[:, 2:] > 0)  # no empty box
    # convert from xc, yc, w, h to x1, y1, x2, y2 (in-place, order matters:
    # after the first line the [:, :2] columns already hold x1, y1)
    all_boxes[:, :2] = all_boxes[:, :2] - all_boxes[:, 2:] / 2
    all_boxes[:, 2:] = all_boxes[:, :2] + all_boxes[:, 2:]
    im_to_first_box = roi_h5['img_to_first_box'][split_mask]
    im_to_last_box = roi_h5['img_to_last_box'][split_mask]
    im_to_first_rel = roi_h5['img_to_first_rel'][split_mask]
    im_to_last_rel = roi_h5['img_to_last_rel'][split_mask]
    im_widths = im_h5["image_widths"][split_mask]
    im_heights = im_h5["image_heights"][split_mask]
    # load relation labels
    _relations = roi_h5['relationships'][:]
    _relation_predicates = roi_h5['predicates'][:, 0]
    assert (im_to_first_rel.shape[0] == im_to_last_rel.shape[0])
    assert (_relations.shape[0] == _relation_predicates.shape[0])  # sanity check
    # Get everything by image.
    im_sizes = []
    image_index_valid = []
    boxes = []
    gt_classes = []
    relationships = []
    for i in range(len(image_index)):
        boxes_i = all_boxes[im_to_first_box[i]:im_to_last_box[i] + 1, :]
        gt_classes_i = all_labels[im_to_first_box[i]:im_to_last_box[i] + 1]
        if im_to_first_rel[i] >= 0:
            predicates = _relation_predicates[im_to_first_rel[i]:im_to_last_rel[i] + 1]
            # relation endpoints are global box ids; shift to image-local ids
            obj_idx = _relations[im_to_first_rel[i]:im_to_last_rel[i] + 1] - im_to_first_box[i]
            assert np.all(obj_idx >= 0)
            assert np.all(obj_idx < boxes_i.shape[0])
            rels = np.column_stack((obj_idx, predicates))
        else:
            assert not filter_empty_rels
            rels = np.zeros((0, 3), dtype=np.int32)
        if filter_non_overlap:
            # keep only relations whose subject/object boxes overlap; if none
            # survive, drop the whole image from the split
            assert mode == 'train'
            inters = bbox_overlaps(torch.from_numpy(boxes_i).float(), torch.from_numpy(boxes_i).float()).numpy()
            rel_overs = inters[rels[:, 0], rels[:, 1]]
            inc = np.where(rel_overs > 0.0)[0]
            if inc.size > 0:
                rels = rels[inc]
            else:
                split_mask[image_index[i]] = 0
                continue
        image_index_valid.append(image_index[i])
        im_sizes.append(np.array([im_widths[i], im_heights[i]]))
        boxes.append(boxes_i)
        gt_classes.append(gt_classes_i)
        relationships.append(rels)
    im_sizes = np.stack(im_sizes, 0)
    return split_mask, image_index_valid, im_sizes, boxes, gt_classes, relationships
| 12,110 | 40.618557 | 129 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/build.py | import copy
import bisect
import torch
from torch.utils import data
from .vg_hdf5 import vg_hdf5
from . import samplers
from .transforms import build_transforms
from .collate_batch import BatchCollator
from lib.scene_parser.rcnn.utils.comm import get_world_size, get_rank
def make_data_sampler(dataset, shuffle, distributed):
    """Pick a torch sampler: distributed > random (if shuffle) > sequential."""
    if distributed:
        return samplers.DistributedSampler(dataset, shuffle=shuffle)
    sampler_cls = (torch.utils.data.sampler.RandomSampler
                   if shuffle else torch.utils.data.sampler.SequentialSampler)
    return sampler_cls(dataset)
def _quantize(x, bins):
bins = copy.copy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
img_info = dataset.get_img_info(i)
aspect_ratio = float(img_info["height"]) / float(img_info["width"])
aspect_ratios.append(aspect_ratio)
return aspect_ratios
def make_batch_data_sampler(
    dataset, sampler, aspect_grouping, images_per_batch, num_iters=None, start_iter=0
):
    """
    Wrap `sampler` into a batch sampler. With aspect_grouping, images are
    batched by quantized aspect-ratio group; otherwise plain batching is used.
    When num_iters is given, the batch sampler is additionally wrapped so it
    yields exactly num_iters batches starting at start_iter.
    """
    if aspect_grouping:
        groups = aspect_grouping
        if not isinstance(groups, (list, tuple)):
            groups = [groups]
        group_ids = _quantize(_compute_aspect_ratios(dataset), groups)
        batch_sampler = samplers.GroupedBatchSampler(
            sampler, group_ids, images_per_batch, drop_uneven=False
        )
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_batch, drop_last=False
        )
    if num_iters is not None:
        batch_sampler = samplers.IterationBasedBatchSampler(
            batch_sampler, num_iters, start_iter
        )
    return batch_sampler
def build_data_loader(cfg, split="train", num_im=-1, is_distributed=False, start_iter=0):
    """
    Build a DataLoader for the configured dataset.

    Only the Visual Genome hdf5 dataset ("vg" / "benchmark") is supported;
    any other cfg.DATASET.NAME raises NotImplementedError.
    """
    num_gpus = get_world_size()
    if cfg.DATASET.NAME == "vg" and cfg.DATASET.MODE == "benchmark":
        transforms = build_transforms(cfg, is_train=True if split=="train" else False)
        dataset = vg_hdf5(cfg, split=split, transforms=transforms, num_im=num_im)
        sampler = make_data_sampler(dataset, True if split == "train" else False, is_distributed)
        images_per_batch = cfg.DATASET.TRAIN_BATCH_SIZE if split == "train" else cfg.DATASET.TEST_BATCH_SIZE
        if get_rank() == 0:
            print("images_per_batch: {}, num_gpus: {}".format(images_per_batch, num_gpus))
        # Training batch size is global; divide across GPUs. Test runs per-GPU.
        images_per_gpu = images_per_batch // num_gpus if split == "train" else images_per_batch
        start_iter = start_iter if split == "train" else 0
        num_iters = cfg.SOLVER.MAX_ITER if split == "train" else None
        aspect_grouping = [1] if cfg.DATASET.ASPECT_RATIO_GROUPING else []
        batch_sampler = make_batch_data_sampler(
            dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter
        )
        collator = BatchCollator(cfg.DATASET.SIZE_DIVISIBILITY)
        # NOTE(review): num_workers is set to the batch size, not a worker
        # count from cfg — looks unintentional; confirm before changing.
        dataloader = data.DataLoader(dataset,
                                     num_workers=images_per_batch,
                                     batch_sampler=batch_sampler,
                                     collate_fn=collator,
                                     )
        return dataloader
    else:
        raise NotImplementedError("Unsupported dataset {}.".format(cfg.DATASET.NAME))
# cfg.data_dir = "data/vg"
| 3,399 | 40.463415 | 108 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/evaluation/gqa_coco/gqa_coco_eval.py | import logging
import tempfile
import os
import torch
from collections import OrderedDict
from tqdm import tqdm
from lib.scene_parser.mask_rcnn.modeling.roi_heads.mask_head.inference import Masker
from lib.scene_parser.mask_rcnn.structures.bounding_box import BoxList
from lib.scene_parser.mask_rcnn.structures.boxlist_ops import boxlist_iou
def do_gqa_coco_evaluation(
    dataset,
    predictions,
    box_only,
    output_folder,
    iou_types,
    expected_results,
    expected_results_sigma_tol,
):
    """
    Run COCO-style evaluation on GQA predictions.

    With box_only=True only box-proposal recall is computed and saved; returns
    None. Otherwise converts predictions per iou_type, scores them with
    COCOeval and returns (GQACOCOResults, per-type result dicts).
    """
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        logger.info("Evaluating bbox proposals")
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        res = GQACOCOResults("box_proposal")
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(
                    predictions, dataset, area=area, limit=limit
                )
                key = "AR{}@{:d}".format(suffix, limit)
                res.results["box_proposal"][key] = stats["ar"].item()
        logger.info(res)
        check_expected_results(res, expected_results, expected_results_sigma_tol)
        if output_folder:
            torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
        return
    logger.info("Preparing results for COCO format")
    gqa_coco_results = {}
    if "bbox" in iou_types:
        logger.info("Preparing bbox results")
        gqa_coco_results["bbox"] = prepare_for_gqa_coco_detection(predictions, dataset)
    # NOTE(review): prepare_for_gqa_coco_segmentation / _keypoint are not
    # defined in this module's visible code — confirm they exist before
    # requesting "segm"/"keypoints" iou_types.
    if "segm" in iou_types:
        logger.info("Preparing segm results")
        gqa_coco_results["segm"] = prepare_for_gqa_coco_segmentation(predictions, dataset)
    if 'keypoints' in iou_types:
        logger.info('Preparing keypoints results')
        gqa_coco_results['keypoints'] = prepare_for_gqa_coco_keypoint(predictions, dataset)
    results = GQACOCOResults(*iou_types)
    logger.info("Evaluating predictions")
    for iou_type in iou_types:
        # The temp file is only a fallback name; with an output_folder the
        # JSON is written there instead (and kept).
        with tempfile.NamedTemporaryFile() as f:
            file_path = f.name
            if output_folder:
                file_path = os.path.join(output_folder, iou_type + ".json")
            res = evaluate_predictions_on_gqa_coco(
                dataset.coco, gqa_coco_results[iou_type], file_path, iou_type
            )
            results.update(res)
    logger.info(results)
    check_expected_results(results, expected_results, expected_results_sigma_tol)
    if output_folder:
        torch.save(results, os.path.join(output_folder, "gqa_coco_results.pth"))
    return results, gqa_coco_results
def prepare_for_gqa_coco_detection(predictions, dataset):
    """Convert per-image BoxList predictions into COCO 'bbox' result dicts."""
    gqa_coco_results = []
    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]
        if len(prediction) == 0:
            continue
        info = dataset.get_img_info(image_id)
        # rescale from network input size back to the original image, then
        # switch to COCO's xywh box convention
        prediction = prediction.resize((info["width"], info["height"]))
        prediction = prediction.convert("xywh")
        boxes = prediction.bbox.tolist()
        scores = prediction.get_field("scores").tolist()
        labels = prediction.get_field("labels").tolist()
        mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
        for box, score, category in zip(boxes, scores, mapped_labels):
            gqa_coco_results.append({
                "image_id": original_id,
                "category_id": category,
                "bbox": box,
                "score": score,
            })
    return gqa_coco_results
# inspired from Detectron
def evaluate_box_proposals(
    predictions, dataset, thresholds=None, area="all", limit=None
):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.

    Returns a dict with average recall ("ar"), per-threshold recalls, the
    thresholds used, the per-GT best-overlap values and the GT count.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0 ** 2, 1e5 ** 2],  # all
        [0 ** 2, 32 ** 2],  # small
        [32 ** 2, 96 ** 2],  # medium
        [96 ** 2, 1e5 ** 2],  # large
        [96 ** 2, 128 ** 2],  # 96-128
        [128 ** 2, 256 ** 2],  # 128-256
        [256 ** 2, 512 ** 2],  # 256-512
        [512 ** 2, 1e5 ** 2],
    ]  # 512-inf
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0
    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]
        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))
        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field("objectness").sort(descending=True)[1]
        prediction = prediction[inds]
        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
            "xyxy"
        )
        gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
        if len(gt_boxes) == 0:
            continue
        # keep only GT boxes whose area falls inside the requested range
        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]
        num_pos += len(gt_boxes)
        if len(gt_boxes) == 0:
            continue
        if len(prediction) == 0:
            continue
        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]
        overlaps = boxlist_iou(prediction, gt_boxes)
        _gt_overlaps = torch.zeros(len(gt_boxes))
        # Greedy one-to-one matching: repeatedly take the globally best
        # (proposal, gt) pair and remove both from further consideration.
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)
            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = torch.cat(gt_overlaps, dim=0)
    gt_overlaps, _ = torch.sort(gt_overlaps)
    if thresholds is None:
        step = 0.05
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
def evaluate_predictions_on_gqa_coco(
    coco_gt, coco_results, json_result_file, iou_type="bbox"
):
    """Dump results to JSON and score them with the official COCOeval."""
    import json
    with open(json_result_file, "w") as f:
        json.dump(coco_results, f)
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    # loadRes cannot handle an empty result list; use an empty COCO instead.
    if coco_results:
        coco_dt = coco_gt.loadRes(str(json_result_file))
    else:
        coco_dt = COCO()
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval
class GQACOCOResults(object):
    """Accumulates COCO-style evaluation numbers, keyed by iou task type.

    `self.results` maps each requested iou_type (e.g. "bbox") to an
    OrderedDict of metric name -> value, initialized to -1 and filled in by
    `update()` from a finished pycocotools COCOeval.
    """
    METRICS = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "box_proposal": [
            "AR@100",
            "ARs@100",
            "ARm@100",
            "ARl@100",
            "AR@1000",
            "ARs@1000",
            "ARm@1000",
            "ARl@1000",
        ],
        "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
    }
    def __init__(self, *iou_types):
        allowed_types = ("box_proposal", "bbox", "segm", "keypoints")
        assert all(iou_type in allowed_types for iou_type in iou_types)
        results = OrderedDict()
        for iou_type in iou_types:
            results[iou_type] = OrderedDict(
                [(metric, -1) for metric in GQACOCOResults.METRICS[iou_type]]
            )
        self.results = results
    def update(self, coco_eval):
        """Copy the summarized stats of a finished COCOeval into `results`."""
        if coco_eval is None:
            return
        from pycocotools.cocoeval import COCOeval
        assert isinstance(coco_eval, COCOeval)
        s = coco_eval.stats
        iou_type = coco_eval.params.iouType
        res = self.results[iou_type]
        # Bug fix: this previously read COCOResults.METRICS — a name that does
        # not exist in this module (left over from maskrcnn_benchmark's
        # COCOResults class) — and raised NameError on the first update().
        metrics = GQACOCOResults.METRICS[iou_type]
        # COCOeval.summarize() emits stats in the same order as METRICS.
        for idx, metric in enumerate(metrics):
            res[metric] = s[idx]
    def __repr__(self):
        results = '\n'
        for task, metrics in self.results.items():
            results += 'Task: {}\n'.format(task)
            metric_names = metrics.keys()
            metric_vals = ['{:.4f}'.format(v) for v in metrics.values()]
            results += (', '.join(metric_names) + '\n')
            results += (', '.join(metric_vals) + '\n')
        return results
def check_expected_results(results, expected_results, sigma_tol):
    """Log PASS/FAIL for each metric against its mean +/- sigma_tol * std band."""
    if not expected_results:
        return
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    for task, metric, (mean, std) in expected_results:
        actual_val = results.results[task][metric]
        lo = mean - sigma_tol * std
        hi = mean + sigma_tol * std
        within_band = lo < actual_val < hi
        msg = (
            "{} > {} sanity check (actual vs. expected): "
            "{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
        ).format(task, metric, actual_val, mean, std, lo, hi)
        if within_band:
            logger.info("PASS: " + msg)
        else:
            logger.error("FAIL: " + msg)
| 10,982 | 34.201923 | 91 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/evaluation/sg/sg_eval.py | import numpy as np
import torch
from .evaluator import BasicSceneGraphEvaluator
def do_sg_evaluation(dataset, predictions, predictions_pred, output_folder, logger):
    """
    Scene graph generation evaluation.

    Scores every image twice — once with the motif-style
    BasicSceneGraphEvaluator and once with the IMP-style `evaluate` below —
    and logs recall@{20,50,100} from both. `predictions` holds per-image
    object BoxLists; `predictions_pred` holds the relation pair/score lists.
    """
    evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=False)
    top_Ns = [20, 50, 100]
    modes = ["sgdet"]
    result_dict = {}
    for mode in modes:
        result_dict[mode + '_recall'] = {20:[], 50:[], 100:[]}
        for image_id, (prediction, prediction_pred) in enumerate(zip(predictions, predictions_pred)):
            img_info = dataset.get_img_info(image_id)
            image_width = img_info["width"]
            image_height = img_info["height"]
            gt_boxlist = dataset.get_groundtruth(image_id)
            gt_entry = {
                'gt_classes': gt_boxlist.get_field("labels").numpy(),
                'gt_relations': gt_boxlist.get_field("relation_labels").numpy().astype(int),
                'gt_boxes': gt_boxlist.bbox.numpy(),
            }
            # import pdb; pdb.set_trace()
            # rescale predicted boxes back to the original image size
            prediction = prediction.resize((image_width, image_height))
            obj_scores = prediction.get_field("scores").numpy()
            all_rels = prediction_pred.get_field("idx_pairs").numpy()
            fp_pred = prediction_pred.get_field("scores").numpy()
            # multiplier = np.ones((obj_scores.shape[0], obj_scores.shape[0]))
            # np.fill_diagonal(multiplier, 0)
            # fp_pred = fp_pred * multiplier.reshape(obj_scores.shape[0] * (obj_scores.shape[0] - 1), 1)
            # triplet score = subj score * obj score * best non-bg predicate score
            scores = np.column_stack((
                obj_scores[all_rels[:,0]],
                obj_scores[all_rels[:,1]],
                fp_pred[:, 1:].max(1)
            )).prod(1)
            sorted_inds = np.argsort(-scores)
            sorted_inds = sorted_inds[scores[sorted_inds] > 0] #[:100]
            pred_entry = {
                'pred_boxes': prediction.bbox.numpy(),
                'pred_classes': prediction.get_field("labels").numpy(),
                'obj_scores': prediction.get_field("scores").numpy(),
                'pred_rel_inds': all_rels[sorted_inds],
                'rel_scores': fp_pred[sorted_inds],
            }
            evaluator[mode].evaluate_scene_graph_entry(
                gt_entry,
                pred_entry,
            )
            # Second, independent scoring path (IMP-style recall).
            evaluate(gt_boxlist.get_field("labels"), gt_boxlist.bbox, gt_boxlist.get_field("pred_labels"),
                     prediction.bbox, prediction.get_field("scores"), prediction.get_field("labels"),
                     prediction_pred.get_field("idx_pairs"), prediction_pred.get_field("scores"),
                     top_Ns, result_dict, mode)
        evaluator[mode].print_stats(logger)
        logger.info('=====================' + mode + '(IMP)' + '=========================')
        logger.info("{}-recall@20: {}".format(mode, np.mean(np.array(result_dict[mode + '_recall'][20]))))
        logger.info("{}-recall@50: {}".format(mode, np.mean(np.array(result_dict[mode + '_recall'][50]))))
        logger.info("{}-recall@100: {}".format(mode, np.mean(np.array(result_dict[mode + '_recall'][100]))))
def evaluate(gt_classes, gt_boxes, gt_rels,
             obj_rois, obj_scores, obj_labels,
             rel_inds, rel_scores,
             top_Ns, result_dict,
             mode, iou_thresh=0.5):
    """
    Compute IMP-style triplet recall@K for one image and append the per-K
    values into result_dict[mode + '_recall'].

    gt_rels is a dense [N, N] predicate matrix (0 = no relation). Returns
    the score-sorted predicted triplets and their boxes for visualization,
    or (None, None) when the image has no ground-truth relations.
    """
    gt_classes = gt_classes.cpu()
    gt_boxes = gt_boxes.cpu()
    gt_rels = gt_rels.cpu()
    obj_rois = obj_rois.cpu()
    obj_scores = obj_scores.cpu()
    obj_labels = obj_labels.cpu()
    rel_inds = rel_inds.cpu()
    rel_scores = rel_scores.cpu()
    if gt_rels.ne(0).sum() == 0:
        return (None, None)
    # ix_w_rel is computed but never used below — kept for parity with the
    # commented-out relabeling experiment.
    rel_sum = ((gt_rels.sum(1) > 0).int() + (gt_rels.sum(0) > 0).int())
    ix_w_rel = rel_sum.nonzero().numpy().squeeze()
    # label = (((gt_rel_label.sum(1) == 0).int() + (gt_rel_label.sum(0) == 0).int()) == 2)
    # change_ix = label.nonzero()
    gt_boxes = gt_boxes.numpy()
    num_gt_boxes = gt_boxes.shape[0]
    # nonzero() on the dense matrix yields [R, 2] (subj, obj) index pairs in
    # the same order as the flattened-predicate extraction below.
    gt_relations = gt_rels.nonzero().numpy()
    gt_classes = gt_classes.view(-1, 1).numpy()
    gt_rels_view = gt_rels.contiguous().view(-1)
    gt_pred_labels = gt_rels_view[gt_rels_view.nonzero().squeeze()].contiguous().view(-1, 1).numpy()
    num_gt_relations = gt_relations.shape[0]
    if num_gt_relations == 0:
        return (None, None)
    gt_class_scores = np.ones(num_gt_boxes)
    gt_predicate_scores = np.ones(num_gt_relations)
    gt_triplets, gt_triplet_boxes, _ = _triplet(gt_pred_labels,
                                                gt_relations,
                                                gt_classes,
                                                gt_boxes,
                                                gt_predicate_scores,
                                                gt_class_scores)
    # pred
    box_preds = obj_rois.numpy()
    num_boxes = box_preds.shape[0]
    predicate_preds = rel_scores.numpy()
    # drop the background column, then take the best remaining predicate
    predicate_preds = predicate_preds[:, 1:]
    predicates = np.argmax(predicate_preds, 1).ravel() + 1
    predicate_scores = predicate_preds.max(axis=1).ravel()
    relations = rel_inds.numpy()
    # if relations.shape[0] != num_boxes * (num_boxes - 1):
    #     pdb.set_trace()
    # assert(relations.shape[0] == num_boxes * (num_boxes - 1))
    assert(predicates.shape[0] == relations.shape[0])
    num_relations = relations.shape[0]
    if mode =='predcls':
        # predicate classification task:
        # use ground truth bounding boxes and classes
        assert(num_boxes == num_gt_boxes)
        classes = gt_classes
        class_scores = gt_class_scores
        boxes = gt_boxes
    elif mode =='sgcls':
        assert(num_boxes == num_gt_boxes)
        # scene graph classification task:
        # use gt boxes, but predicted classes
        classes = obj_labels.numpy() # np.argmax(class_preds, 1)
        class_scores = obj_scores.numpy()
        boxes = gt_boxes
    elif mode =='sgdet' or mode == 'sgdet+':
        # scene graph detection task:
        # use predicted boxes and predicted classes
        classes = obj_labels.numpy() # np.argmax(class_preds, 1)
        class_scores = obj_scores.numpy() # class_preds.max(axis=1)
        # boxes = []
        # for i, c in enumerate(classes):
        #     boxes.append(box_preds[i, c*4:(c+1)*4])
        # boxes = np.vstack(boxes)
        boxes = box_preds
    else:
        raise NotImplementedError('Incorrect Mode! %s' % mode)
    pred_triplets, pred_triplet_boxes, relation_scores = \
        _triplet(predicates, relations, classes, boxes,
                 predicate_scores, class_scores, is_pred=False)
    sorted_inds = np.argsort(relation_scores)[::-1]
    sorted_inds_obj = np.argsort(class_scores)[::-1]
    # compute recall at each K, keeping only the top-K scored triplets
    for k in result_dict[mode + '_recall']:
        this_k = min(k, num_relations)
        keep_inds = sorted_inds[:this_k]
        keep_inds_obj = sorted_inds_obj[:this_k]
        # triplets_valid = _relation_recall_triplet(gt_triplets,
        #                                 pred_triplets[keep_inds,:],
        #                                 gt_triplet_boxes,
        #                                 pred_triplet_boxes[keep_inds,:],
        #                                 iou_thresh)
        recall = _relation_recall(gt_triplets,
                                  pred_triplets[keep_inds,:],
                                  gt_triplet_boxes,
                                  pred_triplet_boxes[keep_inds,:],
                                  iou_thresh)
        num_gt = gt_triplets.shape[0]
        result_dict[mode + '_recall'][k].append(recall / num_gt)
        # result_dict[mode + '_triplets'][k].append(triplets_valid)
    # for visualization
    return pred_triplets[sorted_inds, :], pred_triplet_boxes[sorted_inds, :]
def _triplet(predicates, relations, classes, boxes,
predicate_scores, class_scores, is_pred=False):
# format predictions into triplets
# compute the overlaps between boxes
if is_pred:
overlaps = bbox_overlaps(torch.from_numpy(boxes).contiguous(), torch.from_numpy(boxes).contiguous())
assert(predicates.shape[0] == relations.shape[0])
num_relations = relations.shape[0]
triplets = np.zeros([num_relations, 3]).astype(np.int32)
triplet_boxes = np.zeros([num_relations, 8]).astype(np.int32)
triplet_scores = np.zeros([num_relations]).astype(np.float32)
for i in range(num_relations):
triplets[i, 1] = predicates[i]
sub_i, obj_i = relations[i,:2]
triplets[i, 0] = classes[sub_i]
triplets[i, 2] = classes[obj_i]
triplet_boxes[i, :4] = boxes[sub_i, :]
triplet_boxes[i, 4:] = boxes[obj_i, :]
# compute triplet score
score = class_scores[sub_i]
score *= class_scores[obj_i]
if is_pred:
if overlaps[sub_i, obj_i] == 0:
score *= 0
else:
score *= predicate_scores[i]
else:
score *= predicate_scores[i]
triplet_scores[i] = score
return triplets, triplet_boxes, triplet_scores
def _relation_recall(gt_triplets, pred_triplets,
                     gt_boxes, pred_boxes, iou_thresh):
    """
    Count how many GT triplets are matched by at least one prediction: the
    (subject, predicate, object) class triple must be identical and both the
    subject and object boxes must reach IoU >= iou_thresh.

    Returns the count as a float (caller divides by the number of GT
    triplets to get recall@K).
    """
    num_correct_pred_gt = 0
    for gt, gt_box in zip(gt_triplets, gt_boxes):
        # Vectorized replacement of the original per-prediction Python loop:
        # a prediction matches iff its whole class triple equals the GT's.
        keep = np.all(pred_triplets == gt, axis=1)
        if not np.any(keep):
            continue
        boxes = pred_boxes[keep, :]
        sub_iou = iou(gt_box[:4], boxes[:, :4])
        obj_iou = iou(gt_box[4:], boxes[:, 4:])
        # At least one candidate must pass the IoU threshold on BOTH boxes.
        if np.any((sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)):
            num_correct_pred_gt += 1
    return float(num_correct_pred_gt)
def _relation_recall_triplet(gt_triplets, pred_triplets,
                             gt_boxes, pred_boxes, iou_thresh):
    """
    Variant of _relation_recall that, instead of a count, returns the first
    matching predicted triplet and its box pair for every matched GT triplet
    (used for visualization/debugging).
    """
    num_gt = gt_triplets.shape[0]
    num_correct_pred_gt = 0
    triplets_valid = []
    boxes_valid = []
    for gt, gt_box in zip(gt_triplets, gt_boxes):
        # candidates: predictions whose (subj, pred, obj) classes all match
        keep = np.zeros(pred_triplets.shape[0]).astype(bool)
        for i, pred in enumerate(pred_triplets):
            if gt[0] == pred[0] and gt[1] == pred[1] and gt[2] == pred[2]:
                keep[i] = True
        if not np.any(keep):
            continue
        boxes = pred_boxes[keep,:]
        triplets = pred_triplets[keep, :]
        sub_iou = iou(gt_box[:4], boxes[:,:4])
        obj_iou = iou(gt_box[4:], boxes[:,4:])
        # indices passing the IoU threshold on BOTH subject and object boxes
        inds = np.intersect1d(np.where(sub_iou >= iou_thresh)[0],
                              np.where(obj_iou >= iou_thresh)[0])
        if inds.size > 0:
            # keep only the first surviving candidate per GT triplet
            triplets_valid.append(triplets[inds[0]])
            boxes_valid.append(boxes[inds[0]])
            num_correct_pred_gt += 1
    return triplets_valid, boxes_valid
def _object_recall(gt_triplets, pred_triplets,
                   gt_boxes, pred_boxes, iou_thresh):
    """
    Count GT triplets whose subject is matched by some prediction: same
    subject class and subject-box IoU >= iou_thresh (predicate and object
    are ignored). Returned as a float.
    """
    hits = 0
    for gt, gt_box in zip(gt_triplets, gt_boxes):
        same_cls = [i for i, pred in enumerate(pred_triplets) if gt[0] == pred[0]]
        if not same_cls:
            continue
        boxes = pred_boxes[same_cls, :]
        box_iou = iou(gt_box[:4], boxes[:, :4])
        if np.any(box_iou >= iou_thresh):
            hits += 1
    return float(hits)
def _predicate_recall(gt_triplets, pred_triplets,
                      gt_boxes, pred_boxes, iou_thresh):
    """
    Count GT triplets matched on predicate class alone (subject/object
    classes ignored) with both subject and object boxes reaching
    IoU >= iou_thresh. Returned as a float.
    """
    hits = 0
    for gt, gt_box in zip(gt_triplets, gt_boxes):
        same_pred = [i for i, pred in enumerate(pred_triplets) if gt[1] == pred[1]]
        if not same_pred:
            continue
        boxes = pred_boxes[same_pred, :]
        sub_iou = iou(gt_box[:4], boxes[:, :4])
        obj_iou = iou(gt_box[4:], boxes[:, 4:])
        if np.any((sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)):
            hits += 1
    return float(hits)
def iou(gt_box, pred_boxes):
    """
    Pixel-inclusive IoU between one (x1, y1, x2, y2) box and an [N, 4]
    array of boxes; returns an [N] array of overlaps.
    """
    # intersection rectangle, clamped to zero width/height when disjoint
    x1 = np.maximum(gt_box[0], pred_boxes[:, 0])
    y1 = np.maximum(gt_box[1], pred_boxes[:, 1])
    x2 = np.minimum(gt_box[2], pred_boxes[:, 2])
    y2 = np.minimum(gt_box[3], pred_boxes[:, 3])
    inter_w = np.maximum(x2 - x1 + 1., 0.)
    inter_h = np.maximum(y2 - y1 + 1., 0.)
    inters = inter_w * inter_h
    # union = area(gt) + area(pred) - intersection (the +1 keeps the
    # pixel-inclusive convention used by the rest of this module)
    gt_area = (gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.)
    pred_areas = ((pred_boxes[:, 2] - pred_boxes[:, 0] + 1.)
                  * (pred_boxes[:, 3] - pred_boxes[:, 1] + 1.))
    return inters / (gt_area + pred_areas - inters)
| 13,320 | 39.244713 | 108 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/evaluation/sg/evaluator.py | """
Adapted from Danfei Xu. In particular, slow code was removed
"""
import torch
import numpy as np
from functools import reduce
from lib.utils.pytorch_misc import intersect_2d, argsort_desc
from lib.utils.box import bbox_overlaps
MODES = ('sgdet', 'sgcls', 'predcls')
np.set_printoptions(precision=3)
class BasicSceneGraphEvaluator:
    """Accumulates scene-graph recall@{20,50,100} for a single mode."""

    def __init__(self, mode, multiple_preds=False):
        self.mode = mode
        self.multiple_preds = multiple_preds
        self.result_dict = {self.mode + '_recall': {20: [], 50: [], 100: []}}

    @classmethod
    def all_modes(cls, **kwargs):
        """One evaluator per standard mode (sgdet / sgcls / predcls)."""
        return {m: cls(mode=m, **kwargs) for m in MODES}

    @classmethod
    def vrd_modes(cls, **kwargs):
        """Evaluators for VRD-style settings; always allows multiple predicates."""
        return {m: cls(mode=m, multiple_preds=True, **kwargs)
                for m in ('preddet', 'phrdet')}

    def evaluate_scene_graph_entry(self, gt_entry, pred_scores, viz_dict=None, iou_thresh=0.5):
        """Score one image's predictions and append its recalls to result_dict."""
        return evaluate_from_dict(
            gt_entry, pred_scores, self.mode, self.result_dict,
            viz_dict=viz_dict, iou_thresh=iou_thresh,
            multiple_preds=self.multiple_preds)

    def save(self, fn):
        np.save(fn, self.result_dict)

    def print_stats(self, logger):
        logger.info('===================' + self.mode + '(motif)' + '=========================')
        for k, v in self.result_dict[self.mode + '_recall'].items():
            logger.info('%s-recall@%i: %f' % (self.mode, k, np.mean(v)))
def evaluate_from_dict(gt_entry, pred_entry, mode, result_dict, multiple_preds=False,
                       viz_dict=None, **kwargs):
    """
    Shortcut to doing evaluate_recall from dict
    :param gt_entry: Dictionary containing gt_relations, gt_boxes, gt_classes
    :param pred_entry: Dictionary containing pred_rels, pred_boxes (if detection), pred_classes
    :param mode: one of 'predcls', 'sgcls', 'sgdet', 'phrdet', 'preddet'
    :param result_dict: accumulator; the recall@K lists under '<mode>_recall'
        are appended to in place (one value per evaluated image)
    :param multiple_preds: if True, rank every (pair, predicate) combination
        instead of only the argmax predicate per pair
    :param viz_dict: optional visualization hook (passed through, unused here)
    :param kwargs: forwarded to evaluate_recall (e.g. iou_thresh)
    :return: (pred_to_gt, pred_5ples, rel_scores), or (None, None, None) in 'preddet' mode
    """
    gt_rels = gt_entry['gt_relations']
    gt_boxes = gt_entry['gt_boxes'].astype(float)
    gt_classes = gt_entry['gt_classes']
    pred_rel_inds = pred_entry['pred_rel_inds']
    rel_scores = pred_entry['rel_scores']
    # Each mode fixes which parts of the prediction are taken from ground truth.
    if mode == 'predcls':
        # GT boxes and labels are given; only predicates are predicted.
        pred_boxes = gt_boxes
        pred_classes = gt_classes
        obj_scores = np.ones(gt_classes.shape[0])
    elif mode == 'sgcls':
        # GT boxes are given; labels and predicates are predicted.
        pred_boxes = gt_boxes
        pred_classes = pred_entry['pred_classes']
        obj_scores = pred_entry['obj_scores']
    elif mode == 'sgdet' or mode == 'phrdet':
        # Fully detected scene graph: boxes, labels and predicates all predicted.
        pred_boxes = pred_entry['pred_boxes'].astype(float)
        pred_classes = pred_entry['pred_classes']
        obj_scores = pred_entry['obj_scores']
    elif mode == 'preddet':
        # Only extract the indices that appear in GT
        prc = intersect_2d(pred_rel_inds, gt_rels[:, :2])
        if prc.size == 0:
            # No predicted pair coincides with any GT pair: recall is 0 for all K.
            for k in result_dict[mode + '_recall']:
                result_dict[mode + '_recall'][k].append(0.0)
            return None, None, None
        pred_inds_per_gt = prc.argmax(0)
        pred_rel_inds = pred_rel_inds[pred_inds_per_gt]
        rel_scores = rel_scores[pred_inds_per_gt]
        # Now sort the matching ones
        # (column 0 of rel_scores is skipped -- presumably the background
        # predicate; the +1 restores the original predicate index)
        rel_scores_sorted = argsort_desc(rel_scores[:,1:])
        rel_scores_sorted[:,1] += 1
        rel_scores_sorted = np.column_stack((pred_rel_inds[rel_scores_sorted[:,0]], rel_scores_sorted[:,1]))
        matches = intersect_2d(rel_scores_sorted, gt_rels)
        for k in result_dict[mode + '_recall']:
            rec_i = float(matches[:k].any(0).sum()) / float(gt_rels.shape[0])
            result_dict[mode + '_recall'][k].append(rec_i)
        return None, None, None
    else:
        raise ValueError('invalid mode')
    if multiple_preds:
        # Score every (pair, predicate) combination by the product of both
        # object scores and the predicate score, then keep the top 100.
        obj_scores_per_rel = obj_scores[pred_rel_inds].prod(1)
        overall_scores = obj_scores_per_rel[:,None] * rel_scores[:,1:]
        score_inds = argsort_desc(overall_scores)[:100]
        pred_rels = np.column_stack((pred_rel_inds[score_inds[:,0]], score_inds[:,1]+1))
        predicate_scores = rel_scores[score_inds[:,0], score_inds[:,1]+1]
    else:
        # One triplet per pair: argmax over predicate columns 1..N
        # (column 0 skipped; +1 restores the column index).
        pred_rels = np.column_stack((pred_rel_inds, 1+rel_scores[:,1:].argmax(1)))
        predicate_scores = rel_scores[:,1:].max(1)
    pred_to_gt, pred_5ples, rel_scores = evaluate_recall(
        gt_rels, gt_boxes, gt_classes,
        pred_rels, pred_boxes, pred_classes,
        predicate_scores, obj_scores, phrdet= mode=='phrdet',
        **kwargs)
    # recall@K = fraction of GT relations matched by any of the top-K predictions.
    for k in result_dict[mode + '_recall']:
        match = reduce(np.union1d, pred_to_gt[:k])
        rec_i = float(len(match)) / float(gt_rels.shape[0])
        result_dict[mode + '_recall'][k].append(rec_i)
    return pred_to_gt, pred_5ples, rel_scores
# print(" ".join(["R@{:2d}: {:.3f}".format(k, v[-1]) for k, v in result_dict[mode + '_recall'].items()]))
# Deal with visualization later
# # Optionally, log things to a separate dictionary
# if viz_dict is not None:
# # Caution: pred scores has changed (we took off the 0 class)
# gt_rels_scores = pred_scores[
# gt_rels[:, 0],
# gt_rels[:, 1],
# gt_rels[:, 2] - 1,
# ]
# # gt_rels_scores_cls = gt_rels_scores * pred_class_scores[
# # gt_rels[:, 0]] * pred_class_scores[gt_rels[:, 1]]
#
# viz_dict[mode + '_pred_rels'] = pred_5ples.tolist()
# viz_dict[mode + '_pred_rels_scores'] = max_pred_scores.tolist()
# viz_dict[mode + '_pred_rels_scores_cls'] = max_rel_scores.tolist()
# viz_dict[mode + '_gt_rels_scores'] = gt_rels_scores.tolist()
# viz_dict[mode + '_gt_rels_scores_cls'] = gt_rels_scores_cls.tolist()
#
# # Serialize pred2gt matching as a list of lists, where each sublist is of the form
# # pred_ind, gt_ind1, gt_ind2, ....
# viz_dict[mode + '_pred2gt_rel'] = pred_to_gt
###########################
def evaluate_recall(gt_rels, gt_boxes, gt_classes,
                    pred_rels, pred_boxes, pred_classes, rel_scores=None, cls_scores=None,
                    iou_thresh=0.5, phrdet=False):
    """
    Evaluates the recall
    :param gt_rels: [#gt_rel, 3] array of GT relations
    :param gt_boxes: [#gt_box, 4] array of GT boxes
    :param gt_classes: [#gt_box] array of GT classes
    :param pred_rels: [#pred_rel, 3] array of pred rels. Assumed these are in sorted order
                      and refer to IDs in pred classes / pred boxes
                      (id0, id1, rel)
    :param pred_boxes: [#pred_box, 4] array of pred boxes
    :param pred_classes: [#pred_box] array of predicted classes for these boxes
    :param rel_scores: [#pred_rel] predicate scores, used for ranking
    :param cls_scores: [#pred_box] object class scores, used for ranking
    :param iou_thresh: IoU threshold for a box (pair) to count as localized
    :param phrdet: if True, match on the subject/object union box instead of both boxes
    :return: pred_to_gt: Matching from predicate to GT
             pred_5ples: the predicted (id0, id1, cls0, cls1, rel)
             rel_scores: [cls_0score, cls1_score, relscore]
    """
    if pred_rels.size == 0:
        # Nothing predicted: one empty match list, empty 5-tuples and scores.
        return [[]], np.zeros((0,5)), np.zeros(0)
    num_gt_boxes = gt_boxes.shape[0]
    num_gt_relations = gt_rels.shape[0]
    assert num_gt_relations != 0
    gt_triplets, gt_triplet_boxes, _ = _triplet(gt_rels[:, 2],
                                                gt_rels[:, :2],
                                                gt_classes,
                                                gt_boxes)
    num_boxes = pred_boxes.shape[0]
    # Relation endpoints must be valid predicted-box indices.
    assert pred_rels[:,:2].max() < pred_classes.shape[0]
    # Exclude self rels
    # assert np.all(pred_rels[:,0] != pred_rels[:,1])
    assert np.all(pred_rels[:,2] > 0)
    # import pdb; pdb.set_trace()
    # NOTE(review): relation_scores is None when rel_scores/cls_scores are
    # omitted; the .prod(1) below then fails -- callers in this file always
    # pass both.
    pred_triplets, pred_triplet_boxes, relation_scores = \
        _triplet(pred_rels[:,2], pred_rels[:,:2], pred_classes, pred_boxes,
                 rel_scores, cls_scores)
    # Rank triplets by the product of (subject, object, predicate) scores.
    scores_overall = relation_scores.prod(1)
    sorted_inds = np.argsort(scores_overall)[::-1]
    # if not np.all(scores_overall[1:] <= scores_overall[:-1] + 1e-5):
    #     print("Somehow the relations weren't sorted properly: \n{}".format(scores_overall))
    #     raise ValueError("Somehow the relations werent sorted properly")
    # Compute recall. It's most efficient to match once and then do recall after
    pred_to_gt = _compute_pred_matches(
        gt_triplets,
        pred_triplets[sorted_inds],
        gt_triplet_boxes,
        pred_triplet_boxes[sorted_inds],
        iou_thresh,
        phrdet=phrdet,
    )
    # Contains some extra stuff for visualization. Not needed.
    pred_5ples = np.column_stack((
        pred_rels[:,:2],
        pred_triplets[:, [0, 2, 1]],
    ))
    return pred_to_gt, pred_5ples, relation_scores
def _triplet(predicates, relations, classes, boxes,
predicate_scores=None, class_scores=None):
"""
format predictions into triplets
:param predicates: A 1d numpy array of num_boxes*(num_boxes-1) predicates, corresponding to
each pair of possibilities
:param relations: A (num_boxes*(num_boxes-1), 2) array, where each row represents the boxes
in that relation
:param classes: A (num_boxes) array of the classes for each thing.
:param boxes: A (num_boxes,4) array of the bounding boxes for everything.
:param predicate_scores: A (num_boxes*(num_boxes-1)) array of the scores for each predicate
:param class_scores: A (num_boxes) array of the likelihood for each object.
:return: Triplets: (num_relations, 3) array of class, relation, class
Triplet boxes: (num_relation, 8) array of boxes for the parts
Triplet scores: num_relation array of the scores overall for the triplets
"""
assert (predicates.shape[0] == relations.shape[0])
sub_ob_classes = classes[relations[:, :2]]
triplets = np.column_stack((sub_ob_classes[:, 0], predicates, sub_ob_classes[:, 1]))
triplet_boxes = np.column_stack((boxes[relations[:, 0]], boxes[relations[:, 1]]))
triplet_scores = None
if predicate_scores is not None and class_scores is not None:
triplet_scores = np.column_stack((
class_scores[relations[:, 0]],
class_scores[relations[:, 1]],
predicate_scores,
))
return triplets, triplet_boxes, triplet_scores
def _compute_pred_matches(gt_triplets, pred_triplets,
                          gt_boxes, pred_boxes, iou_thresh, phrdet=False):
    """
    Given a set of predicted triplets, return the list of matching GT's for each of the
    given predictions
    :param gt_triplets: (num_gt, 3) class-predicate-class triplets
    :param pred_triplets: (num_pred, 3) class-predicate-class triplets
    :param gt_boxes: (num_gt, 8) subject+object boxes per GT relation
    :param pred_boxes: (num_pred, 8) subject+object boxes per prediction
    :param iou_thresh: IoU threshold for localization
    :param phrdet: if True, match on the subject/object union box only
    :return: pred_to_gt: list of length num_pred; entry i holds the GT relation
             indices matched by prediction i (triplet classes equal and IoU >= iou_thresh)
    """
    # This performs a matrix multiplication-esque thing between the two arrays
    # Instead of summing, we want the equality, so we reduce in that way
    # The rows correspond to GT triplets, columns to pred triplets
    keeps = intersect_2d(gt_triplets, pred_triplets)
    gt_has_match = keeps.any(1)
    pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
    # Only GT relations with at least one class-matching prediction need IoU checks.
    for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
                                         gt_boxes[gt_has_match],
                                         keeps[gt_has_match],
                                         ):
        boxes = pred_boxes[keep_inds]
        if phrdet:
            # Evaluate where the union box > 0.5
            # Collapse each (subject, object) pair into its enclosing box.
            gt_box_union = gt_box.reshape((2, 4))
            gt_box_union = np.concatenate((gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
            box_union = boxes.reshape((-1, 2, 4))
            box_union = np.concatenate((box_union.min(1)[:,:2], box_union.max(1)[:,2:]), 1)
            inds = bbox_overlaps(torch.from_numpy(gt_box_union[None]), torch.from_numpy(box_union)).numpy() >= iou_thresh
        else:
            # Subject and object boxes must BOTH pass the IoU threshold.
            sub_iou = bbox_overlaps(torch.from_numpy(gt_box[None,:4]).contiguous(), torch.from_numpy(boxes[:, :4]).contiguous()).numpy()[0]
            obj_iou = bbox_overlaps(torch.from_numpy(gt_box[None,4:]).contiguous(), torch.from_numpy(boxes[:, 4:]).contiguous()).numpy()[0]
            inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
        for i in np.where(keep_inds)[0][inds]:
            pred_to_gt[i].append(int(gt_ind))
    return pred_to_gt
| 12,230 | 40.744027 | 139 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/evaluation/coco/coco_eval.py | import logging
import tempfile
import os
import torch
from collections import OrderedDict
from tqdm import tqdm
# from lib.scene_parser.rcnn.modeling.roi_heads.mask_head.inference import Masker
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_iou
def do_coco_evaluation(
    dataset,
    predictions,
    box_only,
    output_folder,
    iou_types,
    expected_results,
    expected_results_sigma_tol,
):
    """Run COCO-style evaluation over per-image predictions.

    :param dataset: dataset exposing ``dataset.coco`` and ``get_img_info``
    :param predictions: list of per-image predictions; the list index is used
        as the COCO image id by the helpers below
    :param box_only: if True, only evaluate class-agnostic proposal recall
    :param output_folder: if set, JSON results and torch result files are written here
    :param iou_types: iterable of metric types; only "bbox" is prepared here
        (segm/keypoints preparation is commented out in this file)
    :param expected_results: optional regression targets for sanity checking
    :param expected_results_sigma_tol: tolerance (in std deviations) for those checks
    :return: (COCOResults, dict of per-iou_type results), or None in box_only mode
    """
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        # Proposal-recall-only path: report AR at several area ranges and
        # proposal-count limits, then exit early.
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        res = COCOResults("box_proposal")
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(
                    predictions, dataset, area=area, limit=limit
                )
                key = "AR{}@{:d}".format(suffix, limit)
                res.results["box_proposal"][key] = stats["ar"].item()
        logger.info(res)
        check_expected_results(res, expected_results, expected_results_sigma_tol)
        if output_folder:
            torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
        return
    logger.info("Preparing results for COCO format")
    coco_results = {}
    if "bbox" in iou_types:
        logger.info("Preparing bbox results")
        coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
    # if "segm" in iou_types:
    #     logger.info("Preparing segm results")
    #     coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset)
    # if 'keypoints' in iou_types:
    #     logger.info('Preparing keypoints results')
    #     coco_results['keypoints'] = prepare_for_coco_keypoint(predictions, dataset)
    results = COCOResults(*iou_types)
    logger.info("Evaluating predictions")
    for iou_type in iou_types:
        with tempfile.NamedTemporaryFile() as f:
            file_path = f.name
            if output_folder:
                # Prefer a persistent per-iou_type JSON over the temp file.
                file_path = os.path.join(output_folder, iou_type + ".json")
            res = evaluate_predictions_on_coco(
                dataset.coco, coco_results[iou_type], file_path, iou_type
            )
            results.update(res)
    logger.info(results)
    check_expected_results(results, expected_results, expected_results_sigma_tol)
    if output_folder:
        torch.save(results, os.path.join(output_folder, "coco_results.pth"))
    return results, coco_results
def prepare_for_coco_detection(predictions, dataset):
    """Convert per-image BoxList predictions into COCO 'bbox' result dicts.

    The index of each prediction in ``predictions`` is used as its COCO
    image id. Boxes are mapped back to original image size and exported in
    xywh format, one result dict per detection.
    """
    coco_results = []
    for image_id, prediction in enumerate(predictions):
        original_id = image_id  # dataset.id_to_img_map[image_id]
        if len(prediction) == 0:
            continue
        # Predictions live in the resized input space; map them back to the
        # original image dimensions before exporting.
        img_info = dataset.get_img_info(image_id)
        prediction = prediction.resize((img_info["width"], img_info["height"]))
        prediction = prediction.convert("xywh")
        boxes = prediction.bbox.tolist()
        scores = prediction.get_field("scores").tolist()
        labels = prediction.get_field("labels").tolist()
        for k, box in enumerate(boxes):
            coco_results.append(
                {
                    "image_id": original_id,
                    "category_id": labels[k],
                    "bbox": box,
                    "score": scores[k],
                }
            )
    return coco_results
# def prepare_for_coco_segmentation(predictions, dataset):
# import pycocotools.mask as mask_util
# import numpy as np
#
# masker = Masker(threshold=0.5, padding=1)
# # assert isinstance(dataset, COCODataset)
# coco_results = []
# for image_id, prediction in tqdm(enumerate(predictions)):
# original_id = dataset.id_to_img_map[image_id]
# if len(prediction) == 0:
# continue
#
# img_info = dataset.get_img_info(image_id)
# image_width = img_info["width"]
# image_height = img_info["height"]
# prediction = prediction.resize((image_width, image_height))
# masks = prediction.get_field("mask")
# # t = time.time()
# # Masker is necessary only if masks haven't been already resized.
# if list(masks.shape[-2:]) != [image_height, image_width]:
# masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
# masks = masks[0]
# # logger.info('Time mask: {}'.format(time.time() - t))
# # prediction = prediction.convert('xywh')
#
# # boxes = prediction.bbox.tolist()
# scores = prediction.get_field("scores").tolist()
# labels = prediction.get_field("labels").tolist()
#
# # rles = prediction.get_field('mask')
#
# rles = [
# mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
# for mask in masks
# ]
# for rle in rles:
# rle["counts"] = rle["counts"].decode("utf-8")
#
# mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
#
# coco_results.extend(
# [
# {
# "image_id": original_id,
# "category_id": mapped_labels[k],
# "segmentation": rle,
# "score": scores[k],
# }
# for k, rle in enumerate(rles)
# ]
# )
# return coco_results
#
#
# def prepare_for_coco_keypoint(predictions, dataset):
# # assert isinstance(dataset, COCODataset)
# coco_results = []
# for image_id, prediction in enumerate(predictions):
# original_id = dataset.id_to_img_map[image_id]
# if len(prediction.bbox) == 0:
# continue
#
# # TODO replace with get_img_info?
# image_width = dataset.coco.imgs[original_id]['width']
# image_height = dataset.coco.imgs[original_id]['height']
# prediction = prediction.resize((image_width, image_height))
# prediction = prediction.convert('xywh')
#
# boxes = prediction.bbox.tolist()
# scores = prediction.get_field('scores').tolist()
# labels = prediction.get_field('labels').tolist()
# keypoints = prediction.get_field('keypoints')
# keypoints = keypoints.resize((image_width, image_height))
# keypoints = keypoints.keypoints.view(keypoints.keypoints.shape[0], -1).tolist()
#
# mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
#
# coco_results.extend([{
# 'image_id': original_id,
# 'category_id': mapped_labels[k],
# 'keypoints': keypoint,
# 'score': scores[k]} for k, keypoint in enumerate(keypoints)])
# return coco_results
# inspired from Detectron
def evaluate_box_proposals(
    predictions, dataset, thresholds=None, area="all", limit=None
):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.

    :param predictions: list of BoxList proposals with an "objectness" field;
        the list index is used as the COCO image id
    :param dataset: dataset exposing ``get_img_info`` and ``dataset.coco``
    :param thresholds: IoU thresholds; defaults to 0.5:0.05:0.95
    :param area: key of ``areas`` below restricting which GT boxes count
    :param limit: if set, keep only the top-``limit`` proposals per image
    :return: dict with average recall ("ar"), per-threshold "recalls",
        the "thresholds", per-GT best "gt_overlaps", and "num_pos" GT boxes
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0 ** 2, 1e5 ** 2],  # all
        [0 ** 2, 32 ** 2],  # small
        [32 ** 2, 96 ** 2],  # medium
        [96 ** 2, 1e5 ** 2],  # large
        [96 ** 2, 128 ** 2],  # 96-128
        [128 ** 2, 256 ** 2],  # 128-256
        [256 ** 2, 512 ** 2],  # 256-512
        [512 ** 2, 1e5 ** 2],
    ]  # 512-inf
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0
    for image_id, prediction in enumerate(predictions):
        original_id = image_id  # dataset.id_to_img_map[image_id]
        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))
        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field("objectness").sort(descending=True)[1]
        prediction = prediction[inds]
        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        # Crowd annotations are excluded from recall computation.
        gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
            "xyxy"
        )
        gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
        if len(gt_boxes) == 0:
            continue
        # Keep only GT boxes whose area falls within the requested range.
        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]
        num_pos += len(gt_boxes)
        if len(gt_boxes) == 0:
            continue
        if len(prediction) == 0:
            continue
        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]
        overlaps = boxlist_iou(prediction, gt_boxes)
        _gt_overlaps = torch.zeros(len(gt_boxes))
        # Greedy one-to-one matching: repeatedly pick the best remaining
        # (proposal, gt) pair, then remove both from further consideration.
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)
            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = torch.cat(gt_overlaps, dim=0)
    gt_overlaps, _ = torch.sort(gt_overlaps)
    if thresholds is None:
        step = 0.05
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
def evaluate_predictions_on_coco(
    coco_gt, coco_results, json_result_file, iou_type="bbox"
):
    """Serialize results to JSON, load them into COCO, and run COCOeval.

    Returns the summarized ``COCOeval`` object so callers can read
    ``coco_eval.stats``.
    """
    import json

    with open(json_result_file, "w") as f:
        json.dump(coco_results, f)

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    # An empty result list cannot be loaded by loadRes; fall back to an
    # empty COCO object in that case.
    coco_dt = coco_gt.loadRes(str(json_result_file)) if coco_results else COCO()
    # Restrict evaluation to images that actually have predictions.
    imgIds = list({r['image_id'] for r in coco_results})
    # coco_dt = coco_gt.loadRes(coco_results)
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.params.imgIds = imgIds
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval
class COCOResults(object):
    """Tabulates COCO evaluation metrics per iou_type.

    Every metric starts at -1 ("not evaluated") and is filled in from a
    summarized ``COCOeval`` via :meth:`update`.
    """

    METRICS = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "box_proposal": [
            "AR@100",
            "ARs@100",
            "ARm@100",
            "ARl@100",
            "AR@1000",
            "ARs@1000",
            "ARm@1000",
            "ARl@1000",
        ],
        "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
    }

    def __init__(self, *iou_types):
        allowed_types = ("box_proposal", "bbox", "segm", "keypoints")
        assert all(iou_type in allowed_types for iou_type in iou_types)
        # Ordered so __repr__ lists tasks and metrics deterministically.
        self.results = OrderedDict(
            (iou_type,
             OrderedDict((metric, -1) for metric in COCOResults.METRICS[iou_type]))
            for iou_type in iou_types
        )

    def update(self, coco_eval):
        """Copy the stats vector of a summarized COCOeval into this table."""
        if coco_eval is None:
            return
        from pycocotools.cocoeval import COCOeval

        assert isinstance(coco_eval, COCOeval)
        stats = coco_eval.stats
        iou_type = coco_eval.params.iouType
        # COCOeval.stats is ordered the same way as METRICS[iou_type].
        table = self.results[iou_type]
        for idx, metric in enumerate(COCOResults.METRICS[iou_type]):
            table[metric] = stats[idx]

    def __repr__(self):
        lines = [""]
        for task, metrics in self.results.items():
            lines.append('Task: {}'.format(task))
            lines.append(', '.join(metrics.keys()))
            lines.append(', '.join('{:.4f}'.format(v) for v in metrics.values()))
        return '\n'.join(lines) + '\n'
def check_expected_results(results, expected_results, sigma_tol):
    """Sanity-check evaluated metrics against expected (mean, std) targets.

    For each (task, metric, (mean, std)) tuple, the actual value must lie
    strictly inside mean +/- sigma_tol * std; passes are logged at info
    level and failures at error level. No-op when no expectations are given.
    """
    if not expected_results:
        return

    logger = logging.getLogger("maskrcnn_benchmark.inference")
    for task, metric, (mean, std) in expected_results:
        actual_val = results.results[task][metric]
        lo, hi = mean - sigma_tol * std, mean + sigma_tol * std
        ok = lo < actual_val < hi
        msg = (
            "{} > {} sanity check (actual vs. expected): "
            "{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
        ).format(task, metric, actual_val, mean, std, lo, hi)
        if ok:
            logger.info("PASS: " + msg)
        else:
            logger.error("FAIL: " + msg)
| 14,329 | 34.914787 | 89 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
    """
    Wraps another sampler to yield a mini-batch of indices.
    It enforces that elements from the same group should appear in groups of batch_size.
    It also tries to provide mini-batches which follows an ordering which is
    as close as possible to the ordering from the original sampler.

    Arguments:
        sampler (Sampler): Base sampler.
        group_ids (sequence of int): one group id per dataset element; batches
            never mix elements from different groups.
        batch_size (int): Size of mini-batch.
        drop_uneven (bool): If ``True``, the sampler will drop the batches whose
            size is less than ``batch_size``
    """

    def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        # Sorted list of the distinct group ids actually present.
        self.groups = torch.unique(self.group_ids).sort(0)[0]

        # Set by __len__ when it precomputes batches so the next __iter__
        # can reuse them instead of recomputing (keeps len/iter consistent).
        self._can_reuse_batches = False

    def _prepare_batches(self):
        """Build the list of batches for one pass over the base sampler."""
        dataset_size = len(self.group_ids)
        # get the sampled indices from the sampler
        sampled_ids = torch.as_tensor(list(self.sampler))
        # potentially not all elements of the dataset were sampled
        # by the sampler (e.g., DistributedSampler).
        # construct a tensor which contains -1 if the element was
        # not sampled, and a non-negative number indicating the
        # order where the element was sampled.
        # for example. if sampled_ids = [3, 1] and dataset_size = 5,
        # the order is [-1, 1, -1, 0, -1]
        order = torch.full((dataset_size,), -1, dtype=torch.int64)
        order[sampled_ids] = torch.arange(len(sampled_ids))

        # get a mask with the elements that were sampled
        mask = order >= 0

        # find the elements that belong to each individual cluster
        clusters = [(self.group_ids == i) & mask for i in self.groups]
        # get relative order of the elements inside each cluster
        # that follows the order from the sampler
        relative_order = [order[cluster] for cluster in clusters]
        # with the relative order, find the absolute order in the
        # sampled space
        permutation_ids = [s[s.sort()[1]] for s in relative_order]
        # permute each cluster so that they follow the order from
        # the sampler
        permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]

        # splits each cluster in batch_size, and merge as a list of tensors
        splits = [c.split(self.batch_size) for c in permuted_clusters]
        merged = tuple(itertools.chain.from_iterable(splits))

        # now each batch internally has the right order, but
        # they are grouped by clusters. Find the permutation between
        # different batches that brings them as close as possible to
        # the order that we have in the sampler. For that, we will consider the
        # ordering as coming from the first element of each batch, and sort
        # correspondingly
        first_element_of_batch = [t[0].item() for t in merged]
        # get and inverse mapping from sampled indices and the position where
        # they occur (as returned by the sampler)
        inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
        # from the first element in each batch, get a relative ordering
        first_index_of_batch = torch.as_tensor(
            [inv_sampled_ids_map[s] for s in first_element_of_batch]
        )

        # permute the batches so that they approximately follow the order
        # from the sampler
        permutation_order = first_index_of_batch.sort(0)[1].tolist()
        # finally, permute the batches
        batches = [merged[i].tolist() for i in permutation_order]

        if self.drop_uneven:
            # keep only full batches
            kept = []
            for batch in batches:
                if len(batch) == self.batch_size:
                    kept.append(batch)
            batches = kept
        return batches

    def __iter__(self):
        # Reuse batches precomputed by __len__ (at most once), otherwise
        # recompute from the (possibly reshuffled) base sampler.
        if self._can_reuse_batches:
            batches = self._batches
            self._can_reuse_batches = False
        else:
            batches = self._prepare_batches()
        self._batches = batches
        return iter(batches)

    def __len__(self):
        # Computing the length requires materializing the batches; cache
        # them so the following __iter__ yields exactly what was counted.
        if not hasattr(self, "_batches"):
            self._batches = self._prepare_batches()
            self._can_reuse_batches = True
        return len(self._batches)
| 4,845 | 40.775862 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/samplers/iteration_based_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch.utils.data.sampler import BatchSampler
class IterationBasedBatchSampler(BatchSampler):
    """
    Wraps a BatchSampler, resampling from it until
    a specified number of iterations have been sampled
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        produced = self.start_iter
        while produced <= self.num_iterations:
            # Epoch-aware samplers (e.g. DistributedSampler) reshuffle when
            # told the current "epoch"; feed them the iteration counter so
            # each pass over the data can be ordered differently.
            inner_sampler = self.batch_sampler.sampler
            if hasattr(inner_sampler, "set_epoch"):
                inner_sampler.set_epoch(produced)
            for batch in self.batch_sampler:
                produced += 1
                if produced > self.num_iterations:
                    return
                yield batch

    def __len__(self):
        return self.num_iterations
| 1,164 | 35.40625 | 71 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    Each of ``num_replicas`` processes takes a disjoint, contiguous slice of
    the (optionally shuffled) index list. The list is padded with its leading
    indices so every replica receives exactly the same number of samples.
    Shuffling is seeded with ``self.epoch``, so all replicas agree on the
    permutation; call :meth:`set_epoch` once per epoch to reshuffle.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        shuffle (optional): Whether to shuffle the indices each epoch.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None or rank is None:
            # Fall back to the process group for anything not given explicitly.
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            if num_replicas is None:
                num_replicas = dist.get_world_size()
            if rank is None:
                rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Round up so all replicas draw the same number of samples.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # deterministically shuffle based on epoch
            generator = torch.Generator()
            generator.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=generator).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # Pad with leading indices so the list divides evenly across replicas.
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # This replica's contiguous slice.
        chunk = indices[self.rank * self.num_samples :][: self.num_samples]
        assert len(chunk) == self.num_samples

        return iter(chunk)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        """Record the epoch used to seed the next shuffle."""
        self.epoch = epoch
| 2,569 | 37.358209 | 86 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/data/transforms/transforms.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
    """Chains several (image, target) transforms, applying them in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        parts = [self.__class__.__name__ + "("]
        parts.extend("    {0}".format(t) for t in self.transforms)
        return "\n".join(parts) + "\n)"
class Resize(object):
    """Resizes image (and target) so the shorter side matches a chosen
    ``min_size`` while capping the longer side at ``max_size``.

    ``min_size`` may be a single value or a list/tuple, in which case one
    entry is picked at random per call (scale augmentation).
    """

    def __init__(self, min_size, max_size):
        if not isinstance(min_size, (list, tuple)):
            min_size = (min_size,)
        self.min_size = min_size
        self.max_size = max_size

    # modified from torchvision to add support for max size
    def get_size(self, image_size):
        """Map a (width, height) input size to the (height, width) output size."""
        w, h = image_size
        size = random.choice(self.min_size)
        if self.max_size is not None:
            short_side = float(min(w, h))
            long_side = float(max(w, h))
            # Shrink the requested size when scaling the short side to it
            # would push the long side past max_size.
            if long_side / short_side * size > self.max_size:
                size = int(round(self.max_size * short_side / long_side))
        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)
        if w < h:
            return (int(size * h / w), size)
        return (size, int(size * w / h))

    def __call__(self, image, target=None):
        new_size = self.get_size(image.size)
        image = F.resize(image, new_size)
        if target is None:
            return image
        return image, target.resize(image.size)
class RandomHorizontalFlip(object):
    """Flips image and target left-right together with probability ``prob``."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target):
        # Image and target flip together so boxes stay aligned with pixels.
        if random.random() < self.prob:
            return F.hflip(image), target.transpose(0)
        return image, target
class RandomVerticalFlip(object):
    """Flips image and target top-bottom together with probability ``prob``."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target):
        # Image and target flip together so boxes stay aligned with pixels.
        if random.random() < self.prob:
            return F.vflip(image), target.transpose(1)
        return image, target
class ColorJitter(object):
    """Applies torchvision color jitter to the image; the target passes through."""

    def __init__(self,
                 brightness=None,
                 contrast=None,
                 saturation=None,
                 hue=None,
                 ):
        # Delegate the actual jitter to torchvision's implementation.
        self.color_jitter = torchvision.transforms.ColorJitter(
            brightness=brightness,
            contrast=contrast,
            saturation=saturation,
            hue=hue,
        )

    def __call__(self, image, target):
        jittered = self.color_jitter(image)
        return jittered, target
class ToTensor(object):
    """Converts the image to a tensor; the target passes through unchanged."""

    def __call__(self, image, target):
        tensor_image = F.to_tensor(image)
        return tensor_image, target
class Normalize(object):
    """Channel-normalizes an image tensor, optionally reordering RGB to BGR
    and rescaling from [0, 1] to [0, 255] first (``to_bgr255``)."""

    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255

    def __call__(self, image, target=None):
        if self.to_bgr255:
            # Swap channel order to BGR and scale to the 0-255 range.
            image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image if target is None else (image, target)
| 3,477 | 27.508197 | 83 | py |
DMCrypt | DMCrypt-main/main.py | import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#import seaborn as sns
import matplotlib.pyplot as plt
import pickle5 as pickle
import sys
import argparse
# Command-line interface: a single option selecting the date that the
# prediction pipeline should treat as "today".
parser = argparse.ArgumentParser()
parser.add_argument('--date', '-d', help="Select the supposed to be the current date", type= str)
args = parser.parse_args()
#print(parser.format_help())
from utils.utils import getPricePrediction
#exec(open("./model/AdaBoost-LSTM.py").read())
# Run the end-to-end price prediction for the selected date.
getPricePrediction(args.date)
| 641 | 28.181818 | 97 | py |
DMCrypt | DMCrypt-main/model/AdaBoost-LSTM.py | import torch
import torch.nn as nn
import pickle5 as pickle
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#import seaborn as sns
import math
import matplotlib.pyplot as plt
# Load the preprocessed time series, indexed by timestamp, and drop the
# "total" column.
dataframe = pd.read_csv("./dataset/full_preprocessed_data.csv", index_col="Timestamp")
dataframe = dataframe.loc[:, dataframe.columns != "total"]
# NOTE(review): .iloc[:, :] selects everything — this line is a no-op copy.
dataframe = dataframe.iloc[:, :]
dataframe_v = dataframe.to_numpy()
# Sliding-window length used when building input sequences below.
window = 7
# Splitting the data
datasize_length = dataframe_v.shape[0]
# The data split percentages
training_percentage = 0.7
testing_percentage = 0.15
validation_percentage = 0.15
testing_15 = 0.15
# Number of samples in each split
num_train_samples = int(training_percentage * datasize_length)
num_test_samples = int(testing_percentage * datasize_length)
num_test_samples_new = int(testing_15 * datasize_length)
num_validation_samples = int(validation_percentage * datasize_length)
def create_sequences(x, window):
    """Slice ``x`` into overlapping windows along axis 0.

    Args:
        x: 2-D array of shape ``(num_rows, num_features)``.
        window: length of each sequence.

    Returns:
        Array of shape ``(num_rows - window, window, num_features)`` where
        entry ``i`` is ``x[i:i + window]``.
    """
    # The original kept an unused ``inputSequence`` accumulator and
    # commented-out code; a comprehension expresses the same slicing.
    return np.array([x[i:i + window] for i in range(x.shape[0] - window)])
newDf = create_sequences(dataframe.to_numpy(), window)
# Inputs are all columns up to the last row; target is column 1 shifted one
# step forward (predict the next value).
x = dataframe.iloc[:-1, :]
#x = dataframe.iloc[:-1].loc[:,["hash_rate", "Block_size", "Difficulty", "active_addresses", "Block_time", "Average fees", "mining_profitability", "Transactions"]]
y = dataframe.iloc[1:, 1:2]
mm = MinMaxScaler()
ss = StandardScaler()
# NOTE(review): scalers are fit on the FULL dataset before the train/test
# split — this leaks test statistics into training; fit on train only.
x_ss = ss.fit_transform(x)
y_mm = mm.fit_transform(y)
x_train = x_ss[:num_train_samples, :]
x_test = x_ss[num_train_samples:, :]
y_train = y_mm[:num_train_samples, :]
y_test = y_mm[num_train_samples:, :]
x_train = create_sequences(x_train, window)
# NOTE(review): targets are taken at the *start* index of each window —
# confirm the intended alignment (the usual next-step target would be
# y[i + window]).
y_train = y_train[:-window]
x_test = create_sequences(x_test, window)
y_test = y_test[:-window]
x_train_tensors = Variable(torch.Tensor(x_train))
x_test_tensors = Variable(torch.Tensor(x_test))
y_train_tensors = Variable(torch.Tensor(y_train))
y_test_tensors = Variable(torch.Tensor(y_test))
x_train_tensors_final = x_train_tensors
x_test_tensors_final = x_test_tensors
# LSTM training hyperparameters.
num_epochs = 700 #1000 epochs
learning_rate = 0.00003 #0.001 lr
input_size = 18 #number of features
hidden_size = 512 #number of features in hidden state
num_layers = 3 #number of stacked lstm layers
num_classes = 1 #number of output classes
device = "cpu"
class LSTM1(nn.Module):
    """Stacked-LSTM regressor: LSTM -> ReLU -> Linear(128) -> ReLU -> Linear.

    Consumes batches shaped ``(batch, seq_len, input_size)`` and predicts
    ``num_classes`` values per sequence from the last time step. Relies on
    the module-level ``device`` for tensor placement.
    """

    def __init__(self, num_classes, input_size, hidden_size, num_layers, seq_length):
        super(LSTM1, self).__init__()
        self.num_classes = num_classes  # size of the final output layer
        self.num_layers = num_layers    # number of stacked LSTM layers
        self.input_size = input_size    # features per time step
        self.hidden_size = hidden_size  # LSTM hidden-state width
        self.seq_length = seq_length    # expected sequence length (informational)
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        self.fc_1 = nn.Linear(hidden_size, 128)  # first dense layer
        self.fc = nn.Linear(128, num_classes)    # output layer
        self.relu = nn.ReLU()

    def forward(self, x):
        x = x.to(device)
        # Fresh zero hidden/cell states for every forward pass.
        h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(device)
        c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(device)
        # The original also reshaped the final hidden state (hn.view(...))
        # but never used the result — that dead statement is removed.
        output, _ = self.lstm(x, (h_0, c_0))
        out = output[:, -1, :]  # keep only the last time step
        out = self.relu(out)
        out = self.fc_1(out)
        out = self.relu(out)
        out = self.fc(out)
        return out
# Build the LSTM, loss, and optimizer.
lstm1 = LSTM1(num_classes, input_size, hidden_size, num_layers, x_train_tensors_final.shape[1]) #our lstm class
lstm1 = lstm1.to(device)
criterion = torch.nn.MSELoss()    # mean-squared error for regression
optimizer = torch.optim.Adam(lstm1.parameters(), lr=learning_rate)
# Separate (unscaled) feature/target arrays for the AdaBoost regressor.
df = pd.read_csv("./dataset/full_preprocessed_data.csv")
X = df.iloc[:-1, 1:].to_numpy()
y = df.iloc[1:, 2:3].to_numpy().flatten()
train_per = 0.7
test_per = 0.15
val_per = 0.15
train_split = int(train_per * X.shape[0])
test_split = int(test_per * X.shape[0])
# NOTE(review): for the sklearn model the *first* 15% is validation, the
# next 15% is test, and the remainder is train — opposite ordering to the
# LSTM split above.
X_train = X[test_split+test_split:]
X_test = X[test_split:test_split+test_split]
X_val = X[:test_split]
y_train = y[test_split+test_split:]
y_test = y[test_split:test_split+test_split]
y_val = y[:test_split]
# Early-stopping bookkeeping: best test loss seen and epochs since improvement.
minLoss = np.inf
minEpoch = 0
how_many_to_stop = 100
last = 0
# NOTE(review): `break` below only exits the inner epoch loop; the outer
# loop restarts training up to 4 times with the same counters.
for m in range(4):
    for epoch in range(num_epochs):
        outputs = lstm1.forward(x_train_tensors_final) #forward pass
        optimizer.zero_grad() #caluclate the gradient, manually setting to 0
        # obtain the loss function
        # NOTE(review): the AdaBoost ensemble is rebuilt and refit from
        # scratch on every epoch even though X_train/y_train never change —
        # extremely wasteful; fit it once outside the loop.
        adaBoostLstm = DecisionTreeRegressor(random_state=0, max_depth=20)
        adaBoostLstm = AdaBoostRegressor(base_estimator=adaBoostLstm, random_state=0, n_estimators=80)
        adaBoostLstm.fit(X_train, y_train)
        y_train_tensors = y_train_tensors.to(device)
        loss = criterion(outputs, y_train_tensors)
        loss.backward() #calculates the loss of the loss function
        optimizer.step() #improve from loss, i.e backprop
        # Evaluate on the held-out split without tracking gradients.
        with torch.no_grad():
            lstm1.eval()
            x_test_tensors_final=x_test_tensors_final.to(device)
            y_test_tensors=y_test_tensors.to(device)
            outputs = lstm1(x_test_tensors_final)
            test_loss = criterion(outputs, y_test_tensors)
        if (test_loss < minLoss):
            print("LOSS DECREASE ========> From: {0}, To: {1}".format(minLoss, test_loss))
            minLoss = test_loss
            minEpoch = epoch
            last=0
        last+=1
        lstm1.train()
        if epoch % 50 == 0:
            print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))
        # Early stop: report AdaBoost metrics once the test loss has not
        # improved for `how_many_to_stop` epochs.
        if last > how_many_to_stop:
            out_test = adaBoostLstm.predict(X_test)
            out_val = adaBoostLstm.predict(X_val)
            out_train = adaBoostLstm.predict(X_train)
            mae_test = mean_absolute_error(y_test, out_test)
            mae_train = mean_absolute_error(y_train, out_train)
            mae_val = mean_absolute_error(y_val, out_val)
            mse_test = mean_squared_error(y_test, out_test)
            mse_train = mean_squared_error(y_train, out_train)
            mse_val = mean_squared_error(y_val, out_val)
            print('''
            Training stopped, validation loss is not decreasing anymore!
            RMSE Train: {6},
            MSE Train: {3},
            MAE Train: {0},
            RMSE Validation: {7},
            MSE Validation: {4},
            MAE Validation: {1},
            RMSE Test: {8},
            MSE Test: {5},
            MAE Test: {2},
            '''.format(mae_train, mae_val, mae_test, mse_train, mse_val, mse_test, math.sqrt(mse_train), math.sqrt(mse_val), math.sqrt(mse_test), epoch))
            break
| 7,495 | 33.703704 | 163 | py |
DMCrypt | DMCrypt-main/model/LSTM.py | import torch
import torch.nn as nn
import pickle
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#import seaborn as sns
import matplotlib.pyplot as plt
import pickle5 as pickle
device = "cpu"
class LSTM1(nn.Module):
    """Stacked-LSTM regressor: LSTM -> ReLU -> Linear(128) -> ReLU -> Linear.

    Consumes batches shaped ``(batch, seq_len, input_size)`` and predicts
    ``num_classes`` values per sequence from the last time step. Relies on
    the module-level ``device`` constant for tensor placement.
    """
    def __init__(self, num_classes, input_size, hidden_size, num_layers, seq_length):
        super(LSTM1, self).__init__()
        self.num_classes = num_classes #number of classes
        self.num_layers = num_layers #number of layers
        self.input_size = input_size #input size
        self.hidden_size = hidden_size #hidden state
        self.seq_length = seq_length #sequence length
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True) #lstm
        self.fc_1 =  nn.Linear(hidden_size, 128) #fully connected 1
        self.fc = nn.Linear(128, num_classes) #fully connected last layer
        self.relu = nn.ReLU()
    def forward(self,x):
        x = x.to(device)
        # Fresh zero hidden/cell states for every forward pass.
        h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)) #hidden state
        c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)) #internal state
        # Propagate input through LSTM
        h_0 = h_0.to(device)
        c_0 = c_0.to(device)
        output, (hn, cn) = self.lstm(x, (h_0, c_0)) #lstm with input, hidden, and internal state
        # NOTE(review): the reshaped `hn` below is never used — dead code.
        hn = hn.view(-1, self.hidden_size) #reshaping the data for Dense layer next
        out = output[:, -1, :]  # keep only the last time step
        out = self.relu(out)
        out = self.fc_1(out) #first Dense
        out = self.relu(out) #relu
        out = self.fc(out) #Final Output
        return out
| 1,806 | 38.282609 | 97 | py |
paper-log-bilinear-loss | paper-log-bilinear-loss-master/test.py | """
Put it all together with a simple MNIST exmaple
"""
from tensorflow.examples.tutorials.mnist import input_data
from keras.optimizers import Adam
from sklearn.metrics import confusion_matrix
from models import mnist_model
from loss import bilinear_loss
from util import *
# Experiment configuration.
DATA_DIR = ""
LRATE = 5e-4 # Learning rate for the model
EPOCHS = 10 # How many epochs to train for
BATCH_SIZE = 50 #
VERBOSITY = 1 #
N_SPOTS = 10 # Number of spots in the mask of "bad errors"
ALPHA = .9 # Trade-off parameter. Higher value puts more weight on not making errors in the mask
# Load MNIST and reshape the flat images into (N, 28, 28, 1) tensors.
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
data = {
    "X_train": mnist.train.images.reshape(-1, 28, 28, 1),
    "X_valid": mnist.validation.images.reshape(-1, 28, 28, 1),
    "X_test": mnist.test.images.reshape(-1, 28, 28, 1),
    "Y_train": mnist.train.labels,
    "Y_valid": mnist.validation.labels,
    "Y_test": mnist.test.labels
}
# generate a random matrix with locations we don't want to make mistakes in
cm = make_random_spots_cm(N_SPOTS, normalize=True)
# generate a loss function (bilinar+cross-entropy) to reflect the random spots
# NOTE(review): alpha is hard-coded to .9 here instead of using ALPHA above.
loss = bilinear_loss(cm, alpha=.9)
# train the model
model = mnist_model()
model.summary()
model.compile(loss=loss, optimizer=Adam(LRATE), metrics=['accuracy'])
model.fit(data["X_train"], data["Y_train"],
          nb_epoch=EPOCHS,
          validation_data=(data["X_valid"], data["Y_valid"]),
          verbose=VERBOSITY,
          callbacks=None)
# What percent of all errors is in the mask?
mask = cm > 0
pred = model.predict(data["X_test"])
model_cm = confusion_matrix(data["Y_test"].argmax(axis=1), pred.argmax(axis=1))
model_cm_norm = confusion_matrix_normalizer(model_cm, strip_diagonal=True, normalize_rows=False, normalize_matrix=True)
percent_error_in_mask = model_cm_norm[mask].sum() * 100.
print("The percent of all error in the mask is: {:.4f}%".format(percent_error_in_mask))
| 2,044 | 34.877193 | 119 | py |
paper-log-bilinear-loss | paper-log-bilinear-loss-master/loss.py |
import numpy as np
import tensorflow as tf
from keras import backend as K
def loss_function_generator(conf_mat, log=False, alpha=.5):
    """
    Generate a Bilinear/Log-Bilinear loss combined with the regular
    cross-entropy loss:

        (1 - alpha) * cross_entropy + alpha * bilinear (or log-bilinear)

    :param conf_mat: np.Array
        all positive confusion matrix. A higher value in [i, j] indicates a higher
        penalty for making the mistake of classifying an example really of class i,
        as class j (i.e. placing weight there, since the output is a
        probability vector).
    :param log: bool
        generate the log-bilinear loss?
    :param alpha: float
        the trade-off parameter between the cross-entropy and bilinear/log-bilinear
        parts of the loss
    :return: lambda
        f: y_true, y_pred -> loss
    """
    # Strip the diagonal WITHOUT mutating the caller's array: the original
    # `conf_mat -= ...` modified the input matrix in place as a side effect.
    conf_mat = conf_mat - np.eye(conf_mat.shape[0]) * np.diag(conf_mat)
    # tf.constant versions of the (off-diagonal) confusion matrix and identity.
    cm = tf.constant(conf_mat)
    I = tf.constant(np.eye(conf_mat.shape[0]), dtype=np.float32)
    # The regular cross-entropy loss (correct-class / diagonal term).
    diagonal_loss = lambda y_true, y_pred: -K.mean(K.batch_dot(K.expand_dims(K.dot(y_true, I), 1), K.expand_dims(tf.log(y_pred + 1e-10), 2)))
    # The off-diagonal part of the loss -- how we weigh the error i -> j.
    if log:
        off_diagonal_loss = lambda y_true, y_pred: -K.mean(K.batch_dot(K.expand_dims(K.dot(y_true, cm), 1), K.expand_dims(tf.log(1 - y_pred + 1e-10), 2)))
    else:
        off_diagonal_loss = lambda y_true, y_pred: K.mean(K.batch_dot(K.expand_dims(K.dot(y_true, cm), 1), K.expand_dims(y_pred, 2)))
    return lambda y_true, y_pred: diagonal_loss(y_true, y_pred) * (1 - alpha) + off_diagonal_loss(y_true, y_pred) * alpha
def bilinear_loss(cm, alpha=.5):
    """Bilinear loss (linear off-diagonal penalty) mixed with cross-entropy."""
    return loss_function_generator(cm, log=False, alpha=alpha)


def log_bilinear_loss(cm, alpha=.5):
    """Log-bilinear loss (log off-diagonal penalty) mixed with cross-entropy."""
    # NOTE: the original last line had dataset metadata fused onto it
    # ("... | 1,997 | ..."), which is a syntax error; removed here.
    return loss_function_generator(cm, log=True, alpha=alpha)
paper-log-bilinear-loss | paper-log-bilinear-loss-master/models.py |
from keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, MaxPooling2D
from keras.models import Sequential
def mnist_model():
    """LeNet-style CNN for 28x28x1 MNIST digits (Keras 1 API).

    Two conv/pool/dropout stages (20 and 50 5x5 filters), then a 500-unit
    dense layer and a softmax over the 10 digit classes.
    """
    layers = [
        Convolution2D(20, 5, 5, border_mode='same', activation='relu', input_shape=(28, 28, 1)),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(.2),
        Convolution2D(50, 5, 5, border_mode='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dropout(.2),
        Dense(500, activation='relu'),
        Dense(10),
        Activation('softmax'),
    ]
    return Sequential(layers)
def cifar10_model():
    """VGG-style CNN for 32x32x3 CIFAR-10 images (Keras 1 API).

    Three conv stages (64 -> 128 -> 256 filters, three 3x3 convs each) with
    pooling and dropout, followed by a three-layer dense head and a softmax
    over the 10 classes.
    """
    model = Sequential()
    # Stage 1: three 64-filter 3x3 convs, 2x2 max-pool, dropout.
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', input_shape=(32, 32, 3)))
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(.25))
    # Stage 2: three 128-filter convs.
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(.25))
    # Stage 3: three 256-filter convs; the 8x8 pool collapses the spatial dims.
    model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
    model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
    model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(8, 8)))
    model.add(Dropout(.25))
    # Dense classification head.
    model.add(Flatten())
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    return model
def cifar100_model():
    """VGG-style CNN for 32x32x3 CIFAR-100 images (Keras 1 API).

    Three conv stages (64 -> 128 -> 256 filters, two 3x3 convs each) with
    pooling and dropout, followed by a two-layer dense head and a softmax
    over the 100 classes.
    """
    model = Sequential()
    # Stage 1: two 64-filter 3x3 convs, 2x2 max-pool, dropout.
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', input_shape=(32, 32, 3)))
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(.25))
    # Stage 2: two 128-filter convs.
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(.25))
    # Stage 3: two 256-filter convs; the 8x8 pool collapses the spatial dims.
    model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
    model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(8, 8)))
    model.add(Dropout(.25))
    # Dense classification head with dropout between layers.
    model.add(Flatten())
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(.25))
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(.25))
    model.add(Dense(100))
    model.add(Activation('softmax'))
    return model
| 2,914 | 41.246377 | 102 | py |
DCAP | DCAP-main/layer.py | import numpy as np
import torch
import torch.nn.functional as F
from torchfm.utils import get_activation_fn
from torchfm.attention_layer import MultiheadAttentionInnerProduct
class FeaturesLinear(torch.nn.Module):
    """Linear (first-order) term of an FM-style CTR model.

    Every categorical feature value gets a scalar weight in one flat
    embedding table; the forward pass sums the weights of the active values
    and adds a global bias.
    """

    def __init__(self, field_dims, output_dim=1):
        super().__init__()
        self.fc = torch.nn.Embedding(sum(field_dims), output_dim)
        self.bias = torch.nn.Parameter(torch.zeros((output_dim,)))
        # Offsets map per-field indices into the flat table.
        # FIX: np.long was removed in NumPy 1.24; np.int64 is the equivalent.
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :return: tensor of size ``(batch_size, output_dim)``
        """
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        return torch.sum(self.fc(x), dim=1) + self.bias
class FeaturesEmbedding(torch.nn.Module):
    """Shared dense embedding for all categorical fields.

    All fields share one flat table of size ``sum(field_dims)``; per-field
    indices are shifted by cumulative offsets before lookup.
    """

    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.embedding = torch.nn.Embedding(sum(field_dims), embed_dim)
        # FIX: np.long was removed in NumPy 1.24; np.int64 is the equivalent.
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
        torch.nn.init.xavier_uniform_(self.embedding.weight.data)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :return: tensor of size ``(batch_size, num_fields, embed_dim)``
        """
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        return self.embedding(x)
class FieldAwareFactorizationMachine(torch.nn.Module):
    """Second-order term of an FFM: one embedding table per field.

    For each field pair ``i < j`` it multiplies field ``i``'s value embedded
    in table ``j`` with field ``j``'s value embedded in table ``i``.
    """

    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.num_fields = len(field_dims)
        self.embeddings = torch.nn.ModuleList([
            torch.nn.Embedding(sum(field_dims), embed_dim) for _ in range(self.num_fields)
        ])
        # FIX: np.long was removed in NumPy 1.24; np.int64 is the equivalent.
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
        for embedding in self.embeddings:
            torch.nn.init.xavier_uniform_(embedding.weight.data)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :return: tensor of size ``(batch_size, num_fields*(num_fields-1)//2, embed_dim)``
        """
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        xs = [self.embeddings[i](x) for i in range(self.num_fields)]
        ix = list()
        for i in range(self.num_fields - 1):
            for j in range(i + 1, self.num_fields):
                ix.append(xs[j][:, i] * xs[i][:, j])
        ix = torch.stack(ix, dim=1)
        return ix
class FactorizationMachine(torch.nn.Module):
def __init__(self, reduce_sum=True):
super().__init__()
self.reduce_sum = reduce_sum
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
square_of_sum = torch.sum(x, dim=1) ** 2
sum_of_square = torch.sum(x ** 2, dim=1)
ix = square_of_sum - sum_of_square
if self.reduce_sum:
ix = torch.sum(ix, dim=1, keepdim=True)
return 0.5 * ix
class MultiLayerPerceptron(torch.nn.Module):
    """Stack of Linear -> BatchNorm -> ReLU -> Dropout blocks.

    When ``output_layer`` is true, a final ``Linear(last_width, 1)`` scoring
    layer is appended.
    """

    def __init__(self, input_dim, embed_dims, dropout, output_layer=True):
        super().__init__()
        blocks = []
        prev_dim = input_dim
        for width in embed_dims:
            blocks += [
                torch.nn.Linear(prev_dim, width),
                torch.nn.BatchNorm1d(width),
                torch.nn.ReLU(),
                torch.nn.Dropout(p=dropout),
            ]
            prev_dim = width
        if output_layer:
            blocks.append(torch.nn.Linear(prev_dim, 1))
        self.mlp = torch.nn.Sequential(*blocks)

    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, embed_dim)``
        """
        return self.mlp(x)
class InnerProductNetwork(torch.nn.Module):
    """Pairwise inner products between all field embeddings (PNN style)."""

    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        :return: tensor of size ``(batch_size, num_fields*(num_fields-1)//2)``
        """
        num_fields = x.shape[1]
        # All (i, j) index pairs with i < j.
        pairs = [(i, j) for i in range(num_fields - 1) for j in range(i + 1, num_fields)]
        rows = [i for i, _ in pairs]
        cols = [j for _, j in pairs]
        return torch.sum(x[:, rows] * x[:, cols], dim=2)
class OuterProductNetwork(torch.nn.Module):
    """Kernel-weighted outer-product interaction between field pairs (PNN).

    ``kernel_type`` selects the kernel shape:
      - 'mat': one (embed_dim x embed_dim) matrix per field pair
      - 'vec': one embed_dim vector per pair
      - 'num': one scalar per pair
    """
    def __init__(self, num_fields, embed_dim, kernel_type='mat'):
        super().__init__()
        num_ix = num_fields * (num_fields - 1) // 2
        if kernel_type == 'mat':
            kernel_shape = embed_dim, num_ix, embed_dim
        elif kernel_type == 'vec':
            kernel_shape = num_ix, embed_dim
        elif kernel_type == 'num':
            kernel_shape = num_ix, 1
        else:
            raise ValueError('unknown kernel type: ' + kernel_type)
        self.kernel_type = kernel_type
        self.kernel = torch.nn.Parameter(torch.zeros(kernel_shape))
        torch.nn.init.xavier_uniform_(self.kernel.data)
    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        :return: tensor of size ``(batch_size, num_fields*(num_fields-1)//2)``
        """
        num_fields = x.shape[1]
        # Enumerate all (i, j) pairs with i < j.
        row, col = list(), list()
        for i in range(num_fields - 1):
            for j in range(i + 1, num_fields):
                row.append(i), col.append(j)
        p, q = x[:, row], x[:, col]
        if self.kernel_type == 'mat':
            # p^T K q for each pair, via broadcasting over the pair axis.
            kp = torch.sum(p.unsqueeze(1) * self.kernel, dim=-1).permute(0, 2, 1)
            return torch.sum(kp * q, -1)
        else:
            # Element-wise kernel ('vec' broadcasts per dim, 'num' per pair).
            return torch.sum(p * q * self.kernel.unsqueeze(0), -1)
class CrossProductNetwork(torch.nn.Module):
    """One DCAP layer: multi-head attention over the current representation,
    then a kernel-weighted product of attended fields with the original
    embeddings ``x0``.

    Returns ``(next_x, pair_scores)`` where ``next_x`` is the pooled
    per-field representation fed to the next layer and ``pair_scores`` holds
    one score per field pair.
    """

    def __init__(self, num_fields, embed_dim, num_heads, dropout=0.2, kernel_type='mat'):
        super().__init__()
        num_ix = num_fields * (num_fields - 1) // 2
        if kernel_type == 'mat':
            kernel_shape = embed_dim, num_ix, embed_dim
        elif kernel_type == 'vec':
            kernel_shape = num_ix, embed_dim
        elif kernel_type == 'num':
            kernel_shape = num_ix, 1
        else:
            raise ValueError('unknown kernel type: ' + kernel_type)
        self.kernel_type = kernel_type
        self.kernel = torch.nn.Parameter(torch.zeros(kernel_shape))
        self.avg_pool = torch.nn.AdaptiveAvgPool1d(num_fields)
        self.attn = MultiheadAttentionInnerProduct(num_fields, embed_dim, num_heads, dropout)
        torch.nn.init.xavier_uniform_(self.kernel.data)

    def forward(self, x, x0, attn_mask=None):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        :param x0: original embeddings, same shape as ``x``
        """
        bsz, num_fields, embed_dim = x0.size()
        row, col = list(), list()
        for i in range(num_fields - 1):
            for j in range(i + 1, num_fields):
                row.append(i), col.append(j)
        # Attend first, then pair attended fields with the raw embeddings.
        x, _ = self.attn(x, x, x, attn_mask)
        p, q = x[:, row], x0[:, col]
        if self.kernel_type == 'mat':
            kp = torch.sum(p.unsqueeze(1) * self.kernel, dim=-1).permute(0, 2, 1)  # (bsz, n(n-1)/2, embed_dim)
            kpq = kp * q  # outer-product interaction
        else:
            # BUG FIX: the original referenced ``p``/``q`` here before they
            # were assigned (they were only defined inside the 'mat' branch),
            # raising NameError, and returned a bare tensor instead of the
            # (x, scores) tuple the caller unpacks.
            kpq = p * q * self.kernel.unsqueeze(0)
        # Pool the pair interactions back to one vector per field.
        x = self.avg_pool(kpq.permute(0, 2, 1)).permute(0, 2, 1)  # (bsz, num_fields, embed_dim)
        return x, torch.sum(kpq, dim=-1)
class CrossAttentionalProductNetwork(torch.nn.Module):
    """Stack of ``CrossProductNetwork`` layers (the DCAP interaction tower).

    Each layer refines the per-field representation and emits one score per
    field pair; the per-layer scores are concatenated along dim 1.
    """
    def __init__(self, num_fields, embed_dim, num_heads, num_layers, dropout, kernel_type='mat'):
        super().__init__()
        self.layers = torch.nn.ModuleList([])
        self.layers.extend(
            [self.build_encoder_layer(num_fields=num_fields, embed_dim=embed_dim, num_heads=num_heads,
                                      dropout=dropout, kernel_type=kernel_type) for _ in range(num_layers)]
        )
        # self.layers = torch.nn.ModuleList([
        #     torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout) for _ in range(num_layers)
        # ])
        # self.norm = torch.nn.BatchNorm1d(num_fields * (num_fields - 1) // 2)
        # self.avg_pool = torch.nn.AdaptiveAvgPool1d(num_fields)
        # self.fc = torch.nn.Linear(embed_dim, 1)
    def build_encoder_layer(self, num_fields, embed_dim, num_heads, dropout, kernel_type='mat'):
        # Factory hook so subclasses can swap the per-layer module.
        return CrossProductNetwork(num_fields=num_fields, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout, kernel_type=kernel_type)
    def forward(self, x, attn_mask=None):
        # x0 keeps the layer-0 embeddings; every layer crosses against them.
        x0 = x
        output = []
        for layer in self.layers:
            x, y = layer(x, x0, attn_mask)
            output.append(y)
        output = torch.cat(output, dim=1)
        return output
class CrossNetwork(torch.nn.Module):
    """DCN-style cross layers: ``x_{l+1} = x_0 * w_l(x_l) + b_l + x_l``."""

    def __init__(self, input_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        self.w = torch.nn.ModuleList([
            torch.nn.Linear(input_dim, 1, bias=False) for _ in range(num_layers)
        ])
        self.b = torch.nn.ParameterList([
            torch.nn.Parameter(torch.zeros((input_dim,))) for _ in range(num_layers)
        ])

    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        """
        x0 = x
        for layer in range(self.num_layers):
            # Scalar projection of the current representation, broadcast
            # against the original input, plus bias and residual.
            projection = self.w[layer](x)
            x = x0 * projection + self.b[layer] + x
        return x
class AttentionalFactorizationMachine(torch.nn.Module):
    """AFM interaction: attention-weighted sum of pairwise element products.

    ``dropouts`` is a 2-tuple: dropout on the attention scores and on the
    pooled attention output.
    """
    def __init__(self, embed_dim, attn_size, dropouts):
        super().__init__()
        self.attention = torch.nn.Linear(embed_dim, attn_size)
        self.projection = torch.nn.Linear(attn_size, 1)
        self.fc = torch.nn.Linear(embed_dim, 1)
        self.dropouts = dropouts
    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        :return: tensor of size ``(batch_size, 1)``
        """
        num_fields = x.shape[1]
        # Enumerate all (i, j) pairs with i < j.
        row, col = list(), list()
        for i in range(num_fields - 1):
            for j in range(i + 1, num_fields):
                row.append(i), col.append(j)
        p, q = x[:, row], x[:, col]
        inner_product = p * q
        # Attention weights over the pair axis (softmax along dim 1).
        attn_scores = F.relu(self.attention(inner_product))
        attn_scores = F.softmax(self.projection(attn_scores), dim=1)
        attn_scores = F.dropout(attn_scores, p=self.dropouts[0], training=self.training)
        attn_output = torch.sum(attn_scores * inner_product, dim=1)
        attn_output = F.dropout(attn_output, p=self.dropouts[1], training=self.training)
        return self.fc(attn_output)
class CompressedInteractionNetwork(torch.nn.Module):
    """CIN from xDeepFM: explicit vector-wise interactions compressed by
    1x1 convolutions, with optional split-half feature reuse.
    """
    def __init__(self, input_dim, cross_layer_sizes, split_half=True):
        super().__init__()
        self.num_layers = len(cross_layer_sizes)
        self.split_half = split_half
        self.conv_layers = torch.nn.ModuleList()
        prev_dim, fc_input_dim = input_dim, 0
        for i in range(self.num_layers):
            cross_layer_size = cross_layer_sizes[i]
            # 1x1 conv compresses the (input_dim * prev_dim) pairwise maps.
            self.conv_layers.append(torch.nn.Conv1d(input_dim * prev_dim, cross_layer_size, 1,
                                                    stride=1, dilation=1, bias=True))
            if self.split_half and i != self.num_layers - 1:
                # Half the maps feed the next layer, half go to the output.
                cross_layer_size //= 2
            prev_dim = cross_layer_size
            fc_input_dim += prev_dim
        self.fc = torch.nn.Linear(fc_input_dim, 1)
    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        :return: tensor of size ``(batch_size, 1)``
        """
        xs = list()
        x0, h = x.unsqueeze(2), x
        for i in range(self.num_layers):
            # Outer product of layer-0 fields with the previous hidden maps.
            x = x0 * h.unsqueeze(1)
            batch_size, f0_dim, fin_dim, embed_dim = x.shape
            x = x.view(batch_size, f0_dim * fin_dim, embed_dim)
            x = F.relu(self.conv_layers[i](x))
            if self.split_half and i != self.num_layers - 1:
                x, h = torch.split(x, x.shape[1] // 2, dim=1)
            else:
                h = x
            xs.append(x)
        # Sum-pool over the embedding axis, then score.
        return self.fc(torch.sum(torch.cat(xs, dim=1), 2))
class AnovaKernel(torch.nn.Module):
    """ANOVA kernel of the given order, computed with the standard dynamic
    program over fields (cumulative-sum recursion).
    """
    def __init__(self, order, reduce_sum=True):
        super().__init__()
        self.order = order  # interaction order t
        self.reduce_sum = reduce_sum
    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        """
        batch_size, num_fields, embed_dim = x.shape
        # a_prev[:, j, :] is the order-(t) partial sum using the first j fields.
        a_prev = torch.ones((batch_size, num_fields + 1, embed_dim), dtype=torch.float).to(x.device)
        for t in range(self.order):
            a = torch.zeros((batch_size, num_fields + 1, embed_dim), dtype=torch.float).to(x.device)
            # DP recurrence: extend each order-t term by one more field, then
            # cumulative-sum to aggregate over field prefixes.
            a[:, t+1:, :] += x[:, t:, :] * a_prev[:, t:-1, :]
            a = torch.cumsum(a, dim=1)
            a_prev = a
        if self.reduce_sum:
            return torch.sum(a[:, -1, :], dim=-1, keepdim=True)
        else:
            return a[:, -1, :]
| 12,567 | 36.404762 | 141 | py |
DCAP | DCAP-main/utils.py | import torch.nn.functional as F
import torch
def get_activation_fn(activation: str):
    """Return the activation function named by ``activation``.

    Supported names: ``"relu"`` -> ``F.relu``, ``"tanh"`` -> ``torch.tanh``,
    ``"linear"`` -> identity. Any other name raises ``RuntimeError`` (same
    message as before). The original also had dataset metadata fused onto
    its last line and a block of dead commented-out gelu variants; both
    removed.
    """
    activations = {
        "relu": F.relu,
        "tanh": torch.tanh,
        "linear": lambda x: x,
    }
    try:
        return activations[activation]
    except KeyError:
        raise RuntimeError("--activation-fn {} not supported".format(activation)) from None
DCAP | DCAP-main/attention_layer.py | import numpy as np
import torch
import torch.nn.functional as F
from torchfm.utils import get_activation_fn
class MultiheadAttentionInnerProduct(torch.nn.Module):
    """Multi-head scaled-dot-product attention over field embeddings.

    Q and K are linearly projected; V is used as-is (no value projection —
    the projected-value variants are left commented out). Returns
    ``(attn_output, attn_weights_or_None)``.
    """
    def __init__(self, num_fields, embed_dim, num_heads, dropout):
        super().__init__()
        self.num_fields = num_fields
        # NOTE(review): this upper-triangular mask is built but never used in
        # forward; it is also a plain tensor (not a registered buffer), so it
        # would not follow the module across .to(device).
        self.mask = (torch.triu(torch.ones(num_fields, num_fields), diagonal=1) == 1)
        self.num_cross_terms = num_fields * (num_fields - 1) // 2
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout_p = dropout
        head_dim = embed_dim // num_heads
        assert head_dim * num_heads == embed_dim, "head dim is not divisible by embed dim"
        self.head_dim = head_dim
        # 1/sqrt(head_dim) scaling applied to the queries.
        self.scale = self.head_dim ** -0.5
        self.linear_q = torch.nn.Linear(embed_dim, num_heads * head_dim, bias=True)
        self.linear_k = torch.nn.Linear(embed_dim, num_heads * head_dim, bias=True)
        # self.linear_vq = torch.nn.Linear(embed_dim, num_heads * head_dim, bias=True)
        # self.linear_vk = torch.nn.Linear(embed_dim, num_heads * head_dim, bias=True)
        self.avg_pool = torch.nn.AdaptiveAvgPool1d(num_fields)
        self.output_layer = torch.nn.Linear(embed_dim, embed_dim, bias=True)
        # self.fc = torch.nn.Linear(embed_dim, 1)
    def forward(self, query, key, value, attn_mask=None, need_weights=False):
        bsz, num_fields, embed_dim = query.size()
        # Project and reshape Q to (bsz * num_heads, num_fields, head_dim).
        q = self.linear_q(query)
        q = q.transpose(0, 1).contiguous()
        q = q.view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        q = q * self.scale
        # Same reshape for K.
        k = self.linear_k(key)
        k = k.transpose(0, 1).contiguous()
        k = k.view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        # vq = self.linear_vq(value)
        # V is split into heads without a learned projection.
        v = value.transpose(0, 1).contiguous()
        v = v.view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        attn_output_weights = torch.bmm(q, k.transpose(1, 2))
        if attn_mask is not None:
            # Additive mask (e.g. -inf at disallowed positions).
            attn_output_weights += attn_mask
        attn_output_weights = F.softmax(
            attn_output_weights, dim=-1)
        attn_output_weights = F.dropout(attn_output_weights, p=self.dropout_p, training=self.training)
        # attn_output_weights = attn_output_weights[:, self.mask]
        # inner_product = vq[:, self.row] * vk[:, self.col]
        # inner_product = vq * vk
        attn_output = torch.bmm(attn_output_weights, v)
        assert list(attn_output.size()) == [bsz * self.num_heads, num_fields, self.head_dim]
        # Merge heads back into (bsz, num_fields, embed_dim).
        attn_output = attn_output.transpose(0, 1).contiguous().view(num_fields, bsz, embed_dim).transpose(0, 1)
        # attn_output = attn_output_weights.unsqueeze(-1) * inner_product
        attn_output = self.output_layer(attn_output)
        if need_weights:
            # average attention weights over heads
            # NOTE(review): this sums over the *batch* axis and divides by
            # bsz; averaging over heads would reduce dim=1 — confirm intent.
            attn_output_weights = attn_output_weights.view(bsz, self.num_heads, num_fields, num_fields)
            return attn_output, attn_output_weights.sum(dim=0) / bsz
        return attn_output, None
class FeaturesInteractionLayer(torch.nn.Module):
    """Encoder layer: multi-head attention between the current representation
    ``x`` and the layer-0 embeddings ``memory``, with dropout, a residual
    connection, and pre- or post-layernorm.

    With ``normalize_before=True`` the layer normalizes before the attention
    and postprocesses with `dropout -> add residual` (tensor2tensor style);
    otherwise it uses the original `dropout -> add residual -> layernorm`.
    The position-wise FFN sub-block is constructed but currently disabled
    (commented out in ``forward``).
    """

    def __init__(self, num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn='relu', normalize_before=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.ffn_embed_dim = ffn_embed_dim
        self.normalize_before = normalize_before
        self.self_attn = self.build_self_attention(num_fields, embed_dim, num_heads, dropout)
        self.self_attn_layer_norm = torch.nn.LayerNorm(embed_dim)
        self.dropout = dropout
        self.activation_fn = get_activation_fn(
            activation=activation_fn
        )
        self.activation_dropout = 0.0
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = self.dropout
        self.fc1 = self.build_fc1(
            embed_dim, ffn_embed_dim
        )
        self.fc2 = self.build_fc2(
            ffn_embed_dim, embed_dim
        )
        self.final_layer_norm = torch.nn.LayerNorm(embed_dim)

    def build_fc1(self, input_dim, output_dim):
        # Factory hook for the first FFN projection.
        return torch.nn.Linear(input_dim, output_dim)

    def build_fc2(self, input_dim, output_dim):
        # Factory hook for the second FFN projection.
        return torch.nn.Linear(input_dim, output_dim)

    def build_self_attention(self, num_fields, embed_dim, num_heads, dropout):
        # Factory hook for the attention module.
        return MultiheadAttentionInnerProduct(
            num_fields,
            embed_dim,
            num_heads,
            dropout=dropout
        )

    def forward(self, x, memory, attn_mask=None):
        """
        Args:
            x (Tensor): current representation, ``(batch, num_fields, embed_dim)``
            memory (Tensor): layer-0 embeddings used as attention keys
            attn_mask (Tensor): optional additive attention mask

        Returns:
            ``(x, y)`` where ``x`` is the updated representation and ``y``
            is the second value returned by the attention module.
        """
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # BUG FIX: the original called
        #   self.self_attn(x, memory, x, memory, attn_mask=attn_mask)
        # which binds the 4th positional ``memory`` to ``attn_mask`` and then
        # also passes ``attn_mask`` by keyword -> TypeError at runtime.
        # MultiheadAttentionInnerProduct.forward takes
        # (query, key, value, attn_mask=None, need_weights=False).
        x, y = self.self_attn(
            x, memory, x,
            attn_mask=attn_mask
        )
        # NOTE(review): ``y`` is None unless need_weights=True; the stacked
        # network concatenates ``y`` values downstream — confirm whether
        # need_weights should be requested here.
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # residual = x
        # if self.normalize_before:
        #     x = self.final_layer_norm(x)
        # x = self.activation_fn(self.fc1(x))
        # x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
        # x = self.fc2(x)
        # x = F.dropout(x, p=self.dropout, training=self.training)
        # x = residual + x
        # if not self.normalize_before:
        #     x = self.final_layer_norm(x)
        return x, y
class CrossAttentionalProductNetwork(torch.nn.Module):
    """Stack of ``FeaturesInteractionLayer`` blocks followed by a linear
    scoring head over the concatenated per-layer outputs.
    """
    def __init__(self, num_fields, embed_dim, num_heads, ffn_embed_dim, num_layers, dropout, activation_fn='relu', normalize_before=True):
        super().__init__()
        self.layers = torch.nn.ModuleList([])
        self.layers.extend(
            [self.build_encoder_layer(num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn, normalize_before) for i in range(num_layers)]
        )
        self.dropout = dropout
        # Final layernorm only in the pre-norm configuration.
        if normalize_before:
            self.layer_norm = torch.nn.LayerNorm(embed_dim)
        else:
            self.layer_norm = None
        self.fc = torch.nn.Linear(embed_dim, 1)
    def build_encoder_layer(self, num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn, normalize_before):
        # Factory hook so subclasses can swap the per-layer module.
        return FeaturesInteractionLayer(num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn, normalize_before)
    def forward(self, x, attn_mask=None):
        # x shape: [batch_size, num_fields, embed_dim]
        # x0 keeps the layer-0 embeddings; each layer attends against them.
        x0 = x
        output = []
        for layer in self.layers:
            x, y = layer(x, x0, attn_mask)
            output.append(y)
        # Concatenate the per-layer secondary outputs and score them.
        output = torch.cat(output, dim=1)
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        return self.fc(output)
class FeaturesInteractionDecoderLayer(torch.nn.Module):
    """Encoder layer block.

    NOTE(review): despite the class name ("DecoderLayer") the docstring calls
    this an encoder layer; only the cross-attention + FFN sub-layers are active
    (the self-attention path is commented out below).

    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.
    Args:
        args (argparse.Namespace): parsed command-line arguments
    """
    def __init__(self, num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn, normalize_before):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.ffn_embed_dim = ffn_embed_dim
        self.normalize_before = normalize_before
        # self.self_attn = self.build_self_attention(embed_dim, num_heads, dropout)
        # self.self_attn_layer_norm = torch.nn.BatchNorm1d(num_fields)
        self.cross_attn = self.build_cross_attention(embed_dim, num_heads, dropout)
        self.cross_attn_layer_norm = torch.nn.LayerNorm(embed_dim)
        self.dropout = dropout
        self.activation_fn = get_activation_fn(
            activation=activation_fn
        )
        self.activation_dropout = 0.0
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = self.dropout
        # Position-wise feed-forward network: embed_dim -> ffn_embed_dim -> embed_dim.
        self.fc1 = self.build_fc1(
            embed_dim, ffn_embed_dim
        )
        self.fc2 = self.build_fc2(
            ffn_embed_dim, embed_dim
        )
        self.final_layer_norm = torch.nn.LayerNorm(embed_dim)
    def build_fc1(self, input_dim, output_dim):
        # First FFN projection (factory hook for subclasses).
        return torch.nn.Linear(input_dim, output_dim)
    def build_fc2(self, input_dim, output_dim):
        # Second FFN projection (factory hook for subclasses).
        return torch.nn.Linear(input_dim, output_dim)
    # def build_self_attention(self, embed_dim, num_heads, dropout):
    #     return torch.nn.MultiheadAttention(
    #         embed_dim,
    #         num_heads,
    #         dropout=dropout
    #     )
    def build_cross_attention(self, embed_dim, num_heads, dropout):
        # Standard multi-head attention; queries come from x, keys/values from memory.
        return torch.nn.MultiheadAttention(
            embed_dim,
            num_heads,
            dropout=dropout
        )
    def forward(self, x, memory, attn_mask=None):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            memory (Tensor): key/value source for the cross-attention
                (presumably the layer-0 embeddings — confirm with caller).
            attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where
                T_tgt is the length of query, while T_src is the length of key,
                though here both query and key is x here,
                attn_mask[t_tgt, t_src] = 1 means when calculating embedding
                for t_tgt, t_src is excluded (or masked out), =0 means it is
                included in attention
        Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
        """
        # residual = x
        # if self.normalize_before:
        #     x = self.self_attn_layer_norm(x)
        # x, _ = self.self_attn(
        #     x, x, x,
        #     attn_mask=attn_mask
        # )
        # x = F.dropout(x, p=self.dropout, training=self.training)
        # x = residual + x
        # if not self.normalize_before:
        #     x = self.self_attn_layer_norm(x)
        residual = x
        # Cross-attention sub-layer (pre- or post-norm per `normalize_before`).
        if self.normalize_before:
            x = self.cross_attn_layer_norm(x)
        x, _ = self.cross_attn(
            x, memory, memory,
            attn_mask=attn_mask
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.cross_attn_layer_norm(x)
        residual = x
        # Position-wise feed-forward sub-layer with its own residual connection.
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x
class CrossAttentionNetwork(torch.nn.Module):
    """Stack of ``FeaturesInteractionDecoderLayer`` blocks.

    Every layer cross-attends the running representation ``x`` against the
    original input ``x0``. With ``normalize_before=True`` a final LayerNorm is
    applied to the output (standard pre-norm transformer arrangement).

    NOTE: the original dump had file-metadata residue fused onto the final
    ``return`` statement, which made the module unparsable; this emits the
    same logic cleanly.
    """
    def __init__(self, num_fields, embed_dim, num_heads, ffn_embed_dim, num_layers, dropout, activation_fn='relu', normalize_before=True):
        super().__init__()
        self.layers = torch.nn.ModuleList([])
        self.layers.extend(
            [self.build_encoder_layer(num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn, normalize_before) for i in range(num_layers)]
        )
        self.dropout = dropout
        # Final LayerNorm only exists in the pre-norm configuration.
        if normalize_before:
            self.layer_norm = torch.nn.LayerNorm(embed_dim)
        else:
            self.layer_norm = None
    def build_encoder_layer(self, num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn, normalize_before):
        # Factory hook so subclasses can swap the layer implementation.
        return FeaturesInteractionDecoderLayer(num_fields, embed_dim, num_heads, ffn_embed_dim, dropout, activation_fn, normalize_before)
    def forward(self, x, attn_mask=None):
        # x shape: [batch_size, num_fields, embed_dim]
        x0 = x
        for layer in self.layers:
            # Each layer sees the current stream and the original embeddings x0.
            x = layer(x, x0, attn_mask)
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        return x
# ==== DCAP-main/dataset/rapid.py ====
import math
import shutil
import struct
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
import lmdb
import numpy as np
import torch.utils.data
from tqdm import tqdm
class RapidAdvanceDataset(torch.utils.data.Dataset):
    """
    Rapid Advance Dataset (CSV with the target in column 0, features after it).

    Data preparation
        treat samples with a rating less than 3 as negative samples

    :param dataset_path: directory containing ``train.csv``
    """
    def __init__(self, dataset_path):
        super().__init__()
        # Local import: pandas is only needed when the dataset is actually built.
        import pandas as pd
        ratings_info = pd.read_csv(dataset_path+'/train.csv', sep='::', engine='python')
        self.items = ratings_info.iloc[:, 1:]
        self.targets = ratings_info.iloc[:, 0].to_numpy().astype(np.float32)
        # Map the gender column to integers. Use .loc: chained indexing
        # (df[col][mask] = v) may assign to a copy and silently do nothing.
        self.items.loc[self.items['1_y'] == 'M', '1_y'] = 0
        self.items.loc[self.items['1_y'] == 'F', '1_y'] = 1
        # np.int / np.long were removed from NumPy (>=1.24); use explicit int64.
        self.items = self.items.to_numpy().astype(np.int64)
        self.targets = self.__preprocess_target(self.targets).astype(np.float32)
        self.field_dims = np.max(self.items, axis=0) + 1
        self.user_field_idx = np.array((0, ), dtype=np.int64)
        self.item_field_idx = np.array((1,), dtype=np.int64)
    def __len__(self):
        return self.targets.shape[0]
    def __getitem__(self, index):
        return self.items[index], self.targets[index]
    def __preprocess_target(self, target):
        """Binarize ratings: <= 3 becomes 0 (negative), > 3 becomes 1 (positive)."""
        target[target <= 3] = 0
        target[target > 3] = 1
        return target
    def __preprocess_items(self, items):
        """Label-encode every feature column in place and return the frame.

        The original body was truncated (an incomplete assignment that made the
        module unparsable); this completes it with per-column factorization.
        """
        import pandas as pd
        for col in items.columns:
            items[col] = pd.factorize(items[col])[0]
        return items
@lru_cache(maxsize=None)
def convert_numeric_feature(val: str):
    """Bucket a raw numeric field into a coarse categorical token.

    '' -> 'NULL'; values > 2 -> str(int(log(v)**2)); otherwise str(v - 2).
    Cached because the same raw values repeat heavily across rows.

    NOTE: the original dump had file-metadata residue fused onto the final
    return statement (a SyntaxError); this emits the same logic cleanly.
    """
    if val == '':
        return 'NULL'
    v = int(val)
    if v > 2:
        return str(int(math.log(v) ** 2))
    else:
        return str(v - 2)
# ==== DCAP-main/dataset/avazu.py ====
import shutil
import struct
from collections import defaultdict
from pathlib import Path
import lmdb
import numpy as np
import torch.utils.data
from tqdm import tqdm
class AvazuDataset(torch.utils.data.Dataset):
    """
    Avazu Click-Through Rate Prediction Dataset

    Dataset preparation
        Remove the infrequent features (appearing in less than threshold instances) and treat them as a single feature

    :param dataset_path: avazu train path
    :param cache_path: lmdb cache path
    :param rebuild_cache: If True, lmdb cache is refreshed
    :param min_threshold: infrequent feature threshold

    Reference
        https://www.kaggle.com/c/avazu-ctr-prediction
    """
    def __init__(self, dataset_path=None, cache_path='.avazu', rebuild_cache=False, min_threshold=4):
        self.NUM_FEATS = 22
        self.min_threshold = min_threshold
        if rebuild_cache or not Path(cache_path).exists():
            shutil.rmtree(cache_path, ignore_errors=True)
            if dataset_path is None:
                raise ValueError('create cache: failed: dataset_path is None')
            self.__build_cache(dataset_path, cache_path)
        self.env = lmdb.open(cache_path, create=False, lock=False, readonly=True)
        with self.env.begin(write=False) as txn:
            # One extra entry stores field_dims, hence the -1.
            self.length = txn.stat()['entries'] - 1
            self.field_dims = np.frombuffer(txn.get(b'field_dims'), dtype=np.uint32)
    def __getitem__(self, index):
        """Return (features, label); record layout is [label, feat_1..feat_22]."""
        with self.env.begin(write=False) as txn:
            # np.long was removed from NumPy (>=1.24); use explicit int64.
            np_array = np.frombuffer(
                txn.get(struct.pack('>I', index)), dtype=np.uint32).astype(dtype=np.int64)
        return np_array[1:], np_array[0]
    def __len__(self):
        return int(self.length)
    def __build_cache(self, path, cache_path):
        """Build the lmdb cache: field sizes plus one packed record per sample."""
        feat_mapper, defaults = self.__get_feat_mapper(path)
        with lmdb.open(cache_path, map_size=int(1e11)) as env:
            field_dims = np.zeros(self.NUM_FEATS, dtype=np.uint32)
            for i, fm in feat_mapper.items():
                # +1 reserves an index for the shared "infrequent" bucket.
                field_dims[i - 1] = len(fm) + 1
            with env.begin(write=True) as txn:
                txn.put(b'field_dims', field_dims.tobytes())
            for buffer in self.__yield_buffer(path, feat_mapper, defaults):
                with env.begin(write=True) as txn:
                    for key, value in buffer:
                        txn.put(key, value)
    def __get_feat_mapper(self, path):
        """Count feature occurrences and map frequent values to dense indices."""
        feat_cnts = defaultdict(lambda: defaultdict(int))
        with open(path) as f:
            f.readline()  # skip the CSV header
            pbar = tqdm(f, mininterval=1, smoothing=0.1)
            pbar.set_description('Create avazu dataset cache: counting features')
            for line in pbar:
                values = line.rstrip('\n').split(',')
                if len(values) != self.NUM_FEATS + 2:
                    continue  # skip malformed rows
                for i in range(1, self.NUM_FEATS + 1):
                    feat_cnts[i][values[i + 1]] += 1
        # Keep only values seen at least min_threshold times.
        feat_mapper = {i: {feat for feat, c in cnt.items() if c >= self.min_threshold} for i, cnt in feat_cnts.items()}
        feat_mapper = {i: {feat: idx for idx, feat in enumerate(cnt)} for i, cnt in feat_mapper.items()}
        # Unseen/infrequent values all map to the last index of each field.
        defaults = {i: len(cnt) for i, cnt in feat_mapper.items()}
        return feat_mapper, defaults
    def __yield_buffer(self, path, feat_mapper, defaults, buffer_size=int(1e5)):
        """Yield (key, packed-record) batches of at most buffer_size entries."""
        item_idx = 0
        buffer = list()
        with open(path) as f:
            f.readline()  # skip the CSV header
            pbar = tqdm(f, mininterval=1, smoothing=0.1)
            pbar.set_description('Create avazu dataset cache: setup lmdb')
            for line in pbar:
                values = line.rstrip('\n').split(',')
                if len(values) != self.NUM_FEATS + 2:
                    continue
                np_array = np.zeros(self.NUM_FEATS + 1, dtype=np.uint32)
                np_array[0] = int(values[1])  # click label
                for i in range(1, self.NUM_FEATS + 1):
                    np_array[i] = feat_mapper[i].get(values[i+1], defaults[i])
                buffer.append((struct.pack('>I', item_idx), np_array.tobytes()))
                item_idx += 1
                if item_idx % buffer_size == 0:
                    yield buffer
                    buffer.clear()
        yield buffer
# ==== DCAP-main/dataset/frappe.py ====
import numpy as np
import pandas as pd
import torch.utils.data
class FrappeDataset(torch.utils.data.Dataset):
    """
    Frappe Dataset

    Data preparation
        treat apps with a rating less than 3 as negative samples

    :param dataset_path: frappe dataset path (directory with frappe.csv / meta.csv)
    """
    def __init__(self, dataset_path):
        super().__init__()
        df = pd.read_csv(dataset_path+'/frappe.csv', sep="\t")
        print("In the frappe data set we have {} entries by {} users for {} apps".format(len(df), len(df.user.unique()), len(df.item.unique())))
        meta_app = pd.read_csv(dataset_path+'/meta.csv', sep="\t")
        df = df.merge(meta_app, on='item')
        # Drop rows whose rating is unknown before extracting targets.
        df = df[df["rating"]!='unknown']
        self.targets = df["rating"].to_numpy().astype(np.float32)
        self.FEATS = ["user", "item", "daytime", "weekday", "isweekend", "homework", "cost", "weather", "country", "city"]
        df = df[self.FEATS]
        self.items = self.__preprocess_items(df)
        # np.int / np.long were removed from NumPy (>=1.24); use explicit int64.
        self.items = self.items.to_numpy().astype(np.int64)
        self.targets = self.__preprocess_target(self.targets).astype(np.float32)
        self.field_dims = np.max(self.items, axis=0) + 1
        self.user_field_idx = np.array((0, ), dtype=np.int64)
        self.item_field_idx = np.array((1,), dtype=np.int64)
    def __len__(self):
        return self.targets.shape[0]
    def __getitem__(self, index):
        return self.items[index], self.targets[index]
    def __preprocess_target(self, target):
        """Binarize ratings: <= 3 becomes 0 (negative), > 3 becomes 1 (positive)."""
        target[target <= 3] = 0
        target[target > 3] = 1
        return target
    def __preprocess_items(self, df):
        """Label-encode the categorical columns.

        user/item/city are left as-is — presumably already integer IDs; verify
        against the raw CSV.
        """
        for feature in ["daytime", "weekday", "isweekend", "homework", "cost", "weather", "country"]:
            df[feature] = pd.factorize(df[feature])[0]
        return df
# ==== DCAP-main/dataset/criteo.py ====
import math
import shutil
import struct
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
import lmdb
import numpy as np
import torch.utils.data
from tqdm import tqdm
class CriteoDataset(torch.utils.data.Dataset):
    """
    Criteo Display Advertising Challenge Dataset

    Data prepration:
        * Remove the infrequent features (appearing in less than threshold instances) and treat them as a single feature
        * Discretize numerical values by log2 transformation which is proposed by the winner of Criteo Competition

    :param dataset_path: criteo train.txt path.
    :param cache_path: lmdb cache path.
    :param rebuild_cache: If True, lmdb cache is refreshed.
    :param min_threshold: infrequent feature threshold.

    Reference:
        https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
        https://www.csie.ntu.edu.tw/~r01922136/kaggle-2014-criteo.pdf
    """
    def __init__(self, dataset_path=None, cache_path='.criteo', rebuild_cache=False, min_threshold=10):
        self.NUM_FEATS = 39
        self.NUM_INT_FEATS = 13
        self.min_threshold = min_threshold
        if rebuild_cache or not Path(cache_path).exists():
            shutil.rmtree(cache_path, ignore_errors=True)
            if dataset_path is None:
                raise ValueError('create cache: failed: dataset_path is None')
            self.__build_cache(dataset_path, cache_path)
        self.env = lmdb.open(cache_path, create=False, lock=False, readonly=True)
        with self.env.begin(write=False) as txn:
            # One extra entry stores field_dims, hence the -1.
            self.length = txn.stat()['entries'] - 1
            self.field_dims = np.frombuffer(txn.get(b'field_dims'), dtype=np.uint32)
    def __getitem__(self, index):
        """Return (features, label); record layout is [label, feat_1..feat_39]."""
        with self.env.begin(write=False) as txn:
            # np.long was removed from NumPy (>=1.24); use explicit int64.
            np_array = np.frombuffer(
                txn.get(struct.pack('>I', index)), dtype=np.uint32).astype(dtype=np.int64)
        return np_array[1:], np_array[0]
    def __len__(self):
        return int(self.length)
    def __build_cache(self, path, cache_path):
        """Build the lmdb cache: field sizes plus one packed record per sample."""
        feat_mapper, defaults = self.__get_feat_mapper(path)
        with lmdb.open(cache_path, map_size=int(1e11)) as env:
            field_dims = np.zeros(self.NUM_FEATS, dtype=np.uint32)
            for i, fm in feat_mapper.items():
                # +1 reserves an index for the shared "infrequent" bucket.
                field_dims[i - 1] = len(fm) + 1
            with env.begin(write=True) as txn:
                txn.put(b'field_dims', field_dims.tobytes())
            for buffer in self.__yield_buffer(path, feat_mapper, defaults):
                with env.begin(write=True) as txn:
                    for key, value in buffer:
                        txn.put(key, value)
    def __get_feat_mapper(self, path):
        """Count feature occurrences and map frequent values to dense indices."""
        feat_cnts = defaultdict(lambda: defaultdict(int))
        with open(path) as f:
            pbar = tqdm(f, mininterval=1, smoothing=0.1)
            pbar.set_description('Create criteo dataset cache: counting features')
            for line in pbar:
                values = line.rstrip('\n').split('\t')
                if len(values) != self.NUM_FEATS + 1:
                    continue  # skip malformed rows
                # Integer fields are bucketized by log2 first.
                for i in range(1, self.NUM_INT_FEATS + 1):
                    feat_cnts[i][convert_numeric_feature(values[i])] += 1
                for i in range(self.NUM_INT_FEATS + 1, self.NUM_FEATS + 1):
                    feat_cnts[i][values[i]] += 1
        # Keep only values seen at least min_threshold times.
        feat_mapper = {i: {feat for feat, c in cnt.items() if c >= self.min_threshold} for i, cnt in feat_cnts.items()}
        feat_mapper = {i: {feat: idx for idx, feat in enumerate(cnt)} for i, cnt in feat_mapper.items()}
        # Unseen/infrequent values all map to the last index of each field.
        defaults = {i: len(cnt) for i, cnt in feat_mapper.items()}
        return feat_mapper, defaults
    def __yield_buffer(self, path, feat_mapper, defaults, buffer_size=int(1e5)):
        """Yield (key, packed-record) batches of at most buffer_size entries."""
        item_idx = 0
        buffer = list()
        with open(path) as f:
            pbar = tqdm(f, mininterval=1, smoothing=0.1)
            pbar.set_description('Create criteo dataset cache: setup lmdb')
            for line in pbar:
                values = line.rstrip('\n').split('\t')
                if len(values) != self.NUM_FEATS + 1:
                    continue
                np_array = np.zeros(self.NUM_FEATS + 1, dtype=np.uint32)
                np_array[0] = int(values[0])  # click label
                for i in range(1, self.NUM_INT_FEATS + 1):
                    np_array[i] = feat_mapper[i].get(convert_numeric_feature(values[i]), defaults[i])
                for i in range(self.NUM_INT_FEATS + 1, self.NUM_FEATS + 1):
                    np_array[i] = feat_mapper[i].get(values[i], defaults[i])
                buffer.append((struct.pack('>I', item_idx), np_array.tobytes()))
                item_idx += 1
                if item_idx % buffer_size == 0:
                    yield buffer
                    buffer.clear()
        yield buffer
@lru_cache(maxsize=None)
def convert_numeric_feature(val: str):
    """Map a raw numeric Criteo field to a coarse categorical bucket.

    '' -> 'NULL'; values > 2 -> str(int(log(v)**2)); otherwise str(v - 2).
    Cached because raw values repeat heavily across rows.
    """
    if not val:
        return 'NULL'
    number = int(val)
    return str(int(math.log(number) ** 2)) if number > 2 else str(number - 2)
# ==== DCAP-main/dataset/movielens.py ====
import numpy as np
import pandas as pd
import torch.utils.data
class MovieLens20MDataset(torch.utils.data.Dataset):
    """
    MovieLens 20M Dataset

    Data preparation
        treat samples with a rating less than 3 as negative samples

    :param dataset_path: MovieLens dataset path

    Reference:
        https://grouplens.org/datasets/movielens
    """
    def __init__(self, dataset_path, sep=',', engine='c', header='infer'):
        data = pd.read_csv(dataset_path, sep=sep, engine=engine, header=header).to_numpy()[:, :3]
        # np.int / np.long were removed from NumPy (>=1.24); use explicit int64.
        self.items = data[:, :2].astype(np.int64) - 1  # -1 because ID begins from 1
        self.targets = self.__preprocess_target(data[:, 2]).astype(np.float32)
        self.field_dims = np.max(self.items, axis=0) + 1
        self.user_field_idx = np.array((0, ), dtype=np.int64)
        self.item_field_idx = np.array((1,), dtype=np.int64)
    def __len__(self):
        return self.targets.shape[0]
    def __getitem__(self, index):
        return self.items[index], self.targets[index]
    def __preprocess_target(self, target):
        """Binarize ratings: <= 3 becomes 0 (negative), > 3 becomes 1 (positive)."""
        target[target <= 3] = 0
        target[target > 3] = 1
        return target
class MovieLens1MDataset(torch.utils.data.Dataset):
    """
    MovieLens 1M Dataset

    Data preparation
        treat samples with a rating less than 3 as negative samples

    :param dataset_path: MovieLens dataset path (directory with ratings.dat / users.dat)

    Reference:
        https://grouplens.org/datasets/movielens
    """
    def __init__(self, dataset_path):
        super().__init__()
        ratings_info = pd.read_csv(dataset_path+'/ratings.dat', sep='::', engine='python', header=None)
        self.items = ratings_info.iloc[:, :2]  # user id, movie id
        self.targets = ratings_info.iloc[:, 2].to_numpy()
        users_info = pd.read_csv(dataset_path+'/users.dat', sep='::', engine='python', header=None)
        # Join user demographic columns onto each rating row.
        self.items = self.items.merge(users_info.iloc[:, :4], on=0)
        self.items.iloc[:, :2] -= 1  # -1 because ID begins from 1
        # Map the gender column to integers. Use .loc: chained indexing
        # (df[col][mask] = v) may assign to a copy and silently do nothing.
        self.items.loc[self.items['1_y'] == 'M', '1_y'] = 0
        self.items.loc[self.items['1_y'] == 'F', '1_y'] = 1
        # np.int / np.long were removed from NumPy (>=1.24); use explicit int64.
        self.items = self.items.to_numpy().astype(np.int64)
        self.targets = self.__preprocess_target(self.targets).astype(np.float32)
        self.field_dims = np.max(self.items, axis=0) + 1
        self.user_field_idx = np.array((0, ), dtype=np.int64)
        self.item_field_idx = np.array((1,), dtype=np.int64)
    def __len__(self):
        return self.targets.shape[0]
    def __getitem__(self, index):
        return self.items[index], self.targets[index]
    def __preprocess_target(self, target):
        """Binarize ratings: <= 3 becomes 0 (negative), > 3 becomes 1 (positive)."""
        target[target <= 3] = 0
        target[target > 3] = 1
        return target
# ==== DCAP-main/model/dcn.py ====
import torch
from torchfm.layer import FeaturesEmbedding, CrossNetwork, MultiLayerPerceptron
class DeepCrossNetworkModel(torch.nn.Module):
    """
    A pytorch implementation of Deep & Cross Network.

    The flattened feature embeddings are fed in parallel through a cross
    network and an MLP; both outputs are concatenated and projected to a
    single click probability.

    Reference:
        R Wang, et al. Deep & Cross Network for Ad Click Predictions, 2017.
    """
    def __init__(self, field_dims, embed_dim, num_layers, mlp_dims, dropout):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.cn = CrossNetwork(self.embed_output_dim, num_layers)
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=False)
        self.linear = torch.nn.Linear(mlp_dims[-1] + self.embed_output_dim, 1)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        flat_embed = self.embedding(x).view(-1, self.embed_output_dim)
        cross_out = self.cn(flat_embed)
        deep_out = self.mlp(flat_embed)
        combined = torch.cat([cross_out, deep_out], dim=1)
        logit = self.linear(combined)
        return torch.sigmoid(logit.squeeze(1))
# ==== DCAP-main/model/fnn.py ====
import torch
from torchfm.layer import FeaturesEmbedding, MultiLayerPerceptron
class FactorizationSupportedNeuralNetworkModel(torch.nn.Module):
    """
    A pytorch implementation of Neural Factorization Machine.

    Feature embeddings are flattened and passed through an MLP whose final
    scalar output is squashed to a click probability.

    Reference:
        W Zhang, et al. Deep Learning over Multi-field Categorical Data - A Case Study on User Response Prediction, 2016.
    """
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        flat_embed = self.embedding(x).view(-1, self.embed_output_dim)
        logit = self.mlp(flat_embed).squeeze(1)
        return torch.sigmoid(logit)
# ==== DCAP-main/model/ffm.py ====
import torch
from torchfm.layer import FeaturesLinear, FieldAwareFactorizationMachine
class FieldAwareFactorizationMachineModel(torch.nn.Module):
    """
    A pytorch implementation of Field-aware Factorization Machine.

    The prediction is the sum of a linear term and the FFM pairwise
    interaction term, squashed through a sigmoid.

    Reference:
        Y Juan, et al. Field-aware Factorization Machines for CTR Prediction, 2015.
    """
    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        # Reduce the pairwise interactions to one scalar per sample.
        interaction = self.ffm(x).sum(dim=1).sum(dim=1, keepdim=True)
        logit = self.linear(x) + interaction
        return torch.sigmoid(logit.squeeze(1))
# ==== DCAP-main/model/wd.py ====
import torch
from torchfm.layer import FeaturesLinear, MultiLayerPerceptron, FeaturesEmbedding
class WideAndDeepModel(torch.nn.Module):
    """
    A pytorch implementation of wide and deep learning.

    The wide part is a linear model over the raw fields; the deep part is an
    MLP over the flattened embeddings. Their logits are summed.

    Reference:
        HT Cheng, et al. Wide & Deep Learning for Recommender Systems, 2016.
    """
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        dense = self.embedding(x).view(-1, self.embed_output_dim)
        logit = self.linear(x) + self.mlp(dense)
        return torch.sigmoid(logit.squeeze(1))
# ==== DCAP-main/model/ncf.py ====
import torch
from torchfm.layer import FeaturesEmbedding, MultiLayerPerceptron
class NeuralCollaborativeFiltering(torch.nn.Module):
    """
    A pytorch implementation of Neural Collaborative Filtering.

    Combines a GMF branch (element-wise product of user and item embeddings)
    with an MLP branch over all flattened embeddings.

    Reference:
        X He, et al. Neural Collaborative Filtering, 2017.
    """
    def __init__(self, field_dims, user_field_idx, item_field_idx, embed_dim, mlp_dims, dropout):
        super().__init__()
        self.user_field_idx = user_field_idx
        self.item_field_idx = item_field_idx
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=False)
        self.fc = torch.nn.Linear(mlp_dims[-1] + embed_dim, 1)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_user_fields)``
        """
        embeds = self.embedding(x)
        user_vec = embeds[:, self.user_field_idx].squeeze(1)
        item_vec = embeds[:, self.item_field_idx].squeeze(1)
        deep_out = self.mlp(embeds.view(-1, self.embed_output_dim))
        gmf_out = user_vec * item_vec
        fused = torch.cat([gmf_out, deep_out], dim=1)
        logit = self.fc(fused).squeeze(1)
        return torch.sigmoid(logit)
# ==== DCAP-main/model/dcan.py ====
import torch
from torchfm.layer import (
FeaturesEmbedding,
FeaturesLinear,
MultiLayerPerceptron
)
from torchfm.attention_layer import CrossAttentionNetwork
class DeepCrossAttentionalNetworkModel(torch.nn.Module):
    """
    A pytorch implementation of Multihead Attention Factorization Machine Model.
    Reference: on going
    """
    def __init__(self, field_dims, embed_dim, attn_embed_dim, num_heads, ffn_embed_dim, num_layers, mlp_dims, dropout):
        super().__init__()
        self.num_fields = len(field_dims)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        # Projects each field embedding into the attention space.
        self.attn_embedding = torch.nn.Linear(embed_dim, attn_embed_dim)
        self.attn_output_dim = len(field_dims) * attn_embed_dim
        self.linear = FeaturesLinear(field_dims)
        self.can = CrossAttentionNetwork(self.num_fields, attn_embed_dim, num_heads, ffn_embed_dim, num_layers, dropout)
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=True)
        # Maps the flattened attention output to a scalar logit.
        self.fc = torch.nn.Linear(len(field_dims) * attn_embed_dim, 1)
        self._reset_parameters()
    def generate_square_subsequent_mask(self, num_fields):
        r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
        Unmasked positions are filled with float(0.0).
        """
        mask = (torch.triu(torch.ones(num_fields, num_fields)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask
    def _reset_parameters(self):
        r"""Initiate parameters in the transformer model."""
        # Xavier init for every matrix-shaped parameter; vectors keep defaults.
        for p in self.parameters():
            if p.dim() > 1:
                torch.nn.init.xavier_uniform_(p)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``

        Returns a probability tensor of size ``(batch_size,)``.
        """
        # NOTE(review): the causal mask is disabled — attention is unmasked.
        # device = x.device
        # attn_mask = self.generate_square_subsequent_mask(x.size(1)).to(device)
        embed_x = self.embedding(x)
        attn_embed_x = self.attn_embedding(embed_x)
        # CrossAttentionNetwork expects (num_fields, batch, attn_embed_dim).
        cross_term = attn_embed_x.transpose(0, 1)
        x_l1 = self.can(cross_term, attn_mask=None)
        h_l2 = self.mlp(embed_x.view(-1, self.embed_output_dim))
        x_l1 = x_l1.contiguous().view(-1, self.attn_output_dim)
        # Final logit = linear + attention branch + deep branch.
        x = self.linear(x) + self.fc(x_l1) + h_l2
        return torch.sigmoid(x.squeeze(1))
# ==== DCAP-main/model/afn.py ====
import math
import torch
import torch.nn.functional as F
from torchfm.layer import FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
class LNN(torch.nn.Module):
    """
    A pytorch implementation of LNN layer

    Input shape
        - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.

    Output shape
        - 2D tensor with shape:``(batch_size,LNN_dim*embedding_size)``.

    Arguments
        - **in_features** : Embedding of feature.
        - **num_fields**: int.The field size of feature.
        - **LNN_dim**: int.The number of Logarithmic neuron.
        - **bias**: bool.Whether or not use bias in LNN.
    """
    def __init__(self, num_fields, embed_dim, LNN_dim, bias=False):
        super().__init__()
        self.num_fields = num_fields
        self.embed_dim = embed_dim
        self.LNN_dim = LNN_dim
        self.lnn_output_dim = LNN_dim * embed_dim
        # Each logarithmic neuron learns one weight per input field.
        self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniform init in +/- 1/sqrt(fan_in), matching classic linear-layer init."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields, embedding_size)``
        """
        # Work in log space: |x| + eps keeps log1p defined for all inputs.
        safe_abs = torch.abs(x) + 1e-7
        log_feats = torch.log1p(safe_abs)
        # Weighted sums of logs == products of powers once mapped back.
        neuron_out = torch.matmul(self.weight, log_feats)
        if self.bias is not None:
            neuron_out += self.bias
        back = torch.expm1(neuron_out)
        return torch.relu(back).contiguous().view(-1, self.lnn_output_dim)
class AdaptiveFactorizationNetwork(torch.nn.Module):
    """
    A pytorch implementation of AFN.

    A logarithmic neural network (LNN) learns adaptive-order feature
    interactions over the embeddings; its output feeds an MLP, whose logit is
    added to a linear term.

    Reference:
        Cheng W, et al. Adaptive Factorization Network: Learning Adaptive-Order Feature Interactions, 2019.
    """
    def __init__(self, field_dims, embed_dim, LNN_dim, mlp_dims, dropouts):
        super().__init__()
        self.num_fields = len(field_dims)
        self.linear = FeaturesLinear(field_dims)  # Linear
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # Embedding
        self.LNN_dim = LNN_dim
        self.LNN_output_dim = self.LNN_dim * embed_dim
        self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
        self.mlp = MultiLayerPerceptron(self.LNN_output_dim, mlp_dims, dropouts[0])
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        interactions = self.LNN(self.embedding(x))
        logit = self.linear(x) + self.mlp(interactions)
        return torch.sigmoid(logit.squeeze(1))
# ==== DCAP-main/model/fnfm.py ====
import torch
from torchfm.layer import FieldAwareFactorizationMachine, MultiLayerPerceptron, FeaturesLinear
class FieldAwareNeuralFactorizationMachineModel(torch.nn.Module):
    """
    A pytorch implementation of Field-aware Neural Factorization Machine.

    FFM pairwise interactions are batch-normalized, dropped out, fed through
    an MLP, and summed with a linear term.

    Reference:
        L Zhang, et al. Field-aware Neural Factorization Machine for Click-Through Rate Prediction, 2019.
    """
    def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)
        # One embed_dim-sized vector per unordered field pair.
        self.ffm_output_dim = len(field_dims) * (len(field_dims) - 1) // 2 * embed_dim
        self.bn = torch.nn.BatchNorm1d(self.ffm_output_dim)
        self.dropout = torch.nn.Dropout(dropouts[0])
        self.mlp = MultiLayerPerceptron(self.ffm_output_dim, mlp_dims, dropouts[1])
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        interactions = self.ffm(x).view(-1, self.ffm_output_dim)
        interactions = self.dropout(self.bn(interactions))
        logit = self.linear(x) + self.mlp(interactions)
        return torch.sigmoid(logit.squeeze(1))
# ==== DCAP-main/model/dcap.py ====
import torch
from torchfm.layer import FeaturesEmbedding, FeaturesLinear, CrossAttentionalProductNetwork, MultiLayerPerceptron
class DeepCrossAttentionalProductNetwork(torch.nn.Module):
    """
    A pytorch implementation of inner/outer Product Neural Network.
    Reference:
        Y Qu, et al. Product-based Neural Networks for User Response Prediction, 2016.
    """
    def __init__(self, field_dims, embed_dim, num_heads, num_layers, mlp_dims, dropouts):
        super().__init__()
        num_fields = len(field_dims)
        # Cross-attentional product network producing pairwise interaction scores.
        self.cap = CrossAttentionalProductNetwork(num_fields, embed_dim, num_heads, num_layers, dropouts[0])
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.num_layers = num_layers
        # self.linear = FeaturesLinear(field_dims)
        self.embed_output_dim = num_fields * embed_dim
        # One score per unordered field pair per layer.
        self.attn_output_dim = num_layers * num_fields * (num_fields - 1) // 2
        # self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=True)
        # self.linear = torch.nn.Linear(mlp_dims[-1] + num_layers * (num_fields + 1) * num_fields // 2, 1)
        # MLP consumes raw embeddings concatenated with the interaction scores.
        self.mlp = MultiLayerPerceptron(self.attn_output_dim + self.embed_output_dim, mlp_dims, dropouts[1])
        # self._reset_parameters()
    def generate_square_subsequent_mask(self, num_fields):
        r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
        Unmasked positions are filled with float(0.0).

        NOTE(review): unlike the usual subsequent-mask recipe there is no
        ``.transpose(0, 1)`` here, so the *lower* triangle (excluding the
        diagonal) is -inf — confirm the orientation matches what the attention
        module expects.
        """
        mask = (torch.triu(torch.ones(num_fields, num_fields)) == 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask
    # def _reset_parameters(self):
    #     r"""Initiate parameters in the transformer model."""
    #     for p in self.parameters():
    #         if p.dim() > 1:
    #             torch.nn.init.xavier_uniform_(p)
    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``

        Returns a probability tensor of size ``(batch_size,)``.
        """
        device = x.device
        attn_mask = self.generate_square_subsequent_mask(x.size(1)).to(device)
        embed_x = self.embedding(x)
        # Pairwise interaction scores from the cross-attentional product network.
        cross_term = self.cap(embed_x, attn_mask)
        # y = self.mlp_attn(cross_term.view(-1, self.attn_output_dim))
        # x = y + self.mlp(embed_x.view(-1, self.embed_output_dim))
        # y = self.mlp(embed_x.view(-1, self.embed_output_dim))
        # Concatenate flattened embeddings with interaction scores for the MLP.
        y = torch.cat([embed_x.view(-1, self.embed_output_dim), cross_term], dim=1)
        # y = torch.cat([embed_x.view(-1, self.embed_output_dim), cross_term], dim=1)
        x = self.mlp(y)
        # y = torch.cat([cross_term, y], dim=1)
        # x = self.linear(y)
        # x = self.mlp(embed_x.view(-1, self.embed_output_dim)) + torch.sum(cross_term, dim=1, keepdim=True)
        # print(x.size())
        return torch.sigmoid(x.squeeze(1))
# ==== DCAP-main/model/afi.py ====
import torch
import torch.nn.functional as F
from torchfm.layer import FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
class AutomaticFeatureInteractionModel(torch.nn.Module):
    """AutoInt: learns feature interactions by running stacked multi-head
    self-attention over the per-field embeddings, combined with a linear
    term and a deep MLP.

    Reference:
        W Song, et al. AutoInt: Automatic Feature Interaction Learning via
        Self-Attentive Neural Networks, 2018.
    """

    def __init__(self, field_dims, embed_dim, atten_embed_dim, num_heads, num_layers, mlp_dims, dropouts, has_residual=True):
        super().__init__()
        num_fields = len(field_dims)
        self.num_fields = num_fields
        self.linear = FeaturesLinear(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        # Projects embeddings into the attention space.
        self.atten_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
        self.embed_output_dim = num_fields * embed_dim
        self.atten_output_dim = num_fields * atten_embed_dim
        self.has_residual = has_residual
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropouts[1])
        self.self_attns = torch.nn.ModuleList(
            torch.nn.MultiheadAttention(atten_embed_dim, num_heads, dropout=dropouts[0])
            for _ in range(num_layers)
        )
        self.attn_fc = torch.nn.Linear(self.atten_output_dim, 1)
        if self.has_residual:
            self.V_res_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        embed_x = self.embedding(x)
        # MultiheadAttention expects (seq_len, batch, dim): swap axes in/out.
        attn_out = self.atten_embedding(embed_x).transpose(0, 1)
        for layer in self.self_attns:
            attn_out, _ = layer(attn_out, attn_out, attn_out)
        attn_out = attn_out.transpose(0, 1)
        if self.has_residual:
            attn_out = attn_out + self.V_res_embedding(embed_x)
        flat_attn = F.relu(attn_out).contiguous().view(-1, self.atten_output_dim)
        logit = self.linear(x) + self.attn_fc(flat_attn) + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(logit.squeeze(1))
| 2,157 | 43.040816 | 125 | py |
DCAP | DCAP-main/model/nfm.py | import torch
from torchfm.layer import FactorizationMachine, FeaturesEmbedding, MultiLayerPerceptron, FeaturesLinear
class NeuralFactorizationMachineModel(torch.nn.Module):
    """NFM: feeds the FM bi-interaction pooling vector through an MLP on
    top of a linear term.

    Reference:
        X He and TS Chua, Neural Factorization Machines for Sparse Predictive Analytics, 2017.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        # Bi-interaction pooling, then batch-norm and dropout.
        self.fm = torch.nn.Sequential(
            FactorizationMachine(reduce_sum=False),
            torch.nn.BatchNorm1d(embed_dim),
            torch.nn.Dropout(dropouts[0]),
        )
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_dims, dropouts[1])

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        pooled = self.fm(self.embedding(x))
        logit = self.linear(x) + self.mlp(pooled)
        return torch.sigmoid(logit.squeeze(1))
| 1,096 | 33.28125 | 103 | py |
DCAP | DCAP-main/model/hofm.py | import torch
from torchfm.layer import FeaturesLinear, FactorizationMachine, AnovaKernel, FeaturesEmbedding
class HighOrderFactorizationMachineModel(torch.nn.Module):
    """HOFM: linear term + pairwise FM + one ANOVA kernel per interaction
    order >= 3, each order consuming its own ``embed_dim``-wide slice of
    the embedding.

    Reference:
        M Blondel, et al. Higher-Order Factorization Machines, 2016.
    """

    def __init__(self, field_dims, order, embed_dim):
        super().__init__()
        if order < 1:
            raise ValueError(f'invalid order: {order}')
        self.order = order
        self.embed_dim = embed_dim
        self.linear = FeaturesLinear(field_dims)
        if order >= 2:
            # One embed_dim-wide chunk per interaction order above 1.
            self.embedding = FeaturesEmbedding(field_dims, embed_dim * (order - 1))
            self.fm = FactorizationMachine(reduce_sum=True)
        if order >= 3:
            self.kernels = torch.nn.ModuleList(
                AnovaKernel(order=degree, reduce_sum=True) for degree in range(3, order + 1)
            )

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        y = self.linear(x).squeeze(1)
        if self.order >= 2:
            embedded = self.embedding(x)
            y = y + self.fm(embedded[:, :, :self.embed_dim]).squeeze(1)
            if self.order >= 3:
                for idx, kernel in enumerate(self.kernels):
                    lo = (idx + 1) * self.embed_dim
                    y = y + kernel(embedded[:, :, lo:lo + self.embed_dim]).squeeze(1)
        return torch.sigmoid(y)
| 1,473 | 34.095238 | 94 | py |
DCAP | DCAP-main/model/pnn.py | import torch
from torchfm.layer import FeaturesEmbedding, FeaturesLinear, InnerProductNetwork, \
OuterProductNetwork, MultiLayerPerceptron
class ProductNeuralNetworkModel(torch.nn.Module):
    """PNN: concatenates raw field embeddings with pairwise product
    features (inner or outer) and classifies with an MLP.

    Reference:
        Y Qu, et al. Product-based Neural Networks for User Response Prediction, 2016.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropout, method='inner'):
        super().__init__()
        num_fields = len(field_dims)
        if method == 'inner':
            self.pn = InnerProductNetwork()
        elif method == 'outer':
            self.pn = OuterProductNetwork(num_fields, embed_dim)
        else:
            raise ValueError('unknown product type: ' + method)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        # NOTE: self.linear is constructed but never used in forward();
        # kept to preserve the original module layout.
        self.linear = FeaturesLinear(field_dims, embed_dim)
        self.embed_output_dim = num_fields * embed_dim
        num_pairs = num_fields * (num_fields - 1) // 2
        self.mlp = MultiLayerPerceptron(num_pairs + self.embed_output_dim, mlp_dims, dropout)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        embed_x = self.embedding(x)
        products = self.pn(embed_x)
        features = torch.cat([embed_x.view(-1, self.embed_output_dim), products], dim=1)
        return torch.sigmoid(self.mlp(features).squeeze(1))
| 1,421 | 37.432432 | 118 | py |
DCAP | DCAP-main/model/mhafm.py | import torch
from torchfm.layer import FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
from torchfm.attention_layer import CrossAttentionalProductNetwork
class MultiheadAttentionalFactorizationMachineModel(torch.nn.Module):
    """Multihead-attentional FM: cross-attentional product features are
    concatenated with the flat embeddings and fed to an MLP on top of a
    linear term.

    Reference: on going
    """

    def __init__(self, field_dims, embed_dim, attn_embed_dim, num_heads, ffn_embed_dim, num_layers, mlp_dims, dropout):
        super().__init__()
        num_fields = len(field_dims)
        self.num_fields = num_fields
        self.embed_output_dim = num_fields * embed_dim
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        self.mhafm = CrossAttentionalProductNetwork(num_fields, embed_dim, num_heads, ffn_embed_dim, num_layers, dropout)
        cross_dim = num_layers * num_fields * (num_fields + 1) // 2
        self.mlp = MultiLayerPerceptron(cross_dim + self.embed_output_dim, mlp_dims, dropout)
        self._reset_parameters()

    def generate_square_subsequent_mask(self, num_fields):
        """Square attention mask: 0.0 where attention is allowed (upper
        triangle including the diagonal), ``-inf`` everywhere else."""
        allowed = torch.triu(torch.ones(num_fields, num_fields)) == 1
        mask = torch.full((num_fields, num_fields), float('-inf'))
        return mask.masked_fill(allowed, 0.0)

    def _reset_parameters(self):
        """Xavier-initialise every parameter with more than one dimension."""
        for param in self.parameters():
            if param.dim() > 1:
                torch.nn.init.xavier_uniform_(param)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        attn_mask = self.generate_square_subsequent_mask(x.size(1)).to(x.device)
        embed_x = self.embedding(x)
        cross = self.mhafm(embed_x, attn_mask).squeeze(-1)
        features = torch.cat([embed_x.view(-1, self.embed_output_dim), cross], dim=1)
        logit = self.linear(x) + self.mlp(features)
        return torch.sigmoid(logit.squeeze(1))
| 2,488 | 45.092593 | 141 | py |
DCAP | DCAP-main/model/dfm.py | import torch
from torchfm.layer import FactorizationMachine, FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
class DeepFactorizationMachineModel(torch.nn.Module):
    """DeepFM: one shared embedding drives both an FM component and a
    deep MLP, combined with a linear term.

    Reference:
        H Guo, et al. DeepFM: A Factorization-Machine based Neural Network for CTR Prediction, 2017.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        embed_x = self.embedding(x)
        logit = self.linear(x) + self.fm(embed_x)
        logit = logit + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(logit.squeeze(1))
| 1,049 | 35.206897 | 103 | py |
DCAP | DCAP-main/model/lr.py | import torch
from torchfm.layer import FeaturesLinear
class LogisticRegressionModel(torch.nn.Module):
    """Plain logistic regression over sparse categorical features."""

    def __init__(self, field_dims):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        logit = self.linear(x).squeeze(1)
        return torch.sigmoid(logit)
| 461 | 22.1 | 66 | py |
DCAP | DCAP-main/model/xdfm.py | import torch
from torchfm.layer import CompressedInteractionNetwork, FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
class ExtremeDeepFactorizationMachineModel(torch.nn.Module):
    """xDeepFM: linear term + compressed interaction network (explicit
    interactions) + MLP (implicit interactions).

    Reference:
        J Lian, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems, 2018.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropout, cross_layer_sizes, split_half=True):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.cin = CompressedInteractionNetwork(len(field_dims), cross_layer_sizes, split_half)
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
        self.linear = FeaturesLinear(field_dims)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        embed_x = self.embedding(x)
        logit = self.linear(x) + self.cin(embed_x)
        logit = logit + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(logit.squeeze(1))
| 1,157 | 38.931034 | 115 | py |
DCAP | DCAP-main/model/fm.py | import torch
from torchfm.layer import FactorizationMachine, FeaturesEmbedding, FeaturesLinear
class FactorizationMachineModel(torch.nn.Module):
    """Second-order Factorization Machine.

    Reference:
        S Rendle, Factorization Machines, 2010.
    """

    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        logit = self.linear(x) + self.fm(self.embedding(x))
        return torch.sigmoid(logit.squeeze(1))
| 746 | 27.730769 | 81 | py |
DCAP | DCAP-main/model/afm.py | import torch
from torchfm.layer import FeaturesEmbedding, FeaturesLinear, AttentionalFactorizationMachine
class AttentionalFactorizationMachineModel(torch.nn.Module):
    """AFM: an FM whose pairwise interactions are re-weighted by a small
    attention network.

    Reference:
        J Xiao, et al. Attentional Factorization Machines: Learning the Weight of Feature Interactions via Attention Networks, 2017.
    """

    def __init__(self, field_dims, embed_dim, attn_size, dropouts):
        super().__init__()
        self.num_fields = len(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        self.afm = AttentionalFactorizationMachine(embed_dim, attn_size, dropouts)

    def forward(self, x):
        """:param x: Long tensor of size ``(batch_size, num_fields)``"""
        logit = self.linear(x) + self.afm(self.embedding(x))
        return torch.sigmoid(logit.squeeze(1))
| 956 | 34.444444 | 132 | py |
CropRowDetection | CropRowDetection-main/unet-rgbd/dataRGB.py | # -*- coding:utf-8 -*-
import glob
import os

import numpy as np
from keras.preprocessing.image import img_to_array, load_img
class dataProcess(object):
    """Builds and loads the .npy datasets used to train/evaluate the U-Net.

    Input images are loaded as RGB and labels as single-channel masks, both
    resized to ``out_rows x out_cols``. Fixes applied vs. the original:
    images are now resized to ``out_rows/out_cols`` instead of a hard-coded
    512x512 (which crashed for any other size), basenames are extracted
    portably with ``os.path.basename``, and the duplicated image/label
    loading loop is factored into ``_load_pairs``.
    """

    def __init__(self, out_rows, out_cols, data_path="./data/train/image", label_path="./data/train/label",
                 test_path="./data/test/image", testlabel_path="./data/test/label", npy_path="./npydata", img_type="jpg"):
        self.out_rows = out_rows
        self.out_cols = out_cols
        self.data_path = data_path
        self.label_path = label_path
        self.img_type = img_type
        self.test_path = test_path
        self.testlabel_path = testlabel_path
        self.npy_path = npy_path

    def _load_pairs(self, image_dir, label_dir):
        """Load every (image, label) pair found under the two directories.

        Labels are matched to images by file basename. Returns uint8 arrays
        of shape (n, rows, cols, 3) and (n, rows, cols, 1).
        """
        imgs = glob.glob(image_dir + "/*." + self.img_type)
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 3), dtype=np.uint8)
        imglabels = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for i, imgpath in enumerate(imgs):
            # os.path.basename also copes with Windows path separators.
            labelpath = label_dir + '/' + os.path.basename(imgpath)
            img = load_img(imgpath, color_mode='rgb', target_size=[self.out_rows, self.out_cols])
            label = load_img(labelpath, color_mode='grayscale', target_size=[self.out_rows, self.out_cols])
            imgdatas[i] = img_to_array(img)
            imglabels[i] = img_to_array(label)
            if i % 100 == 0:
                print('Done: {0}/{1} images'.format(i, len(imgs)))
        return imgdatas, imglabels

    def create_train_data(self):
        """Read the training images/labels and save them as .npy archives."""
        print('Creating training images...')
        imgdatas, imglabels = self._load_pairs(self.data_path, self.label_path)
        print('loading done')
        np.save(self.npy_path + '/imgs_train.npy', imgdatas)
        np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
        print('Saving to .npy files done.')

    def create_test_all(self):
        """Read the test images/labels and save them as .npy archives."""
        print('Creating testall images...')
        imgdatas, imglabels = self._load_pairs(self.test_path, self.testlabel_path)
        print('loading done')
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)
        np.save(self.npy_path + '/imgs_mask_test.npy', imglabels)
        print('Saving to .npy files done.')

    def create_test_data(self):
        """Read test images only, record their source paths, save imgs_test.npy."""
        print('Creating test images...')
        imgs = glob.glob(self.test_path + "/*." + self.img_type)
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 3), dtype=np.uint8)
        for i, testpath in enumerate(imgs):
            img = load_img(testpath, color_mode='rgb', target_size=[self.out_rows, self.out_cols])
            imgdatas[i] = img_to_array(img)
        # Keep the source path of every test image so predictions can later
        # be written back under the same file name (see save_img consumers).
        with open('./results/pic.txt', 'w') as f:
            for testpath in imgs:
                f.write(testpath + '\n')
        print('loading done')
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)
        print('Saving to imgs_test.npy files done.')

    def load_train_data(self):
        """Return normalised float32 training images and binarised masks."""
        print('load train images...')
        imgs_train = np.load(self.npy_path + "/imgs_train.npy").astype('float32') / 255
        imgs_mask_train = np.load(self.npy_path + "/imgs_mask_train.npy").astype('float32') / 255
        # Binarise the masks: above 0.5 is foreground (white), else black.
        imgs_mask_train[imgs_mask_train > 0.5] = 1
        imgs_mask_train[imgs_mask_train <= 0.5] = 0
        return imgs_train, imgs_mask_train

    def load_test_data(self):
        """Return normalised float32 test images."""
        print('-' * 30)
        print('load test images...')
        print('-' * 30)
        return np.load(self.npy_path + "/imgs_test.npy").astype('float32') / 255

    def load_test_labels(self):
        """Return normalised float32 test label masks (not binarised)."""
        print('-' * 30)
        print('load test label images...')
        print('-' * 30)
        return np.load(self.npy_path + "/imgs_mask_test.npy").astype('float32') / 255
if __name__ == "__main__":
mydata = dataProcess(512, 512)
mydata.create_train_data()
mydata.create_test_all()
mydata.create_test_data()
| 5,060 | 37.340909 | 122 | py |
CropRowDetection | CropRowDetection-main/unet-rgbd/unetRGB.py | # -*- coding:utf-8 -*-
import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import array_to_img
from tensorflow.keras import backend as K
import cv2
from data import *
#import tensorflow.experimental.numpy as np
import numpy as np
from sklearn.cluster import DBSCAN
from collections import defaultdict
import math
from PIL import Image
from matplotlib import pyplot as plt
from focal_loss import BinaryFocalLoss
class myUnet(object):
    """U-Net crop-row segmentation wrapper.

    Builds a 512x512 RGB U-Net, trains it on the dataProcess .npy datasets
    with a class-weighted BCE loss, tracks accuracy/IoU/angle-error metrics,
    and writes thresholded prediction masks to ./results.
    """

    def __init__(self, img_rows=512, img_cols=512):
        self.img_rows = img_rows
        self.img_cols = img_cols
        # DBSCAN clustering radii (in degrees) used by loss_angle.
        self.eps1 = 4
        self.eps2 = 3
        # Hough accumulator threshold used by loss_angle (second pass).
        self.htres = 100

    def load_data(self):
        """Return (train imgs, train masks, test imgs, test masks) from the .npy archives."""
        mydata = dataProcess(self.img_rows, self.img_cols)
        imgs_train, imgs_mask_train = mydata.load_train_data()
        imgs_test = mydata.load_test_data()
        imgs_mask_test = mydata.load_test_labels()
        return imgs_train, imgs_mask_train, imgs_test, imgs_mask_test

    def tensor_to_image(self, tensor):
        """Convert a [0, 1] float tensor to a uint8 PIL image, stripping a singleton batch axis."""
        tensor = tensor*255
        tensor = np.array(tensor, dtype=np.uint8)
        if np.ndim(tensor)>3:
            assert tensor.shape[0] == 1
            tensor = tensor[0]
        # NOTE(review): the file only does `from PIL import Image`, so the name
        # `PIL` is unbound here and this call would raise NameError if ever
        # invoked — confirm and switch to `Image.fromarray`.
        return PIL.Image.fromarray(tensor)

    def iou(self, y_true, y_pred):
        """Soft intersection-over-union over flattened masks, +1-smoothed to avoid 0/0."""
        y_true = tf.reshape(y_true, [-1])
        y_pred = tf.reshape(y_pred, [-1])
        intersection = tf.reduce_sum(tf.cast(y_true, tf.float32) * tf.cast(y_pred, tf.float32))
        score = (intersection + 1.) / (tf.reduce_sum(tf.cast(y_true, tf.float32)) + tf.reduce_sum(tf.cast(y_pred, tf.float32)) - intersection + 1.)
        return score

    def mylossiou(self, y_true, y_pred):
        """Blended loss: 0.9 * binary cross-entropy + 0.1 * (1 - soft IoU)."""
        bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        bcloss = bce(y_true, y_pred)
        y_true = tf.reshape(y_true, [-1])
        y_pred = tf.reshape(y_pred, [-1])
        intersection = tf.reduce_sum(tf.cast(y_true, tf.float32) * tf.cast(y_pred, tf.float32))
        # Soft IoU with +1 smoothing, same formula as self.iou.
        score = (intersection + 1.) / (tf.reduce_sum(tf.cast(y_true, tf.float32)) + tf.reduce_sum(tf.cast(y_pred, tf.float32)) - intersection + 1.)
        iouloss = 1-score
        ret = 0.9*bcloss + 0.1*iouloss
        #y_p = K.print_tensor(ret.get_shape().as_list(), message=' ret = ')
        return ret

    def myloss(self, y_true, y_pred):
        """Weighted cross-entropy with logits (weight constant 5.)."""
        loss=tf.nn.weighted_cross_entropy_with_logits(y_true, y_pred, 5.)
        return loss

    def weightedLoss(self, originalLossFunc, weightsList):
        """Wrap a loss so each pixel's loss is scaled by the weight of its
        ground-truth class (weightsList is indexed by class)."""
        def lossFunc(true, pred):
            axis = -1 #if channels last
            #axis= 1 #if channels first
            #argmax returns the index of the element with the greatest value
            #done in the class axis, it returns the class index
            classSelectors = K.argmax(true, axis=axis)
            #if your loss is sparse, use only true as classSelectors
            #considering weights are ordered by class, for each class
            #true(1) if the class index is equal to the weight index
            classSelectors = [K.equal(i, classSelectors) for i in range(len(weightsList))]
            #casting boolean to float for calculations
            #each tensor in the list contains 1 where ground true class is equal to its index
            #if you sum all these, you will get a tensor full of ones.
            classSelectors = [K.cast(x, K.floatx()) for x in classSelectors]
            #for each of the selections above, multiply their respective weight
            weights = [sel * w for sel,w in zip(classSelectors, weightsList)]
            #sums all the selections
            #result is a tensor with the respective weight for each element in predictions
            weightMultiplier = weights[0]
            for i in range(1, len(weights)):
                weightMultiplier = weightMultiplier + weights[i]
            #make sure your originalLossFunc only collapses the class axis
            #you need the other axes intact to multiply the weights tensor
            loss = originalLossFunc(true,pred)
            loss = loss * weightMultiplier
            return loss
        return lossFunc

    @tf.autograph.experimental.do_not_convert
    def loss_angle(self, y_true, y_pred):
        """Crop-row angle error metric (runs eagerly via numpy/cv2).

        Skeletonises ground-truth and predicted masks, extracts line angles
        with the Hough transform, clusters angles with DBSCAN, and returns
        the mean angular gap (degrees / 10) between paired ground-truth and
        predicted row angles; 0.0 when no pair exists.
        """
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)
        img = y_true.numpy()
        lbl = y_pred.numpy()
        img = np.reshape(img, (512,512,1))
        lbl = np.reshape(lbl, (512,512,1))
        img = img*255
        lbl = lbl*255
        #y_t = K.print_tensor(img, message='y_true = ')
        #y_p = K.print_tensor(lbl, message='y_pred = ')
        #img = tf.keras.preprocessing.image.array_to_img(img)
        #lbl = tf.keras.preprocessing.image.array_to_img(lbl)
        #img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        #lbl=cv2.cvtColor(lbl,cv2.COLOR_BGR2GRAY)
        #ret,img = cv2.threshold(img, 127, 255, 0)
        ret,lbl = cv2.threshold(lbl, 127, 255, 0)
        #y_p = K.print_tensor(lbl, message='y_pred2 = ')
        skel1 = np.zeros(img.shape, np.float32)
        skel2 = np.zeros(lbl.shape, np.float32)
        element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
        # Morphological skeletonisation of the ground-truth mask.
        while True:
            openImg = cv2.morphologyEx(img, cv2.MORPH_OPEN, element)
            temp = cv2.subtract(img, openImg)
            eroded = cv2.erode(img, element)
            skel1 = cv2.bitwise_or(skel1,temp)
            img = eroded.copy()
            if cv2.countNonZero(img)==0:
                break
        skel1 = np.array(skel1 * 255, dtype = np.uint8)
        # NOTE(review): this call uses the literal 100 while the prediction
        # pass below uses self.htres — presumably they were meant to match.
        lines = cv2.HoughLines(skel1,1,np.pi/180,100)
        if lines is None:#When no lines are found
            lines=np.zeros((1,1,2))
        tdeg = lines[:,:,1]*180/np.pi
        clustering = DBSCAN(eps=self.eps1, min_samples=1).fit(tdeg)
        clusters = defaultdict(list)
        slines1 = []
        for i,c in enumerate(clustering.labels_): # Sort Clusters into groups
            clusters[c].append(tdeg[i])
        for i,c in enumerate(clusters): # Select one candidate per cluster
            k=(max(list(clusters[i]))[0])//90
            slines1.append((k*max(list(clusters[i]))[0])+((1-k)*min(list(clusters[i]))[0]))#choose min if angle<90 or max if angle>90
        # Same skeletonisation + Hough + clustering pass for the prediction.
        while True:
            openImg = cv2.morphologyEx(lbl, cv2.MORPH_OPEN, element)
            temp = cv2.subtract(lbl, openImg)
            eroded = cv2.erode(lbl, element)
            skel2 = cv2.bitwise_or(skel2,temp)
            lbl = eroded.copy()
            if cv2.countNonZero(lbl)==0:
                break
        skel2 = np.array(skel2 * 255, dtype = np.uint8)
        lines = cv2.HoughLines(skel2,1,np.pi/180,self.htres)
        if lines is None:#When no lines are found
            lines=np.zeros((1,1,2))
        tdeg = lines[:,:,1]*180/np.pi
        clustering = DBSCAN(eps=self.eps1, min_samples=1).fit(tdeg)
        clusters = defaultdict(list)
        slines2 = []
        for i,c in enumerate(clustering.labels_): # Sort Clusters into groups
            clusters[c].append(tdeg[i])
        for i,c in enumerate(clusters): # Select one candidate per cluster
            k=(max(list(clusters[i]))[0])//90
            slines2.append((k*max(list(clusters[i]))[0])+((1-k)*min(list(clusters[i]))[0]))#choose min if angle<90 or max if angle>90
        # Pool both angle sets; clusters containing a GT and a predicted
        # angle yield an error term equal to their spread.
        slines = slines1+slines2
        slines = np.array(slines).reshape(-1,1)
        clustering = DBSCAN(eps=self.eps2, min_samples=1).fit(slines)
        clusters = defaultdict(list)
        for i,c in enumerate(clustering.labels_): # Sort Clusters into groups
            clusters[c].append(slines[i])
        error = []
        for i,c in enumerate(clusters): # Select one candidate per cluster
            if (len(list(clusters[i]))) > 1:
                error.append(max(clusters[i])-min(clusters[i]))
        error = np.array(error)
        e = (abs(error[error!=0].mean()))/10
        if math.isnan(e):
            e = 0.0
        #e = tf.convert_to_tensor(e)
        #y_p = K.print_tensor(e.type, message='etype = ')
        return e

    def get_unet(self):
        """Build and compile the U-Net (RGB input, sigmoid mask output)."""
        inputs = Input((self.img_rows, self.img_cols, 3))
        # ----- Encoder -----
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
        # print(conv1)
        print ("conv1 shape:", conv1.shape)
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
        print ("conv1 shape:", conv1.shape)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print ("pool1 shape:", pool1.shape)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
        print ("conv2 shape:", conv2.shape)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
        print ("conv2 shape:", conv2.shape)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print ("pool2 shape:", pool2.shape)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
        print ("conv3 shape:", conv3.shape)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
        print ("conv3 shape:", conv3.shape)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print ("pool3 shape:", pool3.shape)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
        # ----- Bottleneck -----
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)
        # ----- Decoder with skip connections -----
        up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(drop5))
        merge6 = concatenate([drop4, up6],axis=3)
        print(up6)
        print(merge6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
        print(conv6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
        print(conv6)
        up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv6))
        merge7 = concatenate([conv3, up7],axis=3)
        print(up7)
        print(merge7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
        print(conv7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
        print(conv7)
        up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv7))
        merge8 = concatenate([conv2, up8],axis=3)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
        up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv8))
        merge9 = concatenate([conv1, up9], axis=3)
        print(up9)
        print(merge9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
        print(conv9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        print(conv9)
        conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        print ("conv9 shape:", conv9.shape)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
        print(conv10)
        model = Model(inputs, conv10)
        # Background/foreground class weights for weightedLoss.
        weights = [1,2]
        # NOTE(review): BinaryCrossentropy(from_logits=True) although conv10 is
        # sigmoid-activated — confirm this combination is intended.
        model.compile(optimizer=Adam(lr=1e-4), loss= self.weightedLoss(tf.keras.losses.BinaryCrossentropy(from_logits=True), weights), metrics=['accuracy', self.iou, self.loss_angle], run_eagerly=True)
        #model.compile(optimizer=Adam(lr=1e-4), loss=BinaryFocalLoss(gamma=2), metrics=['accuracy', self.iou, self.loss_angle], run_eagerly=True)
        #model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy', self.iou, self.loss_angle], run_eagerly=True)
        #model.compile(optimizer=Adam(lr=1e-4), loss=self.loss_angle, metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
        return model

    def train(self):
        """Train for 20 epochs (batch size 1), save metric plots, and dump test predictions."""
        print("loading data")
        imgs_train, imgs_mask_train, imgs_test, imgs_mask_test = self.load_data()
        print("loading data done")
        model = self.get_unet()
        print("got unet")
        # Keep only the best checkpoint by training loss.
        model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', verbose=1, save_best_only=True)
        #class_weights = {0: 1., 1: 2.}
        print('Fitting model...')
        history = model.fit(imgs_train, imgs_mask_train, batch_size=1, epochs=20, verbose=1,
                            validation_split=0.2, shuffle=True, callbacks=[model_checkpoint])
        # One PNG per tracked metric: accuracy, IoU, angle error, loss.
        plt.plot(history.history['accuracy'])
        plt.plot(history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train','test'], loc='upper left')
        plt.savefig('accuracy.png')
        plt.close('all')
        plt.plot(history.history['iou'])
        plt.plot(history.history['val_iou'])
        plt.title('model iou')
        plt.ylabel('iou')
        plt.xlabel('epoch')
        plt.legend(['train','test'], loc='upper left')
        plt.savefig('iou.png')
        plt.close('all')
        plt.plot(history.history['loss_angle'])
        plt.plot(history.history['val_loss_angle'])
        plt.title('angle error')
        plt.ylabel('angle error')
        plt.xlabel('epoch')
        plt.legend(['train','test'], loc='upper left')
        plt.savefig('angle.png')
        plt.close('all')
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train','test'], loc='upper left')
        plt.savefig('loss.png')
        plt.close('all')
        print('predict test data')
        imgs_mask_test = model.predict(imgs_test, batch_size=1, verbose=1)
        np.save('./results/imgs_mask_test.npy', imgs_mask_test)
        #model.evaluate( imgs_test,imgs_mask_test)

    def save_img(self):
        """Write each predicted mask to ./results/<original name>, binarised at 127."""
        print("array to image")
        imgs = np.load('./results/imgs_mask_test.npy')
        piclist = []
        # pic.txt (written by dataProcess.create_test_data) maps prediction
        # index -> original file name.
        for line in open("./results/pic.txt"):
            line = line.strip()
            picname = line.split('/')[-1]
            piclist.append(picname)
        for i in range(imgs.shape[0]):
            path = "./results/" + piclist[i]
            img = imgs[i]
            img = array_to_img(img)
            img.save(path)
            # Re-read, resize, and hard-threshold to a clean binary mask.
            cv_pic = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            cv_pic = cv2.resize(cv_pic,(512,512),interpolation=cv2.INTER_CUBIC)
            binary, cv_save = cv2.threshold(cv_pic, 127, 255, cv2.THRESH_BINARY)
            cv2.imwrite(path, cv_save)
if __name__ == '__main__':
    # Build the network, show its layout, then run training and export masks.
    net = myUnet()
    net.get_unet().summary()
    net.train()
    net.save_img()
| 15,916 | 42.135501 | 201 | py |
CropRowDetection | CropRowDetection-main/unet-rgbd/unetRGBD.py | # -*- coding:utf-8 -*-
import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import array_to_img
from tensorflow.keras import backend as K
import cv2
from data import *
#import tensorflow.experimental.numpy as np
import numpy as np
from sklearn.cluster import DBSCAN
from collections import defaultdict
import math
from PIL import Image
from matplotlib import pyplot as plt
from focal_loss import BinaryFocalLoss
class myUnet(object):
    def __init__(self, img_rows=512, img_cols=512):
        """Store the input size and the line-extraction constants used by loss_angle."""
        self.img_rows = img_rows
        self.img_cols = img_cols
        # DBSCAN clustering radii (in degrees) used by loss_angle.
        self.eps1 = 4
        self.eps2 = 3
        # Hough accumulator threshold used by loss_angle.
        self.htres = 100
def load_data(self):
mydata = dataProcess(self.img_rows, self.img_cols)
imgs_train, imgs_mask_train = mydata.load_train_data()
imgs_test = mydata.load_test_data()
imgs_mask_test = mydata.load_test_labels()
return imgs_train, imgs_mask_train, imgs_test, imgs_mask_test
def tensor_to_image(self, tensor):
tensor = tensor*255
tensor = np.array(tensor, dtype=np.uint8)
if np.ndim(tensor)>3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return PIL.Image.fromarray(tensor)
def iou(self, y_true, y_pred):
y_true = tf.reshape(y_true, [-1])
y_pred = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(tf.cast(y_true, tf.float32) * tf.cast(y_pred, tf.float32))
score = (intersection + 1.) / (tf.reduce_sum(tf.cast(y_true, tf.float32)) + tf.reduce_sum(tf.cast(y_pred, tf.float32)) - intersection + 1.)
return score
    def mylossiou(self, y_true, y_pred):
        """Blended loss: 0.9 * binary cross-entropy + 0.1 * (1 - soft IoU)."""
        bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        bcloss = bce(y_true, y_pred)
        y_true = tf.reshape(y_true, [-1])
        y_pred = tf.reshape(y_pred, [-1])
        intersection = tf.reduce_sum(tf.cast(y_true, tf.float32) * tf.cast(y_pred, tf.float32))
        # Soft IoU with +1 smoothing, same formula as self.iou.
        score = (intersection + 1.) / (tf.reduce_sum(tf.cast(y_true, tf.float32)) + tf.reduce_sum(tf.cast(y_pred, tf.float32)) - intersection + 1.)
        iouloss = 1-score
        ret = 0.9*bcloss + 0.1*iouloss
        #y_p = K.print_tensor(ret.get_shape().as_list(), message=' ret = ')
        return ret
def myloss(self, y_true, y_pred):
loss=tf.nn.weighted_cross_entropy_with_logits(y_true, y_pred, 5.)
return loss
    def weightedLoss(self, originalLossFunc, weightsList):
        """Wrap a loss so each pixel's loss is scaled by the weight of its
        ground-truth class (weightsList is indexed by class)."""
        def lossFunc(true, pred):
            axis = -1 #if channels last
            #axis= 1 #if channels first
            #argmax returns the index of the element with the greatest value
            #done in the class axis, it returns the class index
            classSelectors = K.argmax(true, axis=axis)
            #if your loss is sparse, use only true as classSelectors
            #considering weights are ordered by class, for each class
            #true(1) if the class index is equal to the weight index
            classSelectors = [K.equal(i, classSelectors) for i in range(len(weightsList))]
            #casting boolean to float for calculations
            #each tensor in the list contains 1 where ground true class is equal to its index
            #if you sum all these, you will get a tensor full of ones.
            classSelectors = [K.cast(x, K.floatx()) for x in classSelectors]
            #for each of the selections above, multiply their respective weight
            weights = [sel * w for sel,w in zip(classSelectors, weightsList)]
            #sums all the selections
            #result is a tensor with the respective weight for each element in predictions
            weightMultiplier = weights[0]
            for i in range(1, len(weights)):
                weightMultiplier = weightMultiplier + weights[i]
            #make sure your originalLossFunc only collapses the class axis
            #you need the other axes intact to multiply the weights tensor
            loss = originalLossFunc(true,pred)
            loss = loss * weightMultiplier
            return loss
        return lossFunc
@tf.autograph.experimental.do_not_convert
def loss_angle(self, y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
img = y_true.numpy()
lbl = y_pred.numpy()
img = np.reshape(img, (512,512,1))
lbl = np.reshape(lbl, (512,512,1))
img = img*255
lbl = lbl*255
#y_t = K.print_tensor(img, message='y_true = ')
#y_p = K.print_tensor(lbl, message='y_pred = ')
#img = tf.keras.preprocessing.image.array_to_img(img)
#lbl = tf.keras.preprocessing.image.array_to_img(lbl)
#img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#lbl=cv2.cvtColor(lbl,cv2.COLOR_BGR2GRAY)
#ret,img = cv2.threshold(img, 127, 255, 0)
ret,lbl = cv2.threshold(lbl, 127, 255, 0)
#y_p = K.print_tensor(lbl, message='y_pred2 = ')
skel1 = np.zeros(img.shape, np.float32)
skel2 = np.zeros(lbl.shape, np.float32)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
while True:
openImg = cv2.morphologyEx(img, cv2.MORPH_OPEN, element)
temp = cv2.subtract(img, openImg)
eroded = cv2.erode(img, element)
skel1 = cv2.bitwise_or(skel1,temp)
img = eroded.copy()
if cv2.countNonZero(img)==0:
break
skel1 = np.array(skel1 * 255, dtype = np.uint8)
lines = cv2.HoughLines(skel1,1,np.pi/180,100)
if lines is None:#When no lines are found
lines=np.zeros((1,1,2))
tdeg = lines[:,:,1]*180/np.pi
clustering = DBSCAN(eps=self.eps1, min_samples=1).fit(tdeg)
clusters = defaultdict(list)
slines1 = []
for i,c in enumerate(clustering.labels_): # Sort Clusters into groups
clusters[c].append(tdeg[i])
for i,c in enumerate(clusters): # Select one candidate per cluster
k=(max(list(clusters[i]))[0])//90
slines1.append((k*max(list(clusters[i]))[0])+((1-k)*min(list(clusters[i]))[0]))#choose min if angle<90 or max if angle>90
while True:
openImg = cv2.morphologyEx(lbl, cv2.MORPH_OPEN, element)
temp = cv2.subtract(lbl, openImg)
eroded = cv2.erode(lbl, element)
skel2 = cv2.bitwise_or(skel2,temp)
lbl = eroded.copy()
if cv2.countNonZero(lbl)==0:
break
skel2 = np.array(skel2 * 255, dtype = np.uint8)
lines = cv2.HoughLines(skel2,1,np.pi/180,self.htres)
if lines is None:#When no lines are found
lines=np.zeros((1,1,2))
tdeg = lines[:,:,1]*180/np.pi
clustering = DBSCAN(eps=self.eps1, min_samples=1).fit(tdeg)
clusters = defaultdict(list)
slines2 = []
for i,c in enumerate(clustering.labels_): # Sort Clusters into groups
clusters[c].append(tdeg[i])
for i,c in enumerate(clusters): # Select one candidate per cluster
k=(max(list(clusters[i]))[0])//90
slines2.append((k*max(list(clusters[i]))[0])+((1-k)*min(list(clusters[i]))[0]))#choose min if angle<90 or max if angle>90
slines = slines1+slines2
slines = np.array(slines).reshape(-1,1)
clustering = DBSCAN(eps=self.eps2, min_samples=1).fit(slines)
clusters = defaultdict(list)
for i,c in enumerate(clustering.labels_): # Sort Clusters into groups
clusters[c].append(slines[i])
error = []
for i,c in enumerate(clusters): # Select one candidate per cluster
if (len(list(clusters[i]))) > 1:
error.append(max(clusters[i])-min(clusters[i]))
error = np.array(error)
e = (abs(error[error!=0].mean()))/10
if math.isnan(e):
e = 0.0
#e = tf.convert_to_tensor(e)
#y_p = K.print_tensor(e.type, message='etype = ')
return e
    def get_unet(self):
        """Build and compile the 4-channel-input U-Net.

        Returns a compiled Keras model mapping a (rows, cols, 4) RGB-D input
        to a (rows, cols, 1) sigmoid mask, trained with binary cross-entropy
        and monitored with accuracy, soft IoU and the angle error
        (run_eagerly=True is required because loss_angle uses numpy/OpenCV).
        """
        inputs = Input((self.img_rows, self.img_cols, 4))
        # ---- Contracting path -------------------------------------------
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
        # print(conv1)
        print ("conv1 shape:", conv1.shape)
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
        print ("conv1 shape:", conv1.shape)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print ("pool1 shape:", pool1.shape)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
        print ("conv2 shape:", conv2.shape)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
        print ("conv2 shape:", conv2.shape)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print ("pool2 shape:", pool2.shape)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
        print ("conv3 shape:", conv3.shape)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
        print ("conv3 shape:", conv3.shape)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print ("pool3 shape:", pool3.shape)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
        # ---- Bottleneck -------------------------------------------------
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)
        # ---- Expanding path with skip connections -----------------------
        up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(drop5))
        merge6 = concatenate([drop4, up6],axis=3)
        print(up6)
        print(merge6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
        print(conv6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
        print(conv6)
        up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv6))
        merge7 = concatenate([conv3, up7],axis=3)
        print(up7)
        print(merge7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
        print(conv7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
        print(conv7)
        up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv7))
        merge8 = concatenate([conv2, up8],axis=3)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
        up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv8))
        merge9 = concatenate([conv1, up9], axis=3)
        print(up9)
        print(merge9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
        print(conv9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        print(conv9)
        conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        print ("conv9 shape:", conv9.shape)
        # Final 1x1 sigmoid convolution yields the per-pixel mask.
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
        print(conv10)
        model = Model(inputs, conv10)
        # Class weights kept for the commented weighted-loss variants below.
        weights = [1,5]
        #model.compile(optimizer=Adam(lr=1e-4), loss= self.weightedLoss(tf.keras.losses.BinaryCrossentropy(from_logits=True), weights), metrics=['accuracy', self.iou, self.loss_angle], run_eagerly=True)
        #model.compile(optimizer=Adam(lr=1e-4), loss=BinaryFocalLoss(gamma=2), metrics=['accuracy', self.iou, self.loss_angle], run_eagerly=True)
        model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy', self.iou, self.loss_angle], run_eagerly=True)
        #model.compile(optimizer=Adam(lr=1e-4), loss=self.loss_angle, metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
        return model
def train(self):
print("loading data")
imgs_train, imgs_mask_train, imgs_test, imgs_mask_test = self.load_data()
print("loading data done")
model = self.get_unet()
print("got unet")
model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', verbose=1, save_best_only=True)
#class_weights = {0: 1., 1: 2.}
print('Fitting model...')
history = model.fit(imgs_train, imgs_mask_train, batch_size=1, epochs=40, verbose=1,
validation_split=0.2, shuffle=True, callbacks=[model_checkpoint])
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.savefig('accuracy.png')
plt.close('all')
plt.plot(history.history['iou'])
plt.plot(history.history['val_iou'])
plt.title('model iou')
plt.ylabel('iou')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.savefig('iou.png')
plt.close('all')
plt.plot(history.history['loss_angle'])
plt.plot(history.history['val_loss_angle'])
plt.title('angle error')
plt.ylabel('angle error')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.savefig('angle.png')
plt.close('all')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.savefig('loss.png')
plt.close('all')
print('predict test data')
imgs_mask_test = model.predict(imgs_test, batch_size=1, verbose=1)
np.save('./results/imgs_mask_test.npy', imgs_mask_test)
#model.evaluate( imgs_test,imgs_mask_test)
def save_img(self):
print("array to image")
imgs = np.load('./results/imgs_mask_test.npy')
piclist = []
for line in open("./results/pic.txt"):
line = line.strip()
picname = line.split('/')[-1]
piclist.append(picname)
for i in range(imgs.shape[0]):
path = "./results/" + piclist[i]
img = imgs[i]
img = array_to_img(img)
img.save(path)
cv_pic = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
cv_pic = cv2.resize(cv_pic,(512,512),interpolation=cv2.INTER_CUBIC)
binary, cv_save = cv2.threshold(cv_pic, 127, 255, cv2.THRESH_BINARY)
cv2.imwrite(path, cv_save)
if __name__ == '__main__':
    # Build and summarise the network, train it, then export binarised masks.
    myunet = myUnet()
    model = myunet.get_unet()
    model.summary()
    #plot_model(model, to_file='model.png')
    myunet.train()
    myunet.save_img()
| 15,916 | 42.135501 | 202 | py |
CropRowDetection | CropRowDetection-main/unet-rgbd/dataRGBD.py | # -*- coding:utf-8 -*-
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
import glob
class dataProcess(object):
    """Creates and loads the cached .npy datasets for the RGB-D U-Net.

    Each sample stacks a 3-channel RGB image with its 1-channel depth map
    into a (rows, cols, 4) array; labels are (rows, cols, 1) masks. All
    images are resized to (out_rows, out_cols) and arrays are cached under
    `npy_path`. (The previously unused `depths = glob.glob(...)` locals in
    the two dataset builders have been removed.)
    """

    def __init__(self, out_rows, out_cols, data_path="./data/train/image", depth_path="./data/train/depth", label_path="./data/train/label",
                 test_path="./data/test/image", testdepth_path="./data/test/depth", testlabel_path="./data/test/label", npy_path="./npydata", img_type="jpg"):
        self.out_rows = out_rows
        self.out_cols = out_cols
        self.data_path = data_path
        self.depth_path = depth_path
        self.label_path = label_path
        self.img_type = img_type
        self.test_path = test_path
        self.testdepth_path = testdepth_path
        self.testlabel_path = testlabel_path
        self.npy_path = npy_path

    def create_train_data(self):
        """Build imgs_train.npy / imgs_mask_train.npy from the train folders."""
        i = 0
        print('Creating training images...')
        imgs = glob.glob(self.data_path+"/*."+self.img_type)
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 4), dtype=np.uint8)
        imglabels = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for x in range(len(imgs)):
            imgpath = imgs[x]
            # NOTE(review): assumes '/' path separators; os.path.basename
            # would be portable.
            pic_name = imgpath.split('/')[-1]
            labelpath = self.label_path + '/' + pic_name
            depthpath = self.depth_path + '/' + pic_name
            img = load_img(imgpath, color_mode='rgb', target_size=[512, 512])
            depth = load_img(depthpath, color_mode='grayscale', target_size=[512, 512])
            label = load_img(labelpath, color_mode='grayscale', target_size=[512, 512])
            img = img_to_array(img)
            depth = img_to_array(depth)
            img = np.dstack((img, depth))  # stack into an RGB-D array
            label = img_to_array(label)
            imgdatas[i] = img
            imglabels[i] = label
            if i % 100 == 0:
                print('Done: {0}/{1} images'.format(i, len(imgs)))
            i += 1
        print('loading done')
        np.save(self.npy_path + '/imgs_train.npy', imgdatas)
        np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
        print('Saving to .npy files done.')

    def create_test_all(self):
        """Build imgs_test.npy / imgs_mask_test.npy (images AND labels)
        from the test folders."""
        i = 0
        print('Creating testall images...')
        imgs = glob.glob(self.test_path+"/*."+self.img_type)
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 4), dtype=np.uint8)
        imglabels = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for x in range(len(imgs)):
            imgpath = imgs[x]
            pic_name = imgpath.split('/')[-1]
            labelpath = self.testlabel_path + '/' + pic_name
            depthpath = self.testdepth_path + '/' + pic_name
            img = load_img(imgpath, color_mode='rgb', target_size=[512, 512])
            depth = load_img(depthpath, color_mode='grayscale', target_size=[512, 512])
            label = load_img(labelpath, color_mode='grayscale', target_size=[512, 512])
            img = img_to_array(img)
            depth = img_to_array(depth)
            img = np.dstack((img, depth))  # stack into an RGB-D array
            label = img_to_array(label)
            imgdatas[i] = img
            imglabels[i] = label
            if i % 100 == 0:
                print('Done: {0}/{1} images'.format(i, len(imgs)))
            i += 1
        print('loading done')
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)
        np.save(self.npy_path + '/imgs_mask_test.npy', imglabels)
        print('Saving to .npy files done.')

    def create_test_data(self):
        """Build imgs_test.npy (RGB only, no depth/labels) and write the
        ordered file-name list to ./results/pic.txt."""
        i = 0
        print('Creating test images...')
        imgs = glob.glob(self.test_path + "/*." + self.img_type)
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 3), dtype=np.uint8)
        testpathlist = []
        for imgname in imgs:
            testpath = imgname
            testpathlist.append(testpath)
            img = load_img(testpath, color_mode='rgb', target_size=[512, 512])
            img = img_to_array(img)
            imgdatas[i] = img
            i += 1
        # Record the source paths so save_img() can name its outputs.
        txtname = './results/pic.txt'
        with open(txtname, 'w') as f:
            for i in range(len(testpathlist)):
                f.writelines(testpathlist[i] + '\n')
        print('loading done')
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)
        print('Saving to imgs_test.npy files done.')

    def load_train_data(self):
        """Load the cached train arrays, normalise to [0, 1] and binarise
        the masks. Returns (imgs_train, imgs_mask_train)."""
        print('load train images...')
        imgs_train = np.load(self.npy_path + "/imgs_train.npy")
        imgs_mask_train = np.load(self.npy_path + "/imgs_mask_train.npy")
        imgs_train = imgs_train.astype('float32')
        imgs_mask_train = imgs_mask_train.astype('float32')
        imgs_train /= 255
        imgs_mask_train /= 255
        imgs_mask_train[imgs_mask_train > 0.5] = 1  # white (foreground)
        imgs_mask_train[imgs_mask_train <= 0.5] = 0  # black (background)
        return imgs_train, imgs_mask_train

    def load_test_data(self):
        """Load the cached test images, normalised to [0, 1]."""
        print('-' * 30)
        print('load test images...')
        print('-' * 30)
        imgs_test = np.load(self.npy_path + "/imgs_test.npy")
        imgs_test = imgs_test.astype('float32')
        imgs_test /= 255
        return imgs_test

    def load_test_labels(self):
        """Load the cached test masks, normalised to [0, 1]."""
        print('-' * 30)
        print('load test label images...')
        print('-' * 30)
        imgs_testlabels = np.load(self.npy_path + "/imgs_mask_test.npy")
        imgs_testlabels = imgs_testlabels.astype('float32')
        imgs_testlabels /= 255
        return imgs_testlabels
if __name__ == "__main__":
    # Regenerate the cached .npy datasets (train set + full labelled test set).
    mydata = dataProcess(512, 512)
    mydata.create_train_data()
    mydata.create_test_all()
    #mydata.create_test_data()
| 5,828 | 39.479167 | 158 | py |
Traffic-Benchmark | Traffic-Benchmark-master/train_benchmark.py | import os
import random
import numpy as np
import torch
# import setproctitle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--model',type=str,default='DGCRN',help='model')
parser.add_argument('--data',type=str,default='METR-LA',help='dataset')
args = parser.parse_args()
# Module-level selection read by main(): which benchmark model to launch and
# on which dataset.
model = args.model
data = args.data
# setproctitle.setproctitle(model + '_' + data + "@lifuxian")
# Mapping (model, dataset) -> shell command used to launch that training
# run. Empty strings mark combinations that are listed in the benchmark but
# have no runnable configuration yet.
COMMANDS = {
    ('DGCRN', 'BJ'): 'python ./methods/DGCRN_BJ/train.py --adj_data ~/NE-BJ/adj_mat_BJ_new.pkl --data ~/NE-BJ/ --num_nodes 500 --runs 3 --epochs 250 --print_every 10 --batch_size 16 --tolerance 100 --step_size1 2100 --cl_decay_steps 3500 --expid DGCRN_bj --rnn_size 64 --node_dim 100 --device cuda:5',
    ('DGCRN', 'METR-LA'): 'python ./methods/DGCRN/train.py --adj_data ./data/sensor_graph/adj_mx.pkl --data ./data/METR-LA --num_nodes 207 --runs 3 --epochs 110 --print_every 10 --batch_size 64 --tolerance 100 --cl_decay_steps 4000 --expid DGCRN_metrla --device cuda:2',
    ('DGCRN', 'PEMS-BAY'): 'python ./methods/DGCRN/train.py --adj_data ./data/sensor_graph/adj_mx_bay.pkl --data ./data/PEMS-BAY --num_nodes 325 --runs 3 --epochs 110 --print_every 10 --batch_size 64 --tolerance 100 --expid DGCRN_pemsbay --cl_decay_steps 5500 --rnn_size 96 --device cuda:2',
    ('FNN', 'BJ'): 'CUDA_VISIBLE_DEVICES=2 python dcrnn_train_pytorch.py --config_filename=data/model/stmetanet_BJ500.yaml',
    ('FNN', 'METR-LA'): '',
    ('FNN', 'PEMS-BAY'): '',
    ('FC-LSTM', 'BJ'): 'CUDA_VISIBLE_DEVICES=0 python ./methods/LSTM/dcrnn_train_pytorch.py --config_filename=data/model/LSTM_BJ500.yaml ',
    ('FC-LSTM', 'METR-LA'): '',
    ('FC-LSTM', 'PEMS-BAY'): '',
    ('DCRNN', 'BJ'): 'CUDA_VISIBLE_DEVICES=1 python ./methods/DCRNN/dcrnn_train_pytorch.py --config_filename=data/BJ/dcrnn_BJ.yaml',
    ('DCRNN', 'METR-LA'): 'CUDA_VISIBLE_DEVICES=3 python ./methods/DCRNN/dcrnn_train_pytorch.py --config_filename=data/model/dcrnn_la.yaml',
    ('DCRNN', 'PEMS-BAY'): 'CUDA_VISIBLE_DEVICES=1 python ./methods/DCRNN/dcrnn_train_pytorch.py --config_filename=data/model/dcrnn_bay.yaml',
    ('STGCN', 'BJ'): '',
    ('STGCN', 'METR-LA'): '',
    ('STGCN', 'PEMS-BAY'): '',
    ('Graph-WaveNet', 'BJ'): 'python ./methods/Graph-WaveNet/train.py --data ~/NE-BJ --adjdata ~/NE-BJ/adj_mat_BJ.pkl --save ./garage/BJ_500_nodedim100 --gcn_bool --adjtype doubletransition --addaptadj --randomadj --device cuda:2 --batch_size 64 --epoch 200 --print_every 10',
    ('Graph-WaveNet', 'METR-LA'): 'python ./methods/Graph-WaveNet/train.py --data=data/METR-LA --gcn_bool --adjtype doubletransition --addaptadj --randomadj',
    ('Graph-WaveNet', 'PEMS-BAY'): 'python ./methods/Graph-WaveNet/train.py --data=data/PEMS-BAY --adjdata data/sensor_graph/adj_mx_bay.pkl --save ./garage/pems --gcn_bool --adjtype doubletransition --addaptadj --randomadj',
    ('ST-MetaNet', 'BJ'): 'CUDA_VISIBLE_DEVICES=1 python ./methods/ST-MetaNet/dcrnn_train_pytorch.py --config_filename=data/model/stmetanet_BJ500.yaml',
    ('ST-MetaNet', 'METR-LA'): '',
    ('ST-MetaNet', 'PEMS-BAY'): '',
    ('ASTGCN', 'BJ'): 'python ./methods/ASTGCN/train_ASTGCN_r.py --config configurations/BJ.conf',
    ('ASTGCN', 'METR-LA'): 'python ./methods/ASTGCN/train_ASTGCN_r.py --config configurations/METR-LA.conf',
    ('ASTGCN', 'PEMS-BAY'): 'python ./methods/ASTGCN/train_ASTGCN_r.py --config configurations/PEMS-BAY.conf',
    ('STSGCN', 'BJ'): 'python3 ./method/STSGCN/main.py --config config/BJ/individual_GLU_mask_emb.json --save',
    ('STSGCN', 'METR-LA'): 'python3 ./method/STSGCN/main.py --config config/METR-LA/individual_GLU_mask_emb.json --save',
    ('STSGCN', 'PEMS-BAY'): 'python3 ./method/STSGCN/main.py --config config/PEMS-BAY/individual_GLU_mask_emb.json --save',
    ('AGCRN', 'BJ'): 'python ./methods/AGCRN/model/Run_BJ.py --dataset_dir /data/lifuxian/NE-BJ/ --device cuda:6',
    ('AGCRN', 'METR-LA'): 'python ./methods/AGCRN/model/Run_METR-LA.py --dataset_dir /data/lifuxian/DCRNN_PyTorch-pytorch_scratch/data/METR-LA --device cuda:5',
    ('AGCRN', 'PEMS-BAY'): 'python ./methods/AGCRN/model/Run_PEMS-BAY.py --dataset_dir /data/lifuxian/DCRNN_PyTorch-pytorch_scratch/data/PEMS-BAY --device cuda:6',
    ('GMAN', 'BJ'): 'CUDA_VISIBLE_DEVICES=4 python ./methods/GMAN/BJ500/train.py --batch_size 8',
    ('GMAN', 'METR-LA'): 'CUDA_VISIBLE_DEVICES=4 python ./methods/GMAN/METR/train.py',
    ('GMAN', 'PEMS-BAY'): 'CUDA_VISIBLE_DEVICES=4 python ./methods/GMAN/PeMS/train.py',
    ('MTGNN', 'BJ'): 'python ./methods/MTGNN/train_multi_step.py --adj_data ~/NE-BJ/adj_mat_BJ.pkl --data ~/NE-BJ --num_nodes 500 --runs 3 --device cuda:1 --epochs 1000 --print_every 1000 --buildA_true True --expid 80 --node_dim 80',
    ('MTGNN', 'METR-LA'): '',
    ('MTGNN', 'PEMS-BAY'): '',
}


def main():
    """Launch the training command for the selected (model, data) pair.

    Replaces the original 120-line if/elif ladder with a table lookup.
    Combinations without a configured command (unknown pair, or an empty
    entry in COMMANDS) are reported and skipped instead of invoking
    `os.system('')` as the old code did.
    """
    run = COMMANDS.get((model, data), '')
    if not run:
        print('No training command configured for model=%s data=%s' % (model, data))
        return
    os.system(run)


if __name__ == "__main__":
    main()
Traffic-Benchmark | Traffic-Benchmark-master/methods/ST-MetaNet/dcrnn_train_pytorch.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
import setproctitle
setproctitle.setproctitle("stmetanet@lifuxian")
def main(args):
    """Train (or evaluate) the ST-MetaNet DCRNN supervisor.

    Args:
        args: parsed CLI namespace; uses config_filename, LOAD_INITIAL and
            TEST_ONLY.
    """
    with open(args.config_filename) as f:
        # yaml.load without an explicit Loader is deprecated (and unsafe on
        # untrusted input); the config is plain data, so safe_load suffices.
        supervisor_config = yaml.safe_load(f)

        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

        # Config files are named '<prefix>_<dataset>.yaml', e.g.
        # 'stmetanet_BJ500.yaml' -> dataset tag 'BJ500'.
        data_type = args.config_filename.split('/')[-1].split('.')[0].split('_')[-1]

        supervisor = DCRNNSupervisor(data_type = data_type, LOAD_INITIAL = args.LOAD_INITIAL, adj_mx=adj_mx, **supervisor_config)

        if args.TEST_ONLY:
            supervisor.evaluate_test()
        else:
            supervisor.train()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_filename', default=None, type=str,
                        help='Configuration filename for restoring the model.')
    parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set to true to only use cpu.')
    # NOTE(review): argparse `type=bool` does not parse strings — any
    # non-empty value (including "False") becomes True. Passing these flags
    # with an explicit value is therefore unreliable; consider
    # action='store_true' instead.
    parser.add_argument('--LOAD_INITIAL', default=False, type=bool, help='If LOAD_INITIAL.')
    parser.add_argument('--TEST_ONLY', default=False, type=bool, help='If TEST_ONLY.')
    args = parser.parse_args()
    main(args)
| 1,459 | 38.459459 | 129 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/ST-MetaNet/run_demo_pytorch.py | import argparse
import numpy as np
import os
import sys
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
def run_dcrnn(args):
    """Evaluate a pretrained DCRNN on the test split and save predictions.

    Loads the supervisor config and graph data, runs evaluation, writes the
    prediction arrays to args.output_filename (.npz) and prints the MAE.
    """
    with open(args.config_filename) as f:
        # safe_load replaces the deprecated Loader-less yaml.load; the
        # config contains only plain scalars and mappings.
        supervisor_config = yaml.safe_load(f)

        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
        mean_score, outputs = supervisor.evaluate('test')
        np.savez_compressed(args.output_filename, **outputs)
        print("MAE : {}".format(mean_score))
        print('Predictions saved as {}.'.format(args.output_filename))
if __name__ == '__main__':
    # Make sibling modules importable when run as a script.
    sys.path.append(os.getcwd())
    parser = argparse.ArgumentParser()
    # NOTE(review): use_cpu_only is declared with type=str and never read by
    # run_dcrnn(); it is kept for CLI compatibility only.
    parser.add_argument('--use_cpu_only', default=False, type=str, help='Whether to run tensorflow on cpu.')
    parser.add_argument('--config_filename', default='data/model/pretrained/METR-LA/config.yaml', type=str,
                        help='Config file for pretrained model.')
    parser.add_argument('--output_filename', default='data/dcrnn_predictions.npz')
    args = parser.parse_args()
    run_dcrnn(args)
| 1,264 | 36.205882 | 108 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/ST-MetaNet/model/pytorch/dcrnn_model.py | import numpy as np
import torch
import torch.nn as nn
from model.pytorch.dcrnn_cell import DCGRUCell
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
    """Return the total number of trainable parameters in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
class Seq2SeqAttrs:
    """Hyper-parameters shared by the DCRNN encoder, decoder and model.

    Every value is pulled from `model_kwargs`, with the same defaults the
    original configuration files rely on; `rnn_units` is mandatory.
    """
    def __init__(self, adj_mx, **model_kwargs):
        get = model_kwargs.get
        self.adj_mx = adj_mx
        self.max_diffusion_step = int(get('max_diffusion_step', 2))
        self.cl_decay_steps = int(get('cl_decay_steps', 1000))
        self.filter_type = get('filter_type', 'laplacian')
        self.num_nodes = int(get('num_nodes', 1))
        self.num_rnn_layers = int(get('num_rnn_layers', 1))
        self.rnn_units = int(get('rnn_units'))
        # Size of one flattened hidden state: units per node x node count.
        self.hidden_state_size = self.num_nodes * self.rnn_units
class EncoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN encoder: a stack of diffusion-convolutional GRU cells that
    consumes one input time step per forward() call."""
    def __init__(self, adj_mx, **model_kwargs):
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.input_dim = int(model_kwargs.get('input_dim', 1))
        self.seq_len = int(model_kwargs.get('seq_len'))  # for the encoder
        # One DCGRUCell per RNN layer; all share the same adjacency matrix.
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Encoder forward pass (one time step).

        :param inputs: shape (batch_size, self.num_nodes * self.input_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.hidden_state_size)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        batch_size, _ = inputs.size()
        if hidden_state is None:
            # Zero-initialise the hidden state on the first call.
            hidden_state = torch.zeros((self.num_rnn_layers, batch_size, self.hidden_state_size),
                                       device=device)
        hidden_states = []
        output = inputs
        # Feed each layer's output into the next layer.
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state

        return output, torch.stack(hidden_states)  # runs in O(num_layers) so not too slow
class DecoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN decoder: the same DCGRU stack as the encoder, plus a linear
    projection mapping each node's hidden state to output_dim values."""
    def __init__(self, adj_mx, **model_kwargs):
        # super().__init__(is_training, adj_mx, **model_kwargs)
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.output_dim = int(model_kwargs.get('output_dim', 1))
        self.horizon = int(model_kwargs.get('horizon', 1))  # for the decoder
        self.projection_layer = nn.Linear(self.rnn_units, self.output_dim)
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Decoder forward pass (one time step).

        :param inputs: shape (batch_size, self.num_nodes * self.output_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.num_nodes * self.output_dim)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        hidden_states = []
        output = inputs
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state

        # Project each node's hidden state down to output_dim values.
        projected = self.projection_layer(output.view(-1, self.rnn_units))
        output = projected.view(-1, self.num_nodes * self.output_dim)

        return output, torch.stack(hidden_states)
class DCRNNModel(nn.Module, Seq2SeqAttrs):
    """Full DCRNN seq2seq model: the encoder digests `seq_len` observed
    steps, the decoder rolls out `horizon` prediction steps, optionally
    with scheduled-sampling (curriculum learning) during training."""
    def __init__(self, adj_mx, logger, **model_kwargs):
        super().__init__()
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.encoder_model = EncoderModel(adj_mx, **model_kwargs)
        self.decoder_model = DecoderModel(adj_mx, **model_kwargs)
        self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))
        self.use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))
        self._logger = logger

    def _compute_sampling_threshold(self, batches_seen):
        # Inverse-sigmoid decay: probability of feeding ground truth to the
        # decoder decreases as training progresses (scheduled sampling).
        return self.cl_decay_steps / (
                self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))

    def encoder(self, inputs):
        """
        encoder forward pass on t time steps
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        """
        encoder_hidden_state = None
        # Thread the hidden state through all seq_len input steps.
        for t in range(self.encoder_model.seq_len):
            _, encoder_hidden_state = self.encoder_model(inputs[t], encoder_hidden_state)

        return encoder_hidden_state

    def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
        """
        Decoder forward pass
        :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference]
        :param batches_seen: global step [optional, not exist for inference]
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        # Decoding starts from an all-zero <GO> symbol.
        go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
                                device=device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol

        outputs = []

        for t in range(self.decoder_model.horizon):
            decoder_output, decoder_hidden_state = self.decoder_model(decoder_input,
                                                                      decoder_hidden_state)
            # Feed back the model's own prediction by default...
            decoder_input = decoder_output
            outputs.append(decoder_output)
            if self.training and self.use_curriculum_learning:
                c = np.random.uniform(0, 1)
                # ...but sometimes teacher-force the ground truth, with a
                # probability that decays over training.
                if c < self._compute_sampling_threshold(batches_seen):
                    decoder_input = labels[t]
        outputs = torch.stack(outputs)
        return outputs

    def forward(self, inputs, labels=None, batches_seen=None):
        """
        seq2seq forward pass
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :param labels: shape (horizon, batch_size, num_sensor * output)
        :param batches_seen: batches seen till now
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        encoder_hidden_state = self.encoder(inputs)
        self._logger.debug("Encoder complete, starting decoder")
        outputs = self.decoder(encoder_hidden_state, labels, batches_seen=batches_seen)
        self._logger.debug("Decoder complete")
        if batches_seen == 0:
            self._logger.info(
                "Total trainable parameters {}".format(count_parameters(self))
            )
        return outputs
####################################################################################################################
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class TimeBlock(nn.Module):
"""
Neural network block that applies a temporal convolution to each node of
a graph in isolation.
"""
def __init__(self, in_channels, out_channels, kernel_size=3):
"""
:param in_channels: Number of input features at each node in each time
step.
:param out_channels: Desired number of output channels at each node in
each time step.
:param kernel_size: Size of the 1D temporal kernel.
"""
super(TimeBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, (1, kernel_size))
self.conv2 = nn.Conv2d(in_channels, out_channels, (1, kernel_size))
self.conv3 = nn.Conv2d(in_channels, out_channels, (1, kernel_size))
def forward(self, X):
"""
:param X: Input data of shape (batch_size, num_nodes, num_timesteps,
num_features=in_channels)
:return: Output data of shape (batch_size, num_nodes,
num_timesteps_out, num_features_out=out_channels)
"""
# Convert into NCHW format for pytorch to perform convolutions.
X = X.permute(0, 3, 1, 2)
temp = self.conv1(X) + torch.sigmoid(self.conv2(X))
out = F.relu(temp + self.conv3(X))
# Convert back from NCHW to NHWC
out = out.permute(0, 2, 3, 1)
return out
class STGCNBlock(nn.Module):
    """
    Neural network block that applies a temporal convolution on each node in
    isolation, followed by a graph convolution, followed by another temporal
    convolution on each node.
    """

    def __init__(self, in_channels, spatial_channels, out_channels,
                 num_nodes):
        """
        :param in_channels: Number of input features at each node in each time
        step.
        :param spatial_channels: Number of output channels of the graph
        convolutional, spatial sub-block.
        :param out_channels: Desired number of output features at each node in
        each time step.
        :param num_nodes: Number of nodes in the graph.
        """
        super(STGCNBlock, self).__init__()
        self.temporal1 = TimeBlock(in_channels=in_channels,
                                   out_channels=out_channels)
        # Spatial weight matrix of the graph convolution.
        self.Theta1 = nn.Parameter(torch.FloatTensor(out_channels,
                                                     spatial_channels))
        self.temporal2 = TimeBlock(in_channels=spatial_channels,
                                   out_channels=out_channels)
        # BatchNorm2d(num_nodes) normalises over the node axis, which sits in
        # the channel position of the (batch, nodes, time, features) output.
        self.batch_norm = nn.BatchNorm2d(num_nodes)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by 1/sqrt of Theta1's second dimension.
        stdv = 1. / math.sqrt(self.Theta1.shape[1])
        self.Theta1.data.uniform_(-stdv, stdv)

    def forward(self, X, A_hat):
        """
        :param X: Input data of shape (batch_size, num_nodes, num_timesteps,
        num_features=in_channels).
        :param A_hat: Normalized adjacency matrix.
        :return: Output data of shape (batch_size, num_nodes,
        num_timesteps_out, num_features=out_channels).
        """
        t = self.temporal1(X)
        # Graph convolution: contract A_hat (nodes x nodes) against the node
        # axis of t, producing (batch, nodes, time, channels) again.
        lfs = torch.einsum("ij,jklm->kilm", [A_hat, t.permute(1, 0, 2, 3)])
        # t2 = F.relu(torch.einsum("ijkl,lp->ijkp", [lfs, self.Theta1]))
        # Apply the spatial weight on the feature axis, then the nonlinearity.
        t2 = F.relu(torch.matmul(lfs, self.Theta1))
        t3 = self.temporal2(t2)
        return self.batch_norm(t3)
        # return t3
class STGCN(nn.Module):
    """Spatio-temporal graph convolutional network (Yu et al.,
    https://arxiv.org/abs/1709.04875v3).

    Internally operates on tensors of shape
    (batch_size, num_nodes, num_input_time_steps, num_features).
    """

    def __init__(self, num_nodes, num_features, num_timesteps_input,
                 num_timesteps_output):
        """
        :param num_nodes: number of nodes in the graph.
        :param num_features: features per node per time step.
        :param num_timesteps_input: number of history steps fed in.
        :param num_timesteps_output: number of future steps predicted.
        """
        super(STGCN, self).__init__()
        self.block1 = STGCNBlock(in_channels=num_features, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.block2 = STGCNBlock(in_channels=64, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.last_temporal = TimeBlock(in_channels=64, out_channels=64)
        # Five temporal convolutions in total (two per STGCN block plus the
        # final one), each trimming 2 time steps -> flattened width T_in - 2*5.
        self.fully = nn.Linear((num_timesteps_input - 2 * 5) * 64,
                               num_timesteps_output)
        self.num_nodes = num_nodes
        self.input_dim = num_features
        self.seq_len = num_timesteps_input
        self.horizon = num_timesteps_output

    def forward(self, A_hat, X):
        """
        :param A_hat: normalized adjacency matrix.
        :param X: input, flattened as (seq_len, batch, num_nodes * input_dim).
        :return: predictions of shape (horizon, batch, num_nodes).
        """
        # (seq_len, B, N, F) -> (B, N, seq_len, F)
        batched = X.view(self.seq_len, -1, self.num_nodes, self.input_dim).permute(1, 2, 0, 3).contiguous()
        hidden = self.block1(batched, A_hat)
        hidden = self.block2(hidden, A_hat)
        hidden = self.last_temporal(hidden)
        preds = self.fully(hidden.reshape((hidden.shape[0], hidden.shape[1], -1)))
        # (B, N, horizon) -> (horizon, B, N)
        return preds.permute(2, 0, 1).contiguous()
# :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
# y: shape (horizon, batch_size, num_sensor * output_dim)
####################################################################################################################
import math
import random
from typing import List, Tuple
import numpy as np
import dgl
import torch
from dgl import DGLGraph, init
from torch import nn, Tensor
class MultiLayerPerception(nn.Sequential):
    """Stack of Linear layers with activations between (and optionally after) them.

    Submodules are named ``Layer{i}`` / ``Activation{i}`` (1-based) so state
    dicts remain compatible with the original layout.
    """

    def __init__(self, hiddens: List[int], hidden_act, out_act: bool):
        """
        :param hiddens: layer widths, e.g. [in, h1, ..., out].
        :param hidden_act: activation class (e.g. nn.ReLU), instantiated per layer.
        :param out_act: if True, also append an activation after the last Linear.
        """
        super(MultiLayerPerception, self).__init__()
        last = len(hiddens) - 1
        for idx, (f_in, f_out) in enumerate(zip(hiddens[:-1], hiddens[1:]), start=1):
            self.add_module(f'Layer{idx}', nn.Linear(f_in, f_out))
            if out_act or idx < last:
                self.add_module(f'Activation{idx}', hidden_act())
class MetaDense(nn.Module):
    """Dense layer whose weight and bias are generated per node from meta-features.

    Two small MLPs map each node's feature vector to an (f_in x f_out) weight
    matrix and an f_out bias, which are then applied to that node's data.
    """

    def __init__(self, f_in: int, f_out: int, feat_size: int, meta_hiddens: List[int]):
        super(MetaDense, self).__init__()
        self.weights_mlp = MultiLayerPerception([feat_size] + meta_hiddens + [f_in * f_out], nn.Sigmoid, False)
        self.bias_mlp = MultiLayerPerception([feat_size] + meta_hiddens + [f_out], nn.Sigmoid, False)

    def forward(self, feature: Tensor, data: Tensor) -> Tensor:
        """
        :param feature: tensor, [N, F]
        :param data: tensor, [B, N, F_in]
        :return: tensor, [B, N, F_out]
        """
        batch, n_nodes, f_in = data.shape
        # Generated weights: [1, N, F_in, F_out], broadcast over the batch.
        w = self.weights_mlp(feature).reshape(1, n_nodes, f_in, -1)
        b = self.bias_mlp(feature)  # [N, F_out]
        return data.reshape(batch, n_nodes, 1, f_in).matmul(w).squeeze(2) + b
class RNNCell(nn.Module):
    """Base class for per-node recurrent cells unrolled along the time axis."""

    def __init__(self):
        super(RNNCell, self).__init__()

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """Run a single time step.

        :param feature: tensor, [N, F]
        :param data: tensor, [B, N, F]
        :param begin_state: None or tensor, [B, N, F]
        :return: (output [B, N, F], state [B, N, F])
        """
        raise NotImplementedError("Not Implemented")

    def forward(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """Unroll ``one_step`` over the time dimension of ``data``.

        :param feature: tensor, [N, F]
        :param data: tensor, [B, T, N, F]
        :param begin_state: optional initial state, [B, N, F]
        :return: (stacked outputs [B, T, N, F], final state)
        """
        n_steps = data.shape[1]
        state = begin_state
        step_outputs = []
        for step in range(n_steps):
            out, state = self.one_step(feature, data[:, step], state)
            step_outputs.append(out)
        return torch.stack(step_outputs, 1), state
class MetaGRUCell(RNNCell):
    """GRU cell whose gate/candidate projections are meta-generated per node
    via MetaDense (weights come from node features)."""

    def __init__(self, f_in: int, hid_size: int, feat_size: int, meta_hiddens: List[int]):
        super(MetaGRUCell, self).__init__()
        self.hidden_size = hid_size
        # Joint projection producing both update (z) and reset (r) gates.
        self.dense_zr = MetaDense(f_in + hid_size, 2 * hid_size, feat_size, meta_hiddens=meta_hiddens)
        self.dense_i2h = MetaDense(f_in, hid_size, feat_size, meta_hiddens=meta_hiddens)
        self.dense_h2h = MetaDense(hid_size, hid_size, feat_size, meta_hiddens=meta_hiddens)

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """Standard GRU update; the output equals the new state."""
        batch, n_nodes, _ = data.shape
        if begin_state is None:
            begin_state = torch.zeros(batch, n_nodes, self.hidden_size, dtype=data.dtype, device=data.device)
        gates = torch.sigmoid(self.dense_zr(feature, torch.cat([data, begin_state], -1)))
        z, r = gates.split(self.hidden_size, -1)
        candidate = torch.tanh(self.dense_i2h(feature, data) + self.dense_h2h(feature, r * begin_state))
        state = z * begin_state + (1 - z) * candidate
        return state, state
class NormalGRUCell(RNNCell):
    """Plain nn.GRUCell applied to each node independently (nodes folded into
    the batch dimension)."""

    def __init__(self, f_in: int, hid_size: int):
        super(NormalGRUCell, self).__init__()
        self.cell = nn.GRUCell(f_in, hid_size)

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        batch, n_nodes, _ = data.shape
        flat = data.reshape(batch * n_nodes, -1)
        prev = None if begin_state is None else begin_state.reshape(batch * n_nodes, -1)
        new_state = self.cell(flat, prev).reshape(batch, n_nodes, -1)
        return new_state, new_state
import sys
class GraphAttNet(nn.Module):
    """Base class for attention over a fixed DGL graph.

    The graph is built once from a distance matrix and per-node neighbour
    lists; subclasses implement ``msg_edge`` / ``msg_reduce``.
    """

    def __init__(self, dist: np.ndarray, edge: list, hid_size: int, feat_size: int, meta_hiddens: List[int]):
        """
        :param dist: [N, N] pairwise distances; dist[j, i] is stored on edge j->i.
        :param edge: adjacency lists; edge[i] holds the source nodes j of edges into i.
        :param hid_size: hidden state size H.
        :param feat_size: node meta-feature size F.
        :param meta_hiddens: meta-learner MLP widths (used by subclasses).
        """
        super(GraphAttNet, self).__init__()
        self.hidden_size = hid_size
        self.feature_size = feat_size
        self.meta_hiddens = meta_hiddens
        self.num_nodes = n = dist.shape[0]
        # Flatten the adjacency lists into (src, dst, distance) edge triples.
        src, dst, dis = list(), list(), list()
        for i in range(n):
            for j in edge[i]:
                src.append(j)
                dst.append(i)
                dis.append(dist[j, i])
        dist = torch.tensor(dis).unsqueeze_(1)
        g = DGLGraph()
        g.set_n_initializer(init.zero_initializer)
        g.add_nodes(n)
        g.add_edges(src, dst, {'dist': dist})
        self.graph = g

    def forward(self, state: Tensor, feature: Tensor) -> Tensor:
        """
        :param state: tensor, [B, T, N, F] or [B, N, F]
        :param feature: tensor, [N, F]
        :return: tensor, [B, T, N, F]
        """
        # print(state.shape)
        # torch.Size([32, 12, 207, 32])
        # shape => [N, B, T, F] or [N, B, F] — DGL expects the node axis first.
        state = state.unsqueeze(0).transpose(0, -2).squeeze(-2)
        g = self.graph.local_var()
        # NOTE(review): in recent DGL versions Graph.to() RETURNS the moved
        # graph rather than moving in place, so this result is discarded —
        # confirm against the pinned dgl version.
        g.to(state.device)
        g.ndata['state'] = state
        g.ndata['feature'] = feature
        # One round of message passing defined by the subclass hooks.
        g.update_all(self.msg_edge, self.msg_reduce)
        state = g.ndata.pop('new_state')
        # print(state.shape)
        # torch.Size([207, 32, 12, 32])
        # sys.exit(0)
        # Undo the axis shuffle: node axis back to position -2.
        return state.unsqueeze(-2).transpose(0, -2).squeeze(0)

    def msg_edge(self, edge: dgl.EdgeBatch):
        """
        :param edge: a dictionary of edge data.
            edge.src['state'] and edge.dst['state']: hidden states of the nodes, with shape [e, b, t, d] or [e, b, d]
            edge.src['feature'] and edge.dst['state']: features of the nodes, with shape [e, d]
            edge.data['dist']: distance matrix of the edges, with shape [e, d]
        :return: a dictionary of messages
        """
        raise NotImplementedError('Not implemented.')

    def msg_reduce(self, node: dgl.NodeBatch):
        """
        :param node:
            node.mailbox['state'], tensor, [n, e, b, t, d] or [n, e, b, d]
            node.mailbox['alpha'], tensor, [n, e, b, t, d] or [n, e, b, d]
        :return: tensor, [n, b, t, d] or [n, b, d]
        """
        raise NotImplementedError('Not implemented.')
class MetaGAT(GraphAttNet):
    """Graph attention whose per-edge scoring matrix is meta-generated from the
    endpoint features and the edge distance; the aggregated message is scaled
    by a learned scalar gate (sigmoid(self.weight), initialised at 0 -> 0.5)."""

    def __init__(self, *args, **kwargs):
        super(MetaGAT, self).__init__(*args, **kwargs)
        # Maps [src_feat, dst_feat, dist] -> a (2H x H) attention matrix per edge.
        self.w_mlp = MultiLayerPerception(
            [self.feature_size * 2 + 1] + self.meta_hiddens + [self.hidden_size * 2 * self.hidden_size],
            nn.Sigmoid, False)
        self.act = nn.LeakyReLU()
        self.weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)

    def msg_edge(self, edge: dgl.EdgeBatch):
        """Compute unnormalised attention scores and forward the source state."""
        state = torch.cat([edge.src['state'], edge.dst['state']], -1)  # [X, B, T, 2H] or [X, B, 2H]
        feature = torch.cat([edge.src['feature'], edge.dst['feature'], edge.data['dist']], -1)  # [X, 2F + 1]
        weight = self.w_mlp(feature).reshape(-1, self.hidden_size * 2, self.hidden_size)  # [X, 2H, H]
        # Fold all non-edge dims together so bmm can apply a per-edge matrix.
        shape = state.shape
        state = state.reshape(shape[0], -1, shape[-1])
        # [X, ?, 2H] * [X, 2H, H] => [X, ?, H]
        alpha = self.act(torch.bmm(state, weight))
        alpha = alpha.reshape(*shape[:-1], self.hidden_size)
        return {'alpha': alpha, 'state': edge.src['state']}

    def msg_reduce(self, node: dgl.NodeBatch):
        """Softmax the scores over incoming edges and return the gated sum."""
        state = node.mailbox['state']
        alpha = node.mailbox['alpha']
        alpha = torch.softmax(alpha, 1)  # normalise over the edge dimension
        new_state = torch.relu(torch.sum(alpha * state, dim=1)) * torch.sigmoid(self.weight)
        return {'new_state': new_state}
class STMetaEncoder(nn.Module):
    """Encoder: a stack of (GRU -> in/out graph attention) layers, with a final
    GRU layer that has no attention after it."""

    def __init__(self, input_dim: int, rnn_types: List[str], rnn_hiddens: List[int], feat_size: int,
                 meta_hiddens: List[int], graph: Tuple[np.ndarray, list, list]):
        """
        :param input_dim: features per node per step.
        :param rnn_types: per-layer cell types ('NormalGRU' or 'MetaGRU').
        :param rnn_hiddens: per-layer hidden sizes.
        :param feat_size: node meta-feature size.
        :param meta_hiddens: meta-learner MLP widths.
        :param graph: (distance matrix, in-edge lists, out-edge lists).
        """
        super(STMetaEncoder, self).__init__()
        dist, e_in, e_out = graph
        grus, gats = list(), list()
        rnn_hiddens = [input_dim] + rnn_hiddens
        for i, rnn_type in enumerate(rnn_types):
            in_dim, out_dim = rnn_hiddens[i], rnn_hiddens[i + 1]
            if rnn_type == 'NormalGRU':
                grus.append(NormalGRUCell(in_dim, out_dim))
            elif rnn_type == 'MetaGRU':
                grus.append(MetaGRUCell(in_dim, out_dim, feat_size, meta_hiddens))
            else:
                raise ValueError(f'{rnn_type} is not implemented.')
            # The last GRU layer gets no attention pair after it.
            if i == len(rnn_types) - 1:
                break
            g1 = MetaGAT(dist.T, e_in, out_dim, feat_size, meta_hiddens)
            g2 = MetaGAT(dist, e_out, out_dim, feat_size, meta_hiddens)
            gats.append(nn.ModuleList([g1, g2]))
        self.grus = nn.ModuleList(grus)
        self.gats = nn.ModuleList(gats)

    def forward(self, feature: Tensor, data: Tensor) -> List[Tensor]:
        """
        :param feature: tensor, [N, F]
        :param data: tensor, [B, T, N, F]
        :return: list of final states, one per GRU layer
        """
        states = list()
        for depth, (g1, g2) in enumerate(self.gats):
            data, state = self.grus[depth](feature, data)
            states.append(state)
            # Sum of in-edge and out-edge attention outputs.
            data = g1(data, feature) + g2(data, feature)
        else:
            # for/else: this branch always runs (there is no break) — it
            # handles the final GRU layer, which has no attention after it.
            _, state = self.grus[-1](feature, data)
            states.append(state)
        return states
class STMetaDecoder(nn.Module):
    """Decoder mirroring STMetaEncoder; rolls out n_preds steps with optional
    scheduled sampling (teacher forcing) during training."""

    def __init__(self, n_preds: int, output_dim: int, rnn_types: List[str], rnn_hiddens: List[int], feat_size: int,
                 meta_hiddens: List[int], graph: Tuple[np.ndarray, list, list], input_dim: int):
        super(STMetaDecoder, self).__init__()
        self.output_dim = output_dim
        self.n_preds = n_preds
        dist, e_in, e_out = graph
        grus, gats = list(), list()
        # rnn_hiddens = [output_dim] + rnn_hiddens
        # Decoder inputs carry the full input_dim (prediction + aux features).
        rnn_hiddens = [input_dim] + rnn_hiddens
        self.input_dim = input_dim
        for i, rnn_type in enumerate(rnn_types):
            in_dim, out_dim = rnn_hiddens[i], rnn_hiddens[i + 1]
            if rnn_type == 'NormalGRU':
                grus.append(NormalGRUCell(in_dim, out_dim))
            elif rnn_type == 'MetaGRU':
                grus.append(MetaGRUCell(in_dim, out_dim, feat_size, meta_hiddens))
            else:
                raise ValueError(f'{rnn_type} is not implemented.')
            # The last GRU layer gets no attention pair after it.
            if i == len(rnn_types) - 1:
                break
            g1 = MetaGAT(dist.T, e_in, out_dim, feat_size, meta_hiddens)
            g2 = MetaGAT(dist, e_out, out_dim, feat_size, meta_hiddens)
            gats.append(nn.ModuleList([g1, g2]))
        self.grus = nn.ModuleList(grus)
        self.gats = nn.ModuleList(gats)
        self.out = nn.Linear(rnn_hiddens[1], output_dim)

    # def sampling(self):
    #     """ Schedule sampling: sampling the ground truth. """
    #     threshold = self.cl_decay_steps / (self.cl_decay_steps + math.exp(self.global_steps / self.cl_decay_steps))
    #     return float(random.random() < threshold)

    def forward(self, feature: Tensor, begin_states: List[Tensor], targets: Tensor = None,
                teacher_force: float = 0.5) -> Tensor:
        """
        :param feature: tensor, [N, F]
        :param begin_states: list of tensors, each of [B, N, hidden_size]
        :param targets: none or tensor, [B, T, N, input_dim]; the first
            output_dim channels are the label, the remainder are auxiliary
            inputs fed back alongside it
        :param teacher_force: float, probability of feeding the ground truth
            as the next decoder input (training only)
        :return: tensor, [B, n_preds, N, output_dim]
        """
        b, n, _ = begin_states[0].shape
        aux = targets[:,:,:, self.output_dim:] # auxiliary channels, [b,t,n,d]
        label = targets[:,:,:, :self.output_dim] # ground-truth labels, [b,t,n,d]
        # <go> token: all-zeros first input.
        go = torch.zeros(b, n, self.input_dim, device=feature.device, dtype=feature.dtype)
        # outputs = list()
        outputs, states = [], begin_states
        for i_pred in range(self.n_preds):
            if i_pred == 0:
                inputs = go
            # for/else mirrors the encoder: the else always runs and advances
            # the final GRU layer after the attention stack.
            # NOTE(review): the fixed states[0]/states[1] indexing assumes
            # exactly two recurrent layers — confirm for other configurations.
            for depth, (g1, g2) in enumerate(self.gats):
                inputs, states[0] = self.grus[depth].one_step(feature, inputs, states[0])
                # Average of in-edge and out-edge attention outputs.
                inputs = (g1(inputs, feature) + g2(inputs, feature)) / 2
            else:
                # print(len(self.grus), len(states))
                inputs, states[1] = self.grus[-1].one_step(feature, inputs, states[1])
            inputs = self.out(inputs)
            outputs.append(inputs)
            if self.training and (targets is not None) and (random.random() < teacher_force):
                # Scheduled sampling: replace the prediction with the true
                # label (plus aux features) as the next input.
                # inputs = targets[:, i_pred]
                inputs = label[:, i_pred]
                inputs = torch.cat([inputs, aux[:, i_pred, :, :]], -1)
        return torch.stack(outputs, 1)
class STMetaNet(nn.Module):
    """ST-MetaNet: seq2seq encoder/decoder whose GRU/GAT weights are generated
    from per-node geo-features (embedded by ``geo_encoder``)."""

    def __init__(self,
                 graph: Tuple[np.ndarray, list, list],
                 n_preds: int,
                 input_dim: int,
                 output_dim: int,
                 cl_decay_steps: int,
                 rnn_types: List[str],
                 rnn_hiddens: List[int],
                 meta_hiddens: List[int],
                 geo_hiddens: List[int]):
        """
        :param graph: (distance matrix, in-edge lists, out-edge lists).
        :param n_preds: decoder horizon (number of predicted steps).
        :param input_dim: features per node per step (target + auxiliary).
        :param output_dim: predicted features per node per step.
        :param cl_decay_steps: scheduled-sampling decay constant.
        :param rnn_types: per-layer cell types ('NormalGRU' or 'MetaGRU').
        :param rnn_hiddens: per-layer hidden sizes.
        :param meta_hiddens: meta-learner MLP widths.
        :param geo_hiddens: geo-feature MLP widths; geo_hiddens[0] must match
            the raw node feature dimension.
        """
        super(STMetaNet, self).__init__()
        feat_size = geo_hiddens[-1]
        self.cl_decay_steps = cl_decay_steps
        self.encoder = STMetaEncoder(input_dim, rnn_types, rnn_hiddens, feat_size, meta_hiddens, graph)
        self.decoder = STMetaDecoder(n_preds, output_dim, rnn_types, rnn_hiddens, feat_size, meta_hiddens, graph, input_dim)
        self.geo_encoder = MultiLayerPerception(geo_hiddens, hidden_act=nn.ReLU, out_act=True)
        features = graph[0]
        self.num_nodes = features.shape[0]
        # self.num_nodes = 500
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.seq_len = 12  # NOTE(review): encoder history length is hard-coded.
        self.horizon = n_preds

    def forward(self, feature: Tensor, inputs: Tensor, targets: Tensor = None, batch_seen: int = 0) -> Tensor:
        """
        dynamic convolutional recurrent neural network
        :param feature: [N, d] raw geo-features
        :param inputs: history, arriving flattened as (seq_len, B, N * input_dim)
        :param targets: exists for training, flattened as (horizon, B, N * input_dim)
        :param batch_seen: int, the number of batches the model has seen
        :return: (horizon, B, N * output_dim)
        """
        # Targets are reshaped with input_dim (not output_dim): they carry the
        # auxiliary channels the decoder feeds back during teacher forcing.
        inputs = inputs.view(self.seq_len, -1, self.num_nodes, self.input_dim).permute(1, 0, 2, 3).contiguous()
        targets = targets.view(self.horizon, -1, self.num_nodes, self.input_dim).permute(1, 0, 2, 3).contiguous()
        feature = self.geo_encoder(feature.float())
        states = self.encoder(feature, inputs)
        # targets = None
        outputs = self.decoder(feature, states, targets, self._compute_sampling_threshold(batch_seen))
        # Flatten back to (horizon, B, N * output_dim) for the supervisor.
        return outputs.permute(1, 0, 2, 3).contiguous().view(self.horizon, -1, self.num_nodes * self.output_dim)

    def _compute_sampling_threshold(self, batches_seen: int):
        """Scheduled-sampling probability: starts near 1 and decays toward 0."""
        return self.cl_decay_steps / (self.cl_decay_steps + math.exp(batches_seen / self.cl_decay_steps))
# :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
# y: shape (horizon, batch_size, num_sensor * output_dim)
# def test():
# dist = np.random.randn(207, 207)
# edge1, edge2 = [[] for _ in range(207)], [[] for _ in range(207)]
# for i in range(207):
# for j in range(207):
# if np.random.random() < 0.2:
# edge1[i].append(j)
# edge2[j].append(i)
# me = STMetaEncoder(2, 32, 32, 32, [32, 4], (dist, edge1, edge2), 32)
# md = STMetaDecoder(12, 1, 32, 32, 32, [32, 4], (dist, edge1, edge2), 32)
# data = torch.randn(31, 12, 207, 2)
# feature = torch.randn(207, 32)
# states = me(feature, data)
# print(states[0].shape, states[1].shape)
# outputs = md(feature, states)
# m = STMetaNet((dist, edge1, edge2), 12, 2, 1, 2000, ['NormalGRU', 'MetaGRU'], [], [16, 2], [32, 32])
| 29,634 | 40.331939 | 128 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/ST-MetaNet/model/pytorch/dcrnn_cell.py | import numpy as np
import torch
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
    """Lazily-created parameters, keyed by shape/length, registered on a host
    module so they appear in its state dict and optimizer parameter list."""

    def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
        self._rnn_network = rnn_network
        self._params_dict = {}   # shape tuple -> weight Parameter
        self._biases_dict = {}   # length -> bias Parameter
        self._type = layer_type  # name prefix for registered parameters

    def get_weights(self, shape):
        """Return (creating and xavier-initialising on first use) the weight of `shape`."""
        if shape not in self._params_dict:
            param = torch.nn.Parameter(torch.empty(*shape, device=device))
            torch.nn.init.xavier_normal_(param)
            self._rnn_network.register_parameter('{}_weight_{}'.format(self._type, str(shape)),
                                                 param)
            self._params_dict[shape] = param
        return self._params_dict[shape]

    def get_biases(self, length, bias_start=0.0):
        """Return (creating and constant-initialising on first use) the bias of `length`."""
        if length not in self._biases_dict:
            param = torch.nn.Parameter(torch.empty(length, device=device))
            torch.nn.init.constant_(param, bias_start)
            self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, str(length)),
                                                 param)
            self._biases_dict[length] = param
        return self._biases_dict[length]
class DCGRUCell(torch.nn.Module):
    """Diffusion-convolutional GRU cell (DCRNN): the GRU's linear maps are
    replaced by graph diffusion convolutions over the sensor graph."""

    def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearity='tanh',
                 filter_type="laplacian", use_gc_for_ru=True):
        """
        :param num_units: hidden state size per node.
        :param adj_mx: adjacency matrix used to build the diffusion supports.
        :param max_diffusion_step: K, the maximum diffusion (Chebyshev) order.
        :param num_nodes: number of graph nodes.
        :param nonlinearity: 'tanh' for tanh, anything else for relu.
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super().__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        # support other nonlinearities up here?
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        supports = []
        # Build the diffusion support matrices; dual_random_walk uses both the
        # forward and the backward transition matrices.
        if filter_type == "laplacian":
            supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))
        # Parameters are created lazily (keyed by shape) on the first forward.
        self._fc_params = LayerParams(self, 'fc')
        self._gconv_params = LayerParams(self, 'gconv')

    @staticmethod
    def _build_sparse_matrix(L):
        """Convert a scipy sparse matrix to a torch sparse COO tensor on `device`."""
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
        indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
        L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
        return L

    def forward(self, inputs, hx):
        """Gated recurrent unit (GRU) with Graph Convolution.
        :param inputs: (B, num_nodes * input_dim)
        :param hx: (B, num_nodes * rnn_units)

        :return
        - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
        """
        output_size = 2 * self._num_units
        if self._use_gc_for_ru:
            fn = self._gconv
        else:
            fn = self._fc
        # NOTE(review): _fc already applies a sigmoid internally, so the
        # use_gc_for_ru=False path squashes the gates twice — confirm intended.
        value = torch.sigmoid(fn(inputs, hx, output_size, bias_start=1.0))
        value = torch.reshape(value, (-1, self._num_nodes, output_size))
        # r: reset gate, u: update gate.
        r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1)
        r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
        u = torch.reshape(u, (-1, self._num_nodes * self._num_units))
        # Candidate state computed from the reset-gated previous state.
        c = self._gconv(inputs, r * hx, self._num_units)
        if self._activation is not None:
            c = self._activation(c)
        new_state = u * hx + (1.0 - u) * c
        return new_state

    @staticmethod
    def _concat(x, x_):
        # Stack one more diffusion order onto the accumulator along dim 0.
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)

    def _fc(self, inputs, state, output_size, bias_start=0.0):
        """Plain (non-graph) linear layer over [inputs, state], with sigmoid."""
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
        state = torch.reshape(state, (batch_size * self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=-1)
        input_size = inputs_and_state.shape[-1]
        weights = self._fc_params.get_weights((input_size, output_size))
        value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
        biases = self._fc_params.get_biases(output_size, bias_start)
        value += biases
        return value

    def _gconv(self, inputs, state, output_size, bias_start=0.0):
        """Diffusion graph convolution over the concatenation [inputs, state].

        Uses the Chebyshev-style recurrence x_k = 2 * support @ x_{k-1} - x_{k-2}
        for each support, collecting all orders before the final linear map.
        """
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
        state = torch.reshape(state, (batch_size, self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=2)
        input_size = inputs_and_state.size(2)

        x = inputs_and_state
        # Fold (feature, batch) together so the sparse matmul acts on nodes.
        x0 = x.permute(1, 2, 0)  # (num_nodes, total_arg_size, batch_size)
        x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)

        if self._max_diffusion_step == 0:
            pass
        else:
            for support in self._supports:
                x1 = torch.sparse.mm(support, x0)
                x = self._concat(x, x1)

                # NOTE(review): the recurrence rebinds x0, so subsequent
                # supports do not start from the original signal; this matches
                # the reference DCRNN implementation — verify if intended.
                for k in range(2, self._max_diffusion_step + 1):
                    x2 = 2 * torch.sparse.mm(support, x1) - x0
                    x = self._concat(x, x2)
                    x1, x0 = x2, x1

        num_matrices = len(self._supports) * self._max_diffusion_step + 1  # Adds for x itself.
        x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
        x = x.permute(3, 1, 2, 0)  # (batch_size, num_nodes, input_size, order)
        x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])

        weights = self._gconv_params.get_weights((input_size * num_matrices, output_size))
        x = torch.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)

        biases = self._gconv_params.get_biases(output_size, bias_start)
        x += biases
        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return torch.reshape(x, [batch_size, self._num_nodes * output_size])
| 6,939 | 41.576687 | 105 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/ST-MetaNet/model/pytorch/utils.py | import torch
import numpy as np
def masked_mae_loss(y_pred, y_true):
    """MAE over the non-zero entries of y_true (zeros are treated as missing).

    The mask is renormalised by its mean so the result is on the same scale
    as an unmasked mean; NaNs (e.g. from an all-zero mask) are zeroed out.
    """
    valid = (y_true != 0).float()
    valid = valid / valid.mean()
    masked = torch.abs(y_pred - y_true) * valid
    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    masked = torch.where(torch.isnan(masked), torch.zeros_like(masked), masked)
    return masked.mean()
def masked_mse(preds, labels, null_val=np.nan):
    """Mean squared error ignoring entries of `labels` equal to `null_val`
    (NaN by default); the mask is renormalised by its mean."""
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    valid = valid.float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    sq_err = (preds - labels) ** 2 * valid
    # Zero out NaNs introduced by masked entries.
    sq_err = torch.where(torch.isnan(sq_err), torch.zeros_like(sq_err), sq_err)
    return torch.mean(sq_err)
def masked_rmse(preds, labels, null_val=np.nan):
    """Root of the null-value-masked MSE (see masked_mse)."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return mse.sqrt()
def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error ignoring entries of `labels` equal to `null_val`
    (NaN by default); the mask is renormalised by its mean."""
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    valid = valid.float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    abs_err = torch.abs(preds - labels) * valid
    # Zero out NaNs introduced by masked entries.
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)
    return torch.mean(abs_err)
def masked_mape(preds, labels, null_val=np.nan):
    """Mean absolute percentage error ignoring entries of `labels` equal to
    `null_val` (NaN by default); the mask is renormalised by its mean."""
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    valid = valid.float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    pct_err = torch.abs(preds - labels) / labels * valid
    # inf * 0 and 0/0 become NaN above; zero them out here.
    pct_err = torch.where(torch.isnan(pct_err), torch.zeros_like(pct_err), pct_err)
    return torch.mean(pct_err)
def metric(pred, real):
    """Return (MAE, MAPE, RMSE) with zeros in `real` treated as missing values."""
    return (
        masked_mae(pred, real, 0.0).item(),
        masked_mape(pred, real, 0.0).item(),
        masked_rmse(pred, real, 0.0).item(),
    )
def get_normalized_adj(A):
    """Return D^{-1/2} (A + I) D^{-1/2}, the symmetrically degree-normalised
    adjacency matrix with self-loops added."""
    A = A + np.diag(np.ones(A.shape[0], dtype=np.float32))
    deg = np.asarray(A.sum(axis=1)).reshape(-1)
    deg[deg <= 10e-5] = 10e-5  # guard against division by zero
    inv_sqrt = np.reciprocal(np.sqrt(deg))
    return inv_sqrt.reshape(-1, 1) * A * inv_sqrt.reshape(1, -1)
Traffic-Benchmark | Traffic-Benchmark-master/methods/ST-MetaNet/model/pytorch/dcrnn_supervisor.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
# from torch.utils.tensorboard import SummaryWriter
from lib import utils
# from model.pytorch.dcrnn_model import DCRNNModel
from model.pytorch.dcrnn_model import STMetaNet
from model.pytorch.utils import masked_mae_loss, metric, get_normalized_adj
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DCRNNSupervisor:
    def __init__(self, data_type, LOAD_INITIAL, adj_mx, **kwargs):
        """Set up logging, data loaders and the ST-MetaNet model.

        :param data_type: dataset tag, used to name the checkpoint file.
        :param LOAD_INITIAL: if True, restore weights from the existing checkpoint.
        :param adj_mx: adjacency matrix; unused in this constructor apart from
            the commented-out normalisation at the bottom.
        :param kwargs: config dict with 'data', 'model' and 'train' sections.
        """
        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        self.max_grad_norm = self._train_kwargs.get('max_grad_norm', 1.)
        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        # self._writer = SummaryWriter('runs/' + self._log_dir)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)
        # data set
        self._data = utils.load_dataset(**self._data_kwargs)
        self.standard_scaler = self._data['scaler']
        self.num_nodes = int(self._model_kwargs.get('num_nodes', 1))
        self.input_dim = int(self._model_kwargs.get('input_dim', 1))
        self.seq_len = int(self._model_kwargs.get('seq_len'))  # for the encoder
        self.output_dim = int(self._model_kwargs.get('output_dim', 1))
        self.use_curriculum_learning = bool(
            self._model_kwargs.get('use_curriculum_learning', False))
        self.horizon = int(self._model_kwargs.get('horizon', 1))  # for the decoder
        # Pre-computed node geo-features and graph structure.
        # NOTE(review): the tuple unpack binds `e_in_out` twice, so the first
        # edge list is discarded and the same list is used for both in- and
        # out-edges — confirm this is intended.
        # features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet.npy', allow_pickle=True)
        # features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet_metrla.npy', allow_pickle=True)
        features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet_BJ500.npy', allow_pickle=True)
        # features, (dist, e_in_out, e_in_out) = np.load('/home/lifuxian/BikeNYC/feat_stmetanet.npy', allow_pickle=True)
        self.features = torch.from_numpy(features).to(device)
        # setup model
        # dcrnn_model = DCRNNModel(adj_mx, self._logger, **self._model_kwargs)
        dcrnn_model = STMetaNet(
            graph = (dist, e_in_out, e_in_out),#Tuple[np.ndarray, list, list],
            n_preds = self.horizon,
            input_dim = self.input_dim,
            output_dim = self.output_dim,
            cl_decay_steps = 2000,
            rnn_types = ['NormalGRU', 'MetaGRU'],
            # rnn_types = ['NormalGRU', 'NormalGRU'],
            rnn_hiddens = [32, 32],
            meta_hiddens = [16, 2],
            # geo_hiddens = [20, 32, 32]
            # geo_hiddens = [20, 32, 32]  # the first list element is the feature dimensionality (20)
            geo_hiddens = [11, 32, 32] # the first list element is the feature dimensionality (11)
        )
        self.dcrnn_model = dcrnn_model.cuda() if torch.cuda.is_available() else dcrnn_model
        self._logger.info("Model created")
        self._epoch_num = self._train_kwargs.get('epoch', 0)
        # if self._epoch_num > 0:  # in fact the default value of self._epoch_num is 0
        # self.load_model()
        self.data_type = data_type
        self.LOAD_INITIAL = LOAD_INITIAL
        if LOAD_INITIAL:
            self.load_lfx()
        # self.features = torch.from_numpy(get_normalized_adj(adj_mx)).to(device)
@staticmethod
def _get_log_dir(kwargs):
log_dir = kwargs['train'].get('log_dir')
if log_dir is None:
batch_size = kwargs['data'].get('batch_size')
learning_rate = kwargs['train'].get('base_lr')
max_diffusion_step = kwargs['model'].get('max_diffusion_step')
num_rnn_layers = kwargs['model'].get('num_rnn_layers')
rnn_units = kwargs['model'].get('rnn_units')
structure = '-'.join(
['%d' % rnn_units for _ in range(num_rnn_layers)])
horizon = kwargs['model'].get('horizon')
filter_type = kwargs['model'].get('filter_type')
filter_type_abbr = 'L'
if filter_type == 'random_walk':
filter_type_abbr = 'R'
elif filter_type == 'dual_random_walk':
filter_type_abbr = 'DR'
run_id = 'dcrnn_%s_%d_h_%d_%s_lr_%g_bs_%d_%s/' % (
filter_type_abbr, max_diffusion_step, horizon,
structure, learning_rate, batch_size,
time.strftime('%m%d%H%M%S'))
base_dir = kwargs.get('base_dir')
log_dir = os.path.join(base_dir, run_id)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
# def save_model(self, epoch):
# if not os.path.exists('models/'):
# os.makedirs('models/')
#
# config = dict(self._kwargs)
# config['model_state_dict'] = self.dcrnn_model.state_dict()
# config['epoch'] = epoch
# torch.save(config, 'models/epo%d.tar' % epoch)
# self._logger.info("Saved model at {}".format(epoch))
# return 'models/epo%d.tar' % epoch
def save_model(self, epoch):
path = 'models/%s_best.tar' % self.data_type
if not os.path.exists('models/'):
os.makedirs('models/')
config = dict(self._kwargs)
config['model_state_dict'] = self.dcrnn_model.state_dict()
config['epoch'] = epoch
torch.save(config, path)
self._logger.info("Saved model at {}".format(epoch))
return path
# def load_model(self):
# self._setup_graph()
# assert os.path.exists('models/epo%d.tar' % self._epoch_num), 'Weights at epoch %d not found' % self._epoch_num
# checkpoint = torch.load('models/epo%d.tar' % self._epoch_num, map_location='cpu')
# self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
# self._logger.info("Loaded model at {}".format(self._epoch_num))
    def load_lfx(self):
        """Restore model weights and the epoch counter from the per-dataset
        checkpoint written by save_model; raises AssertionError if missing."""
        path = 'models/%s_best.tar' % self.data_type
        # self._setup_graph()
        assert os.path.exists(path), 'Weights not found'
        checkpoint = torch.load(path, map_location='cpu')
        self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
        self._logger.info("Loaded model successfully!")
        self._epoch_num = checkpoint['epoch']
    def _setup_graph(self):
        """Run one forward pass on a validation batch, presumably to
        materialise lazily-created parameters before loading a state dict
        (see LayerParams) — confirm; only referenced from commented-out code.

        NOTE(review): the model is called with a single argument, which does
        not match STMetaNet.forward's signature — this method appears unused.
        """
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['val_loader'].get_iterator()
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                output = self.dcrnn_model(x)  # why is this step needed?? (original author's question)
                break
    def train(self, **kwargs):
        """Public training entry point.

        Config 'train' kwargs override any caller-supplied kwargs before
        delegating to _train.
        """
        kwargs.update(self._train_kwargs)
        return self._train(**kwargs)
    def evaluate(self, dataset='val', batches_seen=0):
        """Evaluate the model on `dataset` without gradient tracking.

        :param dataset: which loader to iterate ('val' or 'test').
        :param batches_seen: forwarded to the model (scheduled-sampling counter).
        :return: (mean loss, dict with per-horizon 'prediction'/'truth' lists).
            NOTE(review): predictions are inverse-transformed by the scaler but
            truths are not — confirm the loader already yields unscaled truths.
        """
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            losses = []
            y_truths = []
            y_preds = []
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                # output = self.dcrnn_model(x)
                output = self.dcrnn_model(self.features, x, target, batches_seen)
                loss = self._compute_loss(y, output)
                losses.append(loss.item())
                y_truths.append(y.cpu())
                y_preds.append(output.cpu())
            mean_loss = np.mean(losses)
            # self._writer.add_scalar('{} loss'.format(dataset), mean_loss, batches_seen)
            y_preds = np.concatenate(y_preds, axis=1)
            y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
            y_truths_scaled = []
            y_preds_scaled = []
            for t in range(y_preds.shape[0]):
                # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
                y_pred = self.standard_scaler.inverse_transform(y_preds[t])
                # y_truths_scaled.append(y_truth)
                y_truths_scaled.append(y_truths[t])
                y_preds_scaled.append(y_pred)
            return mean_loss, {'prediction': y_preds_scaled, 'truth': y_truths_scaled}
def evaluate_test(self, dataset='test'):
    """Print per-horizon test metrics (MAE / MAPE / RMSE) to stdout.

    :param dataset: loader key prefix, defaults to 'test'
    """
    with torch.no_grad():
        self.dcrnn_model = self.dcrnn_model.eval()

        val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
        # losses = []

        y_truths = []
        y_preds = []

        for _, (x, y) in enumerate(val_iterator):
            x, y, target = self._prepare_data(x, y)

            # output = self.dcrnn_model(x)
            output = self.dcrnn_model(self.features, x, target)
            # losses.append(loss.item())

            y_truths.append(y.cpu())
            y_preds.append(output.cpu())

        # mean_loss = np.mean(losses)

        # y_preds = np.concatenate(y_preds, axis=1)
        # y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
        y_preds = torch.cat(y_preds, dim=1)
        y_truths = torch.cat(y_truths, dim=1)  # concatenate on batch dimension

        # y_truths_scaled = []
        # y_preds_scaled = []
        # Report metrics separately for every forecasting horizon step.
        for t in range(y_preds.shape[0]):
            # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
            # y_pred = self.standard_scaler.inverse_transform(y_preds[t])
            # y_truths_scaled.append(y_truth)
            # y_preds_scaled.append(y_pred)
            # loss = self._compute_loss(y_truths[t], y_preds[t])
            # log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}'
            # print(log.format(t + 1, loss.item()))
            metrics = self._compute_metrics(y_truths[t], y_preds[t])
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(t + 1, metrics[0], metrics[1], metrics[2]))
def _train(self, base_lr,
           steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
           test_every_n_epochs=10, epsilon=1e-8, **kwargs):
    """Main training loop with early stopping on validation loss.

    :param base_lr: initial Adam learning rate
    :param steps: MultiStepLR milestones (epoch indices)
    :param patience: epochs without val improvement before early stop
    :param epochs: nominal epoch budget (overridden below — see NOTE)
    :param lr_decay_ratio: LR multiplier at each milestone
    :param log_every: log an epoch summary every N epochs
    :param save_model: truthy -> checkpoint on every val improvement
    :param epsilon: Adam eps
    """
    # steps is used in learning rate - will see if need to use it?
    if self.LOAD_INITIAL:
        # Resuming: take the restored model's current val loss as baseline.
        min_val_loss, _ = self.evaluate(dataset='val')
    else:
        min_val_loss = float('inf')
    wait = 0

    optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
                                                        gamma=lr_decay_ratio)

    self._logger.info('Start training ...')

    # this will fail if model is loaded with a changed batch_size
    num_batches = self._data['train_loader'].num_batch
    self._logger.info("num_batches:{}".format(num_batches))

    batches_seen = num_batches * self._epoch_num
    # NOTE(review): hard-codes the epoch budget to 1000, silently overriding
    # the `epochs` parameter — confirm this is intended.
    epochs = 1000
    for epoch_num in range(self._epoch_num, epochs):

        self.dcrnn_model = self.dcrnn_model.train()

        train_iterator = self._data['train_loader'].get_iterator()
        losses = []

        start_time = time.time()

        for _, (x, y) in enumerate(train_iterator):
            optimizer.zero_grad()

            x, y, target = self._prepare_data(x, y)

            # output = self.dcrnn_model(x, y, batches_seen)
            output = self.dcrnn_model(self.features, x, target, batches_seen)

            if batches_seen == 0:
                # this is a workaround to accommodate dynamically registered parameters in DCGRUCell
                optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
            # loss = self._compute_loss(y, output)
            loss = self._compute_loss(y, output)

            self._logger.debug(loss.item())

            losses.append(loss.item())

            batches_seen += 1
            loss.backward()

            # gradient clipping - this does it in place
            torch.nn.utils.clip_grad_norm_(self.dcrnn_model.parameters(), self.max_grad_norm)

            optimizer.step()
        self._logger.info("epoch complete")
        lr_scheduler.step()
        self._logger.info("evaluating now!")

        val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)

        end_time = time.time()

        # self._writer.add_scalar('training loss',
        #                         np.mean(losses),
        #                         batches_seen)

        if (epoch_num % log_every) == log_every - 1:
            # NOTE(review): scheduler.get_lr() is deprecated in newer PyTorch
            # in favour of get_last_lr(); behaviour kept unchanged here.
            message = 'Epoch [{}/{}] ({}) train_loss: {:.4f}, val_mae: {:.4f}, lr: {:.6f}, ' \
                      '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                       np.mean(losses), val_loss, lr_scheduler.get_lr()[0],
                                       (end_time - start_time))
            self._logger.info(message)

        # if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
        #     test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
        #     message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f},  lr: {:.6f}, ' \
        #               '{:.1f}s'.format(epoch_num, epochs, batches_seen,
        #                                np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
        #                                (end_time - start_time))
        #     self._logger.info(message)

        if val_loss < min_val_loss:
            wait = 0
            if save_model:
                model_file_name = self.save_model(epoch_num)
                self._logger.info(
                    'Val loss decrease from {:.4f} to {:.4f}, '
                    'saving to {}'.format(min_val_loss, val_loss, model_file_name))
            min_val_loss = val_loss

        elif val_loss >= min_val_loss:
            wait += 1
            if wait == patience:
                self._logger.warning('Early stopping at epoch: %d' % epoch_num)
                break
    # After training, reload the best checkpoint and report test metrics.
    self.load_lfx()
    self.evaluate_test(dataset='test')
def _prepare_data(self, x, y):
    """Convert numpy batches to tensors, reshape, and move them to `device`.

    :return: (x, y, target) — see _get_x_y_in_correct_dims for shapes.
    """
    x, y = self._get_x_y(x, y)
    x, y, target = self._get_x_y_in_correct_dims(x, y)
    return x.to(device), y.to(device), target.to(device)
def _get_x_y(self, x, y):
    """Convert numpy batches to float tensors with time as the leading axis.

    :param x: shape (batch_size, seq_len, num_sensor, input_dim)
    :param y: shape (batch_size, horizon, num_sensor, input_dim)
    :returns: x of shape (seq_len, batch_size, num_sensor, input_dim) and
        y of shape (horizon, batch_size, num_sensor, input_dim)
    """
    converted = []
    for label, array in (("X", x), ("y", y)):
        tensor = torch.from_numpy(array).float()
        self._logger.debug("{}: {}".format(label, tensor.size()))
        # Swap batch and time so the models can index by time step.
        converted.append(tensor.permute(1, 0, 2, 3))
    return converted[0], converted[1]
def _get_x_y_in_correct_dims(self, x, y):
    """Flatten the node/feature axes and build the decoder target.

    :param x: shape (seq_len, batch_size, num_sensor, input_dim)
    :param y: shape (horizon, batch_size, num_sensor, input_dim)
    :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
             y: shape (horizon, batch_size, num_sensor * output_dim)
             target: shape (horizon, batch_size, num_sensor * input_dim),
                 with channel 0 scaler-normalised
    """
    batch_size = x.size(1)
    x = x.view(self.seq_len, batch_size, self.num_nodes * self.input_dim)
    # target feeds the decoder: channel 0 is normalised with the training
    # scaler while the remaining channels are passed through unscaled
    # (presumably auxiliary features such as time-of-day — TODO confirm).
    target = torch.cat([self.standard_scaler.transform(y[..., :1]), y[..., 1:]], -1).view(self.horizon, batch_size,
                                                                                          self.num_nodes * self.input_dim)
    # y is the supervision signal, kept in raw units (output_dim channels only).
    y = y[..., :self.output_dim].view(self.horizon, batch_size,
                                      self.num_nodes * self.output_dim)
    return x, y, target
def _compute_loss(self, y_true, y_predicted):
    """Masked MAE between de-normalised predictions and raw ground truth."""
    # y_true = self.standard_scaler.inverse_transform(y_true)
    # Predictions come out of the network in normalised space; y_true is
    # already raw here (see _get_x_y_in_correct_dims).
    y_predicted = self.standard_scaler.inverse_transform(y_predicted)
    return masked_mae_loss(y_predicted, y_true)
def _compute_loss_mse(self, y_true, y_predicted):
    """Unmasked MSE variant of _compute_loss (not used by the training loop above)."""
    # y_true = self.standard_scaler.inverse_transform(y_true)
    y_predicted = self.standard_scaler.inverse_transform(y_predicted)
    return nn.MSELoss()(y_predicted, y_true)
def _compute_metrics(self, y_true, y_predicted):
    """Return (MAE, MAPE, RMSE) comparing de-normalised predictions to raw truth."""
    # Only the predictions need de-normalising; y_true is already raw.
    predictions = self.standard_scaler.inverse_transform(y_predicted)
    return metric(predictions, y_true)
Traffic-Benchmark | Traffic-Benchmark-master/methods/DCRNN/dcrnn_train_pytorch.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
import setproctitle
setproctitle.setproctitle("dcrnn@lifuxian")
def main(args):
    """Load the experiment config, build a DCRNN supervisor and run it.

    :param args: parsed CLI namespace with ``config_filename``,
        ``LOAD_INITIAL`` and ``TEST_ONLY`` attributes.
    """
    with open(args.config_filename) as f:
        # safe_load: the config is plain data; yaml.load without an explicit
        # Loader is deprecated (PyYAML >= 5.1) and can construct arbitrary
        # Python objects from untrusted input.
        supervisor_config = yaml.safe_load(f)

        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

        # Dataset tag ('bay' or 'la') is encoded in the config file name.
        data_type = args.config_filename.split('/')[-1].split('.')[0].split('_')[-1]

        supervisor = DCRNNSupervisor(data_type=data_type, LOAD_INITIAL=args.LOAD_INITIAL,
                                     adj_mx=adj_mx, **supervisor_config)

        if args.TEST_ONLY:
            supervisor.evaluate_test()
        else:
            supervisor.train()
def str2bool(value):
    """Parse a CLI boolean value.

    argparse's ``type=bool`` treats ANY non-empty string (including
    ``'False'``) as True, so ``--TEST_ONLY False`` used to enable the flag.
    This converter parses the text explicitly while remaining
    backward-compatible with the old ``--FLAG True`` usage.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_filename', default=None, type=str,
                        help='Configuration filename for restoring the model.')
    parser.add_argument('--use_cpu_only', default=False, type=str2bool, help='Set to true to only use cpu.')
    parser.add_argument('--LOAD_INITIAL', default=False, type=str2bool, help='If LOAD_INITIAL.')
    parser.add_argument('--TEST_ONLY', default=False, type=str2bool, help='If TEST_ONLY.')
    args = parser.parse_args()
    main(args)
| 1,455 | 38.351351 | 129 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DCRNN/run_demo_pytorch.py | import argparse
import numpy as np
import os
import sys
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
def run_dcrnn(args):
    """Evaluate a pretrained DCRNN on the test split and save predictions.

    :param args: namespace with ``config_filename`` and ``output_filename``.
    """
    with open(args.config_filename) as f:
        # safe_load avoids arbitrary object construction; yaml.load without
        # an explicit Loader is deprecated since PyYAML 5.1.
        supervisor_config = yaml.safe_load(f)

        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

        # NOTE(review): DCRNNSupervisor elsewhere in this repo takes
        # data_type/LOAD_INITIAL positionally — confirm this demo script is
        # still compatible with the current supervisor signature.
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
        mean_score, outputs = supervisor.evaluate('test')
        np.savez_compressed(args.output_filename, **outputs)
        print("MAE : {}".format(mean_score))
        print('Predictions saved as {}.'.format(args.output_filename))
if __name__ == '__main__':
    # Make repo-local packages (lib, model) importable when run from the repo root.
    sys.path.append(os.getcwd())
    parser = argparse.ArgumentParser()
    # NOTE(review): declared type=str although the help text implies a
    # boolean; any non-empty value is truthy downstream — confirm intent.
    parser.add_argument('--use_cpu_only', default=False, type=str, help='Whether to run tensorflow on cpu.')
    parser.add_argument('--config_filename', default='data/model/pretrained/METR-LA/config.yaml', type=str,
                        help='Config file for pretrained model.')
    parser.add_argument('--output_filename', default='data/dcrnn_predictions.npz')
    args = parser.parse_args()
    run_dcrnn(args)
| 1,264 | 36.205882 | 108 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DCRNN/model/pytorch/dcrnn_model.py | import numpy as np
import torch
import torch.nn as nn
from model.pytorch.dcrnn_cell import DCGRUCell
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
    """Return the number of trainable (requires_grad) scalars in *model*."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in trainable)
class Seq2SeqAttrs:
    """Mixin holding hyper-parameters shared by encoder, decoder and model.

    All values are read from ``model_kwargs`` with defaults, except
    ``rnn_units`` which is mandatory (``int(None)`` raises if missing).
    """

    def __init__(self, adj_mx, **model_kwargs):
        get = model_kwargs.get
        self.adj_mx = adj_mx
        self.max_diffusion_step = int(get('max_diffusion_step', 2))
        self.cl_decay_steps = int(get('cl_decay_steps', 1000))
        self.filter_type = get('filter_type', 'laplacian')
        self.num_nodes = int(get('num_nodes', 1))
        self.num_rnn_layers = int(get('num_rnn_layers', 1))
        self.rnn_units = int(get('rnn_units'))
        # Flattened per-layer hidden size: one rnn_units vector per node.
        self.hidden_state_size = self.num_nodes * self.rnn_units
class EncoderModel(nn.Module, Seq2SeqAttrs):
    """Stacked-DCGRU encoder; each forward() call consumes ONE time step."""

    def __init__(self, adj_mx, **model_kwargs):
        # Both bases are initialised explicitly because Seq2SeqAttrs is a
        # plain mixin (no cooperative super chain).
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.input_dim = int(model_kwargs.get('input_dim', 1))
        self.seq_len = int(model_kwargs.get('seq_len'))  # for the encoder
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Encoder forward pass.

        :param inputs: shape (batch_size, self.num_nodes * self.input_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.hidden_state_size)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        batch_size, _ = inputs.size()
        if hidden_state is None:
            # First time step of the sequence: start from an all-zero state.
            hidden_state = torch.zeros((self.num_rnn_layers, batch_size, self.hidden_state_size),
                                       device=device)
        hidden_states = []
        output = inputs
        # Feed each layer's new state upward as the next layer's input.
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state

        return output, torch.stack(hidden_states)  # runs in O(num_layers) so not too slow
class DecoderModel(nn.Module, Seq2SeqAttrs):
    """Stacked-DCGRU decoder with a linear projection to output_dim per node."""

    def __init__(self, adj_mx, **model_kwargs):
        # super().__init__(is_training, adj_mx, **model_kwargs)
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.output_dim = int(model_kwargs.get('output_dim', 1))
        self.horizon = int(model_kwargs.get('horizon', 1))  # for the decoder
        self.projection_layer = nn.Linear(self.rnn_units, self.output_dim)
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Decoder forward pass.

        :param inputs: shape (batch_size, self.num_nodes * self.output_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.num_nodes * self.output_dim)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        # NOTE(review): unlike the encoder, a None hidden_state is NOT
        # handled here (hidden_state[layer_num] would fail); in practice the
        # seq2seq model always passes the encoder's final state.
        hidden_states = []
        output = inputs
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state

        # Map each node's hidden vector down to the per-node output size.
        projected = self.projection_layer(output.view(-1, self.rnn_units))
        output = projected.view(-1, self.num_nodes * self.output_dim)

        return output, torch.stack(hidden_states)
class DCRNNModel(nn.Module, Seq2SeqAttrs):
    """DCRNN sequence-to-sequence model: DCGRU encoder + DCGRU decoder,
    optionally trained with scheduled sampling (curriculum learning)."""

    def __init__(self, adj_mx, logger, **model_kwargs):
        super().__init__()
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.encoder_model = EncoderModel(adj_mx, **model_kwargs)
        self.decoder_model = DecoderModel(adj_mx, **model_kwargs)
        self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))
        self.use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))
        self._logger = logger

    def _compute_sampling_threshold(self, batches_seen):
        # Inverse-sigmoid decay of the teacher-forcing probability: close to
        # 1 early in training, decaying towards 0 as batches_seen grows.
        return self.cl_decay_steps / (
                self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))

    def encoder(self, inputs):
        """
        encoder forward pass on t time steps
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        """
        encoder_hidden_state = None
        # Unroll the encoder over the input window, threading the state.
        for t in range(self.encoder_model.seq_len):
            _, encoder_hidden_state = self.encoder_model(inputs[t], encoder_hidden_state)

        return encoder_hidden_state

    def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
        """
        Decoder forward pass
        :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference]
        :param batches_seen: global step [optional, not exist for inference]
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        # GO symbol: an all-zero frame kicks off autoregressive decoding.
        go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
                                device=device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol

        outputs = []

        for t in range(self.decoder_model.horizon):
            decoder_output, decoder_hidden_state = self.decoder_model(decoder_input,
                                                                      decoder_hidden_state)
            decoder_input = decoder_output
            outputs.append(decoder_output)
            if self.training and self.use_curriculum_learning:
                # Scheduled sampling: with decaying probability, feed the
                # ground-truth frame instead of the model's own prediction.
                c = np.random.uniform(0, 1)
                if c < self._compute_sampling_threshold(batches_seen):
                    decoder_input = labels[t]
        outputs = torch.stack(outputs)
        return outputs

    def forward(self, inputs, labels=None, batches_seen=None):
        """
        seq2seq forward pass
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :param labels: shape (horizon, batch_size, num_sensor * output)
        :param batches_seen: batches seen till now
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        encoder_hidden_state = self.encoder(inputs)
        self._logger.debug("Encoder complete, starting decoder")
        outputs = self.decoder(encoder_hidden_state, labels, batches_seen=batches_seen)
        self._logger.debug("Decoder complete")
        if batches_seen == 0:
            # Parameter count is only accurate after the first forward pass,
            # because DCGRUCell registers its weights lazily.
            self._logger.info(
                "Total trainable parameters {}".format(count_parameters(self))
            )
        return outputs
| 7,642 | 44.494048 | 119 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DCRNN/model/pytorch/loss.py | import torch
def masked_mae_loss(y_pred, y_true):
    """Mean absolute error that ignores entries whose label is zero.

    Zero labels (missing readings) are masked out, and the mask is
    renormalised to unit mean so the loss scale matches an unmasked MAE.
    """
    mask = (y_true != 0).float()
    weight = mask / mask.mean()
    loss = torch.abs(y_pred - y_true) * weight
    # If every label is zero, mask.mean() == 0 and the division yields NaNs;
    # NaN != NaN, so this zeroes them out.
    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    loss[loss != loss] = 0
    return loss.mean()
| 309 | 24.833333 | 88 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DCRNN/model/pytorch/dcrnn_cell.py | import numpy as np
import torch
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
    """Lazily creates, caches and registers weights/biases for a layer type.

    Each tensor is registered on the owning nn.Module so optimisers and
    state_dict can see it.  The registered names embed the layer type and
    the shape/length, so checkpoint compatibility depends on these exact
    format strings.
    """

    def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
        self._rnn_network = rnn_network
        self._params_dict = {}   # maps shape tuple -> weight Parameter
        self._biases_dict = {}   # maps length -> bias Parameter
        self._type = layer_type

    def get_weights(self, shape):
        """Return (creating and registering on first use) a Xavier-initialised
        weight Parameter of the given *shape*."""
        if shape not in self._params_dict:
            nn_param = torch.nn.Parameter(torch.empty(*shape, device=device))
            torch.nn.init.xavier_normal_(nn_param)
            self._params_dict[shape] = nn_param
            self._rnn_network.register_parameter('{}_weight_{}'.format(self._type, str(shape)),
                                                 nn_param)
        return self._params_dict[shape]

    def get_biases(self, length, bias_start=0.0):
        """Return (creating and registering on first use) a bias Parameter of
        the given *length*, constant-initialised to *bias_start*."""
        if length not in self._biases_dict:
            biases = torch.nn.Parameter(torch.empty(length, device=device))
            torch.nn.init.constant_(biases, bias_start)
            self._biases_dict[length] = biases
            self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, str(length)),
                                                 biases)

        return self._biases_dict[length]
class DCGRUCell(torch.nn.Module):
    """GRU cell whose matrix multiplications are replaced by graph diffusion
    convolutions over the sensor adjacency matrix (DCRNN building block).

    Weights are created lazily on the first forward pass via LayerParams.
    """

    def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearity='tanh',
                 filter_type="laplacian", use_gc_for_ru=True):
        """

        :param num_units:
        :param adj_mx:
        :param max_diffusion_step:
        :param num_nodes:
        :param nonlinearity:
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """

        super().__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        # support other nonlinearities up here?
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru

        # Build the diffusion support matrices once; "dual_random_walk" uses
        # both forward and reverse transition matrices.
        supports = []
        if filter_type == "laplacian":
            supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))

        self._fc_params = LayerParams(self, 'fc')
        self._gconv_params = LayerParams(self, 'gconv')

    @staticmethod
    def _build_sparse_matrix(L):
        """Convert a scipy sparse matrix to a torch sparse COO tensor on `device`."""
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
        indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
        L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
        return L

    def forward(self, inputs, hx):
        """Gated recurrent unit (GRU) with Graph Convolution.
        :param inputs: (B, num_nodes * input_dim)
        :param hx: (B, num_nodes * rnn_units)

        :return
        - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
        """
        # Reset (r) and update (u) gates are computed together, then split.
        output_size = 2 * self._num_units
        if self._use_gc_for_ru:
            fn = self._gconv
        else:
            fn = self._fc
        value = torch.sigmoid(fn(inputs, hx, output_size, bias_start=1.0))
        value = torch.reshape(value, (-1, self._num_nodes, output_size))
        r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1)
        r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
        u = torch.reshape(u, (-1, self._num_nodes * self._num_units))

        # Candidate state from the reset-gated previous state.
        c = self._gconv(inputs, r * hx, self._num_units)
        if self._activation is not None:
            c = self._activation(c)

        # Standard GRU interpolation between old state and candidate.
        new_state = u * hx + (1.0 - u) * c
        return new_state

    @staticmethod
    def _concat(x, x_):
        # Stack a new diffusion term onto the leading "matrix" axis.
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)

    def _fc(self, inputs, state, output_size, bias_start=0.0):
        """Plain per-node fully-connected alternative to the graph conv
        (used for the gates when use_gc_for_ru=False)."""
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
        state = torch.reshape(state, (batch_size * self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=-1)
        input_size = inputs_and_state.shape[-1]
        weights = self._fc_params.get_weights((input_size, output_size))
        # NOTE(review): this applies a sigmoid here AND the caller applies
        # another sigmoid on top — confirm the double squashing is intended.
        value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
        biases = self._fc_params.get_biases(output_size, bias_start)
        value += biases
        return value

    def _gconv(self, inputs, state, output_size, bias_start=0.0):
        """Diffusion graph convolution over [inputs ‖ state].

        Builds the Chebyshev-style recurrence x_k = 2·S·x_{k-1} − x_{k-2}
        for each support S up to max_diffusion_step, then mixes all terms
        with a learned weight matrix.
        """
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
        state = torch.reshape(state, (batch_size, self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=2)
        input_size = inputs_and_state.size(2)

        x = inputs_and_state
        # Move nodes first so the sparse supports can left-multiply.
        x0 = x.permute(1, 2, 0)  # (num_nodes, total_arg_size, batch_size)
        x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)

        if self._max_diffusion_step == 0:
            pass
        else:
            for support in self._supports:
                x1 = torch.sparse.mm(support, x0)
                x = self._concat(x, x1)

                for k in range(2, self._max_diffusion_step + 1):
                    x2 = 2 * torch.sparse.mm(support, x1) - x0
                    x = self._concat(x, x2)
                    x1, x0 = x2, x1

        num_matrices = len(self._supports) * self._max_diffusion_step + 1  # Adds for x itself.
        x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
        x = x.permute(3, 1, 2, 0)  # (batch_size, num_nodes, input_size, order)
        x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])

        weights = self._gconv_params.get_weights((input_size * num_matrices, output_size))
        x = torch.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)

        biases = self._gconv_params.get_biases(output_size, bias_start)
        x += biases
        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return torch.reshape(x, [batch_size, self._num_nodes * output_size])
| 6,939 | 41.576687 | 105 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/DCRNN/model/pytorch/utils.py | import torch
import numpy as np
def masked_mae_loss(y_pred, y_true):
    """MAE over entries with non-zero labels; mask renormalised to unit mean."""
    valid = (y_true != 0).float()
    valid /= valid.mean()
    err = torch.abs(y_pred - y_true)
    err = err * valid
    # All-zero labels make valid.mean() == 0, producing NaN weights; since
    # NaN != NaN this assignment zeroes them out.
    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    err[err != err] = 0
    return err.mean()
def masked_mse(preds, labels, null_val=np.nan):
    """Mean squared error over entries whose label is not *null_val*.

    With the default null_val=nan, NaN labels are masked; otherwise entries
    equal to null_val (e.g. 0.0 for missing readings) are excluded.  The
    mask is renormalised so the loss scale matches an unmasked mean.
    """
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels != null_val
    mask = mask.float()
    mask = mask / torch.mean(mask)
    # Guard against an all-null batch (0/0 above gives NaN weights).
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    loss = ((preds - labels) ** 2) * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)
def masked_rmse(preds, labels, null_val=np.nan):
    """Root of the masked MSE (see masked_mse for the masking rules)."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return torch.sqrt(mse)
def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error over entries whose label is not *null_val*.

    Masking and renormalisation follow the same scheme as masked_mse.
    """
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels != null_val
    mask = mask.float()
    mask = mask / torch.mean(mask)
    # Guard against an all-null batch (0/0 above gives NaN weights).
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    loss = torch.abs(preds - labels) * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)
def masked_mape(preds, labels, null_val=np.nan):
    """Mean absolute percentage error over entries whose label is not *null_val*.

    Masked entries with label 0 produce inf ratios which become NaN after
    weighting by the zero mask; the final where() zeroes those out.
    """
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels != null_val
    mask = mask.float()
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    ratio = torch.abs(preds - labels) / labels
    loss = ratio * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)
def metric(pred, real):
    """Return (MAE, MAPE, RMSE) as Python floats, treating 0.0 labels as missing."""
    scorers = (masked_mae, masked_mape, masked_rmse)
    return tuple(score(pred, real, 0.0).item() for score in scorers)
def get_normalized_adj(A):
    """Return the symmetrically degree-normalised adjacency matrix.

    Computes D^{-1/2} (A + I) D^{-1/2} where D is the degree matrix of
    A + I; degrees are clipped from below at 1e-4 to avoid division by zero.
    """
    A_hat = A + np.diag(np.ones(A.shape[0], dtype=np.float32))
    degrees = np.array(np.sum(A_hat, axis=1)).reshape((-1,))
    degrees[degrees <= 10e-5] = 10e-5  # Prevent infs
    inv_sqrt = np.reciprocal(np.sqrt(degrees))
    A_wave = np.multiply(np.multiply(inv_sqrt.reshape((-1, 1)), A_hat),
                         inv_sqrt.reshape((1, -1)))
    return A_wave
Traffic-Benchmark | Traffic-Benchmark-master/methods/DCRNN/model/pytorch/dcrnn_supervisor.py | import os
import time
import numpy as np
import torch
# from torch.utils.tensorboard import SummaryWriter
from lib import utils
from model.pytorch.dcrnn_model import DCRNNModel
from model.pytorch.utils import masked_mae_loss, metric, get_normalized_adj
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DCRNNSupervisor:
def __init__(self, data_type, LOAD_INITIAL, adj_mx, **kwargs):
    """Wire up data loaders, model, logging, and optionally a checkpoint.

    :param data_type: dataset tag (e.g. 'la' or 'bay'); selects the
        checkpoint file models/<data_type>_best.tar
    :param LOAD_INITIAL: when truthy, restore the best saved checkpoint
    :param adj_mx: sensor-graph adjacency matrix handed to the model
    :param kwargs: parsed YAML config with 'data', 'model', 'train' sections
    """
    self._kwargs = kwargs
    self._data_kwargs = kwargs.get('data')
    self._model_kwargs = kwargs.get('model')
    self._train_kwargs = kwargs.get('train')

    self.max_grad_norm = self._train_kwargs.get('max_grad_norm', 1.)

    # logging.
    self._log_dir = self._get_log_dir(kwargs)
    # self._writer = SummaryWriter('runs/' + self._log_dir)

    log_level = self._kwargs.get('log_level', 'INFO')
    self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)

    # data set
    self._data = utils.load_dataset(**self._data_kwargs)
    self.standard_scaler = self._data['scaler']

    self.num_nodes = int(self._model_kwargs.get('num_nodes', 1))
    self.input_dim = int(self._model_kwargs.get('input_dim', 1))
    self.seq_len = int(self._model_kwargs.get('seq_len'))  # for the encoder
    self.output_dim = int(self._model_kwargs.get('output_dim', 1))
    self.use_curriculum_learning = bool(
        self._model_kwargs.get('use_curriculum_learning', False))
    self.horizon = int(self._model_kwargs.get('horizon', 1))  # for the decoder

    # setup model
    dcrnn_model = DCRNNModel(adj_mx, self._logger, **self._model_kwargs)
    self.dcrnn_model = dcrnn_model.cuda() if torch.cuda.is_available() else dcrnn_model
    self._logger.info("Model created")

    self._epoch_num = self._train_kwargs.get('epoch', 0)
    # if self._epoch_num > 0:  # in fact the default value of self._epoch_num is indeed 0
    #     self.load_model()
    self.data_type = data_type
    self.LOAD_INITIAL = LOAD_INITIAL
    if LOAD_INITIAL:
        self.load_lfx()
@staticmethod
def _get_log_dir(kwargs):
    """Return (and create if needed) this run's log directory.

    Uses train.log_dir when configured; otherwise derives a unique name
    from the main hyper-parameters plus a timestamp, under base_dir.
    """
    log_dir = kwargs['train'].get('log_dir')
    if log_dir is None:
        batch_size = kwargs['data'].get('batch_size')
        learning_rate = kwargs['train'].get('base_lr')
        max_diffusion_step = kwargs['model'].get('max_diffusion_step')
        num_rnn_layers = kwargs['model'].get('num_rnn_layers')
        rnn_units = kwargs['model'].get('rnn_units')
        # e.g. '64-64' for two layers of 64 units.
        structure = '-'.join(
            ['%d' % rnn_units for _ in range(num_rnn_layers)])
        horizon = kwargs['model'].get('horizon')
        filter_type = kwargs['model'].get('filter_type')
        # Abbreviate the diffusion filter type for the directory name.
        filter_type_abbr = 'L'
        if filter_type == 'random_walk':
            filter_type_abbr = 'R'
        elif filter_type == 'dual_random_walk':
            filter_type_abbr = 'DR'
        run_id = 'dcrnn_%s_%d_h_%d_%s_lr_%g_bs_%d_%s/' % (
            filter_type_abbr, max_diffusion_step, horizon,
            structure, learning_rate, batch_size,
            time.strftime('%m%d%H%M%S'))
        base_dir = kwargs.get('base_dir')
        log_dir = os.path.join(base_dir, run_id)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    return log_dir
# def save_model(self, epoch):
# if not os.path.exists('models/'):
# os.makedirs('models/')
#
# config = dict(self._kwargs)
# config['model_state_dict'] = self.dcrnn_model.state_dict()
# config['epoch'] = epoch
# torch.save(config, 'models/epo%d.tar' % epoch)
# self._logger.info("Saved model at {}".format(epoch))
# return 'models/epo%d.tar' % epoch
def save_model(self, epoch):
    """Persist model weights plus the full config as models/<data_type>_best.tar.

    :param epoch: epoch number stored alongside the weights so a later
        load can resume the epoch counter.
    :return: path of the written checkpoint.
    """
    path = 'models/%s_best.tar' % self.data_type
    if not os.path.exists('models/'):
        os.makedirs('models/')

    # Bundle the run config with the weights so the checkpoint is
    # self-describing.
    config = dict(self._kwargs)
    config['model_state_dict'] = self.dcrnn_model.state_dict()
    config['epoch'] = epoch
    torch.save(config, path)
    self._logger.info("Saved model at {}".format(epoch))
    return path
# def load_model(self):
# self._setup_graph()
# assert os.path.exists('models/epo%d.tar' % self._epoch_num), 'Weights at epoch %d not found' % self._epoch_num
# checkpoint = torch.load('models/epo%d.tar' % self._epoch_num, map_location='cpu')
# self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
# self._logger.info("Loaded model at {}".format(self._epoch_num))
def load_lfx(self):
    """Restore the best checkpoint for this dataset.

    _setup_graph() runs first so DCGRUCell's lazily-created parameters
    exist before load_state_dict is called.
    """
    path = 'models/%s_best.tar' % self.data_type
    self._setup_graph()
    # NOTE(review): `assert` is stripped under `python -O`; an explicit
    # FileNotFoundError would be more robust here.
    assert os.path.exists(path), 'Weights not found'
    checkpoint = torch.load(path, map_location='cpu')
    self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
    self._logger.info("Loaded model successfully!")
    self._epoch_num = checkpoint['epoch']
def _setup_graph(self):
    """Run one dummy forward pass so lazily-registered parameters exist.

    DCGRUCell creates its weights on first use, so a checkpoint can only
    be loaded after one batch has been pushed through the model.
    """
    with torch.no_grad():
        self.dcrnn_model = self.dcrnn_model.eval()

        val_iterator = self._data['val_loader'].get_iterator()

        for _, (x, y) in enumerate(val_iterator):
            x, y = self._prepare_data(x, y)
            output = self.dcrnn_model(x)  # why is this step needed??
            break
def train(self, **kwargs):
    """Public training entry point; config-file train kwargs take precedence."""
    kwargs.update(self._train_kwargs)
    return self._train(**kwargs)
def evaluate(self, dataset='val', batches_seen=0):
    """Evaluate the model on a data split.

    :param dataset: loader key prefix ('val' or 'test')
    :param batches_seen: global batch counter (only used by the commented
        tensorboard logging here)
    :return: (mean loss over batches, dict with per-horizon de-normalised
        'prediction' and 'truth' arrays)
    """
    with torch.no_grad():
        self.dcrnn_model = self.dcrnn_model.eval()

        val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
        losses = []

        y_truths = []
        y_preds = []

        for _, (x, y) in enumerate(val_iterator):
            x, y = self._prepare_data(x, y)

            output = self.dcrnn_model(x)
            loss = self._compute_loss(y, output)
            losses.append(loss.item())

            y_truths.append(y.cpu())
            y_preds.append(output.cpu())

        mean_loss = np.mean(losses)

        # self._writer.add_scalar('{} loss'.format(dataset), mean_loss, batches_seen)

        y_preds = np.concatenate(y_preds, axis=1)
        y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension

        y_truths_scaled = []
        y_preds_scaled = []
        for t in range(y_preds.shape[0]):
            # Undo the normalisation per horizon step before reporting.
            y_truth = self.standard_scaler.inverse_transform(y_truths[t])
            y_pred = self.standard_scaler.inverse_transform(y_preds[t])
            y_truths_scaled.append(y_truth)
            y_preds_scaled.append(y_pred)

        return mean_loss, {'prediction': y_preds_scaled, 'truth': y_truths_scaled}
def evaluate_test(self, dataset='test'):
    """Print per-horizon test metrics (MAE / MAPE / RMSE) to stdout.

    :param dataset: loader key prefix, defaults to 'test'
    """
    with torch.no_grad():
        self.dcrnn_model = self.dcrnn_model.eval()

        val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
        # losses = []

        y_truths = []
        y_preds = []

        for _, (x, y) in enumerate(val_iterator):
            x, y = self._prepare_data(x, y)

            output = self.dcrnn_model(x)
            # losses.append(loss.item())

            y_truths.append(y.cpu())
            y_preds.append(output.cpu())

        # mean_loss = np.mean(losses)

        # y_preds = np.concatenate(y_preds, axis=1)
        # y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
        y_preds = torch.cat(y_preds, dim=1)
        y_truths = torch.cat(y_truths, dim=1)  # concatenate on batch dimension

        # y_truths_scaled = []
        # y_preds_scaled = []
        # Report metrics separately for each forecasting horizon step.
        for t in range(y_preds.shape[0]):
            # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
            # y_pred = self.standard_scaler.inverse_transform(y_preds[t])
            # y_truths_scaled.append(y_truth)
            # y_preds_scaled.append(y_pred)
            # loss = self._compute_loss(y_truths[t], y_preds[t])
            # log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}'
            # print(log.format(t + 1, loss.item()))
            metrics = self._compute_metrics(y_truths[t], y_preds[t])
            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
            print(log.format(t + 1, metrics[0], metrics[1], metrics[2]))
    def _train(self, base_lr,
               steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
               test_every_n_epochs=10, epsilon=1e-8, **kwargs):
        """Training loop: Adam + MultiStepLR decay, best-val checkpointing and
        early stopping, followed by a final test-set evaluation.

        :param base_lr: initial Adam learning rate.
        :param steps: epoch milestones at which the LR is multiplied by lr_decay_ratio.
        :param patience: epochs without val improvement before early stopping.
        :param epochs: nominal epoch budget — NOTE(review): overridden to 1000 below.
        :param lr_decay_ratio: multiplicative LR decay applied at each milestone.
        :param log_every: emit the epoch summary every `log_every` epochs.
        :param save_model: if truthy, checkpoint whenever the val loss improves.
        :param test_every_n_epochs: unused (the periodic test block is commented out).
        :param epsilon: Adam eps term.
        """
        # steps is used in learning rate - will see if need to use it?
        if self.LOAD_INITIAL:
            # resuming: the loaded model's current val loss is the bar to beat
            min_val_loss, _ = self.evaluate(dataset='val')
        else:
            min_val_loss = float('inf')
        wait = 0  # epochs elapsed since the last val-loss improvement
        optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
                                                            gamma=lr_decay_ratio)
        self._logger.info('Start training ...')
        # this will fail if model is loaded with a changed batch_size
        num_batches = self._data['train_loader'].num_batch
        self._logger.info("num_batches:{}".format(num_batches))
        # global step counter, continued from a resumed epoch if any
        batches_seen = num_batches * self._epoch_num
        # NOTE(review): hard-coded override of the `epochs` parameter above.
        epochs = 1000
        for epoch_num in range(self._epoch_num, epochs):
            self.dcrnn_model = self.dcrnn_model.train()
            train_iterator = self._data['train_loader'].get_iterator()
            losses = []
            start_time = time.time()
            count_lfx = 0
            for _, (x, y) in enumerate(train_iterator):
                optimizer.zero_grad()
                x, y = self._prepare_data(x, y)
                output = self.dcrnn_model(x, y, batches_seen)
                if batches_seen == 0:
                    # this is a workaround to accommodate dynamically registered parameters in DCGRUCell
                    optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
                loss = self._compute_loss(y, output)
                self._logger.debug(loss.item())
                losses.append(loss.item())
                batches_seen += 1
                loss.backward()
                count_lfx+=1
                # print(count_lfx)
                # gradient clipping - this does it in place
                torch.nn.utils.clip_grad_norm_(self.dcrnn_model.parameters(), self.max_grad_norm)
                optimizer.step()
            self._logger.info("epoch complete")
            lr_scheduler.step()
            self._logger.info("evaluating now!")
            val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)
            end_time = time.time()
            # self._writer.add_scalar('training loss',
            #                         np.mean(losses),
            #                         batches_seen)
            if (epoch_num % log_every) == log_every - 1:
                message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, val_mae: {:.4f}, lr: {:.6f}, ' \
                          '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                           np.mean(losses), val_loss, lr_scheduler.get_lr()[0],
                                           (end_time - start_time))
                self._logger.info(message)
            # if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
            #     test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
            #     message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, lr: {:.6f}, ' \
            #               '{:.1f}s'.format(epoch_num, epochs, batches_seen,
            #                                np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
            #                                (end_time - start_time))
            #     self._logger.info(message)
            # checkpoint on improvement; otherwise count towards early stopping
            if val_loss < min_val_loss:
                wait = 0
                if save_model:
                    model_file_name = self.save_model(epoch_num)
                    self._logger.info(
                        'Val loss decrease from {:.4f} to {:.4f}, '
                        'saving to {}'.format(min_val_loss, val_loss, model_file_name))
                min_val_loss = val_loss
            elif val_loss >= min_val_loss:
                wait += 1
                if wait == patience:
                    self._logger.warning('Early stopping at epoch: %d' % epoch_num)
                    break
        # reload the best checkpoint (presumably — load_lfx is defined elsewhere;
        # TODO confirm) and report the final per-horizon test metrics
        self.load_lfx()
        self.evaluate_test(dataset='test')
def _prepare_data(self, x, y):
x, y = self._get_x_y(x, y)
x, y = self._get_x_y_in_correct_dims(x, y)
return x.to(device), y.to(device)
def _get_x_y(self, x, y):
"""
:param x: shape (batch_size, seq_len, num_sensor, input_dim)
:param y: shape (batch_size, horizon, num_sensor, input_dim)
:returns x shape (seq_len, batch_size, num_sensor, input_dim)
y shape (horizon, batch_size, num_sensor, input_dim)
"""
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).float()
self._logger.debug("X: {}".format(x.size()))
self._logger.debug("y: {}".format(y.size()))
x = x.permute(1, 0, 2, 3)
y = y.permute(1, 0, 2, 3)
return x, y
def _get_x_y_in_correct_dims(self, x, y):
"""
:param x: shape (seq_len, batch_size, num_sensor, input_dim)
:param y: shape (horizon, batch_size, num_sensor, input_dim)
:return: x: shape (seq_len, batch_size, num_sensor * input_dim)
y: shape (horizon, batch_size, num_sensor * output_dim)
"""
batch_size = x.size(1)
x = x.view(self.seq_len, batch_size, self.num_nodes * self.input_dim)
y = y[..., :self.output_dim].view(self.horizon, batch_size,
self.num_nodes * self.output_dim)
return x, y
def _compute_loss(self, y_true, y_predicted):
y_true = self.standard_scaler.inverse_transform(y_true)
y_predicted = self.standard_scaler.inverse_transform(y_predicted)
return masked_mae_loss(y_predicted, y_true)
def _compute_metrics(self, y_true, y_predicted):
y_true = self.standard_scaler.inverse_transform(y_true)
y_predicted = self.standard_scaler.inverse_transform(y_predicted)
return metric(y_predicted, y_true) | 14,986 | 39.287634 | 129 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/Graph-WaveNet/engine.py | import torch.optim as optim
from model import *
import util
class trainer():
    """Bundles a Graph WaveNet model with its optimizer, masked-MAE loss,
    gradient clipping and per-batch train/eval steps."""

    def __init__(self, scaler, in_dim, seq_length, num_nodes, nhid, dropout, lrate, wdecay, device, supports, gcn_bool, addaptadj, aptinit):
        """
        :param scaler: StandardScaler used to de-normalize predictions before metrics.
        :param in_dim: number of input features per node/timestep.
        :param seq_length: prediction horizon (model output channels).
        :param num_nodes: number of graph nodes (sensors).
        :param nhid: base channel width; skip/end widths are derived from it.
        :param lrate / wdecay: Adam learning rate and weight decay.
        :param supports / gcn_bool / addaptadj / aptinit: graph-convolution config
            forwarded to gwnet.
        """
        self.model = gwnet(device, num_nodes, dropout, supports=supports, gcn_bool=gcn_bool, addaptadj=addaptadj, aptinit=aptinit, in_dim=in_dim, out_dim=seq_length, residual_channels=nhid, dilation_channels=nhid, skip_channels=nhid * 8, end_channels=nhid * 16)
        self.model.to(device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=lrate, weight_decay=wdecay)
        self.loss = util.masked_mae
        self.scaler = scaler
        self.clip = 5  # max gradient norm; set to None to disable clipping

    def _predict(self, input):
        """Pad the time axis by one step, run the model and move the horizon
        axis back: returns (batch, horizon, num_nodes, 1)."""
        padded = nn.functional.pad(input, (1, 0, 0, 0))
        return self.model(padded).transpose(1, 3)

    def train(self, input, real_val):
        """One optimization step on a batch.

        :param input: (batch, in_dim, num_nodes, seq_len) tensor.
        :param real_val: (batch, num_nodes, horizon) ground truth.
        :return: (mae, mape, rmse) on de-normalized values.
        """
        self.model.train()
        self.optimizer.zero_grad()
        output = self._predict(input)
        real = torch.unsqueeze(real_val, dim=1)
        predict = self.scaler.inverse_transform(output)
        loss = self.loss(predict, real, 0.0)
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mape, rmse

    def eval(self, input, real_val):
        """Evaluate one batch; same metrics as train() but no parameter update.

        Fix: wrap the forward pass in torch.no_grad() — the original built an
        autograd graph during evaluation, wasting memory for gradients that
        were never used.
        """
        self.model.eval()
        with torch.no_grad():
            output = self._predict(input)
            real = torch.unsqueeze(real_val, dim=1)
            predict = self.scaler.inverse_transform(output)
            loss = self.loss(predict, real, 0.0)
            mape = util.masked_mape(predict, real, 0.0).item()
            rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mape, rmse
| 1,963 | 43.636364 | 261 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/Graph-WaveNet/test.py | import util
import argparse
from model import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Command-line configuration for evaluating a trained Graph WaveNet checkpoint.
parser = argparse.ArgumentParser()
parser.add_argument('--device',type=str,default='cuda:3',help='')
parser.add_argument('--data',type=str,default='data/METR-LA',help='data path')
parser.add_argument('--adjdata',type=str,default='data/sensor_graph/adj_mx.pkl',help='adj data path')
parser.add_argument('--adjtype',type=str,default='doubletransition',help='adj type')
parser.add_argument('--gcn_bool',action='store_true',help='whether to add graph convolution layer')
parser.add_argument('--aptonly',action='store_true',help='whether only adaptive adj')
parser.add_argument('--addaptadj',action='store_true',help='whether add adaptive adj')
parser.add_argument('--randomadj',action='store_true',help='whether random initialize adaptive adj')
parser.add_argument('--seq_length',type=int,default=12,help='')
parser.add_argument('--nhid',type=int,default=32,help='')
parser.add_argument('--in_dim',type=int,default=2,help='inputs dimension')
parser.add_argument('--num_nodes',type=int,default=207,help='number of nodes')
parser.add_argument('--batch_size',type=int,default=64,help='batch size')
parser.add_argument('--learning_rate',type=float,default=0.001,help='learning rate')
parser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')
parser.add_argument('--weight_decay',type=float,default=0.0001,help='weight decay rate')
parser.add_argument('--checkpoint',type=str,help='')
parser.add_argument('--plotheatmap',type=str,default='True',help='')
# parsed once at import time; every function below reads this module-level `args`
args = parser.parse_args()
def main():
    """Load a trained checkpoint, score it on the test split per horizon,
    optionally plot the learned adaptive adjacency, and dump two sample
    prediction series to ./wave.csv."""
    device = torch.device(args.device)
    _, _, adj_mx = util.load_adj(args.adjdata,args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]
    if args.aptonly:
        # adaptive adjacency only: drop the precomputed supports entirely
        supports = None
    model = gwnet(device, args.num_nodes, args.dropout, supports=supports, gcn_bool=args.gcn_bool, addaptadj=args.addaptadj, aptinit=adjinit)
    model.to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()
    print('model load successfully')
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    outputs = []
    # ground truth: (samples, num_nodes, horizon) after moving features to dim 1
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1,3)[:,0,:,:]
    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1,3)
        with torch.no_grad():
            preds = model(testx).transpose(1,3)
        outputs.append(preds.squeeze())
    yhat = torch.cat(outputs,dim=0)
    # trim the padding the DataLoader appended to fill the last batch
    yhat = yhat[:realy.size(0),...]
    amae = []
    amape = []
    armse = []
    # per-horizon metrics over the 12 prediction steps
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:,:,i])
        real = realy[:,:,i]
        metrics = util.metric(pred,real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i+1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])
    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))
    if args.plotheatmap == "True":
        # normalized adaptive adjacency learned from the node embeddings
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)), dim=1)
        device = torch.device('cpu')
        # NOTE(review): adp.to(device) is a no-op here — Tensor.to returns a new
        # tensor and the result is discarded; the .cpu() below does the move.
        adp.to(device)
        adp = adp.cpu().detach().numpy()
        adp = adp*(1/np.max(adp))
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb"+ '.pdf')
    # NOTE(review): node index 99 and horizons 3/12 are hard-coded sample series
    y12 = realy[:,99,11].cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:,99,11]).cpu().detach().numpy()
    y3 = realy[:,99,2].cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:,99,2]).cpu().detach().numpy()
    df2 = pd.DataFrame({'real12':y12,'pred12':yhat12, 'real3': y3, 'pred3':yhat3})
    df2.to_csv('./wave.csv',index=False)
# Script entry point: evaluate the checkpoint given on the command line.
if __name__ == "__main__":
    main()
| 4,230 | 36.776786 | 142 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/Graph-WaveNet/train_demo.py | import torch
import numpy as np
import argparse
import time
import util
import matplotlib.pyplot as plt
from engine import trainer
# Command-line configuration for the (demo) Graph WaveNet training script.
parser = argparse.ArgumentParser()
parser.add_argument('--device',type=str,default='cuda:3',help='')
parser.add_argument('--data',type=str,default='data/METR-LA',help='data path')
parser.add_argument('--adjdata',type=str,default='data/sensor_graph/adj_mx.pkl',help='adj data path')
parser.add_argument('--adjtype',type=str,default='doubletransition',help='adj type')
parser.add_argument('--gcn_bool',action='store_true',help='whether to add graph convolution layer')
parser.add_argument('--aptonly',action='store_true',help='whether only adaptive adj')
parser.add_argument('--addaptadj',action='store_true',help='whether add adaptive adj')
parser.add_argument('--randomadj',action='store_true',help='whether random initialize adaptive adj')
parser.add_argument('--seq_length',type=int,default=12,help='')
parser.add_argument('--nhid',type=int,default=32,help='')
parser.add_argument('--in_dim',type=int,default=2,help='inputs dimension')
parser.add_argument('--num_nodes',type=int,default=207,help='number of nodes')
parser.add_argument('--batch_size',type=int,default=64,help='batch size')
parser.add_argument('--learning_rate',type=float,default=0.001,help='learning rate')
parser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')
parser.add_argument('--weight_decay',type=float,default=0.0001,help='weight decay rate')
parser.add_argument('--epochs',type=int,default=100,help='')
parser.add_argument('--print_every',type=int,default=50,help='')
#parser.add_argument('--seed',type=int,default=99,help='random seed')
parser.add_argument('--save',type=str,default='./garage/metr',help='save path')
parser.add_argument('--expid',type=int,default=1,help='experiment id')
parser.add_argument('--runs', type=int, default=3, help='number of runs')
args = parser.parse_args()
import os
# make sure the checkpoint directory exists before training starts
if not os.path.exists('./garage/'):
    os.makedirs('./garage/')
import setproctitle
# label the process so it is identifiable in `ps` / `top`
setproctitle.setproctitle("Graph-WaveNet@lifuxian")
import random
import numpy as np
def init_seed(seed):
    '''
    Seed every RNG in use (Python, NumPy, PyTorch CPU and CUDA) and disable
    cudnn to maximize reproducibility.

    :param seed: integer seed applied to all generators.
    '''
    # Bug fix: the original wrote `torch.cuda.cudnn_enabled = False`, which is
    # not a real attribute — it just created a dangling name and silently did
    # nothing. cudnn is controlled through torch.backends.cudnn.
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
# init_seed(args.seed)
def main(runid):
    """Placeholder run for the demo script: seed all RNGs with the run id and
    return dummy metrics.

    The full training / validation / test pipeline that normally lives here is
    disabled in this demo build; only the per-run seeding is exercised, so the
    caller can verify that repeated runs are reproducible.

    :param runid: run index, used as the RNG seed.
    :return: dummy (mae, mape, rmse) placeholders (1, 2, 3).
    """
    init_seed(runid)
    print("Training finished")
    return 1, 2, 3
# Script entry point: run `main` args.runs times and aggregate the returned
# per-horizon metrics (mean and std across runs).
if __name__ == "__main__":
    # t1 = time.time()
    # main()
    # t2 = time.time()
    # print("Total time spent: {:.4f}".format(t2-t1))
    vmae = []
    vmape = []
    vrmse = []
    mae = []
    mape = []
    rmse = []
    for i in range(args.runs):
        # if args.TEST_ONLY:
        #     main(i)
        # else:
        m1, m2, m3 = main(i)
        # vmae.append(vm1)
        # vmape.append(vm2)
        # vrmse.append(vm3)
        mae.append(m1)
        mape.append(m2)
        rmse.append(m3)
        # debug output: a fresh random draw per run to eyeball seeding behavior
        print('hhhhhhh')
        print(torch.randn(6))
    # aggregate across runs (axis 0 = run index)
    mae = np.array(mae)
    mape = np.array(mape)
    rmse = np.array(rmse)
    amae = np.mean(mae, 0)
    amape = np.mean(mape, 0)
    armse = np.mean(rmse, 0)
    smae = np.std(mae, 0)
    smape = np.std(mape, 0)
    srmse = np.std(rmse, 0)
    # print('\n\nResults for 10 runs\n\n')
    # # valid data
    # print('valid\tMAE\tRMSE\tMAPE')
    # log = 'mean:\t{:.4f}\t{:.4f}\t{:.4f}'
    # print(log.format(np.mean(vmae), np.mean(vrmse), np.mean(vmape)))
    # log = 'std:\t{:.4f}\t{:.4f}\t{:.4f}'
    # print(log.format(np.std(vmae), np.std(vrmse), np.std(vmape)))
    # print('\n\n')
    # test data
    print('test|horizon\tMAE-mean\tRMSE-mean\tMAPE-mean\tMAE-std\tRMSE-std\tMAPE-std')
    # for i in [2, 5, 11]:
    #     log = '{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'
    #     print(log.format(i + 1, amae[i], armse[i], amape[i], smae[i], srmse[i], smape[i]))
| 9,623 | 37.650602 | 186 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/Graph-WaveNet/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import sys
class nconv(nn.Module):
    """Graph diffusion primitive: propagates node features along adjacency A."""

    def __init__(self):
        super(nconv, self).__init__()

    def forward(self, x, A):
        # x: (batch, channels, nodes, time); A: (nodes, nodes_out).
        # Contract the node axis of x against the first axis of A — the same
        # operation as einsum('ncvl,vw->ncwl', x, A), expressed via matmul.
        diffused = torch.matmul(x.permute(0, 1, 3, 2), A)
        return diffused.permute(0, 1, 3, 2).contiguous()
class linear(nn.Module):
    """Per-node, per-timestep linear projection implemented as a 1x1 Conv2d."""

    def __init__(self, c_in, c_out):
        super(linear, self).__init__()
        # kernel (1,1), stride 1: mixes channels only, leaving the node and
        # time dimensions untouched
        self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1),
                                   padding=(0, 0), stride=(1, 1), bias=True)

    def forward(self, x):
        return self.mlp(x)
class gcn(nn.Module):
    """Diffusion graph convolution: concatenates successive diffusion steps of
    the input over each support matrix, then mixes channels with a 1x1
    convolution and applies dropout."""

    def __init__(self, c_in, c_out, dropout, support_len=3, order=2):
        super(gcn, self).__init__()
        self.nconv = nconv()
        # one slot for the identity term plus `order` hops per support matrix
        c_in = (order * support_len + 1) * c_in
        self.mlp = linear(c_in, c_out)
        self.dropout = dropout
        self.order = order

    def forward(self, x, support):
        terms = [x]
        for adj in support:
            hop = self.nconv(x, adj)
            terms.append(hop)
            for _ in range(2, self.order + 1):
                hop = self.nconv(hop, adj)
                terms.append(hop)
        mixed = self.mlp(torch.cat(terms, dim=1))
        return F.dropout(mixed, self.dropout, training=self.training)
class gwnet(nn.Module):
    """Graph WaveNet: stacked dilated temporal convolutions (gated, WaveNet
    style) interleaved with diffusion graph convolutions, plus an optional
    learned ("adaptive") adjacency matrix built from two node embeddings."""

    def __init__(self, device, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addaptadj=True, aptinit=None, in_dim=2,out_dim=12,residual_channels=32,dilation_channels=32,skip_channels=256,end_channels=512,kernel_size=2,blocks=4,layers=2):
        """
        :param device: device the adaptive node embeddings are created on.
        :param num_nodes: number of graph nodes.
        :param supports: optional list of precomputed adjacency tensors.
        :param gcn_bool: enable graph convolutions (else plain 1x1 residual conv).
        :param addaptadj: additionally learn an adaptive adjacency matrix.
        :param aptinit: optional matrix whose rank-10 SVD initializes the
            adaptive node embeddings; random init when None.
        :param in_dim / out_dim: input feature channels / prediction horizon.
        :param kernel_size, blocks, layers: temporal-conv topology; the dilation
            doubles per layer within a block, which fixes the receptive field.
        """
        super(gwnet, self).__init__()
        self.dropout = dropout
        self.blocks = blocks
        self.layers = layers
        self.gcn_bool = gcn_bool
        self.addaptadj = addaptadj
        self.filter_convs = nn.ModuleList()
        self.gate_convs = nn.ModuleList()
        self.residual_convs = nn.ModuleList()
        self.skip_convs = nn.ModuleList()
        self.bn = nn.ModuleList()
        self.gconv = nn.ModuleList()
        # lift in_dim input features to the residual channel width
        self.start_conv = nn.Conv2d(in_channels=in_dim,
                                    out_channels=residual_channels,
                                    kernel_size=(1,1))
        self.supports = supports
        receptive_field = 1
        # number of adjacency matrices each gcn layer will receive
        self.supports_len = 0
        if supports is not None:
            self.supports_len += len(supports)
        if gcn_bool and addaptadj:
            if aptinit is None:
                if supports is None:
                    self.supports = []
                # random rank-10 node embeddings; their product forms the
                # adaptive adjacency in forward()
                self.nodevec1 = nn.Parameter(torch.randn(num_nodes, 10).to(device), requires_grad=True).to(device)
                self.nodevec2 = nn.Parameter(torch.randn(10, num_nodes).to(device), requires_grad=True).to(device)
                self.supports_len +=1
            else:
                if supports is None:
                    self.supports = []
                # initialize embeddings from the top-10 SVD factors of aptinit
                m, p, n = torch.svd(aptinit)
                initemb1 = torch.mm(m[:, :10], torch.diag(p[:10] ** 0.5))
                initemb2 = torch.mm(torch.diag(p[:10] ** 0.5), n[:, :10].t())
                self.nodevec1 = nn.Parameter(initemb1, requires_grad=True).to(device)
                self.nodevec2 = nn.Parameter(initemb2, requires_grad=True).to(device)
                self.supports_len += 1
        for b in range(blocks):
            additional_scope = kernel_size - 1
            new_dilation = 1
            for i in range(layers):
                # dilated convolutions
                self.filter_convs.append(nn.Conv2d(in_channels=residual_channels,
                                                   out_channels=dilation_channels,
                                                   kernel_size=(1,kernel_size),dilation=new_dilation))
                self.gate_convs.append(nn.Conv1d(in_channels=residual_channels,
                                                 out_channels=dilation_channels,
                                                 kernel_size=(1, kernel_size), dilation=new_dilation))
                # 1x1 convolution for residual connection
                self.residual_convs.append(nn.Conv1d(in_channels=dilation_channels,
                                                     out_channels=residual_channels,
                                                     kernel_size=(1, 1)))
                # 1x1 convolution for skip connection
                self.skip_convs.append(nn.Conv1d(in_channels=dilation_channels,
                                                 out_channels=skip_channels,
                                                 kernel_size=(1, 1)))
                self.bn.append(nn.BatchNorm2d(residual_channels))
                # dilation doubles each layer; track the total receptive field
                new_dilation *=2
                receptive_field += additional_scope
                additional_scope *= 2
                if self.gcn_bool:
                    self.gconv.append(gcn(dilation_channels,residual_channels,dropout,support_len=self.supports_len))
        self.end_conv_1 = nn.Conv2d(in_channels=skip_channels,
                                  out_channels=end_channels,
                                  kernel_size=(1,1),
                                  bias=True)
        self.end_conv_2 = nn.Conv2d(in_channels=end_channels,
                                    out_channels=out_dim,
                                    kernel_size=(1,1),
                                    bias=True)
        self.receptive_field = receptive_field

    def forward(self, input):
        """Run the network on input of shape (batch, in_dim, num_nodes, time),
        left-padding the time axis up to the receptive field; returns
        (batch, out_dim, num_nodes, 1)."""
        in_len = input.size(3)
        if in_len<self.receptive_field:
            x = nn.functional.pad(input,(self.receptive_field-in_len,0,0,0))
        else:
            x = input
        x = self.start_conv(x)
        skip = 0
        # calculate the current adaptive adj matrix once per iteration
        new_supports = None
        if self.gcn_bool and self.addaptadj and self.supports is not None:
            adp = F.softmax(F.relu(torch.mm(self.nodevec1, self.nodevec2)), dim=1)
            new_supports = self.supports + [adp]
        # WaveNet layers
        for i in range(self.blocks * self.layers):
            #            |----------------------------------------|     *residual*
            #            |                                        |
            #            |    |-- conv -- tanh --|                |
            # -> dilate -|----|                  * ----|-- 1x1 -- + -->	*input*
            #                 |-- conv -- sigm --|     |
            #                                         1x1
            #                                          |
            # ---------------------------------------> + ------------->	*skip*
            #(dilation, init_dilation) = self.dilations[i]
            #residual = dilation_func(x, dilation, init_dilation, i)
            residual = x
            # dilated convolution
            filter = self.filter_convs[i](residual)
            filter = torch.tanh(filter)
            gate = self.gate_convs[i](residual)
            gate = torch.sigmoid(gate)
            x = filter * gate
            # parametrized skip connection
            s = x
            s = self.skip_convs[i](s)
            try:
                # align the accumulated skip to the (shrinking) time length
                skip = skip[:, :, :, -s.size(3):]
            except:
                # first iteration: skip is still the int 0 and not sliceable
                skip = 0
            skip = s + skip
            if self.gcn_bool and self.supports is not None:
                if self.addaptadj:
                    x = self.gconv[i](x, new_supports)
                else:
                    x = self.gconv[i](x,self.supports)
            else:
                x = self.residual_convs[i](x)
            # residual connection, cropped to the post-convolution time length
            x = x + residual[:, :, :, -x.size(3):]
            x = self.bn[i](x)
        # output head operates on the accumulated skip connections only
        x = F.relu(skip)
        x = F.relu(self.end_conv_1(x))
        x = self.end_conv_2(x)
        return x
| 7,730 | 35.466981 | 245 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/Graph-WaveNet/util.py | import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
class DataLoader(object):
    """Minimal in-memory batch iterator over paired (x, y) numpy arrays."""

    def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
        """
        :param xs: input samples, indexed along axis 0.
        :param ys: target samples, indexed along axis 0.
        :param batch_size: samples per batch.
        :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
        """
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            shortfall = (batch_size - (len(xs) % batch_size)) % batch_size
            xs = np.concatenate([xs, np.repeat(xs[-1:], shortfall, axis=0)], axis=0)
            ys = np.concatenate([ys, np.repeat(ys[-1:], shortfall, axis=0)], axis=0)
        self.size = len(xs)
        self.num_batch = int(self.size // self.batch_size)
        self.xs = xs
        self.ys = ys

    def shuffle(self):
        """Apply one shared random permutation to inputs and targets."""
        order = np.random.permutation(self.size)
        self.xs = self.xs[order]
        self.ys = self.ys[order]

    def get_iterator(self):
        """Reset the cursor and yield (x, y) batches in order."""
        self.current_ind = 0

        def _wrapper():
            while self.current_ind < self.num_batch:
                lo = self.batch_size * self.current_ind
                hi = min(self.size, lo + self.batch_size)
                yield (self.xs[lo: hi, ...], self.ys[lo: hi, ...])
                self.current_ind += 1

        return _wrapper()
class StandardScaler():
    """
    Z-score normalization with fixed mean/std statistics.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Map raw values into normalized space."""
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        """Map normalized values back to the raw scale."""
        return data * self.std + self.mean
def sym_adj(adj):
    """Symmetrically normalize adjacency matrix."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1))
    inv_sqrt = np.power(degrees, -0.5).flatten()
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    # D^-1/2 A^T D^-1/2 (equals D^-1/2 A D^-1/2 when A is symmetric)
    return adj.dot(d_half).transpose().dot(d_half).astype(np.float32).todense()
def asym_adj(adj):
    """Random-walk (row) normalization: D^-1 A, with zero rows left at zero."""
    adj = sp.coo_matrix(adj)
    row_sums = np.array(adj.sum(1)).flatten()
    inv = np.power(row_sums, -1).flatten()
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
    """
    # L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
    # D = diag(A 1)
    :param adj: adjacency matrix (dense or sparse).
    :return: normalized Laplacian as a scipy sparse matrix.
    """
    adj = sp.coo_matrix(adj)
    deg = np.array(adj.sum(1))
    inv_sqrt = np.power(deg, -0.5).flatten()
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    half = sp.diags(inv_sqrt)
    return sp.eye(adj.shape[0]) - adj.dot(half).transpose().dot(half).tocoo()
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
    """Rescale the normalized Laplacian to roughly [-1, 1]: 2L/lambda_max - I.

    :param lambda_max: largest eigenvalue; computed via eigsh when None.
    :param undirected: symmetrize A by elementwise max with its transpose.
    """
    if undirected:
        adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
    lap = calculate_normalized_laplacian(adj_mx)
    if lambda_max is None:
        eigval, _ = linalg.eigsh(lap, 1, which='LM')
        lambda_max = eigval[0]
    lap = sp.csr_matrix(lap)
    n, _ = lap.shape
    identity = sp.identity(n, format='csr', dtype=lap.dtype)
    scaled = (2 / lambda_max * lap) - identity
    return scaled.astype(np.float32).todense()
def load_pickle(pickle_file):
    """Load a pickle file, retrying with latin1 encoding for Python-2 era pickles."""
    try:
        with open(pickle_file, 'rb') as f:
            return pickle.load(f)
    except UnicodeDecodeError:
        # Python-2 pickle containing byte strings: re-read with latin1
        with open(pickle_file, 'rb') as f:
            return pickle.load(f, encoding='latin1')
    except Exception as e:
        print('Unable to load data ', pickle_file, ':', e)
        raise
def load_adj(pkl_filename, adjtype):
    """Load (sensor_ids, id_to_index, adj_mx) from a pickle and build the
    adjacency support list of the requested normalization type."""
    sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename)
    if adjtype == "scalap":
        adj = [calculate_scaled_laplacian(adj_mx)]
    elif adjtype == "normlap":
        adj = [calculate_normalized_laplacian(adj_mx).astype(np.float32).todense()]
    elif adjtype == "symnadj":
        adj = [sym_adj(adj_mx)]
    elif adjtype == "transition":
        adj = [asym_adj(adj_mx)]
    elif adjtype == "doubletransition":
        # forward and backward random-walk transition matrices
        adj = [asym_adj(adj_mx), asym_adj(np.transpose(adj_mx))]
    elif adjtype == "identity":
        adj = [np.diag(np.ones(adj_mx.shape[0])).astype(np.float32)]
    else:
        assert False, "adj type not defined"
    return sensor_ids, sensor_id_to_ind, adj
def load_dataset(dataset_dir, batch_size, valid_batch_size= None, test_batch_size=None):
    """Load train/val/test .npz splits, z-normalize the first input feature
    with training-set statistics, and wrap each split in a DataLoader."""
    data = {}
    for split in ['train', 'val', 'test']:
        arrays = np.load(os.path.join(dataset_dir, split + '.npz'))
        data['x_' + split] = arrays['x']
        data['y_' + split] = arrays['y']
    # normalization statistics come from the training inputs only
    scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())
    # Data format
    for split in ['train', 'val', 'test']:
        data['x_' + split][..., 0] = scaler.transform(data['x_' + split][..., 0])
    data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size)
    data['val_loader'] = DataLoader(data['x_val'], data['y_val'], valid_batch_size)
    data['test_loader'] = DataLoader(data['x_test'], data['y_test'], test_batch_size)
    data['scaler'] = scaler
    return data
def masked_mse(preds, labels, null_val=np.nan):
    """MSE over entries where labels != null_val (NaN-aware), with the mask
    renormalized so the mean is taken only over valid entries."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    sq_err = (preds - labels) ** 2 * mask
    sq_err = torch.where(torch.isnan(sq_err), torch.zeros_like(sq_err), sq_err)
    return torch.mean(sq_err)
def masked_rmse(preds, labels, null_val=np.nan):
    """Square root of the masked MSE; see masked_mse."""
    mse = masked_mse(preds=preds, labels=labels, null_val=null_val)
    return torch.sqrt(mse)
def masked_mae(preds, labels, null_val=np.nan):
    """MAE over entries where labels != null_val (NaN-aware), with the mask
    renormalized so the mean is taken only over valid entries."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    abs_err = torch.abs(preds - labels) * mask
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)
    return torch.mean(abs_err)
def masked_mape(preds, labels, null_val=np.nan):
    """MAPE over entries where labels != null_val (NaN-aware). Divisions by a
    zero label produce inf/NaN that the final torch.where zeroes out."""
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    mask = mask / torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    rel_err = torch.abs(preds - labels) / labels * mask
    rel_err = torch.where(torch.isnan(rel_err), torch.zeros_like(rel_err), rel_err)
    return torch.mean(rel_err)
def metric(pred, real):
    """Return (MAE, MAPE, RMSE) treating zero labels as missing values."""
    return (masked_mae(pred, real, 0.0).item(),
            masked_mape(pred, real, 0.0).item(),
            masked_rmse(pred, real, 0.0).item())
| 7,185 | 32.896226 | 113 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/Graph-WaveNet/train.py | import torch
import numpy as np
import argparse
import time
import util
import matplotlib.pyplot as plt
from engine import trainer
# Command-line configuration for the Graph WaveNet training script.
parser = argparse.ArgumentParser()
parser.add_argument('--device',type=str,default='cuda:3',help='')
parser.add_argument('--data',type=str,default='data/METR-LA',help='data path')
parser.add_argument('--adjdata',type=str,default='data/sensor_graph/adj_mx.pkl',help='adj data path')
parser.add_argument('--adjtype',type=str,default='doubletransition',help='adj type')
parser.add_argument('--gcn_bool',action='store_true',help='whether to add graph convolution layer')
parser.add_argument('--aptonly',action='store_true',help='whether only adaptive adj')
parser.add_argument('--addaptadj',action='store_true',help='whether add adaptive adj')
parser.add_argument('--randomadj',action='store_true',help='whether random initialize adaptive adj')
parser.add_argument('--seq_length',type=int,default=12,help='')
parser.add_argument('--nhid',type=int,default=32,help='')
parser.add_argument('--in_dim',type=int,default=2,help='inputs dimension')
parser.add_argument('--num_nodes',type=int,default=207,help='number of nodes')
parser.add_argument('--batch_size',type=int,default=64,help='batch size')
parser.add_argument('--learning_rate',type=float,default=0.001,help='learning rate')
parser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')
parser.add_argument('--weight_decay',type=float,default=0.0001,help='weight decay rate')
parser.add_argument('--epochs',type=int,default=100,help='')
parser.add_argument('--print_every',type=int,default=50,help='')
#parser.add_argument('--seed',type=int,default=99,help='random seed')
parser.add_argument('--save',type=str,default='./garage/metr',help='save path')
parser.add_argument('--expid',type=int,default=1,help='experiment id')
parser.add_argument('--runs', type=int, default=3, help='number of runs')
args = parser.parse_args()
import os
# make sure the checkpoint directory exists before training starts
if not os.path.exists('./garage/'):
    os.makedirs('./garage/')
import setproctitle
# label the process so it is identifiable in `ps` / `top`
setproctitle.setproctitle("Graph-WaveNet@lifuxian")
def main(runid):
    """Train Graph-WaveNet once, pick the best epoch by validation loss,
    evaluate it on the test split and return per-horizon metrics.

    :param runid: index of the current run.  NOTE(review): currently unused
        inside the function — every run writes checkpoints to the same
        ``args.save`` prefix, so runs overwrite each other's files.
    :return: (amae, amape, armse) — lists of length 12, one entry per
        prediction horizon.
    """
    #set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata,args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    print(args)
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]
    if args.aptonly:
        supports = None
    # engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,
    #                      args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,
    #                      adjinit)
    # number of nodes is taken from the adjacency matrix, overriding args.num_nodes
    num_nodes = adj_mx[-1].shape[0]
    engine = trainer(scaler, args.in_dim, args.seq_length, num_nodes, args.nhid, args.dropout,
                         args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,
                         adjinit)
    print("start training...",flush=True)
    his_loss =[]
    val_time = []
    train_time = []
    for i in range(1,args.epochs+1):
        #if i % 10 == 0:
            #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
            #for g in engine.optimizer.param_groups:
                #g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx= trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            # channel 0 of the target is the quantity being predicted
            metrics = engine.train(trainx, trainy[:,0,:,:])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            # if iter % args.print_every == 0 :
            #     log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
            #     print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1]),flush=True)
        t2 = time.time()
        train_time.append(t2-t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []
        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:,0,:,:])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        # log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        # print(log.format(i,(s2-s1)))
        val_time.append(s2-s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)
        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        # validation loss drives best-checkpoint selection after the loop
        his_loss.append(mvalid_loss)
        # if i % args.print_every == 0 :
        #     log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        #     print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1)),flush=True)
        # one checkpoint per epoch; the best one is reloaded below
        torch.save(engine.model.state_dict(), args.save+"_epoch_"+str(i)+"_"+str(round(mvalid_loss,2))+".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))
    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(torch.load(args.save+"_epoch_"+str(bestid+1)+"_"+str(round(his_loss[bestid],2))+".pth"))
    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1,3)[:,0,:,:]
    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1,3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1,3)
        outputs.append(preds.squeeze())
    yhat = torch.cat(outputs,dim=0)
    # drop loader padding so predictions align with the ground truth length
    yhat = yhat[:realy.size(0),...]
    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid],4)))
    amae = []
    amape = []
    armse = []
    # evaluate every horizon (1..12 steps ahead) separately
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:,:,i])
        real = realy[:,:,i]
        metrics = util.metric(pred,real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i+1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])
    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))
    torch.save(engine.model.state_dict(), args.save+"_exp"+str(args.expid)+"_best_"+str(round(his_loss[bestid],2))+".pth")
    return amae, amape, armse
if __name__ == "__main__":
    # t1 = time.time()
    # main()
    # t2 = time.time()
    # print("Total time spent: {:.4f}".format(t2-t1))
    # Run training args.runs times and aggregate mean/std of test metrics.
    vmae = []
    vmape = []
    vrmse = []
    mae = []
    mape = []
    rmse = []
    for i in range(args.runs):
        # if args.TEST_ONLY:
        #     main(i)
        # else:
        m1, m2, m3 = main(i)
        # vmae.append(vm1)
        # vmape.append(vm2)
        # vrmse.append(vm3)
        mae.append(m1)
        mape.append(m2)
        rmse.append(m3)
    # arrays of shape (runs, 12 horizons)
    mae = np.array(mae)
    mape = np.array(mape)
    rmse = np.array(rmse)
    # per-horizon mean and standard deviation across runs
    amae = np.mean(mae, 0)
    amape = np.mean(mape, 0)
    armse = np.mean(rmse, 0)
    smae = np.std(mae, 0)
    smape = np.std(mape, 0)
    srmse = np.std(rmse, 0)
    # print('\n\nResults for 10 runs\n\n')
    # # valid data
    # print('valid\tMAE\tRMSE\tMAPE')
    # log = 'mean:\t{:.4f}\t{:.4f}\t{:.4f}'
    # print(log.format(np.mean(vmae), np.mean(vrmse), np.mean(vmape)))
    # log = 'std:\t{:.4f}\t{:.4f}\t{:.4f}'
    # print(log.format(np.std(vmae), np.std(vrmse), np.std(vmape)))
    # print('\n\n')
    # test data
    # report horizons 3, 6 and 12 (zero-based indices 2, 5, 11)
    print('test|horizon\tMAE-mean\tRMSE-mean\tMAPE-mean\tMAE-std\tRMSE-std\tMAPE-std')
    for i in [2, 5, 11]:
        log = '{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'
        print(log.format(i + 1, amae[i], armse[i], amape[i], smae[i], srmse[i], smape[i]))
| 8,970 | 38.346491 | 184 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/FNN/dcrnn_train_pytorch.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
import setproctitle
setproctitle.setproctitle("stmetanet@lifuxian")
def main(args):
    """Load the YAML config and sensor graph, build a DCRNNSupervisor and
    either train it or run a test-only evaluation.

    :param args: parsed CLI namespace with ``config_filename``,
        ``LOAD_INITIAL`` and ``TEST_ONLY`` attributes.
    """
    with open(args.config_filename) as f:
        # yaml.load() without an explicit Loader is deprecated since
        # PyYAML 5.1 and raises TypeError on PyYAML >= 6.  FullLoader keeps
        # the previous default behavior for these trusted config files.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)
    graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
    sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)
    # the config filename encodes the dataset, e.g. "..._bay.yaml" -> 'bay'
    data_type = args.config_filename.split('/')[-1].split('.')[0].split('_')[-1] #'bay' or 'la'
    supervisor = DCRNNSupervisor(data_type = data_type, LOAD_INITIAL = args.LOAD_INITIAL, adj_mx=adj_mx, **supervisor_config)
    if args.TEST_ONLY:
        supervisor.evaluate_test()
    else:
        supervisor.train()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_filename', default=None, type=str,
                        help='Configuration filename for restoring the model.')
    # NOTE(review): argparse `type=bool` is misleading — bool('False') is
    # True, so ANY non-empty string passed on the CLI turns these flags on.
    # action='store_true' would be the conventional fix (interface change).
    parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set to true to only use cpu.')
    parser.add_argument('--LOAD_INITIAL', default=False, type=bool, help='If LOAD_INITIAL.')
    parser.add_argument('--TEST_ONLY', default=False, type=bool, help='If TEST_ONLY.')
    args = parser.parse_args()
    main(args)
| 1,459 | 38.459459 | 129 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/FNN/run_demo_pytorch.py | import argparse
import numpy as np
import os
import sys
import yaml
from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
def run_dcrnn(args):
    """Load a pretrained DCRNN from its YAML config, evaluate it on the
    test split and save the predictions as a compressed npz archive.

    :param args: namespace with ``config_filename`` and ``output_filename``.
    """
    with open(args.config_filename) as f:
        # yaml.load() without an explicit Loader is deprecated since
        # PyYAML 5.1 and raises TypeError on PyYAML >= 6.  FullLoader keeps
        # the previous default behavior for these trusted config files.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)
        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
        mean_score, outputs = supervisor.evaluate('test')
        np.savez_compressed(args.output_filename, **outputs)
        print("MAE : {}".format(mean_score))
        print('Predictions saved as {}.'.format(args.output_filename))
if __name__ == '__main__':
    sys.path.append(os.getcwd())
    parser = argparse.ArgumentParser()
    # NOTE(review): declared type=str although the default is the bool False,
    # so args.use_cpu_only is a string whenever the flag is supplied — verify
    # how downstream code consumes it.
    parser.add_argument('--use_cpu_only', default=False, type=str, help='Whether to run tensorflow on cpu.')
    parser.add_argument('--config_filename', default='data/model/pretrained/METR-LA/config.yaml', type=str,
                        help='Config file for pretrained model.')
    parser.add_argument('--output_filename', default='data/dcrnn_predictions.npz')
    args = parser.parse_args()
    run_dcrnn(args)
| 1,264 | 36.205882 | 108 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/FNN/model/pytorch/dcrnn_model.py | import numpy as np
import torch
import torch.nn as nn
from model.pytorch.dcrnn_cell import DCGRUCell
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
class Seq2SeqAttrs:
    """Mixin holding hyper-parameters shared by the encoder, decoder and the
    full seq2seq model; values come from keyword arguments with defaults."""

    def __init__(self, adj_mx, **model_kwargs):
        get = model_kwargs.get
        self.adj_mx = adj_mx
        self.max_diffusion_step = int(get('max_diffusion_step', 2))
        self.cl_decay_steps = int(get('cl_decay_steps', 1000))
        self.filter_type = get('filter_type', 'laplacian')
        self.num_nodes = int(get('num_nodes', 1))
        self.num_rnn_layers = int(get('num_rnn_layers', 1))
        # 'rnn_units' is mandatory: get() yields None and int(None) raises.
        self.rnn_units = int(get('rnn_units'))
        # flattened per-sample hidden size: one rnn_units vector per node
        self.hidden_state_size = self.num_nodes * self.rnn_units
class EncoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN encoder: a stack of DCGRU cells processing one time step per call."""

    def __init__(self, adj_mx, **model_kwargs):
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.input_dim = int(model_kwargs.get('input_dim', 1))
        self.seq_len = int(model_kwargs.get('seq_len'))  # for the encoder
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Encoder forward pass.

        :param inputs: shape (batch_size, self.num_nodes * self.input_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.hidden_state_size)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        batch_size, _ = inputs.size()
        if hidden_state is None:
            # start every layer from an all-zero hidden state
            hidden_state = torch.zeros((self.num_rnn_layers, batch_size, self.hidden_state_size),
                                       device=device)
        hidden_states = []
        output = inputs
        # stacked RNN: each layer consumes the previous layer's output
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state
        return output, torch.stack(hidden_states)  # runs in O(num_layers) so not too slow
class DecoderModel(nn.Module, Seq2SeqAttrs):
    """DCRNN decoder: stacked DCGRU cells plus a linear projection to output_dim."""

    def __init__(self, adj_mx, **model_kwargs):
        # super().__init__(is_training, adj_mx, **model_kwargs)
        nn.Module.__init__(self)
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.output_dim = int(model_kwargs.get('output_dim', 1))
        self.horizon = int(model_kwargs.get('horizon', 1))  # for the decoder
        self.projection_layer = nn.Linear(self.rnn_units, self.output_dim)
        self.dcgru_layers = nn.ModuleList(
            [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes,
                       filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])

    def forward(self, inputs, hidden_state=None):
        """
        Decoder forward pass.

        :param inputs: shape (batch_size, self.num_nodes * self.output_dim)
        :param hidden_state: (num_layers, batch_size, self.hidden_state_size)
               optional, zeros if not provided
        :return: output: # shape (batch_size, self.num_nodes * self.output_dim)
                 hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
                 (lower indices mean lower layers)
        """
        hidden_states = []
        output = inputs
        # stacked RNN: each layer consumes the previous layer's output
        for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
            next_hidden_state = dcgru_layer(output, hidden_state[layer_num])
            hidden_states.append(next_hidden_state)
            output = next_hidden_state
        # project the top-layer state to output_dim per node, then re-flatten
        projected = self.projection_layer(output.view(-1, self.rnn_units))
        output = projected.view(-1, self.num_nodes * self.output_dim)
        return output, torch.stack(hidden_states)
class DCRNNModel(nn.Module, Seq2SeqAttrs):
    """Full DCRNN seq2seq model: encoder over the input window, autoregressive
    decoder over the prediction horizon, with optional scheduled sampling."""

    def __init__(self, adj_mx, logger, **model_kwargs):
        super().__init__()
        Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs)
        self.encoder_model = EncoderModel(adj_mx, **model_kwargs)
        self.decoder_model = DecoderModel(adj_mx, **model_kwargs)
        self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))
        self.use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))
        self._logger = logger

    def _compute_sampling_threshold(self, batches_seen):
        # inverse-sigmoid decay: probability of feeding ground truth to the
        # decoder, decreasing as more batches are seen (scheduled sampling)
        return self.cl_decay_steps / (
                self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))

    def encoder(self, inputs):
        """
        encoder forward pass on t time steps
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        """
        encoder_hidden_state = None
        # thread the hidden state through all input time steps
        for t in range(self.encoder_model.seq_len):
            _, encoder_hidden_state = self.encoder_model(inputs[t], encoder_hidden_state)
        return encoder_hidden_state

    def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
        """
        Decoder forward pass
        :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference]
        :param batches_seen: global step [optional, not exist for inference]
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        # decoding starts from an all-zero GO symbol
        go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
                                device=device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol
        outputs = []
        for t in range(self.decoder_model.horizon):
            decoder_output, decoder_hidden_state = self.decoder_model(decoder_input,
                                                                      decoder_hidden_state)
            # autoregressive: feed the prediction back as the next input
            decoder_input = decoder_output
            outputs.append(decoder_output)
            if self.training and self.use_curriculum_learning:
                # scheduled sampling: sometimes replace the prediction with
                # the ground-truth label during training
                c = np.random.uniform(0, 1)
                if c < self._compute_sampling_threshold(batches_seen):
                    decoder_input = labels[t]
        outputs = torch.stack(outputs)
        return outputs

    def forward(self, inputs, labels=None, batches_seen=None):
        """
        seq2seq forward pass
        :param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
        :param labels: shape (horizon, batch_size, num_sensor * output)
        :param batches_seen: batches seen till now
        :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
        """
        encoder_hidden_state = self.encoder(inputs)
        self._logger.debug("Encoder complete, starting decoder")
        outputs = self.decoder(encoder_hidden_state, labels, batches_seen=batches_seen)
        self._logger.debug("Decoder complete")
        if batches_seen == 0:
            self._logger.info(
                "Total trainable parameters {}".format(count_parameters(self))
            )
        return outputs
####################################################################################################################
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class TimeBlock(nn.Module):
    """Gated 1-D temporal convolution applied independently to every node.

    Computes relu(conv1(X) + sigmoid(conv2(X)) + conv3(X)) along the time
    axis; each valid (unpadded) convolution shrinks the time dimension by
    kernel_size - 1.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3):
        """
        :param in_channels: number of input features per node per time step.
        :param out_channels: number of output channels per node per time step.
        :param kernel_size: width of the 1-D temporal kernel.
        """
        super(TimeBlock, self).__init__()
        kernel = (1, kernel_size)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel)
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel)
        self.conv3 = nn.Conv2d(in_channels, out_channels, kernel)

    def forward(self, X):
        """
        :param X: (batch, num_nodes, num_timesteps, in_channels)
        :return: (batch, num_nodes, num_timesteps_out, out_channels)
        """
        # NCHW layout for conv2d: channels first, nodes as H, time as W
        nchw = X.permute(0, 3, 1, 2)
        gated = self.conv1(nchw) + torch.sigmoid(self.conv2(nchw))
        activated = F.relu(gated + self.conv3(nchw))
        # back to NHWC: (batch, nodes, time, channels)
        return activated.permute(0, 2, 3, 1)
class STGCNBlock(nn.Module):
    """STGCN sandwich block: temporal convolution, graph convolution over the
    node axis (A_hat then Theta1), a second temporal convolution, and batch
    normalization over the node dimension."""

    def __init__(self, in_channels, spatial_channels, out_channels,
                 num_nodes):
        """
        :param in_channels: input features per node per time step.
        :param spatial_channels: width of the graph-convolutional sub-block.
        :param out_channels: output features per node per time step.
        :param num_nodes: number of graph nodes (used by the batch norm).
        """
        super(STGCNBlock, self).__init__()
        self.temporal1 = TimeBlock(in_channels=in_channels,
                                   out_channels=out_channels)
        self.Theta1 = nn.Parameter(torch.FloatTensor(out_channels,
                                                     spatial_channels))
        self.temporal2 = TimeBlock(in_channels=spatial_channels,
                                   out_channels=out_channels)
        self.batch_norm = nn.BatchNorm2d(num_nodes)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init of Theta1 with bound 1/sqrt(#columns)."""
        bound = 1. / math.sqrt(self.Theta1.shape[1])
        self.Theta1.data.uniform_(-bound, bound)

    def forward(self, X, A_hat):
        """
        :param X: (batch, num_nodes, num_timesteps, in_channels)
        :param A_hat: normalized adjacency matrix, (num_nodes, num_nodes)
        :return: (batch, num_nodes, num_timesteps_out, out_channels)
        """
        temporal = self.temporal1(X)
        # graph convolution: mix the node axis with A_hat, then project the
        # channel axis with Theta1 (equivalent to the einsum
        # "ij,jklm->kilm" over the permuted tensor).
        mixed = torch.einsum("ij,bjtc->bitc", A_hat, temporal)
        spatial = F.relu(torch.matmul(mixed, self.Theta1))
        out = self.temporal2(spatial)
        return self.batch_norm(out)
class STGCN(nn.Module):
    """
    Spatio-temporal graph convolutional network as described in
    https://arxiv.org/abs/1709.04875v3 by Yu et al.
    Input should have shape (batch_size, num_nodes, num_input_time_steps,
    num_features).
    """
    def __init__(self, num_nodes, num_features, num_timesteps_input,
                 num_timesteps_output):
        """
        :param num_nodes: Number of nodes in the graph.
        :param num_features: Number of features at each node in each time step.
        :param num_timesteps_input: Number of past time steps fed into the
        network.
        :param num_timesteps_output: Desired number of future time steps
        output by the network.
        """
        super(STGCN, self).__init__()
        self.block1 = STGCNBlock(in_channels=num_features, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.block2 = STGCNBlock(in_channels=64, out_channels=64,
                                 spatial_channels=16, num_nodes=num_nodes)
        self.last_temporal = TimeBlock(in_channels=64, out_channels=64)
        # 2 * 5: the five TimeBlocks above (two per STGCNBlock plus the final
        # one) each trim kernel_size - 1 = 2 time steps, 10 steps in total.
        self.fully = nn.Linear((num_timesteps_input - 2 * 5) * 64,
                               num_timesteps_output)
        self.num_nodes = num_nodes
        self.input_dim = num_features
        self.seq_len = num_timesteps_input
        self.horizon = num_timesteps_output
    def forward(self, A_hat, X):
        """
        :param X: Input data of shape (batch_size, num_nodes, num_timesteps,
        num_features=in_channels).
        :param A_hat: Normalized adjacency matrix.
        """
        # incoming X is (seq_len, batch, num_nodes * input_dim); reshape to
        # (batch, num_nodes, seq_len, input_dim) expected by the blocks
        X = X.view(self.seq_len, -1, self.num_nodes, self.input_dim).permute(1, 2, 0, 3).contiguous()
        out1 = self.block1(X, A_hat)
        out2 = self.block2(out1, A_hat)
        out3 = self.last_temporal(out2)
        # flatten (time, channels) per node and map to the output horizon
        out4 = self.fully(out3.reshape((out3.shape[0], out3.shape[1], -1)))
        # reorder to (horizon, batch, num_nodes) to match the seq2seq API
        return out4.permute(2, 0, 1).contiguous()
# :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
# y: shape (horizon, batch_size, num_sensor * output_dim)
####################################################################################################################
import math
import random
from typing import List, Tuple
import numpy as np
import dgl
import torch
from dgl import DGLGraph, init
from torch import nn, Tensor
class MultiLayerPerception(nn.Sequential):
    """MLP assembled as an nn.Sequential of Linear layers plus activations.

    *hiddens* lists the layer widths [in, h1, ..., out]; *hidden_act* is the
    activation class placed after every hidden layer — and after the final
    layer too when *out_act* is True.  Module names follow the original
    scheme: Layer1, Activation1, Layer2, ...
    """

    def __init__(self, hiddens: List[int], hidden_act, out_act: bool):
        super(MultiLayerPerception, self).__init__()
        n_layers = len(hiddens) - 1
        for idx in range(n_layers):
            self.add_module(f'Layer{idx + 1}', nn.Linear(hiddens[idx], hiddens[idx + 1]))
            is_last = idx == n_layers - 1
            if out_act or not is_last:
                self.add_module(f'Activation{idx + 1}', hidden_act())
class MetaDense(nn.Module):
    """Dense layer whose weights and bias are generated per node by small
    MLPs conditioned on the node's meta-feature vector."""

    def __init__(self, f_in: int, f_out: int, feat_size: int, meta_hiddens: List[int]):
        super(MetaDense, self).__init__()
        # feature -> flattened (f_in x f_out) weight matrix per node
        self.weights_mlp = MultiLayerPerception([feat_size] + meta_hiddens + [f_in * f_out], nn.Sigmoid, False)
        # feature -> f_out bias vector per node
        self.bias_mlp = MultiLayerPerception([feat_size] + meta_hiddens + [f_out], nn.Sigmoid, False)

    def forward(self, feature: Tensor, data: Tensor) -> Tensor:
        """
        :param feature: tensor, [N, F] node meta-features
        :param data: tensor, [B, N, F_in]
        :return: tensor, [B, N, F_out]
        """
        batch, n_nodes, f_in = data.shape
        weights = self.weights_mlp(feature).reshape(1, n_nodes, f_in, -1)
        bias = self.bias_mlp(feature)  # [N, F_out]
        # [B, N, 1, F_in] @ [1, N, F_in, F_out] -> squeeze -> [B, N, F_out]
        transformed = data.unsqueeze(2).matmul(weights).squeeze(2)
        return transformed + bias
class RNNCell(nn.Module):
    """Abstract recurrent cell operating on per-node states of shape [B, N, F]."""

    def __init__(self):
        super(RNNCell, self).__init__()

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """Advance the cell by a single time step.

        :param feature: tensor, [N, F] node meta-features
        :param data: tensor, [B, N, F] input at one time step
        :param begin_state: None or tensor, [B, N, F] previous hidden state
        :return: (output [B, N, F], new state [B, N, F])
        """
        raise NotImplementedError("Not Implemented")

    def forward(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """Unroll one_step over the time axis of *data*.

        :param feature: tensor, [N, F]
        :param data: tensor, [B, T, N, F]
        :param begin_state: None or tensor, [B, N, F]
        :return: (stacked outputs [B, T, N, F], final state [B, N, F])
        """
        n_steps = data.shape[1]
        state = begin_state
        step_outputs = []
        for step in range(n_steps):
            out, state = self.one_step(feature, data[:, step], state)
            step_outputs.append(out)
        return torch.stack(step_outputs, 1), state
class MetaGRUCell(RNNCell):
    """GRU cell whose gate transforms are MetaDense layers, i.e. per-node
    weights generated from node meta-features."""

    def __init__(self, f_in: int, hid_size: int, feat_size: int, meta_hiddens: List[int]):
        super(MetaGRUCell, self).__init__()
        self.hidden_size = hid_size
        # joint update (z) and reset (r) gates, computed from [input, state]
        self.dense_zr = MetaDense(f_in + hid_size, 2 * hid_size, feat_size, meta_hiddens=meta_hiddens)
        # candidate-state transforms: input-to-hidden and hidden-to-hidden
        self.dense_i2h = MetaDense(f_in, hid_size, feat_size, meta_hiddens=meta_hiddens)
        self.dense_h2h = MetaDense(hid_size, hid_size, feat_size, meta_hiddens=meta_hiddens)

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        """Single GRU step; zero-initializes the state when none is given.

        :param feature: tensor, [N, F] node meta-features
        :param data: tensor, [B, N, f_in] input at one time step
        :param begin_state: None or tensor, [B, N, hid_size]
        :return: (output, state) — both are the new hidden state
        """
        b, n, _ = data.shape
        if begin_state is None:
            begin_state = torch.zeros(b, n, self.hidden_size, dtype=data.dtype, device=data.device)
        data_and_state = torch.cat([data, begin_state], -1)
        zr = torch.sigmoid(self.dense_zr(feature, data_and_state))
        z, r = zr.split(self.hidden_size, -1)
        # standard GRU update: new = z*old + (1-z)*tanh(W_i x + W_h (r*old))
        state = z * begin_state + (1 - z) * torch.tanh(self.dense_i2h(feature, data) + self.dense_h2h(feature, r * begin_state))
        # c = torch.tanh(self.dense_i2h(feature, data))
        # h = self.dense_h2h(feature, r * begin_state)
        #
        # state = z * begin_state + torch.sub(1., z) * c + h
        return state, state
class NormalGRUCell(RNNCell):
    """Plain GRU cell that ignores the node meta-features and shares one set
    of weights across all nodes."""

    def __init__(self, f_in: int, hid_size: int):
        super(NormalGRUCell, self).__init__()
        self.cell = nn.GRUCell(f_in, hid_size)

    def one_step(self, feature: Tensor, data: Tensor, begin_state: Tensor = None) -> Tuple[Tensor, Tensor]:
        batch, n_nodes, _ = data.shape
        # fold the node axis into the batch so one GRUCell serves every node
        flat_in = data.reshape(batch * n_nodes, -1)
        flat_state = None if begin_state is None else begin_state.reshape(batch * n_nodes, -1)
        hidden = self.cell(flat_in, flat_state).reshape(batch, n_nodes, -1)
        return hidden, hidden
import sys
class GraphAttNet(nn.Module):
    """Base class for attention over a fixed weighted digraph (via DGL).

    Builds a DGLGraph once from a dense distance matrix and an adjacency
    list; subclasses implement the per-edge message (msg_edge) and the
    per-node reduction (msg_reduce).
    """

    def __init__(self, dist: np.ndarray, edge: list, hid_size: int, feat_size: int, meta_hiddens: List[int]):
        """
        :param dist: [N, N] pairwise distance matrix.
        :param edge: adjacency list; edge[i] holds source nodes j of edges j -> i.
        :param hid_size: hidden state size carried on each node.
        :param feat_size: node meta-feature size.
        :param meta_hiddens: hidden widths for the meta-learner MLPs.
        """
        super(GraphAttNet, self).__init__()
        self.hidden_size = hid_size
        self.feature_size = feat_size
        self.meta_hiddens = meta_hiddens
        self.num_nodes = n = dist.shape[0]
        # flatten the adjacency list into (src, dst, distance) triples
        src, dst, dis = list(), list(), list()
        for i in range(n):
            for j in edge[i]:
                src.append(j)
                dst.append(i)
                dis.append(dist[j, i])
        dist = torch.tensor(dis).unsqueeze_(1)
        g = DGLGraph()
        g.set_n_initializer(init.zero_initializer)
        g.add_nodes(n)
        g.add_edges(src, dst, {'dist': dist})
        self.graph = g

    def forward(self, state: Tensor, feature: Tensor) -> Tensor:
        """
        :param state: tensor, [B, T, N, F] or [B, N, F]
        :param feature: tensor, [N, F]
        :return: tensor, [B, T, N, F]
        """
        # move the node axis to the front: [N, B, T, F] or [N, B, F]
        state = state.unsqueeze(0).transpose(0, -2).squeeze(-2)
        g = self.graph.local_var()
        # NOTE(review): DGLGraph.to() returns the moved graph and the return
        # value is discarded here — depending on the installed DGL version,
        # `g` may remain on its original device; confirm.
        g.to(state.device)
        g.ndata['state'] = state
        g.ndata['feature'] = feature
        g.update_all(self.msg_edge, self.msg_reduce)
        state = g.ndata.pop('new_state')
        # move the node axis back to position -2
        return state.unsqueeze(-2).transpose(0, -2).squeeze(0)

    def msg_edge(self, edge: dgl.EdgeBatch):
        """
        :param edge: a dictionary of edge data.
            edge.src['state'] and edge.dst['state']: hidden states of the nodes, with shape [e, b, t, d] or [e, b, d]
            edge.src['feature'] and edge.dst['feature']: features of the nodes, with shape [e, d]
            edge.data['dist']: distance matrix of the edges, with shape [e, d]
        :return: a dictionary of messages
        """
        raise NotImplementedError('Not implemented.')

    def msg_reduce(self, node: dgl.NodeBatch):
        """
        :param node:
            node.mailbox['state'], tensor, [n, e, b, t, d] or [n, e, b, d]
            node.mailbox['alpha'], tensor, [n, e, b, t, d] or [n, e, b, d]
        :return: tensor, [n, b, t, d] or [n, b, d]
        """
        raise NotImplementedError('Not implemented.')
class MetaGAT(GraphAttNet):
    """Meta-learned graph attention: each edge's attention projection is
    produced by an MLP conditioned on the endpoint features and the edge
    distance."""

    def __init__(self, *args, **kwargs):
        super(MetaGAT, self).__init__(*args, **kwargs)
        # maps [src_feat, dst_feat, dist] -> a flattened (2H x H) projection
        self.w_mlp = MultiLayerPerception(
            [self.feature_size * 2 + 1] + self.meta_hiddens + [self.hidden_size * 2 * self.hidden_size],
            nn.Sigmoid, False)
        self.act = nn.LeakyReLU()
        # learnable global gate on the aggregated messages; starts at
        # sigmoid(0) = 0.5
        self.weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)

    def msg_edge(self, edge: dgl.EdgeBatch):
        """Compute per-edge attention logits from endpoint states/features."""
        state = torch.cat([edge.src['state'], edge.dst['state']], -1)  # [X, B, T, 2H] or [X, B, 2H]
        feature = torch.cat([edge.src['feature'], edge.dst['feature'], edge.data['dist']], -1)  # [X, 2F + 1]
        weight = self.w_mlp(feature).reshape(-1, self.hidden_size * 2, self.hidden_size)  # [X, 2H, H]
        shape = state.shape
        state = state.reshape(shape[0], -1, shape[-1])
        # [X, ?, 2H] * [X, 2H, H] => [X, ?, H]
        alpha = self.act(torch.bmm(state, weight))
        alpha = alpha.reshape(*shape[:-1], self.hidden_size)
        return {'alpha': alpha, 'state': edge.src['state']}

    def msg_reduce(self, node: dgl.NodeBatch):
        """Softmax the attention over incoming edges, sum, relu, and gate."""
        state = node.mailbox['state']
        alpha = node.mailbox['alpha']
        alpha = torch.softmax(alpha, 1)
        new_state = torch.relu(torch.sum(alpha * state, dim=1)) * torch.sigmoid(self.weight)
        return {'new_state': new_state}
class STMetaEncoder(nn.Module):
    """Encoder stacking (Meta/Normal)GRU layers with a pair of MetaGAT blocks
    (incoming and outgoing edges) between consecutive recurrent layers."""

    def __init__(self, input_dim: int, rnn_types: List[str], rnn_hiddens: List[int], feat_size: int,
                 meta_hiddens: List[int], graph: Tuple[np.ndarray, list, list]):
        super(STMetaEncoder, self).__init__()
        dist, e_in, e_out = graph
        grus, gats = list(), list()
        rnn_hiddens = [input_dim] + rnn_hiddens
        for i, rnn_type in enumerate(rnn_types):
            in_dim, out_dim = rnn_hiddens[i], rnn_hiddens[i + 1]
            if rnn_type == 'NormalGRU':
                grus.append(NormalGRUCell(in_dim, out_dim))
            elif rnn_type == 'MetaGRU':
                grus.append(MetaGRUCell(in_dim, out_dim, feat_size, meta_hiddens))
            else:
                raise ValueError(f'{rnn_type} is not implemented.')
            # no GAT pair is appended after the last recurrent layer
            if i == len(rnn_types) - 1:
                break
            g1 = MetaGAT(dist.T, e_in, out_dim, feat_size, meta_hiddens)
            g2 = MetaGAT(dist, e_out, out_dim, feat_size, meta_hiddens)
            gats.append(nn.ModuleList([g1, g2]))
        self.grus = nn.ModuleList(grus)
        self.gats = nn.ModuleList(gats)

    def forward(self, feature: Tensor, data: Tensor) -> List[Tensor]:
        """
        :param feature: tensor, [N, F]
        :param data: tensor, [B, T, N, F]
        :return: list of per-layer final hidden states
        """
        # NOTE: the for/else below always runs the else branch (the loop has
        # no break) — the last GRU layer runs after all GAT pairs.
        states = list()
        for depth, (g1, g2) in enumerate(self.gats):
            data, state = self.grus[depth](feature, data)
            states.append(state)
            data = g1(data, feature) + g2(data, feature)
        else:
            _, state = self.grus[-1](feature, data)
            states.append(state)
        return states
class STMetaDecoder(nn.Module):
    """Decoder mirroring STMetaEncoder; autoregressive, with optional teacher
    forcing during training."""

    def __init__(self, n_preds: int, output_dim: int, rnn_types: List[str], rnn_hiddens: List[int], feat_size: int,
                 meta_hiddens: List[int], graph: Tuple[np.ndarray, list, list], input_dim):
        super(STMetaDecoder, self).__init__()
        self.output_dim = output_dim
        self.n_preds = n_preds
        dist, e_in, e_out = graph
        grus, gats = list(), list()
        # rnn_hiddens = [output_dim] + rnn_hiddens
        rnn_hiddens = [input_dim] + rnn_hiddens
        self.input_dim = input_dim
        for i, rnn_type in enumerate(rnn_types):
            in_dim, out_dim = rnn_hiddens[i], rnn_hiddens[i + 1]
            if rnn_type == 'NormalGRU':
                grus.append(NormalGRUCell(in_dim, out_dim))
            elif rnn_type == 'MetaGRU':
                grus.append(MetaGRUCell(in_dim, out_dim, feat_size, meta_hiddens))
            else:
                raise ValueError(f'{rnn_type} is not implemented.')
            # no GAT pair is appended after the last recurrent layer
            if i == len(rnn_types) - 1:
                break
            g1 = MetaGAT(dist.T, e_in, out_dim, feat_size, meta_hiddens)
            g2 = MetaGAT(dist, e_out, out_dim, feat_size, meta_hiddens)
            gats.append(nn.ModuleList([g1, g2]))
        self.grus = nn.ModuleList(grus)
        self.gats = nn.ModuleList(gats)
        self.out = nn.Linear(rnn_hiddens[1], output_dim)
    # def sampling(self):
    #     """ Schedule sampling: sampling the ground truth. """
    #     threshold = self.cl_decay_steps / (self.cl_decay_steps + math.exp(self.global_steps / self.cl_decay_steps))
    #     return float(random.random() < threshold)
    def forward(self, feature: Tensor, begin_states: List[Tensor], targets: Tensor = None,
                teacher_force: float = 0.5) -> Tensor:
        """
        :param feature: tensor, [N, F]
        :param begin_states: list of tensors, each of [B, N, hidden_size]
        :param targets: none or tensor, [B, T, N, input_dim]
        :param teacher_force: float, probability of feeding the ground truth
            back as the next decoder input (training only)
        :return: tensor, [B, n_preds, N, output_dim]
        """
        b, n, _ = begin_states[0].shape
        # split targets into the supervised label part and auxiliary channels
        aux = targets[:,:,:, self.output_dim:] # [b,t,n,d]
        label = targets[:,:,:, :self.output_dim] # [b,t,n,d]
        # all-zero GO symbol starts the autoregressive loop
        go = torch.zeros(b, n, self.input_dim, device=feature.device, dtype=feature.dtype)
        # outputs = list()
        outputs, states = [], begin_states
        # NOTE(review): states[0]/states[1] indexing below assumes exactly two
        # recurrent layers (one GAT pair) — confirm against the config.
        for i_pred in range(self.n_preds):
            if i_pred == 0:
                inputs = go
                for depth, (g1, g2) in enumerate(self.gats):
                    inputs, states[0] = self.grus[depth].one_step(feature, inputs, states[0])
                    inputs = (g1(inputs, feature) + g2(inputs, feature)) / 2
            else:
                # print(len(self.grus), len(states))
                inputs, states[1] = self.grus[-1].one_step(feature, inputs, states[1])
            inputs = self.out(inputs)
            outputs.append(inputs)
            # teacher forcing: sometimes replace the prediction with the label
            if self.training and (targets is not None) and (random.random() < teacher_force):
                # inputs = targets[:, i_pred]
                inputs = label[:, i_pred]
            # always re-attach the auxiliary channels for the next step input
            inputs = torch.cat([inputs, aux[:, i_pred, :, :]], -1)
        return torch.stack(outputs, 1)
class STMetaNet(nn.Module):
    """FNN baseline wrapped in the ST-MetaNet interface.

    NOTE(review): forward() only uses fc1/fc2 — a per-node two-layer MLP over
    the flattened input window.  The meta encoder/decoder/geo_encoder are
    constructed in __init__ but never invoked by forward().
    """

    def __init__(self,
                 graph: Tuple[np.ndarray, list, list],
                 n_preds: int,
                 input_dim: int,
                 output_dim: int,
                 cl_decay_steps: int,
                 rnn_types: List[str],
                 rnn_hiddens: List[int],
                 meta_hiddens: List[int],
                 geo_hiddens: List[int]):
        super(STMetaNet, self).__init__()
        feat_size = geo_hiddens[-1]
        self.cl_decay_steps = cl_decay_steps
        # meta-net components (unused by the FNN forward path below)
        self.encoder = STMetaEncoder(input_dim, rnn_types, rnn_hiddens, feat_size, meta_hiddens, graph)
        self.decoder = STMetaDecoder(n_preds, output_dim, rnn_types, rnn_hiddens, feat_size, meta_hiddens, graph, input_dim)
        self.geo_encoder = MultiLayerPerception(geo_hiddens, hidden_act=nn.ReLU, out_act=True)
        features = graph[0]
        self.num_nodes = features.shape[0]
        # self.num_nodes = 500
        self.input_dim = input_dim
        self.output_dim = output_dim
        # input window length is hard-coded to 12 steps
        self.seq_len = 12
        self.horizon = n_preds
        self.fc1 = nn.Linear(input_dim * self.seq_len, 256)
        self.fc2 = nn.Linear(256, self.horizon * self.output_dim)
    # FNN: feed-forward neural network with two hidden layers of 256 units.
    # (Reference setup: initial lr 1e-3, decayed 10x every 20 epochs from
    # epoch 50; dropout 0.5, L2 weight decay 1e-2, batch size 64, MAE loss,
    # early stopping on validation error.)
    def forward(self, feature: Tensor, inputs: Tensor, targets: Tensor = None, batch_seen: int = 0) -> Tensor:
        """
        :param feature: [N, d] node meta-features (unused by the FNN path)
        :param inputs: (seq_len, B, N * input_dim)
        :param targets: unused by the FNN path
        :param batch_seen: unused by the FNN path
        :return: (horizon, B, N * output_dim)
        """
        # (seq_len, B, N, F) -> (B, N, seq_len * F): flatten window per node
        inputs = inputs.view(self.seq_len, -1, self.num_nodes, self.input_dim).permute(1, 2, 0, 3).contiguous().view(-1, self.num_nodes, self.seq_len * self.input_dim)
        # targets = targets.view(self.horizon, -1, self.num_nodes, self.input_dim).permute(1, 0, 2, 3).contiguous()
        #
        # feature = self.geo_encoder(feature.float())
        # states = self.encoder(feature, inputs)
        # targets = None
        # outputs = self.decoder(feature, states, targets, self._compute_sampling_threshold(batch_seen))
        outputs = self.fc2(torch.sigmoid(self.fc1(inputs))) #bnt
        # return outputs.permute(1, 0, 2, 3).contiguous().view(self.horizon, -1, self.num_nodes * self.output_dim)
        # reorder to the seq2seq interface: (horizon, B, N * output_dim)
        return outputs.view(-1, self.num_nodes, self.horizon, self.output_dim).permute(2, 0, 1, 3).contiguous().view(self.horizon, -1, self.num_nodes * self.output_dim)
    def _compute_sampling_threshold(self, batches_seen: int):
        # inverse-sigmoid decay for scheduled sampling (unused in FNN path)
        return self.cl_decay_steps / (self.cl_decay_steps + math.exp(batches_seen / self.cl_decay_steps))
# :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
# y: shape (horizon, batch_size, num_sensor * output_dim)
# def test():
# dist = np.random.randn(207, 207)
# edge1, edge2 = [[] for _ in range(207)], [[] for _ in range(207)]
# for i in range(207):
# for j in range(207):
# if np.random.random() < 0.2:
# edge1[i].append(j)
# edge2[j].append(i)
# me = STMetaEncoder(2, 32, 32, 32, [32, 4], (dist, edge1, edge2), 32)
# md = STMetaDecoder(12, 1, 32, 32, 32, [32, 4], (dist, edge1, edge2), 32)
# data = torch.randn(31, 12, 207, 2)
# feature = torch.randn(207, 32)
# states = me(feature, data)
# print(states[0].shape, states[1].shape)
# outputs = md(feature, states)
# m = STMetaNet((dist, edge1, edge2), 12, 2, 1, 2000, ['NormalGRU', 'MetaGRU'], [], [16, 2], [32, 32])
| 30,485 | 40.933975 | 223 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/FNN/model/pytorch/dcrnn_cell.py | import numpy as np
import torch
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
    """Lazily-created, shape-keyed parameter store for a host ``nn.Module``.

    Weights/biases are created on first request for a given shape/length,
    registered on the host module (so optimizers and ``state_dict`` see
    them), and cached for reuse.  Tensors are placed on the module-level
    ``device`` global.
    """
    def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
        self._rnn_network = rnn_network
        self._params_dict = {}
        self._biases_dict = {}
        self._type = layer_type

    def get_weights(self, shape):
        """Return a Xavier-initialized weight of ``shape``, creating it once."""
        if shape not in self._params_dict:
            weight = torch.nn.Parameter(torch.empty(*shape, device=device))
            torch.nn.init.xavier_normal_(weight)
            self._params_dict[shape] = weight
            self._rnn_network.register_parameter(
                '{}_weight_{}'.format(self._type, str(shape)), weight)
        return self._params_dict[shape]

    def get_biases(self, length, bias_start=0.0):
        """Return a constant-initialized bias vector of ``length``, creating it once."""
        if length not in self._biases_dict:
            bias = torch.nn.Parameter(torch.empty(length, device=device))
            torch.nn.init.constant_(bias, bias_start)
            self._biases_dict[length] = bias
            self._rnn_network.register_parameter(
                '{}_biases_{}'.format(self._type, str(length)), bias)
        return self._biases_dict[length]
class DCGRUCell(torch.nn.Module):
    """Diffusion-Convolutional GRU cell (DCRNN): a GRU whose gate/candidate
    transforms are graph diffusion convolutions over ``adj_mx``."""
    def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearity='tanh',
                 filter_type="laplacian", use_gc_for_ru=True):
        """

        :param num_units:
        :param adj_mx:
        :param max_diffusion_step:
        :param num_nodes:
        :param nonlinearity:
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super().__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        # support other nonlinearities up here?
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        supports = []
        # Build the diffusion support matrices according to the filter type;
        # "dual_random_walk" uses both forward and backward transition matrices.
        if filter_type == "laplacian":
            supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))
        # Gate (r, u) and candidate parameters are created lazily per shape.
        self._fc_params = LayerParams(self, 'fc')
        self._gconv_params = LayerParams(self, 'gconv')
    @staticmethod
    def _build_sparse_matrix(L):
        """Convert a scipy sparse matrix to a torch sparse COO tensor on ``device``."""
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
        indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
        L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
        return L
    def forward(self, inputs, hx):
        """Gated recurrent unit (GRU) with Graph Convolution.
        :param inputs: (B, num_nodes * input_dim)
        :param hx: (B, num_nodes * rnn_units)

        :return
        - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
        """
        output_size = 2 * self._num_units
        # Reset/update gates may use either graph conv or a plain FC transform.
        if self._use_gc_for_ru:
            fn = self._gconv
        else:
            fn = self._fc
        value = torch.sigmoid(fn(inputs, hx, output_size, bias_start=1.0))
        value = torch.reshape(value, (-1, self._num_nodes, output_size))
        r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1)
        r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
        u = torch.reshape(u, (-1, self._num_nodes * self._num_units))
        # Candidate state uses the reset-gated hidden state.
        c = self._gconv(inputs, r * hx, self._num_units)
        if self._activation is not None:
            c = self._activation(c)
        new_state = u * hx + (1.0 - u) * c
        return new_state
    @staticmethod
    def _concat(x, x_):
        # Stack a new diffusion order onto the accumulated Chebyshev-like basis.
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)
    def _fc(self, inputs, state, output_size, bias_start=0.0):
        """Plain per-node fully-connected transform of [inputs, state]."""
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
        state = torch.reshape(state, (batch_size * self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=-1)
        input_size = inputs_and_state.shape[-1]
        weights = self._fc_params.get_weights((input_size, output_size))
        # NOTE(review): this applies a sigmoid *before* returning, and forward()
        # applies another sigmoid on top — looks like a double sigmoid; confirm intended.
        value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
        biases = self._fc_params.get_biases(output_size, bias_start)
        value += biases
        return value
    def _gconv(self, inputs, state, output_size, bias_start=0.0):
        """Diffusion convolution of [inputs, state] up to ``_max_diffusion_step`` hops."""
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.shape[0]
        inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
        state = torch.reshape(state, (batch_size, self._num_nodes, -1))
        inputs_and_state = torch.cat([inputs, state], dim=2)
        input_size = inputs_and_state.size(2)
        x = inputs_and_state
        x0 = x.permute(1, 2, 0)  # (num_nodes, total_arg_size, batch_size)
        x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)
        if self._max_diffusion_step == 0:
            pass
        else:
            for support in self._supports:
                x1 = torch.sparse.mm(support, x0)
                x = self._concat(x, x1)
                # Chebyshev-style recurrence: T_k = 2 * L * T_{k-1} - T_{k-2}
                for k in range(2, self._max_diffusion_step + 1):
                    x2 = 2 * torch.sparse.mm(support, x1) - x0
                    x = self._concat(x, x2)
                    x1, x0 = x2, x1
        num_matrices = len(self._supports) * self._max_diffusion_step + 1  # Adds for x itself.
        x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
        x = x.permute(3, 1, 2, 0)  # (batch_size, num_nodes, input_size, order)
        x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])
        weights = self._gconv_params.get_weights((input_size * num_matrices, output_size))
        x = torch.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)
        biases = self._gconv_params.get_biases(output_size, bias_start)
        x += biases
        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return torch.reshape(x, [batch_size, self._num_nodes * output_size])
| 6,939 | 41.576687 | 105 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/FNN/model/pytorch/utils.py | import torch
import numpy as np
def masked_mae_loss(y_pred, y_true):
    """Mean absolute error that ignores zero entries of ``y_true``.

    Entries where ``y_true == 0`` are treated as missing: they contribute
    nothing, and the remaining entries are re-weighted so the result stays
    a mean over the observed values only.
    """
    weights = (y_true != 0).float()
    weights = weights / weights.mean()
    abs_err = torch.abs(y_pred - y_true) * weights
    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)
    return abs_err.mean()
def masked_mse(preds, labels, null_val=np.nan):
    """Mean squared error over entries where ``labels`` is valid.

    ``null_val`` marks missing entries (NaN by default); they are excluded
    and the remaining entries are re-weighted so the result is a mean over
    the observed values only.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    valid = valid.float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    err = (preds - labels) ** 2 * valid
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)


def masked_rmse(preds, labels, null_val=np.nan):
    """Root of :func:`masked_mse`, with the same masking semantics."""
    return torch.sqrt(masked_mse(preds=preds, labels=labels, null_val=null_val))


def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error over entries where ``labels`` is valid."""
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    valid = valid.float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    err = torch.abs(preds - labels) * valid
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)


def masked_mape(preds, labels, null_val=np.nan):
    """Mean absolute percentage error over valid entries of ``labels``.

    The division happens before masking, so zero labels produce inf
    intermediates; ``inf * 0 == NaN`` at masked positions, and the NaN
    sweep below zeroes them out (matching the original behaviour).
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    valid = valid.float()
    valid = valid / torch.mean(valid)
    valid = torch.where(torch.isnan(valid), torch.zeros_like(valid), valid)
    err = torch.abs(preds - labels) / labels * valid
    err = torch.where(torch.isnan(err), torch.zeros_like(err), err)
    return torch.mean(err)


def metric(pred, real):
    """Return (MAE, MAPE, RMSE) as floats, masking zero entries of ``real``."""
    mae = masked_mae(pred, real, 0.0).item()
    mape = masked_mape(pred, real, 0.0).item()
    rmse = masked_rmse(pred, real, 0.0).item()
    return mae, mape, rmse
def get_normalized_adj(A):
    """Return the symmetrically degree-normalized adjacency matrix.

    Computes D^{-1/2} (A + I) D^{-1/2}, where D is the degree (row-sum)
    matrix of A + I.  Degrees are clamped below at 1e-4 to prevent infs.
    """
    A_hat = A + np.diag(np.ones(A.shape[0], dtype=np.float32))
    deg = np.array(np.sum(A_hat, axis=1)).reshape((-1,))
    deg[deg <= 10e-5] = 10e-5  # Prevent infs
    inv_sqrt_deg = np.reciprocal(np.sqrt(deg))
    return np.multiply(np.multiply(inv_sqrt_deg.reshape((-1, 1)), A_hat),
                       inv_sqrt_deg.reshape((1, -1)))
Traffic-Benchmark | Traffic-Benchmark-master/methods/FNN/model/pytorch/dcrnn_supervisor.py | import os
import time
import numpy as np
import torch
import torch.nn as nn
# from torch.utils.tensorboard import SummaryWriter
from lib import utils
# from model.pytorch.dcrnn_model import DCRNNModel
from model.pytorch.dcrnn_model import STMetaNet
from model.pytorch.utils import masked_mae_loss, metric, get_normalized_adj
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DCRNNSupervisor:
    """Training/evaluation harness for the (repurposed) DCRNN pipeline.

    Despite the name, it currently trains an ``STMetaNet`` instance (whose
    forward pass is the FNN baseline).  Handles data loading, logging,
    checkpointing, curriculum-style batch counting, early stopping, and
    per-horizon test metrics.
    """
    def __init__(self, data_type, LOAD_INITIAL, adj_mx, **kwargs):
        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        self.max_grad_norm = self._train_kwargs.get('max_grad_norm', 1.)
        # logging.
        self._log_dir = self._get_log_dir(kwargs)
        # self._writer = SummaryWriter('runs/' + self._log_dir)
        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)
        # data set
        self._data = utils.load_dataset(**self._data_kwargs)
        self.standard_scaler = self._data['scaler']
        self.num_nodes = int(self._model_kwargs.get('num_nodes', 1))
        self.input_dim = int(self._model_kwargs.get('input_dim', 1))
        self.seq_len = int(self._model_kwargs.get('seq_len'))  # for the encoder
        self.output_dim = int(self._model_kwargs.get('output_dim', 1))
        self.use_curriculum_learning = bool(
            self._model_kwargs.get('use_curriculum_learning', False))
        self.horizon = int(self._model_kwargs.get('horizon', 1))  # for the decoder
        # NOTE(review): the unpack pattern repeats the name `e_in_out`, so the
        # second tuple element is discarded and both graph slots below receive
        # the same edge list — confirm this is intended.
        # NOTE(review): hard-coded dataset path; breaks for other datasets.
        # features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet.npy', allow_pickle=True)
        # features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet_metrla.npy', allow_pickle=True)
        features, (dist, e_in_out, e_in_out) = np.load('./data/feat_stmetanet_BJ500.npy', allow_pickle=True)
        # features, (dist, e_in_out, e_in_out) = np.load('/home/lifuxian/BikeNYC/feat_stmetanet.npy', allow_pickle=True)
        self.features = torch.from_numpy(features).to(device)
        # setup model
        # dcrnn_model = DCRNNModel(adj_mx, self._logger, **self._model_kwargs)
        dcrnn_model = STMetaNet(
            graph = (dist, e_in_out, e_in_out),#Tuple[np.ndarray, list, list],
            n_preds = self.horizon,
            input_dim = self.input_dim,
            output_dim = self.output_dim,
            cl_decay_steps = 2000,
            rnn_types = ['NormalGRU', 'MetaGRU'],
            # rnn_types = ['NormalGRU', 'NormalGRU'],
            rnn_hiddens = [32, 32],
            meta_hiddens = [16, 2],
            # geo_hiddens = [20, 32, 32]
            # geo_hiddens = [20, 32, 32]  # the first list element is the feature dimension (20)
            geo_hiddens = [11, 32, 32]  # the first list element is the feature dimension (11)
        )
        self.dcrnn_model = dcrnn_model.cuda() if torch.cuda.is_available() else dcrnn_model
        self._logger.info("Model created")
        self._epoch_num = self._train_kwargs.get('epoch', 0)
        # if self._epoch_num > 0:  # in fact the default value of self._epoch_num is 0
        #     self.load_model()
        self.data_type = data_type
        self.LOAD_INITIAL = LOAD_INITIAL
        if LOAD_INITIAL:
            self.load_lfx()
        # self.features = torch.from_numpy(get_normalized_adj(adj_mx)).to(device)
    @staticmethod
    def _get_log_dir(kwargs):
        """Build (and create on disk) a run-specific log directory name from hyperparameters."""
        log_dir = kwargs['train'].get('log_dir')
        if log_dir is None:
            batch_size = kwargs['data'].get('batch_size')
            learning_rate = kwargs['train'].get('base_lr')
            max_diffusion_step = kwargs['model'].get('max_diffusion_step')
            num_rnn_layers = kwargs['model'].get('num_rnn_layers')
            rnn_units = kwargs['model'].get('rnn_units')
            structure = '-'.join(
                ['%d' % rnn_units for _ in range(num_rnn_layers)])
            horizon = kwargs['model'].get('horizon')
            filter_type = kwargs['model'].get('filter_type')
            filter_type_abbr = 'L'
            if filter_type == 'random_walk':
                filter_type_abbr = 'R'
            elif filter_type == 'dual_random_walk':
                filter_type_abbr = 'DR'
            run_id = 'dcrnn_%s_%d_h_%d_%s_lr_%g_bs_%d_%s/' % (
                filter_type_abbr, max_diffusion_step, horizon,
                structure, learning_rate, batch_size,
                time.strftime('%m%d%H%M%S'))
            base_dir = kwargs.get('base_dir')
            log_dir = os.path.join(base_dir, run_id)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        return log_dir
    # def save_model(self, epoch):
    #     if not os.path.exists('models/'):
    #         os.makedirs('models/')
    #
    #     config = dict(self._kwargs)
    #     config['model_state_dict'] = self.dcrnn_model.state_dict()
    #     config['epoch'] = epoch
    #     torch.save(config, 'models/epo%d.tar' % epoch)
    #     self._logger.info("Saved model at {}".format(epoch))
    #     return 'models/epo%d.tar' % epoch
    def save_model(self, epoch):
        """Save config + model state dict to a single per-dataset checkpoint file."""
        path = 'models/%s_best.tar' % self.data_type
        if not os.path.exists('models/'):
            os.makedirs('models/')
        config = dict(self._kwargs)
        config['model_state_dict'] = self.dcrnn_model.state_dict()
        config['epoch'] = epoch
        torch.save(config, path)
        self._logger.info("Saved model at {}".format(epoch))
        return path
    # def load_model(self):
    #     self._setup_graph()
    #     assert os.path.exists('models/epo%d.tar' % self._epoch_num), 'Weights at epoch %d not found' % self._epoch_num
    #     checkpoint = torch.load('models/epo%d.tar' % self._epoch_num, map_location='cpu')
    #     self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
    #     self._logger.info("Loaded model at {}".format(self._epoch_num))
    def load_lfx(self):
        """Load the per-dataset best checkpoint and resume its epoch counter."""
        path = 'models/%s_best.tar' % self.data_type
        # self._setup_graph()
        assert os.path.exists(path), 'Weights not found'
        checkpoint = torch.load(path, map_location='cpu')
        self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
        self._logger.info("Loaded model successfully!")
        self._epoch_num = checkpoint['epoch']
    def _setup_graph(self):
        # Run a single forward pass so lazily-registered parameters exist
        # before loading a state dict (needed for DCGRUCell-style models).
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['val_loader'].get_iterator()
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                output = self.dcrnn_model(x)  # why is this step needed??
                break
    def train(self, **kwargs):
        """Public entry point: merge train kwargs and run the training loop."""
        kwargs.update(self._train_kwargs)
        return self._train(**kwargs)
    def evaluate(self, dataset='val', batches_seen=0):
        """
        Computes mean L1Loss
        :return: mean L1Loss
        """
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            losses = []
            y_truths = []
            y_preds = []
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                # output = self.dcrnn_model(x)
                output = self.dcrnn_model(self.features, x, target, batches_seen)
                loss = self._compute_loss(y, output)
                losses.append(loss.item())
                y_truths.append(y.cpu())
                y_preds.append(output.cpu())
            mean_loss = np.mean(losses)
            # self._writer.add_scalar('{} loss'.format(dataset), mean_loss, batches_seen)
            y_preds = np.concatenate(y_preds, axis=1)
            y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
            y_truths_scaled = []
            y_preds_scaled = []
            # Predictions are inverse-transformed per horizon step; truths are
            # kept as-is (they were never scaled in _get_x_y_in_correct_dims).
            for t in range(y_preds.shape[0]):
                # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
                y_pred = self.standard_scaler.inverse_transform(y_preds[t])
                # y_truths_scaled.append(y_truth)
                y_truths_scaled.append(y_truths[t])
                y_preds_scaled.append(y_pred)
            return mean_loss, {'prediction': y_preds_scaled, 'truth': y_truths_scaled}
    def evaluate_test(self, dataset='test'):
        """
        Computes mean L1Loss
        :return: mean L1Loss

        NOTE(review): actually prints per-horizon MAE/MAPE/RMSE and returns None.
        """
        with torch.no_grad():
            self.dcrnn_model = self.dcrnn_model.eval()
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            # losses = []
            y_truths = []
            y_preds = []
            for _, (x, y) in enumerate(val_iterator):
                x, y, target = self._prepare_data(x, y)
                # output = self.dcrnn_model(x)
                output = self.dcrnn_model(self.features, x, target)
                # losses.append(loss.item())
                y_truths.append(y.cpu())
                y_preds.append(output.cpu())
            # mean_loss = np.mean(losses)
            # y_preds = np.concatenate(y_preds, axis=1)
            # y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
            y_preds = torch.cat(y_preds, dim=1)
            y_truths = torch.cat(y_truths, dim=1)  # concatenate on batch dimension
            # y_truths_scaled = []
            # y_preds_scaled = []
            for t in range(y_preds.shape[0]):
                # y_truth = self.standard_scaler.inverse_transform(y_truths[t])
                # y_pred = self.standard_scaler.inverse_transform(y_preds[t])
                # y_truths_scaled.append(y_truth)
                # y_preds_scaled.append(y_pred)
                # loss = self._compute_loss(y_truths[t], y_preds[t])
                # log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}'
                # print(log.format(t + 1, loss.item()))
                metrics = self._compute_metrics(y_truths[t], y_preds[t])
                log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
                print(log.format(t + 1, metrics[0], metrics[1], metrics[2]))
    def _train(self, base_lr,
               steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
               test_every_n_epochs=10, epsilon=1e-8, **kwargs):
        """Main training loop with MultiStepLR decay, gradient clipping,
        best-on-validation checkpointing and early stopping."""
        # steps is used in learning rate - will see if need to use it?
        if self.LOAD_INITIAL:
            min_val_loss, _ = self.evaluate(dataset='val')
        else:
            min_val_loss = float('inf')
        wait = 0
        optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
                                                            gamma=lr_decay_ratio)
        self._logger.info('Start training ...')
        # this will fail if model is loaded with a changed batch_size
        num_batches = self._data['train_loader'].num_batch
        self._logger.info("num_batches:{}".format(num_batches))
        batches_seen = num_batches * self._epoch_num
        # NOTE(review): hard-coded override of the `epochs` argument.
        epochs = 1000
        for epoch_num in range(self._epoch_num, epochs):
            self.dcrnn_model = self.dcrnn_model.train()
            train_iterator = self._data['train_loader'].get_iterator()
            losses = []
            start_time = time.time()
            for _, (x, y) in enumerate(train_iterator):
                optimizer.zero_grad()
                x, y, target = self._prepare_data(x, y)
                # output = self.dcrnn_model(x, y, batches_seen)
                output = self.dcrnn_model(self.features, x, target, batches_seen)
                if batches_seen == 0:
                    # this is a workaround to accommodate dynamically registered parameters in DCGRUCell
                    optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
                # loss = self._compute_loss(y, output)
                loss = self._compute_loss(y, output)
                self._logger.debug(loss.item())
                losses.append(loss.item())
                batches_seen += 1
                loss.backward()
                # gradient clipping - this does it in place
                torch.nn.utils.clip_grad_norm_(self.dcrnn_model.parameters(), self.max_grad_norm)
                optimizer.step()
            self._logger.info("epoch complete")
            lr_scheduler.step()
            self._logger.info("evaluating now!")
            val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)
            end_time = time.time()
            # self._writer.add_scalar('training loss',
            #                         np.mean(losses),
            #                         batches_seen)
            if (epoch_num % log_every) == log_every - 1:
                message = 'Epoch [{}/{}] ({}) train_loss: {:.4f}, val_mae: {:.4f}, lr: {:.6f}, ' \
                          '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                           np.mean(losses), val_loss, lr_scheduler.get_lr()[0],
                                           (end_time - start_time))
                self._logger.info(message)
            # if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
            #     test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
            #     message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, lr: {:.6f}, ' \
            #               '{:.1f}s'.format(epoch_num, epochs, batches_seen,
            #                                np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
            #                                (end_time - start_time))
            #     self._logger.info(message)
            if val_loss < min_val_loss:
                wait = 0
                if save_model:
                    model_file_name = self.save_model(epoch_num)
                    self._logger.info(
                        'Val loss decrease from {:.4f} to {:.4f}, '
                        'saving to {}'.format(min_val_loss, val_loss, model_file_name))
                min_val_loss = val_loss
            elif val_loss >= min_val_loss:
                wait += 1
                if wait == patience:
                    self._logger.warning('Early stopping at epoch: %d' % epoch_num)
                    break
        # After training, reload the best checkpoint and report test metrics.
        self.load_lfx()
        self.evaluate_test(dataset='test')
    def _prepare_data(self, x, y):
        """Convert numpy batches to tensors with (time, batch, ...) layout on ``device``."""
        x, y = self._get_x_y(x, y)
        x, y, target = self._get_x_y_in_correct_dims(x, y)
        return x.to(device), y.to(device), target.to(device)
    def _get_x_y(self, x, y):
        """
        :param x: shape (batch_size, seq_len, num_sensor, input_dim)
        :param y: shape (batch_size, horizon, num_sensor, input_dim)
        :returns x shape (seq_len, batch_size, num_sensor, input_dim)
                 y shape (horizon, batch_size, num_sensor, input_dim)
        """
        x = torch.from_numpy(x).float()
        y = torch.from_numpy(y).float()
        self._logger.debug("X: {}".format(x.size()))
        self._logger.debug("y: {}".format(y.size()))
        x = x.permute(1, 0, 2, 3)
        y = y.permute(1, 0, 2, 3)
        return x, y
    def _get_x_y_in_correct_dims(self, x, y):
        """
        :param x: shape (seq_len, batch_size, num_sensor, input_dim)
        :param y: shape (horizon, batch_size, num_sensor, input_dim)
        :return: x: shape (seq_len, batch_size, num_sensor * input_dim)
                 y: shape (horizon, batch_size, num_sensor * output_dim)

        Also builds ``target``: like y but with the first feature channel
        standard-scaled, used as decoder teacher-forcing input.
        """
        batch_size = x.size(1)
        x = x.view(self.seq_len, batch_size, self.num_nodes * self.input_dim)
        target = torch.cat([self.standard_scaler.transform(y[..., :1]), y[..., 1:]], -1).view(self.horizon, batch_size,
                                                                                              self.num_nodes * self.input_dim)
        y = y[..., :self.output_dim].view(self.horizon, batch_size,
                                          self.num_nodes * self.output_dim)
        return x, y, target
    def _compute_loss(self, y_true, y_predicted):
        """Masked MAE in the original (inverse-transformed) scale of the predictions."""
        # y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return masked_mae_loss(y_predicted, y_true)
    def _compute_loss_mse(self, y_true, y_predicted):
        """MSE variant of the loss; currently unused by the training loop."""
        # y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return nn.MSELoss()(y_predicted, y_true)
    def _compute_metrics(self, y_true, y_predicted):
        """Return (MAE, MAPE, RMSE) on inverse-transformed predictions."""
        # y_true = self.standard_scaler.inverse_transform(y_true)
        y_predicted = self.standard_scaler.inverse_transform(y_predicted)
        return metric(y_predicted, y_true)
Traffic-Benchmark | Traffic-Benchmark-master/methods/MTGNN/layer.py | from __future__ import division
import torch
import torch.nn as nn
from torch.nn import init
import numbers
import torch.nn.functional as F
class nconv(nn.Module):
    """Static graph aggregation: contract the node axis of ``x`` against ``A``.

    x: (N, C, V, L), A: (V, W) -> (N, C, W, L).
    """
    def __init__(self):
        super(nconv, self).__init__()

    def forward(self, x, A):
        aggregated = torch.einsum('ncvl,vw->ncwl', (x, A))
        return aggregated.contiguous()


class dy_nconv(nn.Module):
    """Dynamic graph aggregation with a per-sample, per-step adjacency.

    x: (N, C, V, L), A: (N, V, W, L) -> (N, C, W, L).
    """
    def __init__(self):
        super(dy_nconv, self).__init__()

    def forward(self, x, A):
        aggregated = torch.einsum('ncvl,nvwl->ncwl', (x, A))
        return aggregated.contiguous()


class linear(nn.Module):
    """Per-position linear map over channels, implemented as a 1x1 Conv2d."""
    def __init__(self, c_in, c_out, bias=True):
        super(linear, self).__init__()
        self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1),
                                   padding=(0, 0), stride=(1, 1), bias=bias)

    def forward(self, x):
        return self.mlp(x)


class prop(nn.Module):
    """Plain graph propagation: ``gdep`` rounds of row-normalized aggregation,
    each mixed with the original signal via ``alpha``, then a 1x1 projection."""
    def __init__(self, c_in, c_out, gdep, dropout, alpha):
        super(prop, self).__init__()
        self.nconv = nconv()
        self.mlp = linear(c_in, c_out)
        self.gdep = gdep
        self.dropout = dropout
        self.alpha = alpha

    def forward(self, x, adj):
        # Add self-loops, then row-normalize by degree.
        adj = adj + torch.eye(adj.size(0)).to(x.device)
        deg = adj.sum(1)
        norm_adj = adj / deg.view(-1, 1)
        h = x
        for _ in range(self.gdep):
            h = self.alpha * x + (1 - self.alpha) * self.nconv(h, norm_adj)
        return self.mlp(h)


class mixprop(nn.Module):
    """Mix-hop propagation (MTGNN): keep every propagation depth, concatenate
    them along channels, and project with a 1x1 convolution."""
    def __init__(self, c_in, c_out, gdep, dropout, alpha):
        super(mixprop, self).__init__()
        self.nconv = nconv()
        self.mlp = linear((gdep + 1) * c_in, c_out)
        self.gdep = gdep
        self.dropout = dropout
        self.alpha = alpha

    def forward(self, x, adj):
        adj = adj + torch.eye(adj.size(0)).to(x.device)
        deg = adj.sum(1)
        norm_adj = adj / deg.view(-1, 1)
        h = x
        hops = [h]
        for _ in range(self.gdep):
            h = self.alpha * x + (1 - self.alpha) * self.nconv(h, norm_adj)
            hops.append(h)
        return self.mlp(torch.cat(hops, dim=1))


class dy_mixprop(nn.Module):
    """Mix-hop propagation over a *learned* dynamic adjacency.

    Two soft adjacencies (forward/backward) are derived from the input via
    two 1x1 projections; mix-hop outputs along both directions are summed.
    """
    def __init__(self, c_in, c_out, gdep, dropout, alpha):
        super(dy_mixprop, self).__init__()
        self.nconv = dy_nconv()
        self.mlp1 = linear((gdep + 1) * c_in, c_out)
        self.mlp2 = linear((gdep + 1) * c_in, c_out)
        self.gdep = gdep
        self.dropout = dropout
        self.alpha = alpha
        self.lin1 = linear(c_in, c_in)
        self.lin2 = linear(c_in, c_in)

    def _mixhop(self, x, adj, proj):
        # One mix-hop pass in a given propagation direction.
        h = x
        hops = [h]
        for _ in range(self.gdep):
            h = self.alpha * x + (1 - self.alpha) * self.nconv(h, adj)
            hops.append(h)
        return proj(torch.cat(hops, dim=1))

    def forward(self, x):
        q = torch.tanh(self.lin1(x))
        k = torch.tanh(self.lin2(x))
        scores = self.nconv(q.transpose(2, 1), k)
        adj_fwd = torch.softmax(scores, dim=2)
        adj_bwd = torch.softmax(scores.transpose(2, 1), dim=2)
        return self._mixhop(x, adj_fwd, self.mlp1) + self._mixhop(x, adj_bwd, self.mlp2)
class dilated_1D(nn.Module):
    """Dilated temporal convolution with a fixed 1x7 kernel.

    The time dimension (last axis) shrinks by ``6 * dilation_factor``.
    ``kernel_set`` is kept only for interface parity with
    :class:`dilated_inception`; it is not used here.
    """
    def __init__(self, cin, cout, dilation_factor=2):
        super(dilated_1D, self).__init__()
        # Fix: the original assigned an empty nn.ModuleList() to self.tconv
        # and immediately overwrote it with the Conv2d below; the dead
        # assignment has been removed (no behavioral change).
        self.kernel_set = [2, 3, 6, 7]
        self.tconv = nn.Conv2d(cin, cout, (1, 7), dilation=(1, dilation_factor))

    def forward(self, input):
        """Apply the dilated conv: (N, cin, V, L) -> (N, cout, V, L - 6*dilation)."""
        x = self.tconv(input)
        return x
class dilated_inception(nn.Module):
    """Inception-style temporal convolution.

    Four parallel dilated convs with kernel widths {2, 3, 6, 7} (each given
    ``cout / 4`` output channels); outputs are cropped to the temporal length
    of the widest branch and concatenated along the channel axis.
    """
    def __init__(self, cin, cout, dilation_factor=2):
        super(dilated_inception, self).__init__()
        self.tconv = nn.ModuleList()
        self.kernel_set = [2, 3, 6, 7]
        cout = int(cout / len(self.kernel_set))
        for kern in self.kernel_set:
            self.tconv.append(nn.Conv2d(cin, cout, (1, kern), dilation=(1, dilation_factor)))

    def forward(self, input):
        branches = [conv(input) for conv in self.tconv]
        # Crop every branch to the length of the last (widest-kernel) branch.
        target_len = branches[-1].size(3)
        branches = [b[..., -target_len:] for b in branches]
        return torch.cat(branches, dim=1)
class graph_constructor(nn.Module):
    """Learn a sparse directed adjacency from node embeddings (MTGNN).

    Each node gets two learned vectors; the dense adjacency is
    ``relu(tanh(alpha * (M1 M2^T - M2 M1^T)))`` — an antisymmetric score so
    edges tend to have a single direction — sparsified by keeping the top-k
    entries per row.  If ``static_feat`` is given, node vectors come from
    linear maps of the static features instead of embeddings.
    """
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_constructor, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
            self.lin2 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
            self.lin2 = nn.Linear(dim, dim)
        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def _node_vectors(self, idx):
        # Two views of every node; identical when static features are used.
        if self.static_feat is None:
            return self.emb1(idx), self.emb2(idx)
        vec = self.static_feat[idx, :]
        return vec, vec

    def _dense_adj(self, idx):
        v1, v2 = self._node_vectors(idx)
        v1 = torch.tanh(self.alpha * self.lin1(v1))
        v2 = torch.tanh(self.alpha * self.lin2(v2))
        skew = torch.mm(v1, v2.transpose(1, 0)) - torch.mm(v2, v1.transpose(1, 0))
        return F.relu(torch.tanh(self.alpha * skew))

    def forward(self, idx):
        adj = self._dense_adj(idx)
        # Keep only the k largest entries per row.
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        vals, cols = adj.topk(self.k, 1)
        mask.scatter_(1, cols, vals.fill_(1))
        return adj * mask

    def fullA(self, idx):
        """Dense (un-sparsified) version of the learned adjacency."""
        return self._dense_adj(idx)
class graph_global(nn.Module):
    """Fully learnable dense adjacency: a free (nnodes x nnodes) parameter
    passed through ReLU.  The extra constructor arguments mirror the other
    graph learners but are unused, as is ``idx`` in ``forward``."""
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_global, self).__init__()
        self.nnodes = nnodes
        self.A = nn.Parameter(torch.randn(nnodes, nnodes).to(device), requires_grad=True).to(device)

    def forward(self, idx):
        return F.relu(self.A)
class graph_undirected(nn.Module):
    """Learn a sparse undirected-style adjacency.

    A single embedding/linear pair is used for both endpoints, so the dense
    score matrix ``relu(tanh(alpha * M M^T))`` is symmetric before the
    per-row top-k sparsification.
    """
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_undirected, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def forward(self, idx):
        # Same vector source for both endpoints (undirected scoring).
        if self.static_feat is None:
            vec1 = self.emb1(idx)
            vec2 = self.emb1(idx)
        else:
            vec1 = self.static_feat[idx, :]
            vec2 = vec1
        vec1 = torch.tanh(self.alpha * self.lin1(vec1))
        vec2 = torch.tanh(self.alpha * self.lin1(vec2))
        dense = F.relu(torch.tanh(self.alpha * torch.mm(vec1, vec2.transpose(1, 0))))
        # Keep only the k largest entries per row.
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        vals, cols = dense.topk(self.k, 1)
        mask.scatter_(1, cols, vals.fill_(1))
        return dense * mask
class graph_directed(nn.Module):
    """Learn a sparse directed adjacency.

    Distinct embedding/linear pairs produce source and target vectors; the
    dense score ``relu(tanh(alpha * M1 M2^T))`` is generally asymmetric and
    is sparsified by keeping the top-k entries per row.
    """
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_directed, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
            self.lin2 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
            self.lin2 = nn.Linear(dim, dim)
        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def forward(self, idx):
        if self.static_feat is None:
            src = self.emb1(idx)
            dst = self.emb2(idx)
        else:
            src = self.static_feat[idx, :]
            dst = src
        src = torch.tanh(self.alpha * self.lin1(src))
        dst = torch.tanh(self.alpha * self.lin2(dst))
        dense = F.relu(torch.tanh(self.alpha * torch.mm(src, dst.transpose(1, 0))))
        # Keep only the k largest entries per row.
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        vals, cols = dense.topk(self.k, 1)
        mask.scatter_(1, cols, vals.fill_(1))
        return dense * mask
class LayerNorm(nn.Module):
    """LayerNorm whose affine parameters are indexed by node ids.

    The learned weight/bias have the full ``normalized_shape``; at forward
    time only the rows selected by ``idx`` along axis 1 (the node axis) are
    used, so a sub-graph batch normalizes with its own per-node parameters.
    Normalization itself is always over ``input.shape[1:]``.
    """
    __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine']

    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if elementwise_affine:
            self.weight = nn.Parameter(torch.Tensor(*normalized_shape))
            self.bias = nn.Parameter(torch.Tensor(*normalized_shape))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Standard affine init: weight = 1, bias = 0."""
        if self.elementwise_affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def forward(self, input, idx):
        if not self.elementwise_affine:
            return F.layer_norm(input, tuple(input.shape[1:]), self.weight, self.bias, self.eps)
        # Select the affine parameters for the nodes present in this batch.
        return F.layer_norm(input, tuple(input.shape[1:]),
                            self.weight[:, idx, :], self.bias[:, idx, :], self.eps)

    def extra_repr(self):
        return '{normalized_shape}, eps={eps}, ' \
            'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
Traffic-Benchmark | Traffic-Benchmark-master/methods/MTGNN/train_single_step.py | import argparse
import math
import time
import torch
import torch.nn as nn
from net import gtnet
import numpy as np
import importlib
from util import *
from trainer import Optim
def evaluate(data, X, Y, model, evaluateL2, evaluateL1, batch_size):
model.eval()
total_loss = 0
total_loss_l1 = 0
n_samples = 0
predict = None
test = None
for X, Y in data.get_batches(X, Y, batch_size, False):
X = torch.unsqueeze(X,dim=1)
X = X.transpose(2,3)
with torch.no_grad():
output = model(X)
output = torch.squeeze(output)
if len(output.shape)==1:
output = output.unsqueeze(dim=0)
if predict is None:
predict = output
test = Y
else:
predict = torch.cat((predict, output))
test = torch.cat((test, Y))
scale = data.scale.expand(output.size(0), data.m)
total_loss += evaluateL2(output * scale, Y * scale).item()
total_loss_l1 += evaluateL1(output * scale, Y * scale).item()
n_samples += (output.size(0) * data.m)
rse = math.sqrt(total_loss / n_samples) / data.rse
rae = (total_loss_l1 / n_samples) / data.rae
predict = predict.data.cpu().numpy()
Ytest = test.data.cpu().numpy()
sigma_p = (predict).std(axis=0)
sigma_g = (Ytest).std(axis=0)
mean_p = predict.mean(axis=0)
mean_g = Ytest.mean(axis=0)
index = (sigma_g != 0)
correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g)
correlation = (correlation[index]).mean()
return rse, rae, correlation
def train(data, X, Y, model, criterion, optim, batch_size):
model.train()
total_loss = 0
n_samples = 0
iter = 0
for X, Y in data.get_batches(X, Y, batch_size, True):
model.zero_grad()
X = torch.unsqueeze(X,dim=1)
X = X.transpose(2,3)
if iter % args.step_size == 0:
perm = np.random.permutation(range(args.num_nodes))
num_sub = int(args.num_nodes / args.num_split)
for j in range(args.num_split):
if j != args.num_split - 1:
id = perm[j * num_sub:(j + 1) * num_sub]
else:
id = perm[j * num_sub:]
id = torch.tensor(id).to(device)
tx = X[:, :, id, :]
ty = Y[:, id]
output = model(tx,id)
output = torch.squeeze(output)
scale = data.scale.expand(output.size(0), data.m)
scale = scale[:,id]
loss = criterion(output * scale, ty * scale)
loss.backward()
total_loss += loss.item()
n_samples += (output.size(0) * data.m)
grad_norm = optim.step()
if iter%100==0:
print('iter:{:3d} | loss: {:.3f}'.format(iter,loss.item()/(output.size(0) * data.m)))
iter += 1
return total_loss / n_samples
parser = argparse.ArgumentParser(description='PyTorch Time series forecasting')
parser.add_argument('--data', type=str, default='./data/solar_AL.txt',
help='location of the data file')
parser.add_argument('--log_interval', type=int, default=2000, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model/model.pt',
help='path to save the final model')
parser.add_argument('--optim', type=str, default='adam')
parser.add_argument('--L1Loss', type=bool, default=True)
parser.add_argument('--normalize', type=int, default=2)
parser.add_argument('--device',type=str,default='cuda:1',help='')
parser.add_argument('--gcn_true', type=bool, default=True, help='whether to add graph convolution layer')
parser.add_argument('--buildA_true', type=bool, default=True, help='whether to construct adaptive adjacency matrix')
parser.add_argument('--gcn_depth',type=int,default=2,help='graph convolution depth')
parser.add_argument('--num_nodes',type=int,default=137,help='number of nodes/variables')
parser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')
parser.add_argument('--subgraph_size',type=int,default=20,help='k')
parser.add_argument('--node_dim',type=int,default=40,help='dim of nodes')
parser.add_argument('--dilation_exponential',type=int,default=2,help='dilation exponential')
parser.add_argument('--conv_channels',type=int,default=16,help='convolution channels')
parser.add_argument('--residual_channels',type=int,default=16,help='residual channels')
parser.add_argument('--skip_channels',type=int,default=32,help='skip channels')
parser.add_argument('--end_channels',type=int,default=64,help='end channels')
parser.add_argument('--in_dim',type=int,default=1,help='inputs dimension')
parser.add_argument('--seq_in_len',type=int,default=24*7,help='input sequence length')
parser.add_argument('--seq_out_len',type=int,default=1,help='output sequence length')
parser.add_argument('--horizon', type=int, default=3)
parser.add_argument('--layers',type=int,default=5,help='number of layers')
parser.add_argument('--batch_size',type=int,default=32,help='batch size')
parser.add_argument('--lr',type=float,default=0.0001,help='learning rate')
parser.add_argument('--weight_decay',type=float,default=0.00001,help='weight decay rate')
parser.add_argument('--clip',type=int,default=5,help='clip')
parser.add_argument('--propalpha',type=float,default=0.05,help='prop alpha')
parser.add_argument('--tanhalpha',type=float,default=3,help='tanh alpha')
parser.add_argument('--epochs',type=int,default=1,help='')
parser.add_argument('--num_split',type=int,default=1,help='number of splits for graphs')
parser.add_argument('--step_size',type=int,default=100,help='step_size')
args = parser.parse_args()
device = torch.device(args.device)
torch.set_num_threads(3)
def main():
Data = DataLoaderS(args.data, 0.6, 0.2, device, args.horizon, args.seq_in_len, args.normalize)
model = gtnet(args.gcn_true, args.buildA_true, args.gcn_depth, args.num_nodes,
device, dropout=args.dropout, subgraph_size=args.subgraph_size,
node_dim=args.node_dim, dilation_exponential=args.dilation_exponential,
conv_channels=args.conv_channels, residual_channels=args.residual_channels,
skip_channels=args.skip_channels, end_channels= args.end_channels,
seq_length=args.seq_in_len, in_dim=args.in_dim, out_dim=args.seq_out_len,
layers=args.layers, propalpha=args.propalpha, tanhalpha=args.tanhalpha, layer_norm_affline=False)
model = model.to(device)
print(args)
print('The recpetive field size is', model.receptive_field)
nParams = sum([p.nelement() for p in model.parameters()])
print('Number of model parameters is', nParams, flush=True)
if args.L1Loss:
criterion = nn.L1Loss(size_average=False).to(device)
else:
criterion = nn.MSELoss(size_average=False).to(device)
evaluateL2 = nn.MSELoss(size_average=False).to(device)
evaluateL1 = nn.L1Loss(size_average=False).to(device)
best_val = 10000000
optim = Optim(
model.parameters(), args.optim, args.lr, args.clip, lr_decay=args.weight_decay
)
# At any point you can hit Ctrl + C to break out of training early.
try:
print('begin training')
for epoch in range(1, args.epochs + 1):
epoch_start_time = time.time()
train_loss = train(Data, Data.train[0], Data.train[1], model, criterion, optim, args.batch_size)
val_loss, val_rae, val_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1,
args.batch_size)
print(
'| end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae {:5.4f} | valid corr {:5.4f}'.format(
epoch, (time.time() - epoch_start_time), train_loss, val_loss, val_rae, val_corr), flush=True)
# Save the model if the validation loss is the best we've seen so far.
if val_loss < best_val:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val = val_loss
if epoch % 5 == 0:
test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1,
args.batch_size)
print("test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}".format(test_acc, test_rae, test_corr), flush=True)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
vtest_acc, vtest_rae, vtest_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1,
args.batch_size)
test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1,
args.batch_size)
print("final test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}".format(test_acc, test_rae, test_corr))
return vtest_acc, vtest_rae, vtest_corr, test_acc, test_rae, test_corr
if __name__ == "__main__":
vacc = []
vrae = []
vcorr = []
acc = []
rae = []
corr = []
for i in range(10):
val_acc, val_rae, val_corr, test_acc, test_rae, test_corr = main()
vacc.append(val_acc)
vrae.append(val_rae)
vcorr.append(val_corr)
acc.append(test_acc)
rae.append(test_rae)
corr.append(test_corr)
print('\n\n')
print('10 runs average')
print('\n\n')
print("valid\trse\trae\tcorr")
print("mean\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.mean(vacc), np.mean(vrae), np.mean(vcorr)))
print("std\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.std(vacc), np.std(vrae), np.std(vcorr)))
print('\n\n')
print("test\trse\trae\tcorr")
print("mean\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.mean(acc), np.mean(rae), np.mean(corr)))
print("std\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.std(acc), np.std(rae), np.std(corr)))
| 10,199 | 42.220339 | 146 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/MTGNN/net.py | from layer import *
class gtnet(nn.Module):
def __init__(self, gcn_true, buildA_true, gcn_depth, num_nodes, device, predefined_A=None, static_feat=None, dropout=0.3, subgraph_size=20, node_dim=40, dilation_exponential=1, conv_channels=32, residual_channels=32, skip_channels=64, end_channels=128, seq_length=12, in_dim=2, out_dim=12, layers=3, propalpha=0.05, tanhalpha=3, layer_norm_affline=True):
super(gtnet, self).__init__()
self.gcn_true = gcn_true
self.buildA_true = buildA_true
self.num_nodes = num_nodes
self.dropout = dropout
self.predefined_A = predefined_A
self.filter_convs = nn.ModuleList()
self.gate_convs = nn.ModuleList()
self.residual_convs = nn.ModuleList()
self.skip_convs = nn.ModuleList()
self.gconv1 = nn.ModuleList()
self.gconv2 = nn.ModuleList()
self.norm = nn.ModuleList()
self.start_conv = nn.Conv2d(in_channels=in_dim,
out_channels=residual_channels,
kernel_size=(1, 1))
self.gc = graph_constructor(num_nodes, subgraph_size, node_dim, device, alpha=tanhalpha, static_feat=static_feat)
self.seq_length = seq_length
kernel_size = 7
if dilation_exponential>1:
self.receptive_field = int(1+(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1))
else:
self.receptive_field = layers*(kernel_size-1) + 1
for i in range(1):
if dilation_exponential>1:
rf_size_i = int(1 + i*(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1))
else:
rf_size_i = i*layers*(kernel_size-1)+1
new_dilation = 1
for j in range(1,layers+1):
if dilation_exponential > 1:
rf_size_j = int(rf_size_i + (kernel_size-1)*(dilation_exponential**j-1)/(dilation_exponential-1))
else:
rf_size_j = rf_size_i+j*(kernel_size-1)
self.filter_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation))
self.gate_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation))
self.residual_convs.append(nn.Conv2d(in_channels=conv_channels,
out_channels=residual_channels,
kernel_size=(1, 1)))
if self.seq_length>self.receptive_field:
self.skip_convs.append(nn.Conv2d(in_channels=conv_channels,
out_channels=skip_channels,
kernel_size=(1, self.seq_length-rf_size_j+1)))
else:
self.skip_convs.append(nn.Conv2d(in_channels=conv_channels,
out_channels=skip_channels,
kernel_size=(1, self.receptive_field-rf_size_j+1)))
if self.gcn_true:
self.gconv1.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha))
self.gconv2.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha))
if self.seq_length>self.receptive_field:
self.norm.append(LayerNorm((residual_channels, num_nodes, self.seq_length - rf_size_j + 1),elementwise_affine=layer_norm_affline))
else:
self.norm.append(LayerNorm((residual_channels, num_nodes, self.receptive_field - rf_size_j + 1),elementwise_affine=layer_norm_affline))
new_dilation *= dilation_exponential
self.layers = layers
self.end_conv_1 = nn.Conv2d(in_channels=skip_channels,
out_channels=end_channels,
kernel_size=(1,1),
bias=True)
self.end_conv_2 = nn.Conv2d(in_channels=end_channels,
out_channels=out_dim,
kernel_size=(1,1),
bias=True)
if self.seq_length > self.receptive_field:
self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.seq_length), bias=True)
self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, self.seq_length-self.receptive_field+1), bias=True)
else:
self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.receptive_field), bias=True)
self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, 1), bias=True)
self.idx = torch.arange(self.num_nodes).to(device)
def forward(self, input, idx=None):
seq_len = input.size(3)
assert seq_len==self.seq_length, 'input sequence length not equal to preset sequence length'
if self.seq_length<self.receptive_field:
input = nn.functional.pad(input,(self.receptive_field-self.seq_length,0,0,0))
if self.gcn_true:
if self.buildA_true:
if idx is None:
adp = self.gc(self.idx)
else:
adp = self.gc(idx)
else:
adp = self.predefined_A
x = self.start_conv(input)
skip = self.skip0(F.dropout(input, self.dropout, training=self.training))
for i in range(self.layers):
residual = x
filter = self.filter_convs[i](x)
filter = torch.tanh(filter)
gate = self.gate_convs[i](x)
gate = torch.sigmoid(gate)
x = filter * gate
x = F.dropout(x, self.dropout, training=self.training)
s = x
s = self.skip_convs[i](s)
skip = s + skip
if self.gcn_true:
x = self.gconv1[i](x, adp)+self.gconv2[i](x, adp.transpose(1,0))
else:
x = self.residual_convs[i](x)
x = x + residual[:, :, :, -x.size(3):]
if idx is None:
x = self.norm[i](x,self.idx)
else:
x = self.norm[i](x,idx)
print(x.shape)
skip = self.skipE(x) + skip
print(skip.shape)
x = F.relu(skip)
x = F.relu(self.end_conv_1(x))
print(x.shape)
x = self.end_conv_2(x)
print(x.shape)
return x
| 6,760 | 47.292857 | 358 | py |
Traffic-Benchmark | Traffic-Benchmark-master/methods/MTGNN/util.py | import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
from torch.autograd import Variable
def normal_std(x):
return x.std() * np.sqrt((len(x) - 1.)/(len(x)))
class DataLoaderS(object):
# train and valid is the ratio of training set and validation set. test = 1 - train - valid
def __init__(self, file_name, train, valid, device, horizon, window, normalize=2):
self.P = window
self.h = horizon
fin = open(file_name)
self.rawdat = np.loadtxt(fin, delimiter=',')
self.dat = np.zeros(self.rawdat.shape)
self.n, self.m = self.dat.shape
self.normalize = 2
self.scale = np.ones(self.m)
self._normalized(normalize)
self._split(int(train * self.n), int((train + valid) * self.n), self.n)
self.scale = torch.from_numpy(self.scale).float()
tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.m)
self.scale = self.scale.to(device)
self.scale = Variable(self.scale)
self.rse = normal_std(tmp)
self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)))
self.device = device
def _normalized(self, normalize):
# normalized by the maximum value of entire matrix.
if (normalize == 0):
self.dat = self.rawdat
if (normalize == 1):
self.dat = self.rawdat / np.max(self.rawdat)
# normlized by the maximum value of each row(sensor).
if (normalize == 2):
for i in range(self.m):
self.scale[i] = np.max(np.abs(self.rawdat[:, i]))
self.dat[:, i] = self.rawdat[:, i] / np.max(np.abs(self.rawdat[:, i]))
def _split(self, train, valid, test):
train_set = range(self.P + self.h - 1, train)
valid_set = range(train, valid)
test_set = range(valid, self.n)
self.train = self._batchify(train_set, self.h)
self.valid = self._batchify(valid_set, self.h)
self.test = self._batchify(test_set, self.h)
def _batchify(self, idx_set, horizon):
n = len(idx_set)
X = torch.zeros((n, self.P, self.m))
Y = torch.zeros((n, self.m))
for i in range(n):
end = idx_set[i] - self.h + 1
start = end - self.P
X[i, :, :] = torch.from_numpy(self.dat[start:end, :])
Y[i, :] = torch.from_numpy(self.dat[idx_set[i], :])
return [X, Y]
def get_batches(self, inputs, targets, batch_size, shuffle=True):
length = len(inputs)
if shuffle:
index = torch.randperm(length)
else:
index = torch.LongTensor(range(length))
start_idx = 0
while (start_idx < length):
end_idx = min(length, start_idx + batch_size)
excerpt = index[start_idx:end_idx]
X = inputs[excerpt]
Y = targets[excerpt]
X = X.to(self.device)
Y = Y.to(self.device)
yield Variable(X), Variable(Y)
start_idx += batch_size
class DataLoaderM(object):
def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
"""
:param xs:
:param ys:
:param batch_size:
:param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
"""
self.batch_size = batch_size
self.current_ind = 0
if pad_with_last_sample:
num_padding = (batch_size - (len(xs) % batch_size)) % batch_size
x_padding = np.repeat(xs[-1:], num_padding, axis=0)
y_padding = np.repeat(ys[-1:], num_padding, axis=0)
xs = np.concatenate([xs, x_padding], axis=0)
ys = np.concatenate([ys, y_padding], axis=0)
self.size = len(xs)
self.num_batch = int(self.size // self.batch_size)
self.xs = xs
self.ys = ys
def shuffle(self):
permutation = np.random.permutation(self.size)
xs, ys = self.xs[permutation], self.ys[permutation]
self.xs = xs
self.ys = ys
def get_iterator(self):
self.current_ind = 0
def _wrapper():
while self.current_ind < self.num_batch:
start_ind = self.batch_size * self.current_ind
end_ind = min(self.size, self.batch_size * (self.current_ind + 1))
x_i = self.xs[start_ind: end_ind, ...]
y_i = self.ys[start_ind: end_ind, ...]
yield (x_i, y_i)
self.current_ind += 1
return _wrapper()
class StandardScaler():
"""
Standard the input
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return (data * self.std) + self.mean
def sym_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()
def asym_adj(adj):
"""Asymmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1)).flatten()
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat= sp.diags(d_inv)
return d_mat.dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
"""
# L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
# D = diag(A 1)
:param adj:
:return:
"""
adj = sp.coo_matrix(adj)
d = np.array(adj.sum(1))
d_inv_sqrt = np.power(d, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
return normalized_laplacian
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
if undirected:
adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
L = calculate_normalized_laplacian(adj_mx)
if lambda_max is None:
lambda_max, _ = linalg.eigsh(L, 1, which='LM')
lambda_max = lambda_max[0]
L = sp.csr_matrix(L)
M, _ = L.shape
I = sp.identity(M, format='csr', dtype=L.dtype)
L = (2 / lambda_max * L) - I
return L.astype(np.float32).todense()
def load_pickle(pickle_file):
try:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
except UnicodeDecodeError as e:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f, encoding='latin1')
except Exception as e:
print('Unable to load data ', pickle_file, ':', e)
raise
return pickle_data
def load_adj(pkl_filename):
sensor_ids, sensor_id_to_ind, adj = load_pickle(pkl_filename)
return adj
def load_dataset(dataset_dir, batch_size, valid_batch_size= None, test_batch_size=None):
data = {}
for category in ['train', 'val', 'test']:
cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
data['x_' + category] = cat_data['x']
data['y_' + category] = cat_data['y']
scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())
# Data format
for category in ['train', 'val', 'test']:
data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
data['train_loader'] = DataLoaderM(data['x_train'], data['y_train'], batch_size)
data['val_loader'] = DataLoaderM(data['x_val'], data['y_val'], valid_batch_size)
data['test_loader'] = DataLoaderM(data['x_test'], data['y_test'], test_batch_size)
data['scaler'] = scaler
return data
# def load_dataset(dataset_dir, batch_size, valid_batch_size= None, test_batch_size=None):
# data = {}
# for category in ['train', 'val', 'test']:
# cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
# data['x_' + category] = cat_data['x']
# data['y_' + category] = cat_data['y']
# scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())
# # Data format
# for category in ['train', 'val', 'test']:
# data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
#
# data['train_loader'] = DataLoaderM(data['x_train'], data['y_train'], batch_size, pad_with_last_sample=False)
# data['val_loader'] = DataLoaderM(data['x_val'], data['y_val'], valid_batch_size, pad_with_last_sample=False)
# data['test_loader'] = DataLoaderM(data['x_test'], data['y_test'], test_batch_size, pad_with_last_sample=False)
# data['scaler'] = scaler
# return data
def masked_mse(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = (preds-labels)**2
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def masked_rmse(preds, labels, null_val=np.nan):
return torch.sqrt(masked_mse(preds=preds, labels=labels, null_val=null_val))
def masked_mae(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def masked_mape(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)/labels
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def metric(pred, real):
mae = masked_mae(pred,real,0.0).item()
mape = masked_mape(pred,real,0.0).item()
rmse = masked_rmse(pred,real,0.0).item()
return mae,mape,rmse
def load_node_feature(path):
fi = open(path)
x = []
for li in fi:
li = li.strip()
li = li.split(",")
e = [float(t) for t in li[1:]]
x.append(e)
x = np.array(x)
mean = np.mean(x,axis=0)
std = np.std(x,axis=0)
z = torch.tensor((x-mean)/std,dtype=torch.float)
return z
def normal_std(x):
return x.std() * np.sqrt((len(x) - 1.) / (len(x)))
| 10,951 | 34.102564 | 116 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.