code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from collections import OrderedDict
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.jit.annotations import Tuple, List, Dict
class FeaturePyramidNetwork(nn.Module):
    """
    Module that adds a FPN on top of a set of feature maps. This is based on
    `"Feature Pyramid Network for Object Detection" <https://arxiv.org/abs/1612.03144>`_.
    The feature maps are currently supposed to be in increasing depth
    order.
    The input to the model is expected to be an OrderedDict[Tensor], containing
    the feature maps on top of which the FPN will be added.
    Arguments:
        in_channels_list (list[int]): number of channels for each feature map that
            is passed to the module
        out_channels (int): number of channels of the FPN representation
        extra_blocks (ExtraFPNBlock or None): if provided, extra operations will
            be performed. It is expected to take the fpn features, the original
            features and the names of the original features as input, and returns
            a new list of feature maps and their corresponding names
    Examples::
        >>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5)
        >>> # get some dummy data
        >>> x = OrderedDict()
        >>> x['feat0'] = torch.rand(1, 10, 64, 64)
        >>> x['feat2'] = torch.rand(1, 20, 16, 16)
        >>> x['feat3'] = torch.rand(1, 30, 8, 8)
        >>> # compute the FPN on top of x
        >>> output = m(x)
        >>> print([(k, v.shape) for k, v in output.items()])
        >>> # returns
        >>>   [('feat0', torch.Size([1, 5, 64, 64])),
        >>>    ('feat2', torch.Size([1, 5, 16, 16])),
        >>>    ('feat3', torch.Size([1, 5, 8, 8]))]
    """
    def __init__(self, in_channels_list, out_channels, extra_blocks=None):
        super(FeaturePyramidNetwork, self).__init__()
        self.inner_blocks = nn.ModuleList()
        self.layer_blocks = nn.ModuleList()
        for in_channels in in_channels_list:
            if in_channels == 0:
                raise ValueError("in_channels=0 is currently not supported")
            # 1x1 "lateral" conv maps each backbone level to out_channels;
            # 3x3 conv smooths the merged top-down result.
            inner_block_module = nn.Conv2d(in_channels, out_channels, 1)
            layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1)
            self.inner_blocks.append(inner_block_module)
            self.layer_blocks.append(layer_block_module)
        # initialize parameters now to avoid modifying the initialization of top_blocks.
        # Bug fix: the original iterated self.children(), which only yields the two
        # ModuleList containers (never a Conv2d), so this init never ran; modules()
        # recurses into the ModuleLists and reaches the Conv2d layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)
        if extra_blocks is not None:
            assert isinstance(extra_blocks, ExtraFPNBlock)
        # Bug fix: always assign the attribute. Previously it was only set when
        # extra_blocks was not None, so forward() raised AttributeError for the
        # default extra_blocks=None.
        self.extra_blocks = extra_blocks
    def get_result_from_inner_blocks(self, x, idx):
        # type: (Tensor, int)
        """
        This is equivalent to self.inner_blocks[idx](x),
        but torchscript doesn't support this yet
        """
        # Manual length count + linear scan: torchscript cannot index a
        # ModuleList with a runtime integer, so keep this workaround.
        num_blocks = 0
        for m in self.inner_blocks:
            num_blocks += 1
        if idx < 0:
            idx += num_blocks
        i = 0
        out = x
        for module in self.inner_blocks:
            if i == idx:
                out = module(x)
            i += 1
        return out
    def get_result_from_layer_blocks(self, x, idx):
        # type: (Tensor, int)
        """
        This is equivalent to self.layer_blocks[idx](x),
        but torchscript doesn't support this yet
        """
        num_blocks = 0
        for m in self.layer_blocks:
            num_blocks += 1
        if idx < 0:
            idx += num_blocks
        i = 0
        out = x
        for module in self.layer_blocks:
            if i == idx:
                out = module(x)
            i += 1
        return out
    def forward(self, x):
        # type: (Dict[str, Tensor])
        """
        Computes the FPN for a set of feature maps.
        Arguments:
            x (OrderedDict[Tensor]): feature maps for each feature level.
        Returns:
            results (OrderedDict[Tensor]): feature maps after FPN layers.
                They are ordered from highest resolution first.
        """
        # unpack OrderedDict into two lists for easier handling
        names = list(x.keys())
        x = list(x.values())
        # start from the deepest (lowest-resolution) level and walk upward
        last_inner = self.get_result_from_inner_blocks(x[-1], -1)
        results = []
        results.append(self.get_result_from_layer_blocks(last_inner, -1))
        for idx in range(len(x) - 2, -1, -1):
            inner_lateral = self.get_result_from_inner_blocks(x[idx], idx)
            feat_shape = inner_lateral.shape[-2:]
            # upsample the coarser map and merge it with the lateral connection
            inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest")
            last_inner = inner_lateral + inner_top_down
            results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))
        if self.extra_blocks is not None:
            results, names = self.extra_blocks(results, x, names)
        # make it back an OrderedDict
        out = OrderedDict([(k, v) for k, v in zip(names, results)])
        return out
class ExtraFPNBlock(nn.Module):
    """
    Base class for the extra block in the FPN.

    Subclasses implement ``forward`` to append extra feature maps (and
    matching names) on top of the regular FPN outputs.

    Arguments:
        results (List[Tensor]): the result of the FPN
        x (List[Tensor]): the original feature maps
        names (List[str]): the names for each one of the
            original feature maps
    Returns:
        results (List[Tensor]): the extended set of results
            of the FPN
        names (List[str]): the extended set of names for the results
    """
    def forward(self, results, x, names):
        # Intentionally a no-op; concrete subclasses override this.
        pass
class LastLevelMaxPool(ExtraFPNBlock):
    """
    Extra FPN block that appends one additional level, obtained by applying
    a kernel-1, stride-2 max pool to the last (coarsest) feature map.
    """
    def forward(self, x, y, names):
        # type: (List[Tensor], List[Tensor], List[str])
        pooled = F.max_pool2d(x[-1], 1, 2, 0)
        x.append(pooled)
        names.append("pool")
        return x, names
class LastLevelP6P7(ExtraFPNBlock):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7,
    via two stride-2 3x3 convolutions on top of the last feature map.

    Arguments:
        in_channels (int): channels of the input fed to p6 (C5, or P5 when
            ``in_channels == out_channels``)
        out_channels (int): channels of the produced P6 and P7 maps
    """
    def __init__(self, in_channels, out_channels):
        super(LastLevelP6P7, self).__init__()
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in [self.p6, self.p7]:
            nn.init.kaiming_uniform_(module.weight, a=1)
            nn.init.constant_(module.bias, 0)
        # P5 can feed p6 directly only when its channel count matches
        self.use_P5 = in_channels == out_channels
    def forward(self, p, c, names):
        p5, c5 = p[-1], c[-1]
        x = p5 if self.use_P5 else c5
        p6 = self.p6(x)
        p7 = self.p7(F.relu(p6))
        p.extend([p6, p7])
        names.extend(["p6", "p7"])
        # Fixed: the original return line had dataset-metadata residue fused
        # onto it, which broke the file's syntax.
        return p, names
import importlib
import math
import os
import warnings
from fractions import Fraction
from typing import List, Tuple
import numpy as np
import torch
_HAS_VIDEO_OPT = False
try:
    # Look for the compiled "video_reader" extension next to the package
    # directory and register its custom ops with torch when present.
    lib_dir = os.path.join(os.path.dirname(__file__), "..")
    loader_details = (
        importlib.machinery.ExtensionFileLoader,
        importlib.machinery.EXTENSION_SUFFIXES
    )
    extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
    ext_specs = extfinder.find_spec("video_reader")
    if ext_specs is not None:
        torch.ops.load_library(ext_specs.origin)
        _HAS_VIDEO_OPT = True
except (ImportError, OSError):
    # The extension is optional; leave _HAS_VIDEO_OPT = False on failure.
    pass
# Fraction(0, 1) denotes "timebase unknown / use stream default".
default_timebase = Fraction(0, 1)
# simple class for torch scripting
# the complex Fraction class from fractions module is not scriptable
@torch.jit.script
class Timebase(object):
    # Rational stream time base (numerator / denominator): a scriptable
    # replacement for fractions.Fraction, which torchscript cannot compile.
    __annotations__ = {"numerator": int, "denominator": int}
    __slots__ = ["numerator", "denominator"]
    def __init__(
        self,
        numerator,  # type: int
        denominator,  # type: int
    ):
        # type: (...) -> None
        self.numerator = numerator
        self.denominator = denominator
@torch.jit.script
class VideoMetaData(object):
    # Scriptable container for the metadata returned by the video_reader ops.
    # has_* flags say whether the corresponding stream exists; durations are
    # in seconds (pts * timebase); fps / sample_rate are per-second rates.
    __annotations__ = {
        "has_video": bool,
        "video_timebase": Timebase,
        "video_duration": float,
        "video_fps": float,
        "has_audio": bool,
        "audio_timebase": Timebase,
        "audio_duration": float,
        "audio_sample_rate": float,
    }
    __slots__ = [
        "has_video",
        "video_timebase",
        "video_duration",
        "video_fps",
        "has_audio",
        "audio_timebase",
        "audio_duration",
        "audio_sample_rate",
    ]
    def __init__(self):
        # All-absent defaults; _fill_info overwrites fields it can determine.
        self.has_video = False
        self.video_timebase = Timebase(0, 1)
        self.video_duration = 0.0
        self.video_fps = 0.0
        self.has_audio = False
        self.audio_timebase = Timebase(0, 1)
        self.audio_duration = 0.0
        self.audio_sample_rate = 0.0
def _validate_pts(pts_range):
# type: (List[int])
if pts_range[1] > 0:
assert (
pts_range[0] <= pts_range[1]
), """Start pts should not be smaller than end pts, got
start pts: %d and end pts: %d""" % (
pts_range[0],
pts_range[1],
)
def _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration):
    # type: (torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor) -> VideoMetaData
    """
    Build a VideoMetaData struct from the raw tensors returned by the
    video_reader ops. An empty tensor means the corresponding value (or the
    whole stream) is absent, and the default stays in place.
    """
    meta = VideoMetaData()
    if vtimebase.numel() > 0:
        meta.video_timebase = Timebase(
            int(vtimebase[0].item()), int(vtimebase[1].item())
        )
        video_tb = vtimebase[0].item() / float(vtimebase[1].item())
        if vduration.numel() > 0:
            # duration is reported in pts units; convert to seconds
            meta.has_video = True
            meta.video_duration = float(vduration.item()) * video_tb
    if vfps.numel() > 0:
        meta.video_fps = float(vfps.item())
    if atimebase.numel() > 0:
        meta.audio_timebase = Timebase(
            int(atimebase[0].item()), int(atimebase[1].item())
        )
        audio_tb = atimebase[0].item() / float(atimebase[1].item())
        if aduration.numel() > 0:
            meta.has_audio = True
            meta.audio_duration = float(aduration.item()) * audio_tb
    if asample_rate.numel() > 0:
        meta.audio_sample_rate = float(asample_rate.item())
    return meta
def _align_audio_frames(aframes, aframe_pts, audio_pts_range):
# type: (torch.Tensor, torch.Tensor, List[int]) -> torch.Tensor
start, end = aframe_pts[0], aframe_pts[-1]
num_samples = aframes.size(0)
step_per_aframe = float(end - start + 1) / float(num_samples)
s_idx = 0
e_idx = num_samples
if start < audio_pts_range[0]:
s_idx = int((audio_pts_range[0] - start) / step_per_aframe)
if end > audio_pts_range[1]:
e_idx = int((audio_pts_range[1] - end) / step_per_aframe)
return aframes[s_idx:e_idx, :]
def _read_video_from_file(
    filename,
    seek_frame_margin=0.25,
    read_video_stream=True,
    video_width=0,
    video_height=0,
    video_min_dimension=0,
    video_max_dimension=0,
    video_pts_range=(0, -1),
    video_timebase=default_timebase,
    read_audio_stream=True,
    audio_samples=0,
    audio_channels=0,
    audio_pts_range=(0, -1),
    audio_timebase=default_timebase,
):
    """
    Reads a video from a file, returning both the video frames as well as
    the audio frames
    Args
    ----------
    filename : str
        path to the video file
    seek_frame_margin: double, optional
        seeking frame in the stream is imprecise. Thus, when video_start_pts
        is specified, we seek the pts earlier by seek_frame_margin seconds
    read_video_stream: int, optional
        whether read video stream. If yes, set to 1. Otherwise, 0
    video_width/video_height/video_min_dimension/video_max_dimension: int
        together decide the size of decoded frames
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the original frame resolution
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension = 0, keep the aspect ratio and resize the
            frame so that shorter edge size is video_min_dimension
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension != 0, keep the aspect ratio and resize
            the frame so that longer edge size is video_max_dimension
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension != 0, resize the frame so that shorter
            edge size is video_min_dimension, and longer edge size is
            video_max_dimension. The aspect ratio may not be preserved
        - When video_width = 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_height is $video_height
        - When video_width != 0, video_height == 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_width is $video_width
        - When video_width != 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, resize the frame so that frame
            video_width and video_height are set to $video_width and
            $video_height, respectively
    video_pts_range : list(int), optional
        the start and end presentation timestamp of video stream
    video_timebase: Fraction, optional
        a Fraction rational number which denotes timebase in video stream
    read_audio_stream: int, optional
        whether read audio stream. If yes, set to 1. Otherwise, 0
    audio_samples: int, optional
        audio sampling rate
    audio_channels: int optional
        audio channels
    audio_pts_range : list(int), optional
        the start and end presentation timestamp of audio stream
    audio_timebase: Fraction, optional
        a Fraction rational number which denotes time base in audio stream
    Returns
    -------
    vframes : Tensor[T, H, W, C]
        the `T` video frames
    aframes : Tensor[L, K]
        the audio frames, where `L` is the number of points and
        `K` is the number of audio_channels
    info : Dict
        metadata for the video and audio. Can contain the fields video_fps (float)
        and audio_fps (int)
    """
    # Reject windows where start > end (an end of -1 means "until the end").
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)
    # NOTE: arguments are positional and must match the C++ op signature
    # order exactly; do not reorder.
    result = torch.ops.video_reader.read_video_from_file(
        filename,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase.numerator,
        video_timebase.denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase.numerator,
        audio_timebase.denominator,
    )
    # The op returns a flat 10-tuple: 5 video tensors then 5 audio tensors.
    vframes, _vframe_pts, vtimebase, vfps, vduration, \
        aframes, aframe_pts, atimebase, asample_rate, aduration = (
            result
        )
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    if aframes.numel() > 0:
        # when audio stream is found, trim samples to the requested window
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
    return vframes, aframes, info
def _read_video_timestamps_from_file(filename):
    """
    Decode all video- and audio frames in the video. Only pts
    (presentation timestamp) is returned. The actual frame pixel data is not
    copied. Thus, it is much faster than read_video(...)
    """
    # getPtsOnly=1 asks the op to skip pixel/sample decoding entirely.
    # Arguments are positional and must match the C++ op signature order.
    result = torch.ops.video_reader.read_video_from_file(
        filename,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    # frame tensors are empty here (getPtsOnly); only pts and metadata matter
    _vframes, vframe_pts, vtimebase, vfps, vduration, \
        _aframes, aframe_pts, atimebase, asample_rate, aduration = (result)
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    vframe_pts = vframe_pts.numpy().tolist()
    aframe_pts = aframe_pts.numpy().tolist()
    return vframe_pts, aframe_pts, info
def _probe_video_from_file(filename):
    """
    Probe a video file and return a VideoMetaData struct describing its
    video/audio streams (without decoding any frames).
    """
    probe_result = torch.ops.video_reader.probe_video_from_file(filename)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = probe_result
    return _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
def _read_video_from_memory(
    video_data,  # type: torch.Tensor
    seek_frame_margin=0.25,  # type: float
    read_video_stream=1,  # type: int
    video_width=0,  # type: int
    video_height=0,  # type: int
    video_min_dimension=0,  # type: int
    video_max_dimension=0,  # type: int
    video_pts_range=(0, -1),  # type: List[int]
    video_timebase_numerator=0,  # type: int
    video_timebase_denominator=1,  # type: int
    read_audio_stream=1,  # type: int
    audio_samples=0,  # type: int
    audio_channels=0,  # type: int
    audio_pts_range=(0, -1),  # type: List[int]
    audio_timebase_numerator=0,  # type: int
    audio_timebase_denominator=1,  # type: int
):
    # type: (...) -> Tuple[torch.Tensor, torch.Tensor]
    """
    Reads a video from memory, returning both the video frames as well as
    the audio frames
    This function is torchscriptable.
    Args
    ----------
    video_data : data type could be 1) torch.Tensor, dtype=torch.int8 or 2) python bytes
        compressed video content stored in either 1) torch.Tensor 2) python bytes
    seek_frame_margin: double, optional
        seeking frame in the stream is imprecise. Thus, when video_start_pts is specified,
        we seek the pts earlier by seek_frame_margin seconds
    read_video_stream: int, optional
        whether read video stream. If yes, set to 1. Otherwise, 0
    video_width/video_height/video_min_dimension/video_max_dimension: int
        together decide the size of decoded frames
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the original frame resolution
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension = 0, keep the aspect ratio and resize the
            frame so that shorter edge size is video_min_dimension
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension != 0, keep the aspect ratio and resize
            the frame so that longer edge size is video_max_dimension
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension != 0, resize the frame so that shorter
            edge size is video_min_dimension, and longer edge size is
            video_max_dimension. The aspect ratio may not be preserved
        - When video_width = 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_height is $video_height
        - When video_width != 0, video_height == 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_width is $video_width
        - When video_width != 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, resize the frame so that frame
            video_width and video_height are set to $video_width and
            $video_height, respectively
    video_pts_range : list(int), optional
        the start and end presentation timestamp of video stream
    video_timebase_numerator / video_timebase_denominator: optional
        a rational number which denotes timebase in video stream
    read_audio_stream: int, optional
        whether read audio stream. If yes, set to 1. Otherwise, 0
    audio_samples: int, optional
        audio sampling rate
    audio_channels: int optional
        audio audio_channels
    audio_pts_range : list(int), optional
        the start and end presentation timestamp of audio stream
    audio_timebase_numerator / audio_timebase_denominator: optional
        a rational number which denotes time base in audio stream
    Returns
    -------
    vframes : Tensor[T, H, W, C]
        the `T` video frames
    aframes : Tensor[L, K]
        the audio frames, where `L` is the number of points and
        `K` is the number of channels
    """
    # NOTE(review): unlike _read_video_timestamps_from_memory below, this
    # function does not convert raw bytes into a uint8 tensor itself —
    # presumably because that conversion is not torchscriptable; confirm
    # that callers pass a tensor when scripting.
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)
    # Arguments are positional and must match the C++ op signature order.
    result = torch.ops.video_reader.read_video_from_memory(
        video_data,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase_numerator,
        video_timebase_denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase_numerator,
        audio_timebase_denominator,
    )
    # The op returns a flat 10-tuple: 5 video tensors then 5 audio tensors.
    vframes, _vframe_pts, vtimebase, vfps, vduration, \
        aframes, aframe_pts, atimebase, asample_rate, aduration = (
            result
        )
    if aframes.numel() > 0:
        # when audio stream is found, trim samples to the requested window
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
    return vframes, aframes
def _read_video_timestamps_from_memory(video_data):
    """
    Decode all frames in the video. Only pts (presentation timestamp) is returned.
    The actual frame pixel data is not copied. Thus, read_video_timestamps(...)
    is much faster than read_video(...)
    """
    if not isinstance(video_data, torch.Tensor):
        # accept raw bytes as well: view them as a uint8 tensor (zero-copy)
        video_data = torch.from_numpy(np.frombuffer(video_data, dtype=np.uint8))
    # getPtsOnly=1 asks the op to skip pixel/sample decoding entirely.
    # Arguments are positional and must match the C++ op signature order.
    result = torch.ops.video_reader.read_video_from_memory(
        video_data,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    # frame tensors are empty here (getPtsOnly); only pts and metadata matter
    _vframes, vframe_pts, vtimebase, vfps, vduration, \
        _aframes, aframe_pts, atimebase, asample_rate, aduration = (
            result
        )
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    vframe_pts = vframe_pts.numpy().tolist()
    aframe_pts = aframe_pts.numpy().tolist()
    return vframe_pts, aframe_pts, info
def _probe_video_from_memory(video_data):
    # type: (torch.Tensor) -> VideoMetaData
    """
    Probe a video held in memory and return a VideoMetaData struct with info
    about it. This function is torchscriptable.
    """
    if not isinstance(video_data, torch.Tensor):
        # accept raw bytes as well: view them as a uint8 tensor (zero-copy)
        video_data = torch.from_numpy(np.frombuffer(video_data, dtype=np.uint8))
    probe_result = torch.ops.video_reader.probe_video_from_memory(video_data)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = probe_result
    return _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
def _read_video(filename, start_pts=0, end_pts=None, pts_unit="pts"):
    """
    Read the video and audio streams of ``filename`` restricted to the
    window [start_pts, end_pts], expressed in ``pts_unit`` ("pts" or "sec").

    Returns (vframes, aframes, info) where info may contain the keys
    "video_fps" and "audio_fps" when the respective stream exists.
    """
    if end_pts is None:
        end_pts = float("inf")
    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )
    # Probe first so we know each stream's timebase before decoding.
    info = _probe_video_from_file(filename)
    has_video = info.has_video
    has_audio = info.has_audio
    def get_pts(time_base):
        # Convert the requested window into integer pts in the stream's own
        # time base; -1 encodes "until the end of the stream".
        start_offset = start_pts
        end_offset = end_pts
        if pts_unit == "sec":
            start_offset = int(math.floor(start_pts * (1 / time_base)))
            if end_offset != float("inf"):
                end_offset = int(math.ceil(end_pts * (1 / time_base)))
        if end_offset == float("inf"):
            end_offset = -1
        return start_offset, end_offset
    video_pts_range = (0, -1)
    video_timebase = default_timebase
    if has_video:
        video_timebase = Fraction(
            info.video_timebase.numerator, info.video_timebase.denominator
        )
        video_pts_range = get_pts(video_timebase)
    audio_pts_range = (0, -1)
    audio_timebase = default_timebase
    if has_audio:
        audio_timebase = Fraction(
            info.audio_timebase.numerator, info.audio_timebase.denominator
        )
        audio_pts_range = get_pts(audio_timebase)
    vframes, aframes, info = _read_video_from_file(
        filename,
        read_video_stream=True,
        video_pts_range=video_pts_range,
        video_timebase=video_timebase,
        read_audio_stream=True,
        audio_pts_range=audio_pts_range,
        audio_timebase=audio_timebase,
    )
    # Repackage only the rate fields, mirroring the public read_video API.
    _info = {}
    if has_video:
        _info["video_fps"] = info.video_fps
    if has_audio:
        _info["audio_fps"] = info.audio_sample_rate
    return vframes, aframes, _info
def _read_video_timestamps(filename, pts_unit="pts"):
    """
    Return the presentation timestamps of all video frames in ``filename``
    (in ``pts_unit`` units: raw "pts" integers or "sec" Fractions) together
    with the video fps, or None when the file has no video stream.
    """
    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )
    pts, _, info = _read_video_timestamps_from_file(filename)
    if pts_unit == "sec":
        video_time_base = Fraction(
            info.video_timebase.numerator, info.video_timebase.denominator
        )
        pts = [x * video_time_base for x in pts]
    video_fps = info.video_fps if info.has_video else None
    # Fixed: the original return line had dataset-metadata residue fused
    # onto it, which broke the file's syntax.
    return pts, video_fps
import os
import tarfile
import collections
from .vision import VisionDataset
import xml.etree.ElementTree as ET
from PIL import Image
from .utils import download_url, check_integrity, verify_str_arg
# Per-year download metadata for the PASCAL VOC archives. For every entry the
# 'filename' is the basename of 'url', and 'base_dir' is where the archive
# unpacks its images/annotations relative to the dataset root.
DATASET_YEAR_DICT = {
    '2012': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
        'filename': 'VOCtrainval_11-May-2012.tar',
        'md5': '6cd6e144f989b92b3379bac3b3de84fd',
        'base_dir': os.path.join('VOCdevkit', 'VOC2012')
    },
    '2011': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',
        'filename': 'VOCtrainval_25-May-2011.tar',
        'md5': '6c3384ef61512963050cb5d687e5bf1e',
        'base_dir': os.path.join('TrainVal', 'VOCdevkit', 'VOC2011')
    },
    '2010': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
        'filename': 'VOCtrainval_03-May-2010.tar',
        'md5': 'da459979d0c395079b5c75ee67908abb',
        'base_dir': os.path.join('VOCdevkit', 'VOC2010')
    },
    '2009': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',
        'filename': 'VOCtrainval_11-May-2009.tar',
        'md5': '59065e4b188729180974ef6572f6a212',
        'base_dir': os.path.join('VOCdevkit', 'VOC2009')
    },
    '2008': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',
        # Bug fix: this entry previously listed the 2012 archive name
        # ('VOCtrainval_11-May-2012.tar'), which does not match the URL and
        # broke download/extraction of the 2008 tarball.
        # NOTE(review): the md5 below is kept from the original entry —
        # confirm it matches the actual 2008 archive.
        'filename': 'VOCtrainval_14-Jul-2008.tar',
        'md5': '2629fa636546599198acfcfbfcf1904a',
        'base_dir': os.path.join('VOCdevkit', 'VOC2008')
    },
    '2007': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
        'filename': 'VOCtrainval_06-Nov-2007.tar',
        'md5': 'c52e279531787c972589f7e41ab4ae64',
        'base_dir': os.path.join('VOCdevkit', 'VOC2007')
    },
    '2007-test': {
        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
        'filename': 'VOCtest_06-Nov-2007.tar',
        'md5': 'b6e924de25625d8de591ea690078ad9f',
        'base_dir': os.path.join('VOCdevkit', 'VOC2007')
    }
}
class VOCSegmentation(VisionDataset):
    """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
    Args:
        root (string): Root directory of the VOC Dataset.
        year (string, optional): The dataset year, supports years 2007 to 2012.
        image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """
    def __init__(self,
                 root,
                 year='2012',
                 image_set='train',
                 download=False,
                 transform=None,
                 target_transform=None,
                 transforms=None):
        super(VOCSegmentation, self).__init__(root, transforms, transform, target_transform)
        # keep the user-supplied year; the local variable may be remapped below
        self.year = year
        # the VOC2007 test split ships as a separate archive keyed "2007-test"
        if year == "2007" and image_set == "test":
            year = "2007-test"
        self.url = DATASET_YEAR_DICT[year]['url']
        self.filename = DATASET_YEAR_DICT[year]['filename']
        self.md5 = DATASET_YEAR_DICT[year]['md5']
        valid_sets = ["train", "trainval", "val"]
        if year == "2007-test":
            valid_sets.append("test")
        self.image_set = verify_str_arg(image_set, "image_set", valid_sets)
        base_dir = DATASET_YEAR_DICT[year]['base_dir']
        voc_root = os.path.join(self.root, base_dir)
        image_dir = os.path.join(voc_root, 'JPEGImages')
        mask_dir = os.path.join(voc_root, 'SegmentationClass')
        if download:
            download_extract(self.url, self.root, self.filename, self.md5)
        if not os.path.isdir(voc_root):
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        # split files list one image id per line
        splits_dir = os.path.join(voc_root, 'ImageSets/Segmentation')
        split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')
        with open(os.path.join(split_f), "r") as f:
            file_names = [x.strip() for x in f.readlines()]
        self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
        self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names]
        assert (len(self.images) == len(self.masks))
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is the image segmentation.
        """
        img = Image.open(self.images[index]).convert('RGB')
        # masks keep their palette; class ids are encoded as pixel values
        target = Image.open(self.masks[index])
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target
    def __len__(self):
        return len(self.images)
class VOCDetection(VisionDataset):
    """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset.
    Args:
        root (string): Root directory of the VOC Dataset.
        year (string, optional): The dataset year, supports years 2007 to 2012.
        image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
            (default: alphabetic indexing of VOC's 20 classes).
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, required): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """
    def __init__(self,
                 root,
                 year='2012',
                 image_set='train',
                 download=False,
                 transform=None,
                 target_transform=None,
                 transforms=None):
        super(VOCDetection, self).__init__(root, transforms, transform, target_transform)
        # keep the user-supplied year; the local variable may be remapped below
        self.year = year
        # the VOC2007 test split ships as a separate archive keyed "2007-test"
        if year == "2007" and image_set == "test":
            year = "2007-test"
        self.url = DATASET_YEAR_DICT[year]['url']
        self.filename = DATASET_YEAR_DICT[year]['filename']
        self.md5 = DATASET_YEAR_DICT[year]['md5']
        valid_sets = ["train", "trainval", "val"]
        if year == "2007-test":
            valid_sets.append("test")
        self.image_set = verify_str_arg(image_set, "image_set", valid_sets)
        base_dir = DATASET_YEAR_DICT[year]['base_dir']
        voc_root = os.path.join(self.root, base_dir)
        image_dir = os.path.join(voc_root, 'JPEGImages')
        annotation_dir = os.path.join(voc_root, 'Annotations')
        if download:
            download_extract(self.url, self.root, self.filename, self.md5)
        if not os.path.isdir(voc_root):
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        # split files list one image id per line
        splits_dir = os.path.join(voc_root, 'ImageSets/Main')
        split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')
        with open(os.path.join(split_f), "r") as f:
            file_names = [x.strip() for x in f.readlines()]
        self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
        self.annotations = [os.path.join(annotation_dir, x + ".xml") for x in file_names]
        assert (len(self.images) == len(self.annotations))
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is a dictionary of the XML tree.
        """
        img = Image.open(self.images[index]).convert('RGB')
        target = self.parse_voc_xml(
            ET.parse(self.annotations[index]).getroot())
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target
    def __len__(self):
        return len(self.images)
    def parse_voc_xml(self, node):
        """Recursively convert an ElementTree node into a (possibly nested) dict."""
        voc_dict = {}
        children = list(node)
        if children:
            # merge the dicts produced for each child, grouping repeated tags
            def_dic = collections.defaultdict(list)
            for dc in map(self.parse_voc_xml, children):
                for ind, v in dc.items():
                    def_dic[ind].append(v)
            if node.tag == 'annotation':
                # 'object' must always be a list, even for a single object
                def_dic['object'] = [def_dic['object']]
            # singleton lists collapse to their only element
            voc_dict = {
                node.tag:
                    {ind: v[0] if len(v) == 1 else v
                     for ind, v in def_dic.items()}
            }
        if node.text:
            text = node.text.strip()
            if not children:
                # leaf node: its stripped text is the value
                voc_dict[node.tag] = text
        return voc_dict
def download_extract(url, root, filename, md5):
    """Download ``url`` into ``root`` as ``filename`` (verified against ``md5``)
    and extract the tar archive in place."""
    download_url(url, root, filename, md5)
    with tarfile.open(os.path.join(root, filename), "r") as tar:
        # NOTE: extractall trusts member paths inside the archive; only use
        # with trusted archives (here: checksum-verified VOC tarballs).
        # Fixed: the original extractall line had dataset-metadata residue
        # fused onto it, which broke the file's syntax.
        tar.extractall(path=root)
from collections import defaultdict
from PIL import Image
from html.parser import HTMLParser
import glob
import os
from .vision import VisionDataset
class Flickr8kParser(HTMLParser):
    """Parser for extracting captions from the Flickr8k dataset web page."""
    def __init__(self, root):
        super(Flickr8kParser, self).__init__()
        self.root = root
        # image file path -> list of caption strings
        self.annotations = {}
        # parser state: inside the caption table?, currently-open tag,
        # and the image the upcoming captions belong to
        self.in_table = False
        self.current_tag = None
        self.current_img = None
    def handle_starttag(self, tag, attrs):
        self.current_tag = tag
        self.in_table = self.in_table or tag == 'table'
    def handle_endtag(self, tag):
        self.current_tag = None
        self.in_table = self.in_table and tag != 'table'
    def handle_data(self, data):
        # only text inside the caption table is meaningful
        if not self.in_table:
            return
        if data == 'Image Not Found':
            self.current_img = None
        elif self.current_tag == 'a':
            # anchor text holds the image id; resolve it to the actual file
            # on disk (the filename suffix after '_' varies per image)
            img_id = data.split('/')[-2]
            pattern = os.path.join(self.root, img_id + '_*.jpg')
            matched = glob.glob(pattern)[0]
            self.current_img = matched
            self.annotations[matched] = []
        elif self.current_tag == 'li' and self.current_img:
            self.annotations[self.current_img].append(data.strip())
class Flickr8k(VisionDataset):
    """`Flickr8k Entities <http://nlp.cs.illinois.edu/HockenmaierGroup/8k-pictures.html>`_ Dataset.
    Args:
        root (string): Root directory where images are downloaded to.
        ann_file (string): Path to annotation file.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    def __init__(self, root, ann_file, transform=None, target_transform=None):
        super(Flickr8k, self).__init__(root, transform=transform,
                                       target_transform=target_transform)
        self.ann_file = os.path.expanduser(ann_file)
        # Read annotations and store in a dict: the annotation file is an
        # HTML page that Flickr8kParser turns into {image path: captions}.
        parser = Flickr8kParser(self.root)
        with open(self.ann_file) as fh:
            parser.feed(fh.read())
        self.annotations = parser.annotations
        # sorted for a deterministic index -> image mapping
        self.ids = list(sorted(self.annotations.keys()))
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target). target is a list of captions for the image.
        """
        img_id = self.ids[index]
        # Image
        img = Image.open(img_id).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        # Captions
        target = self.annotations[img_id]
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def __len__(self):
        return len(self.ids)
class Flickr30k(VisionDataset):
    """`Flickr30k Entities <http://web.engr.illinois.edu/~bplumme2/Flickr30kEntities/>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        ann_file (string): Path to annotation file.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """

    def __init__(self, root, ann_file, transform=None, target_transform=None):
        super(Flickr30k, self).__init__(root, transform=transform,
                                        target_transform=target_transform)
        self.ann_file = os.path.expanduser(ann_file)
        # Read annotations and store in a dict. Each line is
        # "<image id><2-char suffix>\t<caption>"; dropping the last two
        # characters of the id (presumably the '#<n>' caption index —
        # verify against the annotation file) groups captions by image.
        self.annotations = defaultdict(list)
        with open(self.ann_file) as fh:
            for line in fh:
                line = line.strip()
                if not line:
                    # Tolerate blank lines (e.g. a trailing newline) instead
                    # of crashing on tuple unpacking.
                    continue
                # Split on the first tab only, so captions that themselves
                # contain tab characters are preserved intact.
                img_id, caption = line.split('\t', 1)
                self.annotations[img_id[:-2]].append(caption)
        self.ids = list(sorted(self.annotations.keys()))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target). target is a list of captions for the image.
        """
        img_id = self.ids[index]
        # Image
        filename = os.path.join(self.root, img_id)
        img = Image.open(filename).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        # Captions
        target = self.annotations[img_id]
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        return len(self.ids)
import glob
import os
from .utils import list_dir
from .folder import make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset
class HMDB51(VisionDataset):
    """
    `HMDB51 <http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_
    dataset.
    HMDB51 is an action recognition video dataset.
    This dataset consider every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.
    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.
    Internally, it uses a VideoClips object to handle clip creation.
    Args:
        root (string): Root directory of the HMDB51 Dataset.
        annotation_path (str): Path to the folder containing the split files.
        frames_per_clip (int): Number of frames in a clip.
        step_between_clips (int): Number of frames between each clip.
        fold (int, optional): Which fold to use. Should be between 1 and 3.
        train (bool, optional): If ``True``, creates a dataset from the train split,
            otherwise from the ``test`` split.
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
    Returns:
        video (Tensor[T, H, W, C]): the `T` video frames
        audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
            and `L` is the number of points
        label (int): class of the video clip
    """
    # URLs/checksums for the raw data archive and the official split files.
    data_url = "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"
    splits = {
        "url": "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar",
        "md5": "15e67781e70dcfbdce2d7dbb9b3344b5"
    }
    # Tags used in the split annotation files to mark membership.
    TRAIN_TAG = 1
    TEST_TAG = 2
    def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
                 frame_rate=None, fold=1, train=True, transform=None,
                 _precomputed_metadata=None, num_workers=1, _video_width=0,
                 _video_height=0, _video_min_dimension=0, _audio_samples=0):
        super(HMDB51, self).__init__(root)
        if fold not in (1, 2, 3):
            raise ValueError("fold should be between 1 and 3, got {}".format(fold))
        extensions = ('avi',)
        # One class per sub-directory of ``root``; indices follow sorted order.
        classes = sorted(list_dir(root))
        class_to_idx = {class_: i for (i, class_) in enumerate(classes)}
        # ``samples`` is a list of (video_path, class_index) tuples.
        self.samples = make_dataset(
            self.root,
            class_to_idx,
            extensions,
        )
        video_paths = [path for (path, _) in self.samples]
        # Index every fixed-length clip across all videos (optionally
        # resampled to ``frame_rate``); metadata can be precomputed and the
        # computation parallelized across ``num_workers``.
        video_clips = VideoClips(
            video_paths,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
        )
        self.fold = fold
        self.train = train
        self.classes = classes
        # Keep metadata for the *full* video list before restricting to fold.
        self.video_clips_metadata = video_clips.metadata
        self.indices = self._select_fold(video_paths, annotation_path, fold, train)
        # Restrict the clip index to the videos of the requested fold/split.
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform
    @property
    def metadata(self):
        # Metadata of the full video list, not just the selected fold.
        return self.video_clips_metadata
    def _select_fold(self, video_list, annotations_dir, fold, train):
        """Return indices into ``video_list`` selected by the fold's split files."""
        target_tag = self.TRAIN_TAG if train else self.TEST_TAG
        # One split file per class, e.g. "brush_hair_test_split1.txt".
        split_pattern_name = "*test_split{}.txt".format(fold)
        split_pattern_path = os.path.join(annotations_dir, split_pattern_name)
        annotation_paths = glob.glob(split_pattern_path)
        selected_files = []
        for filepath in annotation_paths:
            with open(filepath) as fid:
                lines = fid.readlines()
            for line in lines:
                # Each line: "<video_filename> <tag>"; tag 1 = train, 2 = test.
                video_filename, tag_string = line.split()
                tag = int(tag_string)
                if tag == target_tag:
                    selected_files.append(video_filename)
        selected_files = set(selected_files)
        indices = []
        for video_index, video_path in enumerate(video_list):
            if os.path.basename(video_path) in selected_files:
                indices.append(video_index)
        return indices
    def __len__(self):
        return self.video_clips.num_clips()
    def __getitem__(self, idx):
        video, audio, _, video_idx = self.video_clips.get_clip(idx)
        # ``video_idx`` indexes the fold subset; map it back through
        # ``self.indices`` to recover the class label of the original sample.
        sample_index = self.indices[video_idx]
        _, class_index = self.samples[sample_index]
        if self.transform is not None:
            video = self.transform(video)
        return video, audio, class_index
from PIL import Image
from os.path import join
import os
from .vision import VisionDataset
from .utils import download_and_extract_archive, check_integrity, list_dir, list_files
class Omniglot(VisionDataset):
    """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``omniglot-py`` exists.
        background (bool, optional): If True, creates dataset from the "background" set, otherwise
            creates from the "evaluation" set. This terminology is defined by the authors.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts it in root directory. If the zip files are already downloaded, they are not
            downloaded again.
    """
    folder = 'omniglot-py'
    download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
    # MD5 checksums of the two dataset archives.
    zips_md5 = {
        'images_background': '68d2efa1b9178cc56df9314c21c6e718',
        'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'
    }
    def __init__(self, root, background=True, transform=None, target_transform=None,
                 download=False):
        super(Omniglot, self).__init__(join(root, self.folder), transform=transform,
                                       target_transform=target_transform)
        self.background = background
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        self.target_folder = join(self.root, self._get_target_folder())
        # Alphabets are the top-level directories; characters are their
        # sub-directories, stored as "alphabet/character" relative paths.
        self._alphabets = list_dir(self.target_folder)
        self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))]
                                for a in self._alphabets], [])
        # Pair every PNG of each character with that character's index, then
        # flatten into a single (image_name, class_index) list.
        self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')]
                                  for idx, character in enumerate(self._characters)]
        self._flat_character_images = sum(self._character_images, [])
    def __len__(self):
        return len(self._flat_character_images)
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target character class.
        """
        image_name, character_class = self._flat_character_images[index]
        image_path = join(self.target_folder, self._characters[character_class], image_name)
        # Load as single-channel grayscale ('L').
        image = Image.open(image_path, mode='r').convert('L')
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            character_class = self.target_transform(character_class)
        return image, character_class
    def _check_integrity(self):
        """Check that the archive for the selected set exists and matches its MD5."""
        zip_filename = self._get_target_folder()
        if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]):
            return False
        return True
    def download(self):
        """Download and extract the archive for the selected set, if not already present."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        filename = self._get_target_folder()
        zip_filename = filename + '.zip'
        url = self.download_url_prefix + '/' + zip_filename
        download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
    def _get_target_folder(self):
        # Archive/folder name depends on which set was requested.
        return 'images_background' if self.background else 'images_evaluation'
import warnings
from contextlib import contextmanager
import os
import shutil
import tempfile
import torch
from .folder import ImageFolder
from .utils import check_integrity, extract_archive, verify_str_arg
# Filenames and MD5 checksums of the three official ImageNet 2012 archives.
ARCHIVE_META = {
    'train': ('ILSVRC2012_img_train.tar', '1d675b47d978889d74fa0da5fadfb00e'),
    'val': ('ILSVRC2012_img_val.tar', '29b22e2961454d5413ddabcf34fc5622'),
    'devkit': ('ILSVRC2012_devkit_t12.tar.gz', 'fa75699e90414af021442c21a62c3abf')
}
# Binary file (written with torch.save) caching the parsed devkit metadata.
META_FILE = "meta.bin"
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """
    def __init__(self, root, split='train', download=None, **kwargs):
        # ``download`` is kept only for informative errors/warnings: the
        # archives must be obtained externally.
        if download is True:
            msg = ("The dataset is no longer publicly accessible. You need to "
                   "download the archives externally and place them in the root "
                   "directory.")
            raise RuntimeError(msg)
        elif download is False:
            msg = ("The use of the download flag is deprecated, since the dataset "
                   "is no longer publicly accessible.")
            warnings.warn(msg, RuntimeWarning)
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        # Prepare the meta file and the split folder if not done already.
        self.parse_archives()
        wnid_to_classes = load_meta_file(self.root)[0]
        # ImageFolder indexes the split folder, whose sub-directories are
        # named by WNID; afterwards remap WNIDs to class-name tuples.
        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        # Every name in a class tuple maps to the same class index.
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}
    def parse_archives(self):
        """Parse the devkit/split archives whose extracted outputs are missing."""
        if not check_integrity(os.path.join(self.root, META_FILE)):
            parse_devkit_archive(self.root)
        if not os.path.isdir(self.split_folder):
            if self.split == 'train':
                parse_train_archive(self.root)
            elif self.split == 'val':
                parse_val_archive(self.root)
    @property
    def split_folder(self):
        # Images for a split live in ``<root>/<split>``.
        return os.path.join(self.root, self.split)
    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
def load_meta_file(root, file=None):
    """Load the ``(wnid_to_classes, val_wnids)`` tuple cached in the meta file.

    The meta file is created by :func:`parse_devkit_archive`.

    Args:
        root (str): Root directory containing the meta file.
        file (str, optional): Name of the meta file. Defaults to ``META_FILE``.

    Raises:
        RuntimeError: If the meta file is missing or corrupted.
    """
    if file is None:
        file = META_FILE
    file = os.path.join(root, file)
    if check_integrity(file):
        return torch.load(file)

    # Original passed a stray second format() argument; the message has a
    # single placeholder for the file path.
    msg = ("The meta file {} is not present in the root directory or is corrupted. "
           "This file is automatically created by the ImageNet dataset.")
    raise RuntimeError(msg.format(file))
def _verify_archive(root, file, md5):
    # Fail fast with a helpful message when an expected archive is missing
    # or fails its checksum.
    archive = os.path.join(root, file)
    if not check_integrity(archive, md5):
        raise RuntimeError(
            ("The archive {} is not present in the root directory or is corrupted. "
             "You need to download it externally and place it in {}.").format(file, root))
def parse_devkit_archive(root, file=None):
    """Parse the devkit archive of the ImageNet2012 classification dataset and save
    the meta information in a binary file.
    Args:
        root (str): Root directory containing the devkit archive
        file (str, optional): Name of devkit archive. Defaults to
            'ILSVRC2012_devkit_t12.tar.gz'
    """
    # Local import: scipy is only needed to read the devkit's .mat file.
    import scipy.io as sio
    def parse_meta_mat(devkit_root):
        """Read meta.mat; return (idx -> wnid, wnid -> class-name tuple) maps."""
        metafile = os.path.join(devkit_root, "data", "meta.mat")
        meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
        # Keep only leaf synsets (num_children == 0).
        nums_children = list(zip(*meta))[4]
        meta = [meta[idx] for idx, num_children in enumerate(nums_children)
                if num_children == 0]
        idcs, wnids, classes = list(zip(*meta))[:3]
        # Class names are comma-separated synonyms; store them as a tuple.
        classes = [tuple(clss.split(', ')) for clss in classes]
        idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
        wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
        return idx_to_wnid, wnid_to_classes
    def parse_val_groundtruth_txt(devkit_root):
        """Return the ground-truth class indices of the validation images."""
        file = os.path.join(devkit_root, "data",
                            "ILSVRC2012_validation_ground_truth.txt")
        with open(file, 'r') as txtfh:
            val_idcs = txtfh.readlines()
        return [int(val_idx) for val_idx in val_idcs]
    @contextmanager
    def get_tmp_dir():
        # Temporary extraction directory, removed even if parsing fails.
        tmp_dir = tempfile.mkdtemp()
        try:
            yield tmp_dir
        finally:
            shutil.rmtree(tmp_dir)
    archive_meta = ARCHIVE_META["devkit"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]
    _verify_archive(root, file, md5)
    with get_tmp_dir() as tmp_dir:
        extract_archive(os.path.join(root, file), tmp_dir)
        devkit_root = os.path.join(tmp_dir, "ILSVRC2012_devkit_t12")
        idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root)
        val_idcs = parse_val_groundtruth_txt(devkit_root)
        # Translate per-image indices into WNIDs and cache both mappings.
        val_wnids = [idx_to_wnid[idx] for idx in val_idcs]
        torch.save((wnid_to_classes, val_wnids), os.path.join(root, META_FILE))
def parse_train_archive(root, file=None, folder="train"):
    """Parse the train images archive of the ImageNet2012 classification dataset and
    prepare it for usage with the ImageNet dataset.

    Args:
        root (str): Root directory containing the train images archive
        file (str, optional): Name of train images archive. Defaults to
            'ILSVRC2012_img_train.tar'
        folder (str, optional): Optional name for train images folder. Defaults to
            'train'
    """
    default_file, md5 = ARCHIVE_META["train"]
    if file is None:
        file = default_file
    _verify_archive(root, file, md5)
    train_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), train_root)
    # The outer tar contains one nested tar per class; unpack each into a
    # directory named after it and delete the inner archive afterwards.
    for entry in os.listdir(train_root):
        inner_archive = os.path.join(train_root, entry)
        extract_archive(inner_archive, os.path.splitext(inner_archive)[0],
                        remove_finished=True)
def parse_val_archive(root, file=None, wnids=None, folder="val"):
    """Parse the validation images archive of the ImageNet2012 classification dataset
    and prepare it for usage with the ImageNet dataset.

    Args:
        root (str): Root directory containing the validation images archive
        file (str, optional): Name of validation images archive. Defaults to
            'ILSVRC2012_img_val.tar'
        wnids (list, optional): List of WordNet IDs of the validation images. If None
            is given, the IDs are loaded from the meta file in the root directory
        folder (str, optional): Optional name for validation images folder. Defaults to
            'val'
    """
    default_file, md5 = ARCHIVE_META["val"]
    if file is None:
        file = default_file
    if wnids is None:
        wnids = load_meta_file(root)[1]
    _verify_archive(root, file, md5)
    val_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), val_root)
    # Sorting the extracted files aligns each image with its ground-truth
    # WNID from the devkit (same order as ``wnids``).
    images = sorted(os.path.join(val_root, image) for image in os.listdir(val_root))
    for wnid in set(wnids):
        os.mkdir(os.path.join(val_root, wnid))
    for wnid, img_file in zip(wnids, images):
        shutil.move(img_file, os.path.join(val_root, wnid, os.path.basename(img_file)))
from functools import partial
import torch
import os
import PIL
from .vision import VisionDataset
from .utils import download_file_from_google_drive, check_integrity, verify_str_arg
class CelebA(VisionDataset):
    """`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.
    Args:
        root (string): Root directory where images are downloaded to.
        split (string): One of {'train', 'valid', 'test', 'all'}.
            Accordingly dataset is selected.
        target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
            or ``landmarks``. Can also be a list to output a tuple with all specified target types.
            The targets represent:
            ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes
            ``identity`` (int): label for each person (data points with the same identity are the same person)
            ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)
            ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
            righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)
            Defaults to ``attr``. If empty, ``None`` will be returned as target.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    base_folder = "celeba"
    # There currently does not appear to be a easy way to extract 7z in python (without introducing additional
    # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
    # right now.
    file_list = [
        # File ID                          MD5 Hash                            Filename
        ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
        # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"),
        # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"),
        ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
        ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
        ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
        ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
        # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"),
        ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
    ]
    def __init__(self, root, split="train", target_type="attr", transform=None,
                 target_transform=None, download=False):
        # Local import so pandas is only required when CelebA is actually used.
        import pandas
        super(CelebA, self).__init__(root, transform=transform,
                                     target_transform=target_transform)
        self.split = split
        # Normalize ``target_type`` to a list so __getitem__ can iterate it.
        if isinstance(target_type, list):
            self.target_type = target_type
        else:
            self.target_type = [target_type]
        if not self.target_type and self.target_transform is not None:
            raise RuntimeError('target_transform is specified but target_type is empty')
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        # Integer codes used in list_eval_partition.txt for each split;
        # None means "select every row".
        split_map = {
            "train": 0,
            "valid": 1,
            "test": 2,
            "all": None,
        }
        split = split_map[verify_str_arg(split.lower(), "split",
                                         ("train", "valid", "test", "all"))]
        # ``fn`` builds absolute paths inside the dataset folder.
        fn = partial(os.path.join, self.root, self.base_folder)
        splits = pandas.read_csv(fn("list_eval_partition.txt"), delim_whitespace=True, header=None, index_col=0)
        identity = pandas.read_csv(fn("identity_CelebA.txt"), delim_whitespace=True, header=None, index_col=0)
        bbox = pandas.read_csv(fn("list_bbox_celeba.txt"), delim_whitespace=True, header=1, index_col=0)
        landmarks_align = pandas.read_csv(fn("list_landmarks_align_celeba.txt"), delim_whitespace=True, header=1)
        attr = pandas.read_csv(fn("list_attr_celeba.txt"), delim_whitespace=True, header=1)
        # ``slice(None)`` selects all rows for "all"; otherwise a boolean
        # mask over the partition column.
        mask = slice(None) if split is None else (splits[1] == split)
        self.filename = splits[mask].index.values
        self.identity = torch.as_tensor(identity[mask].values)
        self.bbox = torch.as_tensor(bbox[mask].values)
        self.landmarks_align = torch.as_tensor(landmarks_align[mask].values)
        self.attr = torch.as_tensor(attr[mask].values)
        self.attr = (self.attr + 1) // 2  # map from {-1, 1} to {0, 1}
        self.attr_names = list(attr.columns)
    def _check_integrity(self):
        """Verify checksums of the annotation files and presence of the images."""
        for (_, md5, filename) in self.file_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            _, ext = os.path.splitext(filename)
            # Allow original archive to be deleted (zip and 7z)
            # Only need the extracted images
            if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
                return False
        # Should check a hash of the images
        return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))
    def download(self):
        """Download all dataset files from Google Drive and extract the image zip."""
        import zipfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        for (file_id, md5, filename) in self.file_list:
            download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)
        with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
            f.extractall(os.path.join(self.root, self.base_folder))
    def __getitem__(self, index):
        """Return ``(image, target)``; target layout follows ``self.target_type``."""
        X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))
        target = []
        for t in self.target_type:
            if t == "attr":
                target.append(self.attr[index, :])
            elif t == "identity":
                target.append(self.identity[index, 0])
            elif t == "bbox":
                target.append(self.bbox[index, :])
            elif t == "landmarks":
                target.append(self.landmarks_align[index, :])
            else:
                # TODO: refactor with utils.verify_str_arg
                raise ValueError("Target type \"{}\" is not recognized.".format(t))
        if self.transform is not None:
            X = self.transform(X)
        if target:
            # A single target type is returned bare, several as a tuple.
            target = tuple(target) if len(target) > 1 else target[0]
            if self.target_transform is not None:
                target = self.target_transform(target)
        else:
            target = None
        return X, target
    def __len__(self):
        return len(self.attr)
    def extra_repr(self):
        lines = ["Target type: {target_type}", "Split: {split}"]
        return '\n'.join(lines).format(**self.__dict__)
from PIL import Image
import os
import os.path
import numpy as np
from .vision import VisionDataset
from .utils import download_url, check_integrity
class SEMEION(VisionDataset):
    """`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``semeion.py`` exists.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
    filename = "semeion.data"
    md5_checksum = 'cb545d371d2ce14ec121470795a77432'
    # NOTE: unlike most datasets here, ``download`` defaults to True.
    def __init__(self, root, transform=None, target_transform=None, download=True):
        super(SEMEION, self).__init__(root, transform=transform,
                                      target_transform=target_transform)
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        self.data = []
        self.labels = []
        fp = os.path.join(self.root, self.filename)
        # Each row: 256 pixel values followed by a one-hot label encoding.
        data = np.loadtxt(fp)
        # convert value to 8 bit unsigned integer
        # color (white #255) the pixels
        self.data = (data[:, :256] * 255).astype('uint8')
        # Reshape the flat pixel vectors into 16x16 images.
        self.data = np.reshape(self.data, (-1, 16, 16))
        # Column index of the nonzero entry in the one-hot part is the label.
        self.labels = np.nonzero(data[:, 256:])[1]
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.labels[index])
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img, mode='L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def __len__(self):
        return len(self.data)
    def _check_integrity(self):
        """Check that the data file exists and matches its MD5 checksum."""
        root = self.root
        fpath = os.path.join(root, self.filename)
        if not check_integrity(fpath, self.md5_checksum):
            return False
        return True
    def download(self):
        """Download the data file into ``root``, unless already present and valid."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        root = self.root
        download_url(self.url, root, self.filename, self.md5_checksum)
import json
import os
from collections import namedtuple
import zipfile
from .utils import extract_archive, verify_str_arg, iterable_to_str
from .vision import VisionDataset
from PIL import Image
class Cityscapes(VisionDataset):
"""`Cityscapes <http://www.cityscapes-dataset.com/>`_ Dataset.
Args:
root (string): Root directory of dataset where directory ``leftImg8bit``
and ``gtFine`` or ``gtCoarse`` are located.
split (string, optional): The image split to use, ``train``, ``test`` or ``val`` if mode="fine"
otherwise ``train``, ``train_extra`` or ``val``
mode (string, optional): The quality mode to use, ``fine`` or ``coarse``
target_type (string or list, optional): Type of target to use, ``instance``, ``semantic``, ``polygon``
or ``color``. Can also be a list to output a tuple with all specified target types.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Examples:
Get semantic segmentation target
.. code-block:: python
dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
target_type='semantic')
img, smnt = dataset[0]
Get multiple targets
.. code-block:: python
dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
target_type=['instance', 'color', 'polygon'])
img, (inst, col, poly) = dataset[0]
Validate on the "coarse" set
.. code-block:: python
dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse',
target_type='semantic')
img, smnt = dataset[0]
"""
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)),
]
    def __init__(self, root, split='train', mode='fine', target_type='instance',
                 transform=None, target_transform=None, transforms=None):
        """Index all Cityscapes images and their target files for one split.

        Args:
            root (string): Root directory of the dataset.
            split (string): Image split, e.g. ``train``, ``val``; valid values
                depend on ``mode`` (see below).
            mode (string): Annotation quality, ``fine`` or ``coarse``.
            target_type (string or list): One or more of ``instance``,
                ``semantic``, ``polygon``, ``color``.
            transform (callable, optional): Transform applied to the image.
            target_transform (callable, optional): Transform applied to the target.
            transforms (callable, optional): Joint transform over (image, target).
        """
        super(Cityscapes, self).__init__(root, transforms, transform, target_transform)
        # The quality mode maps directly to the on-disk annotation folder name.
        self.mode = 'gtFine' if mode == 'fine' else 'gtCoarse'
        self.images_dir = os.path.join(self.root, 'leftImg8bit', split)
        self.targets_dir = os.path.join(self.root, self.mode, split)
        self.target_type = target_type
        self.split = split
        self.images = []
        self.targets = []
        verify_str_arg(mode, "mode", ("fine", "coarse"))
        # Valid splits depend on the quality mode: only the fine annotations
        # ship a 'test' split, only the coarse ones ship 'train_extra'.
        if mode == "fine":
            valid_modes = ("train", "test", "val")
        else:
            valid_modes = ("train", "train_extra", "val")
        msg = ("Unknown value '{}' for argument split if mode is '{}'. "
               "Valid values are {{{}}}.")
        msg = msg.format(split, mode, iterable_to_str(valid_modes))
        verify_str_arg(split, "split", valid_modes, msg)
        # Normalise target_type to a list so the rest of the class can always
        # iterate over it, then validate each entry.
        if not isinstance(target_type, list):
            self.target_type = [target_type]
        [verify_str_arg(value, "target_type",
                        ("instance", "semantic", "polygon", "color"))
         for value in self.target_type]
        # If the extracted folders are missing, try to extract the matching
        # zip archives already placed in `root`; otherwise fail loudly.
        if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
            if split == 'train_extra':
                image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainextra.zip'))
            else:
                image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainvaltest.zip'))
            if self.mode == 'gtFine':
                target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '_trainvaltest.zip'))
            elif self.mode == 'gtCoarse':
                target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '.zip'))
            if os.path.isfile(image_dir_zip) and os.path.isfile(target_dir_zip):
                extract_archive(from_path=image_dir_zip, to_path=self.root)
                extract_archive(from_path=target_dir_zip, to_path=self.root)
            else:
                raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
                                   ' specified "split" and "mode" are inside the "root" directory')
        # Images are grouped per city. Build parallel lists: one image path
        # and, per image, the list of its target file paths (one per
        # requested target type).
        for city in os.listdir(self.images_dir):
            img_dir = os.path.join(self.images_dir, city)
            target_dir = os.path.join(self.targets_dir, city)
            for file_name in os.listdir(img_dir):
                target_types = []
                for t in self.target_type:
                    # Target files share the image's stem, e.g.
                    # "<stem>_leftImg8bit.png" -> "<stem>_gtFine_labelIds.png".
                    target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
                                                 self._get_target_suffix(self.mode, t))
                    target_types.append(os.path.join(target_dir, target_name))
                self.images.append(os.path.join(img_dir, file_name))
                self.targets.append(target_types)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert('RGB')
targets = []
for i, t in enumerate(self.target_type):
if t == 'polygon':
target = self._load_json(self.targets[index][i])
else:
target = Image.open(self.targets[index][i])
targets.append(target)
target = tuple(targets) if len(targets) > 1 else targets[0]
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
    def __len__(self):
        """Return the number of images in the selected split."""
        return len(self.images)
    def extra_repr(self):
        """Extra ``repr`` lines: the dataset's split, mode and target type."""
        lines = ["Split: {split}", "Mode: {mode}", "Type: {target_type}"]
        return '\n'.join(lines).format(**self.__dict__)
def _load_json(self, path):
with open(path, 'r') as file:
data = json.load(file)
return data
def _get_target_suffix(self, mode, target_type):
if target_type == 'instance':
return '{}_instanceIds.png'.format(mode)
elif target_type == 'semantic':
return '{}_labelIds.png'.format(mode)
elif target_type == 'color':
return '{}_color.png'.format(mode)
else:
return '{}_polygons.json'.format(mode) | /rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/datasets/cityscapes.py | 0.770896 | 0.40486 | cityscapes.py | pypi |
from PIL import Image
import os
import os.path
from .vision import VisionDataset
from .utils import download_and_extract_archive, verify_str_arg
class Caltech101(VisionDataset):
    """`Caltech 101 <http://www.vision.caltech.edu/Image_Datasets/Caltech101/>`_ Dataset.

    .. warning::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Args:
        root (string): Root directory of dataset where directory
            ``caltech101`` exists or will be saved to if download is set to True.
        target_type (string or list, optional): Type of target to use, ``category`` or
            ``annotation``. Can also be a list to output a tuple with all specified target types.
            ``category`` represents the target class, and ``annotation`` is a list of points
            from a hand-generated outline. Defaults to ``category``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(self, root, target_type="category", transform=None,
                 target_transform=None, download=False):
        super(Caltech101, self).__init__(os.path.join(root, 'caltech101'),
                                         transform=transform,
                                         target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)
        # Normalise target_type to a list and validate every entry.
        if not isinstance(target_type, list):
            target_type = [target_type]
        self.target_type = [verify_str_arg(t, "target_type", ("category", "annotation"))
                            for t in target_type]
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories")))
        self.categories.remove("BACKGROUND_Google")  # this is not a real class
        # For some reason, the category names in "101_ObjectCategories" and
        # "Annotations" do not always match. This is a manual map between the
        # two. Defaults to using same name, since most names are fine.
        name_map = {"Faces": "Faces_2",
                    "Faces_easy": "Faces_3",
                    "Motorbikes": "Motorbikes_16",
                    "airplanes": "Airplanes_Side_2"}
        self.annotation_categories = list(map(lambda x: name_map[x] if x in name_map else x, self.categories))
        # Parallel per-image lists: `index` holds the 1-based image number
        # within its category folder, `y` the 0-based category label.
        self.index = []
        self.y = []
        for (i, c) in enumerate(self.categories):
            n = len(os.listdir(os.path.join(self.root, "101_ObjectCategories", c)))
            self.index.extend(range(1, n + 1))
            self.y.extend(n * [i])

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where the type of target specified by target_type.
        """
        # scipy is imported lazily so the module can be used without scipy
        # when annotations are not requested.
        import scipy.io
        img = Image.open(os.path.join(self.root,
                                      "101_ObjectCategories",
                                      self.categories[self.y[index]],
                                      "image_{:04d}.jpg".format(self.index[index])))
        target = []
        for t in self.target_type:
            if t == "category":
                target.append(self.y[index])
            elif t == "annotation":
                data = scipy.io.loadmat(os.path.join(self.root,
                                                     "Annotations",
                                                     self.annotation_categories[self.y[index]],
                                                     "annotation_{:04d}.mat".format(self.index[index])))
                target.append(data["obj_contour"])
        # Single target type -> bare target; several -> tuple of all targets.
        target = tuple(target) if len(target) > 1 else target[0]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def _check_integrity(self):
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "101_ObjectCategories"))

    def __len__(self):
        """Return the total number of images across all categories."""
        return len(self.index)

    def download(self):
        """Download and extract the image and annotation archives.

        No-op when the dataset is already present.
        """
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz",
            self.root,
            filename="101_ObjectCategories.tar.gz",
            md5="b224c7392d521a49829488ab0f1120d9")
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech101/Annotations.tar",
            self.root,
            filename="101_Annotations.tar",
            md5="6f83eeb1f24d99cab4eb377263132c91")

    def extra_repr(self):
        """Extra ``repr`` line showing the configured target type(s)."""
        return "Target type: {target_type}".format(**self.__dict__)
class Caltech256(VisionDataset):
    """`Caltech 256 <http://www.vision.caltech.edu/Image_Datasets/Caltech256/>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``caltech256`` exists or will be saved to if download is set to True.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(self, root, transform=None, target_transform=None, download=False):
        super(Caltech256, self).__init__(os.path.join(root, 'caltech256'),
                                         transform=transform,
                                         target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)

        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

        # Parallel per-image lists: `index` is the 1-based image number
        # within its category folder, `y` the 0-based category label.
        self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories")))
        self.index = []
        self.y = []
        for label, category in enumerate(self.categories):
            count = len(os.listdir(os.path.join(self.root, "256_ObjectCategories", category)))
            self.index.extend(range(1, count + 1))
            self.y.extend([label] * count)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        label = self.y[index]
        # Files are named "<category+1, 3 digits>_<image number, 4 digits>.jpg".
        image_path = os.path.join(self.root,
                                  "256_ObjectCategories",
                                  self.categories[label],
                                  "{:03d}_{:04d}.jpg".format(label + 1, self.index[index]))
        img = Image.open(image_path)

        target = label
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def _check_integrity(self):
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "256_ObjectCategories"))

    def __len__(self):
        """Return the total number of images across all categories."""
        return len(self.index)

    def download(self):
        """Download and extract the archive; no-op if already present."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar",
            self.root,
            filename="256_ObjectCategories.tar",
            md5="67b4f42ca05d46448c6bb8ecd2220f6d")
import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import torch
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
    """Create a ``reporthook`` callable for ``urllib.request.urlretrieve``.

    The returned hook updates a shared tqdm progress bar from the
    ``(count, block_size, total_size)`` triple that urlretrieve passes it.
    """
    progress_bar = tqdm(total=None)

    def update_hook(count, block_size, total_size):
        # The total size is unknown until the server reports it.
        if progress_bar.total is None and total_size:
            progress_bar.total = total_size
        downloaded = count * block_size
        progress_bar.update(downloaded - progress_bar.n)

    return update_hook
def calculate_md5(fpath, chunk_size=1024 * 1024):
    """Compute the MD5 hex digest of the file at *fpath*.

    The file is read in ``chunk_size`` pieces so arbitrarily large files can
    be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fpath, 'rb') as stream:
        while True:
            block = stream.read(chunk_size)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def check_md5(fpath, md5, **kwargs):
    """Return True iff the file at *fpath* hashes to *md5*."""
    return calculate_md5(fpath, **kwargs) == md5
def check_integrity(fpath, md5=None):
    """Return True if *fpath* exists and, when *md5* is given, matches it."""
    if not os.path.isfile(fpath):
        return False
    # Without a checksum, mere existence counts as intact.
    return md5 is None or check_md5(fpath, md5)
def download_url(url, root, filename=None, md5=None):
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check

    Raises:
        RuntimeError: if the file is missing or fails the md5 check after
            the download attempt.
    """
    import urllib
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)
    os.makedirs(root, exist_ok=True)
    # check if file is already present locally
    if check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:   # download the file
        try:
            print('Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(
                url, fpath,
                reporthook=gen_bar_updater()
            )
        except (urllib.error.URLError, IOError) as e:
            # Some hosts only answer plain http; retry once with the scheme
            # downgraded before giving up.
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(
                    url, fpath,
                    reporthook=gen_bar_updater()
                )
            else:
                raise e
    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")
def list_dir(root, prefix=False):
    """List all directories at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    directories = [entry for entry in os.listdir(root)
                   if os.path.isdir(os.path.join(root, entry))]
    if prefix is True:
        return [os.path.join(root, d) for d in directories]
    return directories
def list_files(root, suffix, prefix=False):
    """List all files ending with a suffix at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    matches = [entry for entry in os.listdir(root)
               if os.path.isfile(os.path.join(root, entry)) and entry.endswith(suffix)]
    if prefix is True:
        return [os.path.join(root, f) for f in matches]
    return matches
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
    """Download a Google Drive file and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
    import requests
    url = "https://docs.google.com/uc?export=download"
    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)
    os.makedirs(root, exist_ok=True)
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        session = requests.Session()
        response = session.get(url, params={'id': file_id}, stream=True)
        # For large files Drive interposes a virus-scan warning page; the
        # confirmation token from its cookie lets the request bypass it.
        token = _get_confirm_token(response)
        if token:
            params = {'id': file_id, 'confirm': token}
            response = session.get(url, params=params, stream=True)
        _save_response_content(response, fpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tarxz(filename):
    """Return True when *filename* names an xz-compressed tarball."""
    return filename.endswith(".tar.xz")


def _is_tar(filename):
    """Return True when *filename* names an uncompressed tarball."""
    return filename.endswith(".tar")


def _is_targz(filename):
    """Return True when *filename* names a gzip-compressed tarball."""
    return filename.endswith(".tar.gz")


def _is_tgz(filename):
    """Return True when *filename* uses the short '.tgz' tarball suffix."""
    return filename.endswith(".tgz")


def _is_gzip(filename):
    """Return True for a bare gzip file; gzipped tarballs are excluded."""
    return filename.endswith(".gz") and not filename.endswith(".tar.gz")


def _is_zip(filename):
    """Return True when *filename* names a zip archive."""
    return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
    """Extract a tar / tar.gz / tgz / tar.xz / gz / zip archive.

    Args:
        from_path (str): Path of the archive.
        to_path (str, optional): Directory to extract into; defaults to the
            archive's own directory. For a bare ``.gz`` file, the output file
            keeps the archive's basename minus the ``.gz`` suffix.
        remove_finished (bool, optional): If True, delete the archive after
            a successful extraction.

    Raises:
        ValueError: if the archive type is not recognised.
    """
    if to_path is None:
        to_path = os.path.dirname(from_path)
    # NOTE(review): tarfile/zipfile extractall trusts member paths inside the
    # archive; fine for the project's own known archives, unsafe for
    # untrusted input.
    if _is_tar(from_path):
        with tarfile.open(from_path, 'r') as tar:
            tar.extractall(path=to_path)
    elif _is_targz(from_path) or _is_tgz(from_path):
        with tarfile.open(from_path, 'r:gz') as tar:
            tar.extractall(path=to_path)
    elif _is_tarxz(from_path):
        with tarfile.open(from_path, 'r:xz') as tar:
            tar.extractall(path=to_path)
    elif _is_gzip(from_path):
        to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
        with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
            out_f.write(zip_f.read())
    elif _is_zip(from_path):
        with zipfile.ZipFile(from_path, 'r') as z:
            z.extractall(to_path)
    else:
        raise ValueError("Extraction of {} not supported".format(from_path))
    if remove_finished:
        os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
                                 md5=None, remove_finished=False):
    """Download an archive from *url* into *download_root* and extract it.

    Args:
        url (str): URL of the archive to download.
        download_root (str): Directory the archive file is saved to.
        extract_root (str, optional): Directory to extract into; defaults to
            ``download_root``.
        filename (str, optional): Name for the saved archive; defaults to the
            URL basename.
        md5 (str, optional): Expected MD5 of the archive; no check when None.
        remove_finished (bool, optional): If True, delete the archive after
            extraction.
    """
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)
    download_url(url, download_root, filename, md5)
    archive = os.path.join(download_root, filename)
    print("Extracting {} to {}".format(archive, extract_root))
    extract_archive(archive, extract_root, remove_finished)
def iterable_to_str(iterable):
    """Format an iterable as a quoted, comma-separated string: 'a', 'b'."""
    joined = "', '".join(str(item) for item in iterable)
    return "'" + joined + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
    """Validate that *value* is a string and, optionally, an allowed value.

    Args:
        value: the argument value to check.
        arg (str, optional): the argument name, used in error messages.
        valid_values (iterable, optional): when given, *value* must be one
            of these.
        custom_msg (str, optional): overrides the default out-of-range
            error message.

    Returns:
        The validated *value*, unchanged.

    Raises:
        ValueError: if *value* is not a str, or not in *valid_values*.
    """
    # `torch._six.string_classes` was removed from PyTorch; on Python 3 it
    # was simply `(str,)`, so checking against str directly is equivalent
    # and keeps this helper working on modern torch.
    if not isinstance(value, str):
        if arg is None:
            msg = "Expected type str, but got type {type}."
        else:
            msg = "Expected type str for argument {arg}, but got type {type}."
        msg = msg.format(type=type(value), arg=arg)
        raise ValueError(msg)
    if valid_values is None:
        return value
    if value not in valid_values:
        if custom_msg is not None:
            # Custom messages are used verbatim; callers pre-format them.
            msg = custom_msg
        else:
            msg = ("Unknown value '{value}' for argument {arg}. "
                   "Valid values are {{{valid_values}}}.")
            msg = msg.format(value=value, arg=arg,
                             valid_values=iterable_to_str(valid_values))
        raise ValueError(msg)
    return value
from PIL import Image
import os
import os.path
import numpy as np
import pickle
from .vision import VisionDataset
from .utils import check_integrity, download_and_extract_archive
class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # (file name, md5 checksum) pairs for the pickled data batches.
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    # Metadata file mapping numeric labels to human-readable class names.
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False):
        super(CIFAR10, self).__init__(root, transform=transform,
                                      target_transform=target_transform)
        self.train = train  # training set or test set
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data = []
        self.targets = []
        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                # CIFAR-10 batches store labels under 'labels';
                # CIFAR-100 reuses this loader and stores 'fine_labels'.
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        self._load_meta()

    def _load_meta(self):
        """Load the label-name metadata and build the class-to-index map."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        if not check_integrity(path, self.meta['md5']):
            raise RuntimeError('Dataset metadata file not found or corrupted.' +
                               ' You can use download=True to download it')
        with open(path, 'rb') as infile:
            data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.targets[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        """Return the number of images in the selected split."""
        return len(self.data)

    def _check_integrity(self):
        """Verify the md5 checksum of every data batch on disk."""
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self):
        """Download and extract the archive; no-op if already present."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)

    def extra_repr(self):
        """Extra ``repr`` line indicating the train/test split."""
        return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    This is a subclass of the `CIFAR10` Dataset: only the archive location
    and file layout differ; all loading logic is inherited.
    """
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # CIFAR-100 ships one training file and one test file (no batches).
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    # The metadata holds the 100 fine-grained label names.
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
import os
import shutil
from .vision import VisionDataset
import numpy as np
from PIL import Image
from .utils import download_url, verify_str_arg
from .voc import download_extract
class SBDataset(VisionDataset):
    """`Semantic Boundaries Dataset <http://home.bharathh.info/pubs/codes/SBD/download.html>`_

    The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset.

    .. note ::

        Please note that the train and val splits included with this dataset are different from
        the splits in the PASCAL VOC dataset. In particular some "train" images might be part of
        VOC2012 val.
        If you are interested in testing on VOC 2012 val, then use `image_set='train_noval'`,
        which excludes all val images.

    .. warning::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Args:
        root (string): Root directory of the Semantic Boundaries Dataset
        image_set (string, optional): Select the image_set to use, ``train``, ``val`` or ``train_noval``.
            Image set ``train_noval`` excludes VOC 2012 val images.
        mode (string, optional): Select target type. Possible values 'boundaries' or 'segmentation'.
            In case of 'boundaries', the target is an array of shape `[num_classes, H, W]`,
            where `num_classes=20`.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version. Input sample is PIL image and target is a numpy array
            if `mode='boundaries'` or PIL image if `mode='segmentation'`.
    """

    url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz"
    md5 = "82b4d87ceb2ed10f6038a1cba92111cb"
    filename = "benchmark.tgz"
    # Split file that excludes all VOC 2012 val images.
    voc_train_url = "http://home.bharathh.info/pubs/codes/SBD/train_noval.txt"
    voc_split_filename = "train_noval.txt"
    voc_split_md5 = "79bff800c5f0b1ec6b21080a3c066722"

    def __init__(self,
                 root,
                 image_set='train',
                 mode='boundaries',
                 download=False,
                 transforms=None):
        # scipy is required only to read the .mat annotation files; fail
        # early with an actionable message if it is unavailable.
        try:
            from scipy.io import loadmat
            self._loadmat = loadmat
        except ImportError:
            raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: "
                               "pip install scipy")
        super(SBDataset, self).__init__(root, transforms)
        self.image_set = verify_str_arg(image_set, "image_set",
                                        ("train", "val", "train_noval"))
        self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries"))
        self.num_classes = 20
        sbd_root = self.root
        image_dir = os.path.join(sbd_root, 'img')
        mask_dir = os.path.join(sbd_root, 'cls')
        if download:
            download_extract(self.url, self.root, self.filename, self.md5)
            # The archive nests everything under benchmark_RELEASE/dataset;
            # move the pieces up to the dataset root.
            extracted_ds_root = os.path.join(self.root, "benchmark_RELEASE", "dataset")
            for f in ["cls", "img", "inst", "train.txt", "val.txt"]:
                old_path = os.path.join(extracted_ds_root, f)
                shutil.move(old_path, sbd_root)
            download_url(self.voc_train_url, sbd_root, self.voc_split_filename,
                         self.voc_split_md5)
        if not os.path.isdir(sbd_root):
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        split_f = os.path.join(sbd_root, image_set.rstrip('\n') + '.txt')
        with open(os.path.join(split_f), "r") as f:
            file_names = [x.strip() for x in f.readlines()]
        self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
        self.masks = [os.path.join(mask_dir, x + ".mat") for x in file_names]
        assert (len(self.images) == len(self.masks))
        # Bind the target loader once, according to the requested mode.
        self._get_target = self._get_segmentation_target \
            if self.mode == "segmentation" else self._get_boundaries_target

    def _get_segmentation_target(self, filepath):
        """Load a .mat annotation as a single segmentation label image."""
        mat = self._loadmat(filepath)
        return Image.fromarray(mat['GTcls'][0]['Segmentation'][0])

    def _get_boundaries_target(self, filepath):
        """Load a .mat annotation as a ``(num_classes, H, W)`` boundary array."""
        mat = self._loadmat(filepath)
        return np.concatenate([np.expand_dims(mat['GTcls'][0]['Boundaries'][0][i][0].toarray(), axis=0)
                               for i in range(self.num_classes)], axis=0)

    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*."""
        img = Image.open(self.images[index]).convert('RGB')
        target = self._get_target(self.masks[index])
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        """Return the number of images in the selected image set."""
        return len(self.images)

    def extra_repr(self):
        """Extra ``repr`` lines: the dataset's image set and mode."""
        lines = ["Image set: {image_set}", "Mode: {mode}"]
        return '\n'.join(lines).format(**self.__dict__)
from PIL import Image
import os
import numpy as np
from .utils import download_url
from .vision import VisionDataset
class USPS(VisionDataset):
    """`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset.

    The data-format is : [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``.
    The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]``
    and make pixel values in ``[0, 255]``.

    Args:
        root (string): Root directory of dataset to store``USPS`` data files.
        train (bool, optional): If True, creates dataset from ``usps.bz2``,
            otherwise from ``usps.t.bz2``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    # Per split: (url, file name, md5 checksum) of the libsvm-format archive.
    split_list = {
        'train': [
            "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
            "usps.bz2", 'ec16c51db3855ca6c91edd34d0e9b197'
        ],
        'test': [
            "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2",
            "usps.t.bz2", '8ea070ee2aca1ac39742fdd1ef5ed118'
        ],
    }

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False):
        super(USPS, self).__init__(root, transform=transform,
                                   target_transform=target_transform)
        split = 'train' if train else 'test'
        url, filename, checksum = self.split_list[split]
        full_path = os.path.join(self.root, filename)
        if download and not os.path.exists(full_path):
            download_url(url, self.root, filename, md5=checksum)
        # Fail early with the conventional, descriptive error instead of the
        # opaque FileNotFoundError that bz2.open would raise below.
        if not os.path.exists(full_path):
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        import bz2
        with bz2.open(full_path) as fp:
            # Each line is libsvm format: "<label> <index>:<value> ...".
            raw_data = [line.decode().split() for line in fp.readlines()]
            imgs = [[x.split(':')[-1] for x in data[1:]] for data in raw_data]
            imgs = np.asarray(imgs, dtype=np.float32).reshape((-1, 16, 16))
            # Rescale pixel values from [-1, 1] to [0, 255].
            imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
            # Shift labels from [1, 10] to [0, 9].
            targets = [int(d[0]) - 1 for d in raw_data]
        self.data = imgs
        self.targets = targets

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.targets[index])
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img, mode='L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        """Return the number of digit images in the split."""
        return len(self.data)
from PIL import Image
from .utils import download_url, check_integrity
import os
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (string): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = '9aec147b3488753cf758b4d493422285'
def __init__(self, root, transform=None, target_transform=None, download=True):
super(SBU, self).__init__(root, transform=transform,
target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')
file2 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_captions.txt')
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, 'dataset', photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, 'dataset', self.photos[index])
img = Image.open(filename).convert('RGB')
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self):
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
    def download(self):
        """Download and extract the tarball, and download each individual photo."""
        # Lazy import: tarfile is only needed when actually downloading.
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.md5_checksum)
        # Extract file
        # NOTE(review): extractall trusts the archive's member paths; the
        # tarball comes from a fixed, checksummed URL, but consider passing an
        # extraction filter (Python 3.12+) to harden against path traversal.
        with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
            tar.extractall(path=self.root)
        # Download individual photos
        with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh:
            for line in fh:
                url = line.rstrip()
                try:
                    download_url(url, os.path.join(self.root, 'dataset'))
                except OSError:
                    # The images point to public images on Flickr.
                    # Note: Images might be removed by users at anytime.
                    pass
import os
import numpy as np
from PIL import Image
import torch
from .vision import VisionDataset
from .utils import download_url
class PhotoTour(VisionDataset):
    """`Learning Local Image Descriptors Data <http://phototour.cs.washington.edu/patches/default.htm>`_ Dataset.
    Args:
        root (string): Root directory where images are.
        name (string): Name of the dataset to load.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    # (url, archive filename, md5) triplets per dataset variant.
    urls = {
        'notredame_harris': [
            'http://matthewalunbrown.com/patchdata/notredame_harris.zip',
            'notredame_harris.zip',
            '69f8c90f78e171349abdf0307afefe4d'
        ],
        'yosemite_harris': [
            'http://matthewalunbrown.com/patchdata/yosemite_harris.zip',
            'yosemite_harris.zip',
            'a73253d1c6fbd3ba2613c45065c00d46'
        ],
        'liberty_harris': [
            'http://matthewalunbrown.com/patchdata/liberty_harris.zip',
            'liberty_harris.zip',
            'c731fcfb3abb4091110d0ae8c7ba182c'
        ],
        'notredame': [
            'http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip',
            'notredame.zip',
            '509eda8535847b8c0a90bbb210c83484'
        ],
        'yosemite': [
            'http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip',
            'yosemite.zip',
            '533b2e8eb7ede31be40abc317b2fd4f0'
        ],
        'liberty': [
            'http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip',
            'liberty.zip',
            'fdd9152f138ea5ef2091746689176414'
        ],
    }
    # Per-variant normalization statistics and patch counts.
    # NOTE: __init__ shadows ``mean``/``std`` with the scalar for the chosen
    # variant, so after construction ``self.mean`` is a float, not a dict.
    mean = {'notredame': 0.4854, 'yosemite': 0.4844, 'liberty': 0.4437,
            'notredame_harris': 0.4854, 'yosemite_harris': 0.4844, 'liberty_harris': 0.4437}
    std = {'notredame': 0.1864, 'yosemite': 0.1818, 'liberty': 0.2019,
           'notredame_harris': 0.1864, 'yosemite_harris': 0.1818, 'liberty_harris': 0.2019}
    lens = {'notredame': 468159, 'yosemite': 633587, 'liberty': 450092,
            'liberty_harris': 379587, 'yosemite_harris': 450912, 'notredame_harris': 325295}
    image_ext = 'bmp'
    info_file = 'info.txt'
    matches_files = 'm50_100000_100000_0.txt'
    def __init__(self, root, name, train=True, transform=None, download=False):
        super(PhotoTour, self).__init__(root, transform=transform)
        self.name = name
        self.data_dir = os.path.join(self.root, name)
        self.data_down = os.path.join(self.root, '{}.zip'.format(name))
        # Parsed patches/labels/matches are cached as a single .pt file.
        self.data_file = os.path.join(self.root, '{}.pt'.format(name))
        self.train = train
        self.mean = self.mean[name]
        self.std = self.std[name]
        if download:
            self.download()
        if not self._check_datafile_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        # load the serialized data
        self.data, self.labels, self.matches = torch.load(self.data_file)
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (data1, data2, matches)

        In train mode a single patch tensor is returned instead; in test
        mode the indexed ground-truth match triplet is resolved into the two
        patches plus the 0/1 match label.
        """
        if self.train:
            data = self.data[index]
            if self.transform is not None:
                data = self.transform(data)
            return data
        m = self.matches[index]
        data1, data2 = self.data[m[0]], self.data[m[1]]
        if self.transform is not None:
            data1 = self.transform(data1)
            data2 = self.transform(data2)
        return data1, data2, m[2]
    def __len__(self):
        # Train length is the published patch count, test length the number
        # of ground-truth match pairs.
        if self.train:
            return self.lens[self.name]
        return len(self.matches)
    def _check_datafile_exists(self):
        # True when the cached .pt file has already been generated.
        return os.path.exists(self.data_file)
    def _check_downloaded(self):
        # True when the raw archive has already been extracted.
        return os.path.exists(self.data_dir)
    def download(self):
        """Download, extract and cache the dataset as a single .pt file."""
        if self._check_datafile_exists():
            print('# Found cached data {}'.format(self.data_file))
            return
        if not self._check_downloaded():
            # download files
            url = self.urls[self.name][0]
            filename = self.urls[self.name][1]
            md5 = self.urls[self.name][2]
            fpath = os.path.join(self.root, filename)
            download_url(url, self.root, filename, md5)
            print('# Extracting data {}\n'.format(self.data_down))
            import zipfile
            with zipfile.ZipFile(fpath, 'r') as z:
                z.extractall(self.data_dir)
            # The archive is no longer needed once extracted.
            os.unlink(fpath)
        # process and save as torch files
        print('# Caching data {}'.format(self.data_file))
        dataset = (
            read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
            read_info_file(self.data_dir, self.info_file),
            read_matches_files(self.data_dir, self.matches_files)
        )
        with open(self.data_file, 'wb') as f:
            torch.save(dataset, f)
    def extra_repr(self):
        # Shown by VisionDataset.__repr__.
        return "Split: {}".format("Train" if self.train is True else "Test")
def read_image_file(data_dir, image_ext, n):
    """Return a ByteTensor with the first *n* 64x64 patches found in *data_dir*.

    Every file with extension *image_ext* is a 1024x1024 mosaic of 16x16
    patches of 64x64 pixels each; files are visited in sorted order and
    patches are read left-to-right, top-to-bottom so patch indices line up
    with the metadata files.
    """
    def to_array(patch_img):
        # Flatten the PIL pixel data into a 64x64 uint8 array.
        return np.array(patch_img.getdata(), dtype=np.uint8).reshape(64, 64)

    # Sorted order keeps the patch <-> metadata correspondence stable.
    mosaic_paths = sorted(
        os.path.join(data_dir, entry)
        for entry in os.listdir(data_dir)
        if entry.endswith(image_ext)
    )
    patches = []
    for mosaic_path in mosaic_paths:
        mosaic = Image.open(mosaic_path)
        for top in range(0, 1024, 64):
            for left in range(0, 1024, 64):
                tile = mosaic.crop((left, top, left + 64, top + 64))
                patches.append(to_array(tile))
    # The final mosaic is usually padded; keep only the first n real patches.
    return torch.ByteTensor(np.array(patches[:n]))
def read_info_file(data_dir, info_file):
    """Return a LongTensor with the 3D-point ID of every patch.

    Each line of *info_file* starts with the ID of the 3D point the patch
    belongs to; only that first column is kept.

    Args:
        data_dir (str): directory containing *info_file*.
        info_file (str): name of the metadata file.

    Returns:
        torch.LongTensor: one label per line of the file.
    """
    # The original pre-initialized ``labels = []`` and immediately rebound it
    # inside the ``with`` block; that dead assignment has been removed.
    with open(os.path.join(data_dir, info_file), 'r') as f:
        labels = [int(line.split()[0]) for line in f]
    return torch.LongTensor(labels)
def read_matches_files(data_dir, matches_file):
    """Return a LongTensor of ground-truth matches.

    Each output row is ``[patch_id_1, patch_id_2, is_match]`` where
    ``is_match`` is 1 when both patches reference the same 3D point
    (columns 1 and 4 of the input line agree) and 0 otherwise.
    """
    rows = []
    with open(os.path.join(data_dir, matches_file), 'r') as fh:
        for raw_line in fh:
            fields = raw_line.split()
            same_point = 1 if fields[1] == fields[4] else 0
            rows.append([int(fields[0]), int(fields[3]), same_point])
    return torch.LongTensor(rows)
from PIL import Image
import os
import os.path
import numpy as np
from .vision import VisionDataset
from .utils import check_integrity, download_and_extract_archive, verify_str_arg
class STL10(VisionDataset):
    """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``stl10_binary`` exists.
        split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
            Accordingly dataset is selected.
        folds (int, optional): One of {0-9} or None.
            For training, loads one of the 10 pre-defined folds of 1k samples for the
            standard evaluation procedure. If no value is passed, loads the 5k samples.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    base_folder = 'stl10_binary'
    url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
    filename = "stl10_binary.tar.gz"
    tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'
    class_names_file = 'class_names.txt'
    folds_list_file = 'fold_indices.txt'
    # (file name, md5) pairs for each split's binaries.
    train_list = [
        ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],
        ['train_y.bin', '5a34089d4802c674881badbb80307741'],
        ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']
    ]
    test_list = [
        ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],
        ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']
    ]
    splits = ('train', 'train+unlabeled', 'unlabeled', 'test')

    def __init__(self, root, split='train', folds=None, transform=None,
                 target_transform=None, download=False):
        super(STL10, self).__init__(root, transform=transform,
                                    target_transform=target_transform)
        self.split = verify_str_arg(split, "split", self.splits)
        self.folds = self._verify_folds(folds)
        if download:
            self.download()
        elif not self._check_integrity():
            raise RuntimeError(
                'Dataset not found or corrupted. '
                'You can use download=True to download it')
        # now load the picked numpy arrays
        if self.split == 'train':
            self.data, self.labels = self.__loadfile(
                self.train_list[0][0], self.train_list[1][0])
            self.__load_folds(folds)
        elif self.split == 'train+unlabeled':
            self.data, self.labels = self.__loadfile(
                self.train_list[0][0], self.train_list[1][0])
            self.__load_folds(folds)
            unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
            self.data = np.concatenate((self.data, unlabeled_data))
            # Unlabeled images get the sentinel label -1.
            self.labels = np.concatenate(
                (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))
        elif self.split == 'unlabeled':
            self.data, _ = self.__loadfile(self.train_list[2][0])
            self.labels = np.asarray([-1] * self.data.shape[0])
        else:  # self.split == 'test':
            self.data, self.labels = self.__loadfile(
                self.test_list[0][0], self.test_list[1][0])
        class_file = os.path.join(
            self.root, self.base_folder, self.class_names_file)
        if os.path.isfile(class_file):
            with open(class_file) as f:
                self.classes = f.read().splitlines()

    def _verify_folds(self, folds):
        """Validate the *folds* argument (None or an int in [0, 10))."""
        if folds is None:
            return folds
        elif isinstance(folds, int):
            if folds in range(10):
                return folds
            msg = ("Value for argument folds should be in the range [0, 10), "
                   "but got {}.")
            raise ValueError(msg.format(folds))
        else:
            msg = "Expected type None or int for argument folds, but got type {}."
            raise ValueError(msg.format(type(folds)))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.labels is not None:
            img, target = self.data[index], int(self.labels[index])
        else:
            img, target = self.data[index], None
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        return self.data.shape[0]

    def __loadfile(self, data_file, labels_file=None):
        """Load images (and optionally labels) from the raw binary files."""
        labels = None
        if labels_file:
            path_to_labels = os.path.join(
                self.root, self.base_folder, labels_file)
            with open(path_to_labels, 'rb') as f:
                labels = np.fromfile(f, dtype=np.uint8) - 1  # 0-based
        path_to_data = os.path.join(self.root, self.base_folder, data_file)
        with open(path_to_data, 'rb') as f:
            # read whole file in uint8 chunks
            everything = np.fromfile(f, dtype=np.uint8)
            images = np.reshape(everything, (-1, 3, 96, 96))
            # The binaries store images column-major; swap H and W.
            images = np.transpose(images, (0, 1, 3, 2))
        return images, labels

    def _check_integrity(self):
        """Return True iff every split binary is present with a valid md5."""
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self):
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
        self._check_integrity()

    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)

    def __load_folds(self, folds):
        # loads one of the folds if specified
        if folds is None:
            return
        path_to_folds = os.path.join(
            self.root, self.base_folder, self.folds_list_file)
        with open(path_to_folds, 'r') as f:
            str_idx = f.read().splitlines()[folds]
            # Fix: the original used np.fromstring(str_idx, dtype=np.uint8, sep=' ').
            # np.fromstring is deprecated (and removed in NumPy 2.0), and uint8
            # silently wraps indices above 255 even though the fold files index
            # into the full 5000-image training split.
            list_idx = np.array(str_idx.split(), dtype=np.int64)
            self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]
from .utils import list_dir
from .folder import make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset
class Kinetics400(VisionDataset):
    """
    `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_
    dataset.
    Kinetics-400 is an action recognition video dataset.
    This dataset consider every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.
    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.
    Internally, it uses a VideoClips object to handle clip creation.
    Args:
        root (string): Root directory of the Kinetics-400 Dataset.
        frames_per_clip (int): number of frames in a clip
        step_between_clips (int): number of frames between each clip
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
    Returns:
        video (Tensor[T, H, W, C]): the `T` video frames
        audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
            and `L` is the number of points
        label (int): class of the video clip
    """
    def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,
                 extensions=('avi',), transform=None, _precomputed_metadata=None,
                 num_workers=1, _video_width=0, _video_height=0,
                 _video_min_dimension=0, _audio_samples=0, _audio_channels=0):
        super(Kinetics400, self).__init__(root)
        # One sub-folder of ``root`` per action class; sorting makes the
        # class -> index mapping deterministic.
        classes = list(sorted(list_dir(root)))
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
        self.classes = classes
        video_list = [x[0] for x in self.samples]
        # VideoClips splits every video into fixed-length clips; decoding
        # metadata can be shared between runs via ``_precomputed_metadata``.
        self.video_clips = VideoClips(
            video_list,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            _audio_channels=_audio_channels,
        )
        self.transform = transform
    @property
    def metadata(self):
        # Exposes the precomputed VideoClips metadata so it can be reused
        # without re-decoding every video.
        return self.video_clips.metadata
    def __len__(self):
        # Length is the number of clips, not the number of videos.
        return self.video_clips.num_clips()
    def __getitem__(self, idx):
        video, audio, info, video_idx = self.video_clips.get_clip(idx)
        # The label comes from the video the clip was cut from.
        label = self.samples[video_idx][1]
        if self.transform is not None:
            video = self.transform(video)
        return video, audio, label
from .vision import VisionDataset
from PIL import Image
import os
import os.path
class CocoCaptions(VisionDataset):
    """`MS Coco Captions <http://mscoco.org/dataset/#captions-challenge2015>`_ Dataset.
    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    Example:
        .. code:: python
            import torchvision.datasets as dset
            import torchvision.transforms as transforms
            cap = dset.CocoCaptions(root = 'dir where images are',
                                    annFile = 'json annotation file',
                                    transform=transforms.ToTensor())
            print('Number of samples: ', len(cap))
            img, target = cap[3] # load 4th sample
            print("Image Size: ", img.size())
            print(target)
        Output: ::
            Number of samples: 82783
            Image Size: (3L, 427L, 640L)
            [u'A plane emitting smoke stream flying over a mountain.',
            u'A plane darts across a bright blue sky behind a mountain covered in snow',
            u'A plane leaves a contrail above the snowy mountain top.',
            u'A mountain that has a plane flying overheard in the distance.',
            u'A mountain view with a plume of smoke in the background']
    """
    def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None):
        super(CocoCaptions, self).__init__(root, transforms, transform, target_transform)
        # pycocotools is an optional dependency, imported lazily.
        from pycocotools.coco import COCO
        self.coco = COCO(annFile)
        # Sorted image ids make indexing deterministic across runs.
        self.ids = list(sorted(self.coco.imgs.keys()))
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target). target is a list of captions for the image.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        anns = coco.loadAnns(ann_ids)
        # Each image typically has several reference captions.
        target = [ann['caption'] for ann in anns]
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target
    def __len__(self):
        return len(self.ids)
class CocoDetection(VisionDataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """

    def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None):
        super(CocoDetection, self).__init__(root, transforms, transform, target_transform)
        # pycocotools is an optional dependency, imported lazily.
        from pycocotools.coco import COCO
        self.coco = COCO(annFile)
        # Sorted image ids make indexing deterministic across runs.
        self.ids = list(sorted(self.coco.imgs.keys()))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
        """
        img_id = self.ids[index]
        annotations = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
        file_name = self.coco.loadImgs(img_id)[0]['file_name']
        image = Image.open(os.path.join(self.root, file_name)).convert('RGB')
        if self.transforms is not None:
            image, annotations = self.transforms(image, annotations)
        return image, annotations

    def __len__(self):
        return len(self.ids)
from .vision import VisionDataset
from PIL import Image
import os
import os.path
def has_file_allowed_extension(filename, extensions):
    """Check whether *filename* ends with one of *extensions*, ignoring case.

    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    lowered = filename.lower()
    return lowered.endswith(extensions)
def is_image_file(filename):
    """Checks if a file is an allowed image extension.
    Args:
        filename (string): path to a file
    Returns:
        bool: True if the filename ends with a known image extension
    """
    # Delegates to has_file_allowed_extension with the module-level
    # IMG_EXTENSIONS whitelist (defined further down in this file).
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(directory, class_to_idx, extensions=None, is_valid_file=None):
    """Collect ``(sample_path, class_index)`` pairs from a class-per-folder tree.

    Exactly one of *extensions* and *is_valid_file* must be given; when
    *extensions* is given, files are kept by a case-insensitive suffix check.
    Classes, directory walks and file names are all visited in sorted order
    so the result is deterministic.
    """
    directory = os.path.expanduser(directory)
    # Reject both-None and both-given in one equality check.
    if (extensions is None) == (is_valid_file is None):
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    if extensions is not None:
        def is_valid_file(path):
            # Case-insensitive extension check (inlined from
            # has_file_allowed_extension).
            return path.lower().endswith(extensions)
    samples = []
    for class_name in sorted(class_to_idx.keys()):
        label = class_to_idx[class_name]
        class_dir = os.path.join(directory, class_name)
        if not os.path.isdir(class_dir):
            continue
        for walk_root, _, file_names in sorted(os.walk(class_dir, followlinks=True)):
            for file_name in sorted(file_names):
                full_path = os.path.join(walk_root, file_name)
                if is_valid_file(full_path):
                    samples.append((full_path, label))
    return samples
class DatasetFolder(VisionDataset):
    """A generic data loader where the samples are arranged in this way: ::
        root/class_x/xxx.ext
        root/class_x/xxy.ext
        root/class_x/xxz.ext
        root/class_y/123.ext
        root/class_y/nsdf3.ext
        root/class_y/asd932_.ext
    Args:
        root (string): Root directory path.
        loader (callable): A function to load a sample given its path.
        extensions (tuple[string]): A list of allowed extensions.
            both extensions and is_valid_file should not be passed.
        transform (callable, optional): A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.
        is_valid_file (callable, optional): A function that takes path of a file
            and check if the file is a valid file (used to check of corrupt files)
            both extensions and is_valid_file should not be passed.
    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        samples (list): List of (sample path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """
    def __init__(self, root, loader, extensions=None, transform=None,
                 target_transform=None, is_valid_file=None):
        super(DatasetFolder, self).__init__(root, transform=transform,
                                            target_transform=target_transform)
        classes, class_to_idx = self._find_classes(self.root)
        samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
        if len(samples) == 0:
            # Fail fast with an actionable message instead of silently
            # exposing an empty dataset.
            msg = "Found 0 files in subfolders of: {}\n".format(self.root)
            if extensions is not None:
                msg += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(msg)
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        # Targets kept separately so label-only access needs no path handling.
        self.targets = [s[1] for s in samples]
    def _find_classes(self, dir):
        """
        Finds the class folders in a dataset.
        Args:
            dir (string): Root directory path.
        Returns:
            tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
        Ensures:
            No class is a subdirectory of another.
        """
        # Each immediate sub-directory of ``dir`` is one class; sorting makes
        # the class -> index mapping deterministic across machines.
        classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        classes.sort()
        class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
        return classes, class_to_idx
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
    def __len__(self):
        return len(self.samples)
# File extensions (lowercase) treated as images by is_image_file/ImageFolder.
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def pil_loader(path):
    """Load the image at *path* with PIL and return it converted to RGB.

    The file handle is opened explicitly instead of handing PIL the path,
    to avoid a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as file_handle:
        image = Image.open(file_handle)
        return image.convert('RGB')
def accimage_loader(path):
    # accimage is an optional, faster image backend; imported lazily so the
    # module also works without it installed.
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
def default_loader(path):
    """Load an image using the backend selected via torchvision's image-backend setting."""
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    return pil_loader(path)
class ImageFolder(DatasetFolder):
    """A generic data loader where the images are arranged in this way: ::
        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/xxz.png
        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png
    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
        is_valid_file (callable, optional): A function that takes path of an Image file
            and check if the file is a valid file (used to check of corrupt files)
    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """
    def __init__(self, root, transform=None, target_transform=None,
                 loader=default_loader, is_valid_file=None):
        # When a custom ``is_valid_file`` is given, extension filtering is
        # disabled, since DatasetFolder accepts exactly one of the two.
        super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
                                          transform=transform,
                                          target_transform=target_transform,
                                          is_valid_file=is_valid_file)
        # Backwards-compatible alias for ``samples``.
        self.imgs = self.samples
import glob
import os
from .utils import list_dir
from .folder import make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset
class UCF101(VisionDataset):
    """
    `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.
    UCF101 is an action recognition video dataset.
    This dataset consider every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.
    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.
    Internally, it uses a VideoClips object to handle clip creation.
    Args:
        root (string): Root directory of the UCF101 Dataset.
        annotation_path (str): path to the folder containing the split files
        frames_per_clip (int): number of frames in a clip.
        step_between_clips (int, optional): number of frames between each clip.
        fold (int, optional): which fold to use. Should be between 1 and 3.
        train (bool, optional): if ``True``, creates a dataset from the train split,
            otherwise from the ``test`` split.
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
    Returns:
        video (Tensor[T, H, W, C]): the `T` video frames
        audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
            and `L` is the number of points
        label (int): class of the video clip
    """
    def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
                 frame_rate=None, fold=1, train=True, transform=None,
                 _precomputed_metadata=None, num_workers=1, _video_width=0,
                 _video_height=0, _video_min_dimension=0, _audio_samples=0):
        super(UCF101, self).__init__(root)
        if not 1 <= fold <= 3:
            raise ValueError("fold should be between 1 and 3, got {}".format(fold))
        extensions = ('avi',)
        self.fold = fold
        self.train = train
        # One sub-folder per action class; sorting keeps indices stable.
        classes = list(sorted(list_dir(root)))
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
        self.classes = classes
        video_list = [x[0] for x in self.samples]
        # Clips are computed over *all* videos first, then restricted to the
        # requested fold/split below via ``subset``.
        video_clips = VideoClips(
            video_list,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
        )
        self.video_clips_metadata = video_clips.metadata
        self.indices = self._select_fold(video_list, annotation_path, fold, train)
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform
    @property
    def metadata(self):
        # Metadata for the *full* (un-subset) clip list, so it can be reused.
        return self.video_clips_metadata
    def _select_fold(self, video_list, annotation_path, fold, train):
        """Return indices of the videos that belong to the chosen split file."""
        # Official split files are named e.g. ``trainlist01.txt`` and list
        # video paths relative to the dataset root.
        name = "train" if train else "test"
        name = "{}list{:02d}.txt".format(name, fold)
        f = os.path.join(annotation_path, name)
        selected_files = []
        with open(f, "r") as fid:
            data = fid.readlines()
            data = [x.strip().split(" ") for x in data]
            data = [x[0] for x in data]
            selected_files.extend(data)
        selected_files = set(selected_files)
        # Compare paths relative to root (strip ``root`` plus the separator).
        indices = [i for i in range(len(video_list)) if video_list[i][len(self.root) + 1:] in selected_files]
        return indices
    def __len__(self):
        # Length is the number of clips in the selected split.
        return self.video_clips.num_clips()
    def __getitem__(self, idx):
        video, audio, info, video_idx = self.video_clips.get_clip(idx)
        # Map the subset clip back to its source video to recover the label.
        label = self.samples[self.indices[video_idx]][1]
        if self.transform is not None:
            video = self.transform(video)
        return video, audio, label
import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
from torchvision.datasets.video_utils import VideoClips
class DistributedSampler(Sampler):
    """
    Extension of DistributedSampler, as discussed in
    https://github.com/pytorch/pytorch/issues/23430

    Shards the dataset across ``num_replicas`` processes in blocks of
    ``group_size`` consecutive indices, padding by wrapping around so every
    replica receives the same number of samples. Example (14 items, 4
    replicas, group_size=1): rank 0 gets [0, 4, 8, 12], rank 2 gets
    [2, 6, 10, 0]; with group_size=2, rank 2 gets [4, 5, 12, 13].
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=False, group_size=1):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        assert len(dataset) % group_size == 0, (
            "dataset length must be a multiplier of group size"
            "dataset length: %d, group size: %d" % (len(dataset), group_size)
        )
        self.dataset = dataset
        self.group_size = group_size
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Number of index groups, then ceil-divide across replicas so every
        # replica draws the same number of groups (hence samples).
        num_groups = len(dataset) // group_size
        self.num_group_samples = int(
            math.ceil(num_groups * 1.0 / self.num_replicas)
        )
        self.num_samples = self.num_group_samples * group_size
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        # Seed with the epoch so every replica generates the same permutation.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            order = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            order = list(range(len(self.dataset)))
        # Pad by wrapping around so the total divides evenly across replicas.
        order += order[:(self.total_size - len(order))]
        assert len(order) == self.total_size
        n_groups = self.total_size // self.group_size
        grouped = torch.reshape(
            torch.LongTensor(order), (n_groups, self.group_size)
        )
        # Each replica takes every num_replicas-th group starting at its rank.
        shard = grouped[self.rank:n_groups:self.num_replicas, :]
        picked = torch.reshape(shard, (-1,)).tolist()
        assert len(picked) == self.num_samples
        if isinstance(self.dataset, Sampler):
            # When wrapping another sampler, translate positions into that
            # sampler's own output indices.
            underlying = list(iter(self.dataset))
            picked = [underlying[i] for i in picked]
        return iter(picked)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
class UniformClipSampler(Sampler):
    """Sampler yielding `num_clips_per_video` evenly spaced clip indices per video.

    Videos with fewer unique clips than requested repeat clips until the
    requested count is reached; videos whose decoding produced zero clips are
    skipped entirely.

    Arguments:
        video_clips (VideoClips): video clips to sample from
        num_clips_per_video (int): number of clips to be sampled per video
    """
    def __init__(self, video_clips, num_clips_per_video):
        if not isinstance(video_clips, VideoClips):
            raise TypeError("Expected video_clips to be an instance of VideoClips, "
                            "got {}".format(type(video_clips)))
        self.video_clips = video_clips
        self.num_clips_per_video = num_clips_per_video

    def __iter__(self):
        picks = []
        offset = 0
        for clips in self.video_clips.clips:
            count = len(clips)
            if count > 0:
                # Evenly spaced (repeating when count is small) positions
                # within this video's global clip-index range.
                spaced = torch.linspace(
                    offset, offset + count - 1, steps=self.num_clips_per_video
                )
                picks.append(spaced.floor().to(torch.int64))
            # Videos with zero clips still advance the offset by zero.
            offset += count
        return iter(torch.cat(picks).tolist())

    def __len__(self):
        nonempty = sum(1 for clips in self.video_clips.clips if len(clips) > 0)
        return nonempty * self.num_clips_per_video
class RandomClipSampler(Sampler):
    """
    Samples at most `max_video_clips_per_video` clips for each video randomly
    Arguments:
        video_clips (VideoClips): video clips to sample from
        max_clips_per_video (int): maximum number of clips to be sampled per video
    """
    def __init__(self, video_clips, max_clips_per_video):
        if not isinstance(video_clips, VideoClips):
            raise TypeError("Expected video_clips to be an instance of VideoClips, "
                            "got {}".format(type(video_clips)))
        self.video_clips = video_clips
        self.max_clips_per_video = max_clips_per_video

    def __iter__(self):
        """Yield global clip indices: up to max_clips_per_video random clips
        per video, then shuffled across all videos."""
        idxs = []
        s = 0  # running offset: global index of the current video's first clip
        # select at most max_clips_per_video for each video, randomly
        for c in self.video_clips.clips:
            length = len(c)
            size = min(length, self.max_clips_per_video)
            # Random clip positions within this video, shifted to global indices.
            sampled = torch.randperm(length)[:size] + s
            s += length
            idxs.append(sampled)
        idxs = torch.cat(idxs)
        # shuffle all clips randomly
        perm = torch.randperm(len(idxs))
        idxs = idxs[perm].tolist()
        return iter(idxs)

    def __len__(self):
        """Total number of clips that will be sampled across all videos."""
        return sum(min(len(c), self.max_clips_per_video) for c in self.video_clips.clips)
rpi\_ws281x
===========
Userspace Raspberry Pi library for controlling WS281X LEDs. This
includes WS2812 and SK6812RGB RGB LEDs Preliminary support is now
included for SK6812RGBW LEDs (yes, RGB + W) The LEDs can be controlled
by either the PWM (2 independent channels) or PCM controller (1 channel)
or the SPI interface (1 channel).
Background:
-----------
The BCM2835 in the Raspberry Pi has both a PWM and a PCM module that are
well suited to driving individually controllable WS281X LEDs. Using the
DMA, PWM or PCM FIFO, and serial mode in the PWM, it's possible to
control almost any number of WS281X LEDs in a chain connected to the
appropriate output pin. For SPI the Raspbian spidev driver is used
(``/dev/spidev0.0``). This library and test program set the clock rate
to 3X the desired output frequency and create a bit pattern in RAM from
an array of colors where each bit is represented by 3 bits as follows.
::
Bit 1 - 1 1 0
Bit 0 - 1 0 0
GPIO Usage:
-----------
The GPIOs that can be used are limited by the hardware of the Pi and
will vary based on the method used to drive them (PWM, PCM or SPI).
Beware that the GPIO numbers are not the same as the physical pin
numbers on the header.
PWM:
::
PWM0, which can be set to use GPIOs 12, 18, 40, and 52.
Only 12 (pin 32) and 18 (pin 12) are available on the B+/2B/3B
PWM1 which can be set to use GPIOs 13, 19, 41, 45 and 53.
Only 13 is available on the B+/2B/PiZero/3B, on pin 33
PCM:
::
PCM_DOUT, which can be set to use GPIOs 21 and 31.
Only 21 is available on the B+/2B/PiZero/3B, on pin 40.
SPI:
::
SPI0-MOSI is available on GPIOs 10 and 38.
Only GPIO 10 is available on all models.
See also note for RPi 3 below.
Power and voltage requirements
------------------------------
WS281X LEDs are generally driven at 5V. Depending on your actual LED
model and data line length you might be able to successfully drive the
data input with 3.3V. However in the general case you probably want to
use a level shifter to convert from the Raspberry Pi GPIO/PWM to 5V.
It is also possible to run the LEDs from a 3.3V - 3.6V power source, and
connect the GPIO directly at a cost of brightness, but this isn't
recommended.
The test program is designed to drive an 8x8 grid of LEDs, e.g. from
Adafruit (http://www.adafruit.com/products/1487) or Pimoroni
(https://shop.pimoroni.com/products/unicorn-hat). Please see the
Adafruit and Pimoroni websites for more information.
Know what you're doing with the hardware and electricity. I take no
responsibility for damage, harm, or mistakes.
Important warning about DMA channels
------------------------------------
You must make sure that the DMA channel you choose to use for the LEDs
is not `already in
use <https://www.raspberrypi.org/forums/viewtopic.php?p=609380#p609380>`__
by the operating system.
For example, **using DMA channel 5 will cause filesystem corruption**
on the Raspberry Pi 3 Model B.
See: https://github.com/jgarff/rpi_ws281x/issues/224
The default DMA channel (10) should be safe for the Raspberry Pi 3 Model
B, but this may change in future software releases.
Limitations:
------------
PWM
~~~
Since this library and the onboard Raspberry Pi audio both use the PWM,
they cannot be used together. You will need to blacklist the Broadcom
audio kernel module by creating a file
``/etc/modprobe.d/snd-blacklist.conf`` with
::
blacklist snd_bcm2835
If the audio device is still loading after blacklisting, you may also
need to comment it out in the /etc/modules file.
On headless systems you may also need to force audio through hdmi Edit
config.txt and add:
::
hdmi_force_hotplug=1
hdmi_force_edid_audio=1
A reboot is required for this change to take effect
Some distributions use audio by default, even if nothing is being
played. If audio is needed, you can use a USB audio device instead.
PCM
~~~
When using PCM you cannot use digital audio devices which use I2S since
I2S uses the PCM hardware, but you can use analog audio.
SPI
~~~
When using SPI the ledstring is the only device which can be connected
to the SPI bus. Both digital (I2S/PCM) and analog (PWM) audio can be
used.
Many distributions have a maximum SPI transfer of 4096 bytes. This can
be changed in ``/boot/cmdline.txt`` by appending
::
spidev.bufsiz=32768
On a RPi 3 you have to change the GPU core frequency to 250 MHz,
otherwise the SPI clock has the wrong frequency. Do this by adding the
following line to /boot/config.txt and reboot.
::
core_freq=250
On a RPi 4 its dynamic frequency clocking has to be disabled, since it will
desync the SPI clock. Do this by adding this line to
``/boot/config.txt``. (``core_freq`` does not have to be changed, since
the default value of 500MHz is SPI compatible)
::
core_freq_min=500
SPI requires you to be in the ``gpio`` group if you wish to control your
LEDs without root.
Comparison PWM/PCM/SPI
----------------------
Both PWM and PCM use DMA transfer to output the control signal for the
LEDs. The max size of a DMA transfer is 65536 bytes. Since each LED
needs 12 bytes (4 colors, 8 symbols per color, 3 bits per symbol) this
means you can control approximately 5400 LEDs for a single strand in PCM
and 2700 LEDs per string for PWM (Only PWM can control 2 independent
strings simultaneously) SPI uses the SPI device driver in the kernel.
For transfers larger than 96 bytes the kernel driver also uses DMA. Of
course there are practical limits on power and signal quality. These
will be more constraining in practice than the theoretical limits above.
When controlling a LED string of 240 LEDs the CPU load on the original
Pi 2 (BCM2836) are: PWM 5% PCM 5% SPI 1%
| /rpi_ws281x_3bp_spi1-0.0.1.tar.gz/rpi_ws281x_3bp_spi1-0.0.1/README.rst | 0.8474 | 0.67996 | README.rst | pypi |
import _rpi_ws281x as ws
import atexit
try:
    xrange(0)
except NameError:
    # Python 3 removed xrange; alias it to range so the Python 2 era code
    # below keeps working unchanged.
    xrange = range
def Color(red, green, blue, white=0):
    """Pack red, green, blue (and optional white) components into one int.

    Each component is expected in the range 0-255; the result layout is
    0xWWRRGGBB (white in the top byte).
    """
    rgb = (red << 16) | (green << 8) | blue
    return (white << 24) | rgb
class _LED_Data(object):
    """List-like view over one channel's SWIG LED color array.

    Supports integer and slice indexing for both reads and writes, delegating
    each element access to the _rpi_ws281x getter/setter functions.
    """
    def __init__(self, channel, size):
        self.size = size
        self.channel = channel

    def __getitem__(self, pos):
        """Return the 24-bit color value(s) at an index or slice of indices."""
        if not isinstance(pos, slice):
            # Single position lookup.
            return ws.ws2811_led_get(self.channel, pos)
        return [ws.ws2811_led_get(self.channel, n)
                for n in xrange(*pos.indices(self.size))]

    def __setitem__(self, pos, value):
        """Set the 24-bit color value(s) at an index or slice of indices."""
        if isinstance(pos, slice):
            # `value` must be a sequence at least as long as the slice.
            for index, n in enumerate(xrange(*pos.indices(self.size))):
                ws.ws2811_led_set(self.channel, n, value[index])
        else:
            return ws.ws2811_led_set(self.channel, pos, value)
class PixelStrip(object):
    """A SK6812/WS281x LED strip driven through the _rpi_ws281x C extension
    (SWIG bindings).  Construct, call begin() once, then set pixels and show().
    """

    def __init__(self, num, pin, freq_hz=800000, dma=10, invert=False,
                 brightness=255, channel=0, strip_type=None, gamma=None):
        """Class to represent a SK6812/WS281x LED display. Num should be the
        number of pixels in the display, and pin should be the GPIO pin connected
        to the display signal line (must be a PWM pin like 18!). Optional
        parameters are freq, the frequency of the display signal in hertz (default
        800khz), dma, the DMA channel to use (default 10), invert, a boolean
        specifying if the signal line should be inverted (default False), and
        channel, the PWM channel to use (defaults to 0).
        """
        if gamma is None:
            # Support gamma in place of strip_type for back-compat with
            # previous version of forked library
            if type(strip_type) is list and len(strip_type) == 256:
                gamma = strip_type
                strip_type = None
            else:
                # Identity gamma table (no correction).
                gamma = list(range(256))
        if strip_type is None:
            strip_type = ws.WS2811_STRIP_GRB
        # Create ws2811_t structure and fill in parameters.
        self._leds = ws.new_ws2811_t()
        # Initialize the channels to zero (the C struct has two channels;
        # the unused one must still be zeroed before ws2811_init).
        for channum in range(2):
            chan = ws.ws2811_channel_get(self._leds, channum)
            ws.ws2811_channel_t_count_set(chan, 0)
            ws.ws2811_channel_t_gpionum_set(chan, 0)
            ws.ws2811_channel_t_invert_set(chan, 0)
            ws.ws2811_channel_t_brightness_set(chan, 0)
        # Initialize the channel in use
        self._channel = ws.ws2811_channel_get(self._leds, channel)
        ws.ws2811_channel_t_gamma_set(self._channel, gamma)
        ws.ws2811_channel_t_count_set(self._channel, num)
        ws.ws2811_channel_t_gpionum_set(self._channel, pin)
        ws.ws2811_channel_t_invert_set(self._channel, 0 if not invert else 1)
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)
        ws.ws2811_channel_t_strip_type_set(self._channel, strip_type)
        # Initialize the controller
        ws.ws2811_t_freq_set(self._leds, freq_hz)
        ws.ws2811_t_dmanum_set(self._leds, dma)
        # Grab the led data array.
        self._led_data = _LED_Data(self._channel, num)
        # Substitute for __del__, traps an exit condition and cleans up properly
        atexit.register(self._cleanup)

    def _cleanup(self):
        # Clean up memory used by the library when not needed anymore.
        # Guarded so a second invocation (e.g. manual call plus atexit) is a no-op.
        if self._leds is not None:
            ws.ws2811_fini(self._leds)
            ws.delete_ws2811_t(self._leds)
            self._leds = None
            self._channel = None

    def setGamma(self, gamma):
        """Replace the 256-entry gamma correction table.

        NOTE(review): anything other than a 256-element list is silently
        ignored -- confirm whether this should raise instead.
        """
        if type(gamma) is list and len(gamma) == 256:
            ws.ws2811_channel_t_gamma_set(self._channel, gamma)

    def begin(self):
        """Initialize library, must be called once before other functions are
        called.

        Raises:
            RuntimeError: if the underlying ws2811_init call fails.
        """
        resp = ws.ws2811_init(self._leds)
        if resp != 0:
            str_resp = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, str_resp))

    def show(self):
        """Update the display with the data from the LED buffer.

        Raises:
            RuntimeError: if the underlying ws2811_render call fails.
        """
        resp = ws.ws2811_render(self._leds)
        if resp != 0:
            str_resp = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, str_resp))

    def setPixelColor(self, n, color):
        """Set LED at position n to the provided 24-bit color value (in RGB order).
        """
        self._led_data[n] = color

    def setPixelColorRGB(self, n, red, green, blue, white=0):
        """Set LED at position n to the provided red, green, and blue color.
        Each color component should be a value from 0 to 255 (where 0 is the
        lowest intensity and 255 is the highest intensity).
        """
        self.setPixelColor(n, Color(red, green, blue, white))

    def getBrightness(self):
        """Return the channel's current brightness (0-255)."""
        return ws.ws2811_channel_t_brightness_get(self._channel)

    def setBrightness(self, brightness):
        """Scale each LED in the buffer by the provided brightness. A brightness
        of 0 is the darkest and 255 is the brightest.
        """
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)

    def getPixels(self):
        """Return an object which allows access to the LED display data as if
        it were a sequence of 24-bit RGB values.
        """
        return self._led_data

    def numPixels(self):
        """Return the number of pixels in the display."""
        return ws.ws2811_channel_t_count_get(self._channel)

    def getPixelColor(self, n):
        """Get the 24-bit RGB color value for the LED at position n."""
        return self._led_data[n]

    def getPixelColorRGB(self, n):
        """Return an object with .r/.g/.b attributes for the LED at position n."""
        # A bare lambda is used as a cheap attribute container.
        c = lambda: None
        setattr(c, 'r', self._led_data[n] >> 16 & 0xff)
        setattr(c, 'g', self._led_data[n] >> 8 & 0xff)
        setattr(c, 'b', self._led_data[n] & 0xff)
        return c

    def getPixelColorRGBW(self, n):
        """Return an object with .w/.r/.g/.b attributes for the LED at position n."""
        c = lambda: None
        setattr(c, 'w', self._led_data[n] >> 24 & 0xff)
        setattr(c, 'r', self._led_data[n] >> 16 & 0xff)
        setattr(c, 'g', self._led_data[n] >> 8 & 0xff)
        setattr(c, 'b', self._led_data[n] & 0xff)
        return c
# Shim for back-compatibility
class Adafruit_NeoPixel(PixelStrip):
    """Backwards-compatible alias for PixelStrip (the old Adafruit class name)."""
    pass
import time
from abc import ABC, abstractmethod
from colour import Color as C
import random
from enum import Enum
import math
from operator import add
from pydantic import BaseModel
from easing_functions import *
from rpi_ws281x_hub.strip import ColorPixelStrip
# Color palettes (lists of colour.Color objects) shared by the animation tasks:
# RAINBOW: 256 entries sweeping red -> cyan -> back to red (a full hue wheel);
# STAR: 128 entries yellow -> white; WAVE: 128 entries blue -> cyan.
RAINBOW = list(C('#FF0000').range_to(C('#00FFFE'), 128)) + list(C('#00FFFF').range_to(C('#FF0001'), 128))
STAR = list(C('yellow').range_to(C('white'), 128))
WAVE = list(C('blue').range_to(C('cyan'), 128))
def roll_index(position, max_position):
    """Wrap *position* into the half-open range [0, max_position).

    Works for ints and floats.  Fixes two defects of the original
    add/subtract version: position == max_position now wraps to 0 instead of
    returning an out-of-range index, and values more than one full length
    outside the range are wrapped completely (Python's modulo already yields
    a non-negative result for negative positions).
    """
    return position % max_position
def dim_color(color, intensity):
    """Return *color* scaled by *intensity*, with each RGB channel clamped to [0, 1]."""
    scaled = [min(max(channel * intensity, 0), 1) for channel in color.rgb]
    return C(rgb=tuple(scaled))
def rotate(array, position):
    """Rotate a sequence left by *position* places (works for lists and strings)."""
    head, tail = array[position:], array[:position]
    return head + tail
def add_color(c1, c2):
    """Return the channel-wise sum of two colors, clamped to [0, 1] per channel."""
    summed = [a + b for a, b in zip(c1.rgb, c2.rgb)]
    return C(rgb=tuple([max(0, min(channel, 1)) for channel in summed]))
def task(func):
    """Task decorator (wrap the `__call__` method of animation tasks).

    Runs the wrapped method, then returns the strip's resulting frame as a
    list of hex color strings (one per pixel).

    Fix: the wrapper is now decorated with functools.wraps so the task
    method keeps its name/docstring for debugging and introspection.
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        func(self, *args, **kwargs)
        return [self.strip.getPixelRGB(i).hex for i in range(self.strip.numPixels())]
    return wrapper
class Fire():
    """Flicker effect: each tick paints one random pixel with a random
    palette color at a random intensity (default palette: orange/red)."""
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip
        # Palette may be overridden via kwargs['colors'] (list of color names).
        colors = kwargs.get('colors', ['orange', 'red'])
        self.colors = [C(color) for color in colors]

    @task
    def __call__(self, ratio: float):
        # Pick a random pixel, a random palette color, and a random intensity.
        i = int(random.uniform(0, self.strip.numPixels()))
        c = self.colors[int(random.uniform(0, len(self.colors)))]
        intensity = random.uniform(0, 1)
        # Scale the chosen color's RGB channels by the intensity.
        color = C(rgb=tuple([e * intensity for e in c.rgb]))
        self.strip.setPixelRGB(i, color)
        self.strip.show()
class Raindow():
    """Floods the whole strip with a single rainbow color chosen by *ratio*.

    (The class name's misspelling is kept for backward compatibility with
    existing callers such as TaskFactory.)
    """
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip

    @task
    def __call__(self, ratio: float):
        # Fix: the palette lookup is loop-invariant; compute it once instead
        # of once per pixel.
        color = RAINBOW[int(ratio * 255)]
        for i in range(self.strip.numPixels()):
            self.strip.setPixelRGB(i, color)
        self.strip.show()
class RaindowChase():
    """Rotating rainbow spread across the strip.

    NOTE(review): this definition is dead code -- it is immediately shadowed
    by the class of the same name re-declared just below, which also adds the
    @task decorator to __call__.  Consider deleting this copy.
    """
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip
        self.index = 0

    def __call__(self, ratio: float):
        # Rotate the palette by an offset proportional to ratio.
        d = int(ratio*255)
        rotate_colors = RAINBOW[d:255] + RAINBOW[0:d]
        for i in range(self.strip.numPixels()):
            self.strip.setPixelRGB(i, rotate_colors[int((i/self.strip.numPixels())*255)])
        self.strip.show()
class RaindowChase():
    """Rotating rainbow spread across the strip; the rotation offset advances
    with *ratio* so the colors chase along the pixels."""
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip
        self.index = 0

    @task
    def __call__(self, ratio: float):
        offset = int(ratio * 255)
        # Fix: rotate the full 256-entry palette; the original slice
        # RAINBOW[d:255] silently dropped the last palette color.
        palette = RAINBOW[offset:] + RAINBOW[:offset]
        num = self.strip.numPixels()  # hoisted out of the loop
        for i in range(num):
            self.strip.setPixelRGB(i, palette[int((i / num) * 255)])
        self.strip.show()
class FallingStars():
    """A few star-colored points drifting around the strip at random speeds,
    leaving fading trails behind them."""
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip
        # Random star count with random start positions, speeds and colors.
        self.count = int(random.uniform(2, 4))
        self.positions = random.choices(list(range(self.strip.numPixels())), k=self.count)
        self.speeds = [random.uniform(-1, 1) for s in range(self.count)]
        self.colors = random.choices(STAR, k=self.count)
        # Fix: removed leftover debug print of the random parameters.

    @task
    def __call__(self, ratio: float):
        numPixels = self.strip.numPixels()
        # Fade the previous frame to create trailing tails.
        for i in range(self.strip.numPixels()):
            self.strip.setPixelRGB(i, dim_color(self.strip.getPixelRGB(i), 0.75))
        # Advance each star (wrapping around the strip) and draw it.
        for i in range(self.count):
            self.positions[i] += self.speeds[i]
            self.positions[i] = roll_index(self.positions[i], numPixels)
            self.strip.setPixelRGB(int(self.positions[i]), self.colors[i])
        self.strip.show()
class ColorWheel():
    """Fades the whole strip in and out through one of six random rainbow
    colors, switching to a new color each time the brightness bottoms out."""
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip
        # Six candidate colors for this instance; one active at a time.
        self.colors = random.choices(RAINBOW, k=6)
        self.color = random.choice(self.colors)
        self.easing = CubicEaseInOut()

    @task
    def __call__(self, ratio: float):
        # Triangle wave over the cycle (up to ratio 0.5, back down to 1.0),
        # shaped by the easing curve.
        r = self.easing(ratio if ratio < 0.5 else 1 - ratio)
        if r < 0.01:
            # Near-black: swap colors while the change is invisible.
            self.color = random.choice(self.colors)
        for i in range(self.strip.numPixels()):
            self.strip.setPixelRGB(i, dim_color(self.color, r))
        self.strip.show()
class Sparkles():
    """Random star-colored sparkles that fade out over successive ticks."""
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip

    @task
    def __call__(self, ratio: float):
        numPixels = self.strip.numPixels()
        # Fade every pixel toward black a little each tick.
        for i in range(numPixels):
            self.strip.setPixelRGB(i, dim_color(self.strip.getPixelRGB(i), 0.75))
        # ~20% chance per tick to ignite a new sparkle at a random pixel.
        if random.choice(range(10)) < 2:
            index = random.choice(range(numPixels))
            color = random.choice(STAR)
            self.strip.setPixelRGB(index, color)
        self.strip.show()
class RainbowStar():
    """A rainbow comet: a moving head with an eased, fading tail that travels
    around the strip, occasionally changing speed and tail length."""
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip
        num = self.strip.numPixels()
        self.position = random.choice(range(num))
        # Tail covers between 1/8 and 1/4 of the strip.
        self.size = random.choice(range(int(num/8), int(num/4)))
        self.speed = random.uniform(0.5, 2)
        self.easing = CubicEaseIn()

    @task
    def __call__(self, ratio: float):
        num = self.strip.numPixels()
        # 10% chance per tick to change speed, and 10% to change tail length.
        if random.uniform(0, 1) < 0.1:
            self.speed = random.uniform(0.5, 2)
        if random.uniform(0, 1) < 0.1:
            self.size = random.choice(range(int(num/8), int(num/4)))
        self.position = roll_index(self.position + self.speed, num)
        fraction = self.position % 1  # sub-pixel offset for smooth motion
        # Map each tail pixel (with sub-pixel offset) onto the rainbow palette.
        indexes = [int(((i + fraction) / self.size) * 254) for i in range(self.size)]
        # Dim each tail pixel progressively toward the tail's end.
        start_tail = [dim_color(RAINBOW[p], 0.75 * (1 - self.easing(i / len(indexes)))) for i, p in enumerate(indexes)]
        # extend to strip size (fill with black)
        start_tail = start_tail + [C('black')] * (num - len(start_tail))
        start_tail = rotate(start_tail, int(self.position))
        for i in range(0, len(start_tail)):
            self.strip.setPixelRGB(i, start_tail[i])
        self.strip.show()
class Wave():
    """Several overlapping blue/cyan sine waves drifting along the strip."""
    def __init__(self, strip: ColorPixelStrip, **kwargs):
        self.strip = strip
        # 3-5 waves, each with its own phase, spatial period, speed and amplitude.
        self.count = int(random.uniform(3, 6))
        self.phases = [random.uniform(0, 2*math.pi) for w in range(self.count)]
        self.period = [random.uniform(0.01, 0.5) for w in range(self.count)]
        self.speeds = [random.uniform(-2, 2) for w in range(self.count)]
        self.intensities = [random.uniform(0.5, 1) for w in range(self.count)]

    @task
    def __call__(self, ratio: float):
        numPixels = self.strip.numPixels()
        numWaves = self.count
        # Strongly fade the previous frame so the fresh waves dominate.
        for i in range(numPixels):
            self.strip.setPixelRGB(i, dim_color(self.strip.getPixelRGB(i), 0.1))
        for w in range(numWaves):
            self.phases[w] += self.speeds[w]
            for i in range(numPixels):
                # Sine mapped into [0, intensity] for this pixel.
                value = self.intensities[w] * (1 + math.sin(self.phases[w] + self.period[w]*i)) / 2
                # NOTE(review): value can reach 1.0 in the extreme, which would
                # index one past the end of WAVE -- consider clamping.
                color = WAVE[int(value*len(WAVE))]
                # Additively blend, each wave contributing 1/numWaves.
                self.strip.setPixelRGB(i, add_color(self.strip.getPixelRGB(i), dim_color(color, 1/numWaves)))
        self.strip.show()
class TaskName(str, Enum):
    """Names of the available animation tasks.

    String-valued so the members serialize/compare cleanly as plain strings
    (e.g. in pydantic models); the values match TaskFactory.get()'s names.
    """
    fire = "fire"
    rainbow = "rainbow"
    rainbowChase = "rainbowChase"
    fallingStars = "fallingStars"
    colorWheel = "colorWheel"
    sparkles = "sparkles"
    rainbowStar = "rainbowStar"
    wave = "wave"
class TaskFactory():
    """Creates animation task instances by name for a given strip."""
    def __init__(self, strip: ColorPixelStrip):
        self.strip = strip

    def get(self, name: str, **kwargs):
        """Return a new task instance for *name*, or None if unknown.

        Any extra keyword arguments are forwarded to the task constructor.
        (Rewrote the if/elif chain as a dispatch table and removed the
        leftover debug print of name/kwargs.)
        """
        constructors = {
            'fire': Fire,
            'rainbow': Raindow,
            'rainbowChase': RaindowChase,
            'fallingStars': FallingStars,
            'colorWheel': ColorWheel,
            'sparkles': Sparkles,
            'rainbowStar': RainbowStar,
            'wave': Wave,
        }
        task_cls = constructors.get(name)
        if task_cls is None:
            return None
        return task_cls(self.strip, **kwargs)
import atexit
def Color(red, green, blue, white=0):
    """Pack red/green/blue (and optional white) components, each 0-255,
    into a single integer laid out as 0xWWRRGGBB."""
    packed = white
    for component in (red, green, blue):
        packed = (packed << 8) | component
    return packed
class PixelStrip(object):
    """In-memory mock of rpi_ws281x.PixelStrip for testing without hardware.

    Pixel writes go to a staging buffer (``_led_buffer``); ``show()`` copies
    it to the "displayed" buffer (``_leds``), mimicking the real library's
    render step.  ``begin()`` must be called before any other method.
    """

    def __init__(
        self,
        num,
        pin,
        freq_hz=800000,
        dma=10,
        invert=False,
        brightness=255,
        channel=0,
        strip_type=None,
        gamma=None,
    ):
        """Create a mock strip of *num* pixels.

        The hardware-related arguments (pin, freq_hz, dma, invert, channel,
        strip_type, gamma) are accepted for API compatibility with the real
        library but are ignored by this mock.
        """
        # Staging buffer (written by setPixelColor) and "displayed" buffer
        # (updated by show()).
        self._led_buffer = [0] * num
        self._leds = [0] * num
        self._brightness = brightness
        self._cleaned_up = False
        # Registered first so it runs last (atexit is LIFO): verifies at
        # interpreter exit that _cleanup actually ran.
        atexit.register(self.check_cleanup)
        # Substitute for __del__, traps an exit condition and cleans up properly
        atexit.register(self._cleanup)
        self.started = False

    def check_cleanup(self):
        # Fails loudly at exit if cleanup never happened.
        assert self._cleaned_up

    def _cleanup(self):
        # Clean up memory used by the library when not needed anymore.
        self._cleaned_up = True

    def begin(self):
        """Initialize library, must be called once before other functions are
        called.
        """
        assert self.started == False
        self.started = True

    def show(self):
        """Update the display with the data from the LED buffer."""
        assert self.started
        self._leds = self._led_buffer.copy()

    def setPixelColor(self, n, color):
        """Set LED at position n to the provided 24-bit color value (in RGB order).
        """
        assert self.started
        self._led_buffer[n] = color

    def setPixelColorRGB(self, n, red, green, blue, white=0):
        """Set LED at position n to the provided red, green, and blue color.
        Each color component should be a value from 0 to 255 (where 0 is the
        lowest intensity and 255 is the highest intensity).
        """
        assert self.started
        self.setPixelColor(n, Color(red, green, blue, white))

    def getBrightness(self):
        """Return the current global brightness (0-255)."""
        assert self.started
        return self._brightness

    def setBrightness(self, brightness):
        """Set the global brightness; 0 is the darkest and 255 the brightest.

        Bug fix: like the real library, the brightness is only *recorded*
        here (it is conceptually applied at render time).  The previous mock
        multiplied the packed 24-bit color integers in place, which bled bits
        across the R/G/B channel boundaries and permanently corrupted the
        pixel buffer.
        """
        assert self.started
        self._brightness = brightness

    def getPixels(self):
        """Return an object which allows access to the LED display data as if
        it were a sequence of 24-bit RGB values.
        """
        assert self.started
        return self._led_buffer

    def numPixels(self):
        """Return the number of pixels in the display."""
        assert self.started
        return len(self._led_buffer)

    def getPixelColor(self, n):
        """Get the 24-bit RGB color value for the LED at position n."""
        assert self.started
        return self._led_buffer[n]

    def getPixelColorRGB(self, n):
        """Return an object with .r/.g/.b attributes for the LED at position n."""
        assert self.started
        # A bare lambda is used as a cheap attribute container.
        c = lambda: None
        setattr(c, "r", self._led_buffer[n] >> 16 & 0xFF)
        setattr(c, "g", self._led_buffer[n] >> 8 & 0xFF)
        setattr(c, "b", self._led_buffer[n] & 0xFF)
        return c
# Shim for back-compatibility
class Adafruit_NeoPixel(PixelStrip):
    """Backwards-compatible alias for PixelStrip (the old Adafruit class name)."""
    pass
class ws:
    """Mock stand-ins for the _rpi_ws281x C-extension constants.

    The real extension exposes numeric constants; here each attribute is a
    descriptive string so code under test can still reference names such as
    ws.WS2811_STRIP_GRB without the native module installed.
    """
    WS2811_TARGET_FREQ = "_rpi_ws281x.WS2811_TARGET_FREQ"
    SK6812_STRIP_RGBW = "_rpi_ws281x.SK6812_STRIP_RGBW"
    SK6812_STRIP_RBGW = "_rpi_ws281x.SK6812_STRIP_RBGW"
    SK6812_STRIP_GRBW = "_rpi_ws281x.SK6812_STRIP_GRBW"
    SK6812_STRIP_GBRW = "_rpi_ws281x.SK6812_STRIP_GBRW"
    SK6812_STRIP_BRGW = "_rpi_ws281x.SK6812_STRIP_BRGW"
    SK6812_STRIP_BGRW = "_rpi_ws281x.SK6812_STRIP_BGRW"
    SK6812_SHIFT_WMASK = "_rpi_ws281x.SK6812_SHIFT_WMASK"
    WS2811_STRIP_RGB = "_rpi_ws281x.WS2811_STRIP_RGB"
    WS2811_STRIP_RBG = "_rpi_ws281x.WS2811_STRIP_RBG"
    WS2811_STRIP_GRB = "_rpi_ws281x.WS2811_STRIP_GRB"
    WS2811_STRIP_GBR = "_rpi_ws281x.WS2811_STRIP_GBR"
    WS2811_STRIP_BRG = "_rpi_ws281x.WS2811_STRIP_BRG"
    WS2811_STRIP_BGR = "_rpi_ws281x.WS2811_STRIP_BGR"
    WS2812_STRIP = "_rpi_ws281x.WS2812_STRIP"
    SK6812_STRIP = "_rpi_ws281x.SK6812_STRIP"
    SK6812W_STRIP = "_rpi_ws281x.SK6812W_STRIP"
rpi\_ws281x
===========
Userspace Raspberry Pi library for controlling WS281X LEDs. This
includes WS2812 and SK6812RGB RGB LEDs Preliminary support is now
included for SK6812RGBW LEDs (yes, RGB + W) The LEDs can be controlled
by either the PWM (2 independent channels) or PCM controller (1 channel)
or the SPI interface (1 channel).
Background:
-----------
The BCM2835 in the Raspberry Pi has both a PWM and a PCM module that are
well suited to driving individually controllable WS281X LEDs. Using the
DMA, PWM or PCM FIFO, and serial mode in the PWM, it's possible to
control almost any number of WS281X LEDs in a chain connected to the
appropriate output pin. For SPI the Raspbian spidev driver is used
(``/dev/spidev0.0``). This library and test program set the clock rate
to 3X the desired output frequency and create a bit pattern in RAM from
an array of colors where each bit is represented by 3 bits as follows.
::
Bit 1 - 1 1 0
Bit 0 - 1 0 0
GPIO Usage:
-----------
The GPIOs that can be used are limited by the hardware of the Pi and
will vary based on the method used to drive them (PWM, PCM or SPI).
Beware that the GPIO numbers are not the same as the physical pin
numbers on the header.
PWM:
::
PWM0, which can be set to use GPIOs 12, 18, 40, and 52.
Only 12 (pin 32) and 18 (pin 12) are available on the B+/2B/3B
PWM1 which can be set to use GPIOs 13, 19, 41, 45 and 53.
Only 13 is available on the B+/2B/PiZero/3B, on pin 33
PCM:
::
PCM_DOUT, which can be set to use GPIOs 21 and 31.
Only 21 is available on the B+/2B/PiZero/3B, on pin 40.
SPI:
::
SPI0-MOSI is available on GPIOs 10 and 38.
Only GPIO 10 is available on all models.
See also note for RPi 3 below.
Power and voltage requirements
------------------------------
WS281X LEDs are generally driven at 5V. Depending on your actual LED
model and data line length you might be able to successfully drive the
data input with 3.3V. However in the general case you probably want to
use a level shifter to convert from the Raspberry Pi GPIO/PWM to 5V.
It is also possible to run the LEDs from a 3.3V - 3.6V power source, and
connect the GPIO directly at a cost of brightness, but this isn't
recommended.
The test program is designed to drive an 8x8 grid of LEDs, e.g. from
Adafruit (http://www.adafruit.com/products/1487) or Pimoroni
(https://shop.pimoroni.com/products/unicorn-hat). Please see the
Adafruit and Pimoroni websites for more information.
Know what you're doing with the hardware and electricity. I take no
responsibility for damage, harm, or mistakes.
Important warning about DMA channels
------------------------------------
You must make sure that the DMA channel you choose to use for the LEDs
is not `already in
use <https://www.raspberrypi.org/forums/viewtopic.php?p=609380#p609380>`__
by the operating system.
For example, **using DMA channel 5 will cause filesystem corruption**
on the Raspberry Pi 3 Model B.
See: https://github.com/jgarff/rpi_ws281x/issues/224
The default DMA channel (10) should be safe for the Raspberry Pi 3 Model
B, but this may change in future software releases.
Limitations:
------------
PWM
~~~
Since this library and the onboard Raspberry Pi audio both use the PWM,
they cannot be used together. You will need to blacklist the Broadcom
audio kernel module by creating a file
``/etc/modprobe.d/snd-blacklist.conf`` with
::
blacklist snd_bcm2835
If the audio device is still loading after blacklisting, you may also
need to comment it out in the /etc/modules file.
On headless systems you may also need to force audio through hdmi Edit
config.txt and add:
::
hdmi_force_hotplug=1
hdmi_force_edid_audio=1
A reboot is required for this change to take effect
Some distributions use audio by default, even if nothing is being
played. If audio is needed, you can use a USB audio device instead.
PCM
~~~
When using PCM you cannot use digital audio devices which use I2S since
I2S uses the PCM hardware, but you can use analog audio.
SPI
~~~
When using SPI the ledstring is the only device which can be connected
to the SPI bus. Both digital (I2S/PCM) and analog (PWM) audio can be
used.
Many distributions have a maximum SPI transfer of 4096 bytes. This can
be changed in ``/boot/cmdline.txt`` by appending
::
spidev.bufsiz=32768
On a RPi 3 you have to change the GPU core frequency to 250 MHz,
otherwise the SPI clock has the wrong frequency. Do this by adding the
following line to /boot/config.txt and reboot.
::
core_freq=250
On a RPi 4 its dynamic frequency clocking has to be disabled, since it will
desync the SPI clock. Do this by adding this line to
``/boot/config.txt``. (``core_freq`` does not have to be changed, since
the default value of 500MHz is SPI compatible)
::
core_freq_min=500
SPI requires you to be in the ``gpio`` group if you wish to control your
LEDs without root.
Comparison PWM/PCM/SPI
----------------------
Both PWM and PCM use DMA transfer to output the control signal for the
LEDs. The max size of a DMA transfer is 65536 bytes. Since each LED
needs 12 bytes (4 colors, 8 symbols per color, 3 bits per symbol) this
means you can control approximately 5400 LEDs for a single strand in PCM
and 2700 LEDs per string for PWM (Only PWM can control 2 independent
strings simultaneously) SPI uses the SPI device driver in the kernel.
For transfers larger than 96 bytes the kernel driver also uses DMA. Of
course there are practical limits on power and signal quality. These
will be more constraining in practice than the theoretical limits above.
When controlling a LED string of 240 LEDs the CPU load on the original
Pi 2 (BCM2836) are: PWM 5% PCM 5% SPI 1%
| /rpi_ws281x-5.0.0.tar.gz/rpi_ws281x-5.0.0/README.rst | 0.8474 | 0.67996 | README.rst | pypi |
import _rpi_ws281x as ws
import atexit
class RGBW(int):
    """An int subclass holding a packed 0xWWRRGGBB color value.

    Construct either from a single already-packed value (``RGBW(0x123456)``)
    or from separate components (``RGBW(r, g, b[, w])``).  The r/g/b/w
    properties unpack the individual 8-bit channels.
    """
    def __new__(cls, r, g=None, b=None, w=None):
        # Fix: the first parameter of __new__ receives the class, so it is
        # named `cls` rather than the misleading `self`.
        if (g, b, w) == (None, None, None):
            # Single argument: already a packed color value.
            return int.__new__(cls, r)
        else:
            if w is None:
                w = 0
            return int.__new__(cls, (w << 24) | (r << 16) | (g << 8) | b)

    @property
    def r(self):
        """Red channel (0-255)."""
        return (self >> 16) & 0xff

    @property
    def g(self):
        """Green channel (0-255)."""
        return (self >> 8) & 0xff

    @property
    def b(self):
        """Blue channel (0-255)."""
        return (self) & 0xff

    @property
    def w(self):
        """White channel (0-255)."""
        return (self >> 24) & 0xff
def Color(red, green, blue, white=0):
    """Convert the provided red, green, blue color to a 24-bit color value.
    Each color component should be a value 0-255 where 0 is the lowest intensity
    and 255 is the highest intensity.

    The optional ``white`` component (for SK6812 RGBW strips) occupies the
    high byte; the result is an ``RGBW`` instance, which is an ``int``
    subclass, so it can be used anywhere a packed color integer is expected.
    """
    return RGBW(red, green, blue, white)
class PixelStrip:
    """Driver for a strand of SK6812/WS281x LEDs, wrapping the
    ``_rpi_ws281x`` C extension (imported as ``ws``).

    Pixels are accessed like a sequence (``strip[i]``, ``strip[a:b]``) or via
    the ``setPixelColor``/``getPixelColor`` helpers. Call ``begin()`` once
    before use and ``show()`` to push the buffer out to the hardware.
    """

    def __init__(self, num, pin, freq_hz=800000, dma=10, invert=False,
                 brightness=255, channel=0, strip_type=None, gamma=None):
        """Class to represent a SK6812/WS281x LED display. Num should be the
        number of pixels in the display, and pin should be the GPIO pin connected
        to the display signal line (must be a PWM pin like 18!). Optional
        parameters are freq, the frequency of the display signal in hertz (default
        800khz), dma, the DMA channel to use (default 10), invert, a boolean
        specifying if the signal line should be inverted (default False), and
        channel, the PWM channel to use (defaults to 0).
        """
        if gamma is None:
            # Support gamma in place of strip_type for back-compat with
            # previous version of forked library
            if type(strip_type) is list and len(strip_type) == 256:
                gamma = strip_type
                strip_type = None
            else:
                # Identity gamma table (no correction).
                gamma = list(range(256))

        if strip_type is None:
            strip_type = ws.WS2811_STRIP_GRB

        # Create ws2811_t structure and fill in parameters.
        self._leds = ws.new_ws2811_t()

        # Initialize the channels to zero
        for channum in range(2):
            chan = ws.ws2811_channel_get(self._leds, channum)
            ws.ws2811_channel_t_count_set(chan, 0)
            ws.ws2811_channel_t_gpionum_set(chan, 0)
            ws.ws2811_channel_t_invert_set(chan, 0)
            ws.ws2811_channel_t_brightness_set(chan, 0)

        # Initialize the channel in use
        self._channel = ws.ws2811_channel_get(self._leds, channel)
        ws.ws2811_channel_t_gamma_set(self._channel, gamma)
        ws.ws2811_channel_t_count_set(self._channel, num)
        ws.ws2811_channel_t_gpionum_set(self._channel, pin)
        ws.ws2811_channel_t_invert_set(self._channel, 0 if not invert else 1)
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)
        ws.ws2811_channel_t_strip_type_set(self._channel, strip_type)

        # Initialize the controller
        ws.ws2811_t_freq_set(self._leds, freq_hz)
        ws.ws2811_t_dmanum_set(self._leds, dma)

        # Pixel count, used for slice bounds in __getitem__/__setitem__.
        self.size = num

        # Substitute for __del__, traps an exit condition and cleans up properly
        atexit.register(self._cleanup)

    def __getitem__(self, pos):
        """Return the 24-bit RGB color value at the provided position or slice
        of positions.
        """
        # Handle if a slice of positions are passed in by grabbing all the values
        # and returning them in a list.
        if isinstance(pos, slice):
            return [ws.ws2811_led_get(self._channel, n) for n in range(*pos.indices(self.size))]
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_get(self._channel, pos)

    def __setitem__(self, pos, value):
        """Set the 24-bit RGB color value at the provided position or slice of
        positions.
        """
        # Handle if a slice of positions are passed in by setting the appropriate
        # LED data values to the provided value.
        if isinstance(pos, slice):
            for n in range(*pos.indices(self.size)):
                ws.ws2811_led_set(self._channel, n, value)
        # Else assume the passed in value is a number to the position.
        else:
            return ws.ws2811_led_set(self._channel, pos, value)

    def __len__(self):
        """Number of LEDs on the active channel, as recorded in the C struct."""
        return ws.ws2811_channel_t_count_get(self._channel)

    def _cleanup(self):
        # Clean up memory used by the library when not needed anymore.
        # Registered via atexit in __init__; guarded so it is safe to run more
        # than once (subsequent calls see self._leds is None and do nothing).
        if self._leds is not None:
            ws.ws2811_fini(self._leds)
            ws.delete_ws2811_t(self._leds)
            self._leds = None
            self._channel = None

    def setGamma(self, gamma):
        """Replace the gamma table. Silently ignored unless ``gamma`` is a
        list of exactly 256 entries.
        """
        if type(gamma) is list and len(gamma) == 256:
            ws.ws2811_channel_t_gamma_set(self._channel, gamma)

    def begin(self):
        """Initialize library, must be called once before other functions are
        called.
        """
        resp = ws.ws2811_init(self._leds)
        if resp != 0:
            str_resp = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, str_resp))

    def show(self):
        """Update the display with the data from the LED buffer."""
        resp = ws.ws2811_render(self._leds)
        if resp != 0:
            str_resp = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, str_resp))

    def setPixelColor(self, n, color):
        """Set LED at position n to the provided 24-bit color value (in RGB order).
        """
        self[n] = color

    def setPixelColorRGB(self, n, red, green, blue, white=0):
        """Set LED at position n to the provided red, green, and blue color.
        Each color component should be a value from 0 to 255 (where 0 is the
        lowest intensity and 255 is the highest intensity).
        """
        self.setPixelColor(n, Color(red, green, blue, white))

    def getBrightness(self):
        """Return the current brightness (0-255) of the active channel."""
        return ws.ws2811_channel_t_brightness_get(self._channel)

    def setBrightness(self, brightness):
        """Scale each LED in the buffer by the provided brightness. A brightness
        of 0 is the darkest and 255 is the brightest.
        """
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)

    def getPixels(self):
        """Return an object which allows access to the LED display data as if
        it were a sequence of 24-bit RGB values.
        """
        return self[:]

    def numPixels(self):
        """Return the number of pixels in the display."""
        return len(self)

    def getPixelColor(self, n):
        """Get the 24-bit RGB color value for the LED at position n."""
        return self[n]

    def getPixelColorRGB(self, n):
        """Get the color at position n wrapped as an RGBW value (the white
        bits are simply zero on RGB-only strips)."""
        return RGBW(self[n])

    def getPixelColorRGBW(self, n):
        """Get the color at position n wrapped as an RGBW value."""
        return RGBW(self[n])
# Shim for back-compatibility
class Adafruit_NeoPixel(PixelStrip):
    """Backwards-compatible alias for :class:`PixelStrip`, kept so code
    written against the original Adafruit library name keeps working."""
    pass
rpi2caster
==========
Raspberry Pi controls a Monotype composition caster.
----------------------------------------------------
Based on computer2caster by John Cornelisse Original idea described at
http://letterpress.ch
Typesetting and casting software for a Raspberry Pi-based computer
control attachment for Monotype composition casters.
This program suite consists of three major parts:
1. Typesetting program for parsing UTF-8 text, calculating justification
and coding it as a series of control codes accepted by the Monotype
composition caster,
2. Casting program for sending said codes to the casting machine using
an interface with 32 pneumatic outputs, a pneumatic connection block
attached to the caster's paper tower and a machine cycle sensor
input. The program also allows to cast sorts, test the machine or set
a short text on-the-fly, then cast the composed type.
3. Inventory management program for adding, editing and deleting the
definitions for replaceable machine components: normal wedges and
matrix cases (diecases).
The workflow is as follows: 1. define matrix case layouts for your
matrix cases (and edit if needed), 2. define your normal wedges so that
the program knows what series in which set widths you have, 3. use a
typesetting program to generate a "ribbon" i.e. series of control codes
from a text, for a specified matrix case and normal wedge, 4. use the
casting program to test the machine/interface, perform machine
adjustments, and cast the type from ribbon you made earlier.
Things you need to do
~~~~~~~~~~~~~~~~~~~~~
1. Get a Raspberry Pi model B+ or 2B and install Raspbian
2. Use Raspbian jessie or stretch
3. Make a pneumatic interface that attaches on the Monotype's paper
tower - or have it made by someone with a CNC mill, 3D printer etc.
This is the hardest part. The pneumatic interface must be very
precisely done, so that no air leaks out.
4. Get 31 three-way solenoid valves on valve islands. 12 or 24V DC.
IMPORTANT: Some valves require minimum air pressure of 2...2.5bar.
Since the Monotype caster uses 1bar, the pressure differential across
the valve will be too low and the valve won't open. We need the 0bar
minimum pressure variety, even though they may take more electrical
power. A great candidate is the MATRIX BX758-8E1C3-24. The valve
block is very compact and looks a bit like a stepper motor. It
features up to 8 valves in a 55x55mm enclosure, with 12V DC or 24V DC
controls and minimal pressure of 0 bar (this is important - we'll be
using just 1bar!) The cost is about 200 Euro per piece (we need a set
of 4). Another good candidates are e.g. two Festo CPV10-GE-MP-8
islands with 8 x "C" valve (i.e. dual 3/2) and separate connections
for pilot supply.
5. Make a RPi to 32 open collector output interface (the Raspberry's
native GPIO pins cannot drive 24V loads, and there's not enough of
them). Check out
https://github.com/elegantandrogyne/rpi2caster-doc-hardware for the
documentation (electrical in Eagle, PDF and Gerber, mechanical in
CorelDraw) of my interface.
6. Resolve the dependencies as described further.
7. Install this software on your RPi.
System config
~~~~~~~~~~~~~
The most common distro is Raspbian and we'll use jessie or newer. If you
need GUI (HDMI connection from Raspberry to monitor/TV, local console),
choose the standard image; otherwise (for headless setup) use minimal.
Assuming that you know your way around the Raspberry, SSH into it, set
it up (you must enable I2C), choose locale, change password, update the
system etc.
- Network setup - I recommend configuring your router to offer the
Raspberry a static DHCP lease based on the MAC address. If that's not
possible (e.g. you don't have admin access to the router), use static
IP address or scan the network for a host with the RPi's MAC address
after each boot.
- User & security config advice:
1. Create user accounts and disable no-password sudoing for "pi" by
commenting out the respective line in /etc/sudoers.
2. Add new users to groups user "pi" belongs to. For security
reasons, you may want to remove "pi" from the "sudo" and "adm"
groups.
3. Since you'll log on the machine via SSH, you can use a RSA key
authentication instead of entering a password on each logon.
Either use a "ssh-copy-id [target-username@]target-host" command
or create a ~/.ssh/authorized\_keys file on the Raspberry and
paste your id\_rsa.pub contents there. Then you can just ssh by
typing "ssh [username@]monotype" or via PuTTY.
- Various improvements
1. You can enable GUI access by VNC. Install tightvncserver. Edit
/etc/lightdm/lightdm.conf and uncomment the lines in VNC section.
Change the port, geometry etc. if you wish. You don't have to
create any init scripts; lightdm will already take care of running
the VNC server. Just run "vncviewer [hostname or IP addr]:[port]"
client-side and you'll get a lightdm login screen. Sign in to your
account.
2. Using a web-based SSH client might be a good idea. I've used
shellinabox with great results. This way, you won't have to
install any additional software, esp. if you're using M$ Windows
(otherwise, you'll need PuTTY or other SSH client).
- Get rid of unneeded stuff! The original Raspbian distro has some
unneeded software installed by default. We can get rid of it by using
"sudo aptitude purge...":
1. wolfram-engine - removing it will clean somewhere around 450MB (!)
2. X, LXDE etc. unless you want to VNC into the machine or set up a
local console with GUI
3. anything related to Scratch
4. Minecraft or any other games, LibreOffice etc., they're huge
diskspace hogs, not really needed
Dependencies
~~~~~~~~~~~~
Some of the dependencies will be marked as "(repo)". This means that you
can install them from Raspbian repository using apt or aptitude.
1. python3 - doesn't come with minimal Jessie, and rpi2caster is written
in python3 only (it's relatively new and there's no need for
backwards compatibility)
2. python3-pip - for installing python3 packages (install it with apt or
aptitude; python3 will be installed automatically)
3. wiringpi2-python - Python bindings for wiringpi2. You can install it
with pip3: ``sudo pip3 install wiringpi2`` - necessary for program to
work!
| /rpi2caster-2.5.0.tar.gz/rpi2caster-2.5.0/README.rst | 0.582966 | 0.666314 | README.rst | pypi |
rpi2casterd
===========
Hardware driver and web API for rpi2caster
------------------------------------------
This is a machine control daemon for the ``rpi2caster`` typesetting and casting software.
It is supposed to run on a Raspberry Pi (any model) with an output expander based on two
MCP23017 chips to provide 32 additional outputs. These are connected to solenoid valves,
which in turn send the pneumatic signals to a Monotype composition caster or tape punch.
The program uses ``Flask`` to provide a rudimentary JSON API for caster control.
``gpiozero`` library is used for GPIO control, with RPi.GPIO as a preferable backend.
There are several available MCP23017 control backends:
1. SMBus (via ``smbus-cffi`` or ``smbus2`` package),
2. ``WiringPi`` library.
The daemon also controls several GPIO pins:
1. `ready LED (green)` - when lit, the control device and software is ready to use.
2. `working LED (green)` and `error LED (red)`, typically a dual-color common cathode LED, indicates the machine state -
green when the machine is working, red when the machine is stopping the pump, and orange when the machine is starting.
3. `motor start` and `motor stop` - pulse outputs for start/stop relays, connected with the original AllenWest motor starter
(their use is optional, more of a convenience).
4. `air` and `water` - for controlling air solenoid valve (prevents unnecessary air use when the machine is not working)
and cooling water valve/pump (ditto with water). Like motor control, this is more of a 'deluxe' feature and is not
necessary for caster operation.
5. `sensor` (photocell, e.g. TCST2103) input for getting the information about the machine cycle phase. When the sensor is
going ON, the air is fed into the machine; when the sensor is going OFF, the air is cut off and the control daemon
ends the signals sending sequence. This sensor is necessary for caster operation. Punching is timer-driven
and no sensor is needed.
6. `mode sense` input - when grounded, the interface works in the casting mode; when lifted (pulled up to 3V3),
the interface works in the punching mode. This input is typically connected with a 9-pin D-sub connector for the sensor,
with a jumper to the ground in the plug.
7. `shutdown` and `reboot buttons` - after one of these is held for 2 seconds, the LED flashes and the shutdown or reboot
procedure begins.
8. `emergency stop button` - stops the machine as soon as possible and marks the emergency stop as activated; when that happens,
the client software has to clear the emergency stop first in order to be able to use the machine.
The program uses ``Flask`` to provide a rudimentary JSON API for caster control.
Starting
--------
The interface needs to be started up in order to work. The startup procedure ensures that:
1. the interface is not busy, not stopping and not starting - has not been claimed by any other client,
2. air and (for casting only) water and motor is turned on, if the hardware supports this,
3. (for casting) the machine is actually turning; during this phase, the state LED lights up orange,
4. after the starting sequence is successfully finished, the state LED lights up green,
5. the interface will stay busy until released by the ``stop`` method.
Machine is started with a request from the client software. See the API section for details.
Stopping
--------
Stopping the interface ensures that:
1. if the pump is working, it is stopped (see the pump control section); during this phase the state LED lights up red,
2. air and (for casting) water and motor is turned off, if hardware supports this,
3. the state LED is turned off, if hardware supports this,
4. the `testing_mode` flag is set to False,
5. the interface is released for the future clients to claim.
Machine is stopped when called by the client software, when the machine has been stalling (waiting for the signal
from the cycle sensor for too long), or when emergency stop happens because of button press or client software request.
Pump control
------------
The software turns the pump on (sending ``NKS 0075`` + current 0075 justifying wedge position) or off.
Pump switch-off is done whenever the machine stops and the pump is marked as working. This ensures that after re-start,
the pump will stay stopped.
During the pump switch-off procedure, an "alarm" LED (red) is lit to prompt the operator to turn the
machine's main shaft a few times. The interface will then send a ``NJS 0005`` + current 0005 justifying wedge position.
This way, stopping the pump does not change the wedge position.
Motor control
-------------
When starting in the casting mode, the software activates the ``motor_start`` GPIO for a fraction of a second.
The GPIO can be coupled with a NO SPST relay connected with the original AllenWest electromagnetic starter.
Use a relay rated for at least 400V AC if your caster is wired for three-phase power (common in continental Europe).
The relay should be connected to the contacts marked "1" and "2" on the motor starter.
Similarly, when stopping in the casting mode, the software activates the ``motor_stop`` GPIO. This can be coupled
with a NC SPST relay that breaks the current flow through the starter's coil. The relay should be connected instead
of a jumper between one of the live wires and the contact marked as "2".
Air and water control
---------------------
The daemon can also control a solenoid valve to enable or disable air flow when the machine is working or stopped.
Air control works in all operation modes (casting, punching and testing).
Water control can be used in the casting mode for controlling a pump or solenoid valve for cooling water flow.
Sending signals
---------------
Based on the caster's current operation mode, signals are modified or not:
1. testing mode ensures that signals 1...14, A...N, 0075, S, 0005, O15 are sent to the machine as they are received
2. punching mode ensures that a combined signal O+15 is present only when less than 2 signals are received
3. casting mode ensures that the O15 signal is omitted
Sending the signals can take place only when the interface has been previously started and claimed as busy;
otherwise, ``InterfaceNotStarted`` is raised in the casting mode, and the startup is done automatically
in the punching and testing modes.
The daemon behaves differently depending on the operation mode:
casting
_______
1. wait for a machine cycle sensor to turn ON,
2. activate the valves for specified signals,
3. wait until the cycle sensor goes OFF,
4. turn all the valves off,
5. check the pump state and justifying wedge positions, and update the current state,
6. return a reply to the request, allowing the client to cast the next combination.
However, a machine sometimes stops during casting (e.g. when the operator sees a lead squirt
and has to stop immediately to prevent damage). In case of emergency stop, the machine is stopped immediately
and the client software gets an error reply to the send request.
punching
________
This mode is fully automatic and driven by a configurable timer:
1. turn the valves on,
2. wait time_on for punches to go up,
3. turn the valves off,
4. wait time_off for punches to come back down,
5. check the pump state and justifying wedge positions, and update the current state,
6. return a success reply to the request.
testing
_______
The software just turns off the valves, then turns them on, sending the specified signal combination.
REST API documentation
======================
The API is typically accessed at ``http://[address]:[port]`` (typically ``23017``, as in MCP23017).
Several endpoints are available:
``/`` - status: ``GET``: reads and ``POST`` changes the status, which is used mostly for setting the temporary ``testing_mode`` flag.
``/config`` - configuration: `GET` reads and `POST` changes the configuration
``/machine`` - machine start/stop/state:
``GET`` reads the state, ``PUT`` turns the machine on, ``DELETE`` turns the machine off, and ``POST`` turns it on or off
depending on the JSON data in the request (``{state: true}`` for starting, ``{state: false}`` for stopping).
The reply can either be ``{success: true, active: [true/false]}`` if successful, or ``{success: false, error_code: [EC], error_name: [EN]}``
if exception was raised. Error codes and names:
1. ``0: The machine was abnormally stopped.`` in case of emergency stop or machine stalling,
2. ``3: This interface was started and is already in use. If this is not the case, restart the interface.`` if the interface has already
been claimed as busy,
``/motor``, ``/air``, ``/water``, ``/pump``, ``valves`` - motor, air, water, pump and solenoid valves checking/control. The verbs work as above.
``/emergency_stop``:
``GET`` gets the current state, ``PUT`` (or ``POST`` with ``{state: true}`` JSON data) activates the emergency stop,
``DELETE`` (or ``POST`` with ``{state: false}`` JSON data) clears the emergency stop state, allowing the machine to start.
When emergency stop is activated, the server replies with ``{success: false, error_code: 0, message: 'The machine was abnormally stopped.'}``
``/signals``:
``GET``: gets the last signals sent (unless the machine was stopped, which clears the signals),
``POST`` or ``PUT`` with ``{signals: [sig1, sig2...], timeout: x}`` (timeout is optional and overrides the default machine stalling timeout)
sends the specified signals, and ``DELETE`` turns off the valves. Emergency stop events are tracked and whenever the emergency stop was triggered,
the server will reply with an error message.
Possible error replies:
1. ``0: The machine was abnormally stopped.`` in case of emergency stop or machine stalling,
2. ``4: Trying to cast or punch with an interface that is not started.`` (only in casting mode, as punching/testing starts the interface automatically)
| /rpi2casterd-2.5.12.tar.gz/rpi2casterd-2.5.12/README.rst | 0.831622 | 0.865679 | README.rst | pypi |
```
%load_ext autoreload
%autoreload 2
from rpi2mqtt.config import *
import yaml
from collections import deque
class HestiaPi:
    """Thermostat state for a HestiaPi-based HVAC controller.

    Holds set-points, anti-short-cycle timing limits and a rolling
    temperature history; all settings are supplied as keyword arguments.
    """

    def __init__(self, **kwargs):
        # self._modes = HVAC.HEAT_PUMP_MODES
        # super(HestiaPi, self).__init__(kwargs.get('name'), None, kwargs.get('topic'), 'climate', 'HestiaPi')
        # HVAC mode; defaults to heating.
        self.mode = kwargs.get('mode', 'heat')
        # self.active = False
        # self.desired_mode = 'off'
        # Timestamp of when HVAC last became active (None while inactive).
        self.active_start_time = None
        # Target temperatures for cooling and heating.
        self.set_point_cool = kwargs.get('cool_setpoint', 76)
        self.set_point_heat = kwargs.get('heat_setpoint', 68)
        # how much wiggle room in temperature reading before starting/stopping HVAC.
        # setting this too low can trigger frequent HVAC cycles.
        self.set_point_tolerance = kwargs.get('set_point_tolerance', 0.5)
        # Minimum time HVAC should run (in minutes)
        self.min_run_time = kwargs.get('min_run_time', 15)
        # how soon can HVAC be activated again after stopping (in minutes)
        self.min_trigger_cooldown_time = kwargs.get('min_trigger_cooldown_time', 15)
        self.last_mode_change_time = None
        self.last_hvac_state_change_time = None
        # BME280 sensor handle; presumably populated elsewhere — None until then.
        self.bme280 = None
        # container to holder mode switches. Do not use directly.
        self._modes = {}
        # container to store temperature history
        self.temperature_history = deque(maxlen=kwargs.get('temperature_history_period', 6))
        # Minimum temperature rate of change over 4 measurements
        self.minimum_temp_rate_of_change = kwargs.get('minimum_temp_rate_of_change', -0.25)
        # super(HestiaPi, self).__init__(name, None, topic, 'climate', 'HestiaPi')
        # put thermostat into test mode. i.e. don't trigger HVAC commands
        self.dry_run = kwargs.get('dry_run')
        # save boost state
        self._boosting_heat = 'off'
        self._boosting_start_time = None
        self._boosting_enabled_switch = None
        # Whether auxiliary heat is allowed ('on'/'off').
        self.aux_enabled = kwargs.get('aux_enabled', 'on')
        # self.setup()
c = config.Config.get_instance('rpi2mqtt/config.yaml')
yaml.dump(c.toDict())
with open('rpi2mqtt/config.yaml', 'r') as f:
config = yaml.safe_load(f.read())
config
c = Conf(**config)
c.mqtt = MqttConfig(**c.mqtt)
c.sensors
c.sensors = [SensorConfig(**sensor) for sensor in c.sensors]
c.mqtt
HestiaPi(**c.sensors[0])
cfg = """log_level: info
mqtt:
ca_cert: /home/pi/ca-chain.crt
host: brains.lan
password: verylongpassword
port: 8883
retries: 3
username: thermostat_pi
polling_interval: 300
sensors:
- name: thermostat_temp
topic: homeassistant/sensor/thermostat_temp/state
type: bme280
- beacon_away_timeout: 10
name: lilly_bookbag
topic: homeassistant/binary_sensor/lilly_bookbag_presence/state
type: ibeacon
beacon_uuid: d83ccd9b-1f1e-4d4d-b825-c26ba15cb2c4
- cool_setpoint: 75
heat_setpoint: 68
name: hestia_pi
topic: homeassistant/climate/hestiapi
type: hestiapi
min_run_time: null
"""
config = yaml.load(cfg)
config
Config.load(
c = Config.load(config)
c
c = Conf(**config)
c.mqtt = MqttConfig(**c.mqtt)
c.sensors = {sensor['name']: SensorConfig(**sensor) for sensor in c.sensors}
c.to_dict()
therm = HestiaPi(**c.sensors[2])
therm.min_run_time
_keys = list(filter(lambda k: not(k.startswith('_')), therm.__dict__.keys()))
{k: v for k, v in therm.__dict__.items() if k in _keys}
from dataclasses import asdict
_cfg = asdict(c)
sensors = []
for sensor in _cfg['sensors']:
sensors.append({k:v for k,v in sensor.items() if v})
_cfg['sensors'] = sensors
_cfg
```
| /rpi2mqtt-0.5.29.tar.gz/rpi2mqtt-0.5.29/Untitled.ipynb | 0.521959 | 0.177668 | Untitled.ipynb | pypi |
import math
import torch
from functools import reduce
from sys import float_info
class __PrinterOptions(object):
    """Mutable container for the module-wide tensor printing settings.

    Mutated in place by ``set_printoptions`` and read by the formatting
    helpers below.
    """
    precision = 4
    threshold = 1000
    edgeitems = 3
    linewidth = 80


# The single shared settings instance.
PRINT_OPTS = __PrinterOptions()
# Printed in front of a tensor whose values share a common power-of-ten factor.
SCALE_FORMAT = '{:.5e} *\n'
# We could use **kwargs, but this will give better docs
def set_printoptions(
        precision=None,
        threshold=None,
        edgeitems=None,
        linewidth=None,
        profile=None,
):
    r"""Set options for printing. Items shamelessly taken from NumPy

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
    """
    # Fix: docstring previously claimed precision "default = 8", but the
    # module default (PRINT_OPTS.precision) is 4.
    # Preset bundles; explicit keyword arguments below override these.
    # Unknown profile names are silently ignored, as in the original chain.
    profiles = {
        "default": {"precision": 4, "threshold": 1000, "edgeitems": 3, "linewidth": 80},
        "short": {"precision": 2, "threshold": 1000, "edgeitems": 2, "linewidth": 80},
        "full": {"precision": 4, "threshold": float('inf'), "edgeitems": 3, "linewidth": 80},
    }
    if profile is not None and profile in profiles:
        for name, value in profiles[profile].items():
            setattr(PRINT_OPTS, name, value)
    if precision is not None:
        PRINT_OPTS.precision = precision
    if threshold is not None:
        PRINT_OPTS.threshold = threshold
    if edgeitems is not None:
        PRINT_OPTS.edgeitems = edgeitems
    if linewidth is not None:
        PRINT_OPTS.linewidth = linewidth
def _get_min_log_scale():
    """Return ceil(log10(x)) for the smallest positive double this platform
    can represent: the smallest denormal, or the smallest normal when
    denormals are flushed to zero (DAZ/FTZ)."""
    smallest = float_info.min * float_info.epsilon  # smallest denormal
    if smallest == 0:
        # Denormals flushed to zero on this platform; use smallest normal.
        smallest = float_info.min
    return math.ceil(math.log(smallest, 10))
def _get_format_fn(nonfinite_format, finite_format):
    """Build a formatter that applies ``nonfinite_format`` to inf/nan values
    and ``finite_format`` to everything else.

    Fix: the parameters were previously named the other way round
    (``format``/``nonfinite_format``) while being used as here — the first
    format was applied to non-finite values and the second to finite ones.
    The single call site passes ``(plain_fmt, plain_fmt + '.')`` so that
    finite integers render with a trailing '.' while inf/nan do not.
    The function is only ever called positionally, so the rename is safe.
    """
    def fmt(x):
        if math.isinf(x) or math.isnan(x):
            return nonfinite_format.format(x)
        return finite_format.format(x)
    return fmt
def _number_format(tensor, min_sz=-1):
    """Choose a per-element numeric format for printing ``tensor``.

    Returns ``(fmt_fn, scale, sz)``: ``fmt_fn`` formats a single (already
    divided-by-``scale``) Python float, ``scale`` is a power-of-ten common
    factor printed in front of the tensor, and ``sz`` is the width in
    characters of one formatted element.
    """
    floating_dtype = tensor.dtype.is_floating_point  # save this because we cast later
    _min_log_scale = _get_min_log_scale()
    min_sz = max(min_sz, 2)
    # Flattened double-precision copy of the absolute values; all decisions
    # below are made on magnitudes only.
    tensor = torch.DoubleTensor(tensor.size()).copy_(tensor).abs_().view(tensor.nelement())

    # Locate non-finite entries (NaN is the only value not equal to itself).
    pos_inf_mask = tensor.eq(float('inf'))
    neg_inf_mask = tensor.eq(float('-inf'))
    nan_mask = tensor.ne(tensor)
    invalid_value_mask = pos_inf_mask + neg_inf_mask + nan_mask
    if invalid_value_mask.all():
        example_value = 0
    else:
        example_value = tensor[invalid_value_mask.eq(0)][0]
    # Overwrite non-finite entries with a finite sample so min/max/log10
    # below stay well-defined.
    tensor[invalid_value_mask] = example_value
    if invalid_value_mask.any():
        # 'inf'/'nan' need at least 3 characters.
        min_sz = max(min_sz, 3)

    # int_mode: every element is integral, so no fractional digits needed.
    int_mode = True
    # TODO: use fmod?
    for value in tensor.tolist():
        if value != math.ceil(value):
            int_mode = False
            break

    # exp_min/exp_max: decimal digit counts of the smallest/largest
    # magnitudes (1 is used when the extreme is exactly zero).
    exp_min = tensor.min()
    if exp_min != 0:
        exp_min = math.floor(math.log10(exp_min)) + 1
    else:
        exp_min = 1
    exp_max = tensor.max()
    if exp_max != 0:
        exp_max = math.floor(math.log10(exp_max)) + 1
    else:
        exp_max = 1

    # Integral values in a floating tensor get a trailing '.' when printed.
    include_decimal_int_mode = floating_dtype and int_mode
    scale = 1
    exp_max = int(exp_max)
    prec = PRINT_OPTS.precision
    if int_mode:
        if exp_max > prec + 1:
            # Too many digits for fixed notation: fall back to scientific.
            format = '{{:11.{}e}}'.format(prec)
            fmt_fn = format.format
            sz = max(min_sz, 7 + prec)
        else:
            sz = max(min_sz, exp_max + 1 + include_decimal_int_mode)
            format = '{:' + str(sz) + '.0f}'
            fmt_fn = format.format
            if include_decimal_int_mode:
                format = '{:' + str(sz - 1) + '.0f}'
                nonfinite_format = format + '.'
                # Plain format is applied to inf/nan, the trailing-dot
                # format to finite integers.
                fmt_fn = _get_format_fn(format, nonfinite_format)
    else:
        if exp_max - exp_min > prec:
            # Dynamic range too wide for fixed point: scientific notation.
            sz = 7 + prec
            if abs(exp_max) > 99 or abs(exp_min) > 99:
                # Three-digit exponents widen the field by one character.
                sz = sz + 1
            sz = max(min_sz, sz)
            format = '{{:{}.{}e}}'.format(sz, prec)
            fmt_fn = format.format
        else:
            if exp_max > prec + 1 or exp_max < 0:
                # Pull a common power of ten out in front (clamped so the
                # divisor stays representable as a double).
                sz = max(min_sz, 7)
                scale = math.pow(10, max(exp_max - 1, _min_log_scale))
            else:
                if exp_max == 0:
                    sz = 7
                else:
                    sz = exp_max + 6
                sz = max(min_sz, sz)
            format = '{{:{}.{}f}}'.format(sz, prec)
            fmt_fn = format.format
    return fmt_fn, scale, sz
def _scalar_str(self, fmt, scale):
    """Format a zero-dimensional tensor as a bare number (no brackets)."""
    # Positive values are formatted with leading padding for column
    # alignment; that is ugly on a lone scalar, so strip it.
    return fmt(self.item() / scale).lstrip()
def _vector_str(self, indent, fmt, scale, sz, summarize):
    """Render a 1-D tensor as a bracketed, comma-separated list, wrapped so
    each line fits within PRINT_OPTS.linewidth."""
    # Each element occupies sz characters plus ', ' separator padding.
    per_line = int(math.floor((PRINT_OPTS.linewidth - indent) / (sz + 3)))
    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        head = [fmt(v / scale) for v in self[:PRINT_OPTS.edgeitems].tolist()]
        tail = [fmt(v / scale) for v in self[-PRINT_OPTS.edgeitems:].tolist()]
        data = head + [' ...'] + tail
    else:
        data = [fmt(v / scale) for v in self.tolist()]
    lines = [', '.join(data[i:i + per_line]) for i in range(0, len(data), per_line)]
    separator = ',' + '\n' + ' ' * (indent + 1)
    return '[' + separator.join(lines) + ']'
def _tensor_str(self, indent, fmt, scale, sz, summarize):
    """Recursively render a tensor of any rank; scalars and vectors are
    delegated to the dedicated helpers, higher ranks nest brackets."""
    dim = self.dim()
    if dim == 0:
        return _scalar_str(self, fmt, scale)
    if dim == 1:
        return _vector_str(self, indent, fmt, scale, sz, summarize)

    def render(i):
        # One sub-slice, indented one extra column for the opening bracket.
        return _tensor_str(self[i], indent + 1, fmt, scale, sz, summarize)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        slices = ([render(i) for i in range(0, PRINT_OPTS.edgeitems)]
                  + ['...']
                  + [render(i) for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
    else:
        slices = [render(i) for i in range(0, self.size(0))]
    # Higher-dimensional slices are separated by extra blank lines.
    separator = ',' + '\n' * (dim - 1) + ' ' * (indent + 1)
    return '[' + separator.join(slices) + ']'
def get_summarized_data(self):
    """Return a reduced copy of ``self`` keeping only the first and last
    PRINT_OPTS.edgeitems entries along dimensions that exceed twice that
    count; tensors small enough are returned unchanged."""
    dim = self.dim()
    if dim == 0:
        return self
    limit = 2 * PRINT_OPTS.edgeitems
    if dim == 1:
        if self.size(0) > limit:
            return torch.cat((self[:PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems:]))
        return self
    if self.size(0) > limit:
        front = [get_summarized_data(self[i]).view(-1)
                 for i in range(0, PRINT_OPTS.edgeitems)]
        back = [get_summarized_data(self[i]).view(-1)
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
        return torch.cat(front + back)
    return self
def _str(self):
    """Build the ``repr``-style string for a tensor: 'tensor(...)' with
    optional device/dtype suffixes and an optional scale-factor prefix."""
    if self.is_sparse:
        size_str = str(tuple(self.shape)).replace(' ', '')
        return '{} of size {} with indices:\n{}\nand values:\n{}'.format(
            self.type(), size_str, self._indices(), self._values())

    prefix = 'tensor('
    indent = len(prefix)
    summarize = self.numel() > PRINT_OPTS.threshold
    suffix = ')'

    # Show the device only when it differs from the default tensor type's.
    if torch._C._is_default_type_cuda():
        show_device = (self.device.type == 'cpu'
                       or torch.cuda.current_device() != self.device.index)
    else:
        show_device = self.device.type == 'cuda'
    if show_device:
        suffix = ", device='" + str(self.device) + "'" + suffix

    if self.numel() == 0:
        # In an empty tensor, there are no elements to infer if the dtype
        # should be int64, so it must be shown explicitly.
        if self.dtype != torch.get_default_dtype():
            suffix = ', dtype=' + str(self.dtype) + suffix
        tensor_str = '[]'
    else:
        if self.dtype not in (torch.get_default_dtype(), torch.int64):
            suffix = ', dtype=' + str(self.dtype) + suffix
        data = get_summarized_data(self) if summarize else self
        fmt, scale, sz = _number_format(data)
        if scale != 1:
            prefix = prefix + SCALE_FORMAT.format(scale) + ' ' * indent
        tensor_str = _tensor_str(self, indent, fmt, scale, sz, summarize)

    return prefix + tensor_str + suffix
import torch
import contextlib
import warnings
from torch._C import default_generator
def set_rng_state(new_state):
    r"""Replace the CPU random number generator state.

    Args:
        new_state (torch.ByteTensor): state blob previously obtained from
            :func:`get_rng_state`.
    """
    default_generator.set_state(new_state)
def get_rng_state():
    r"""Return a snapshot of the CPU RNG state as a `torch.ByteTensor`."""
    return default_generator.get_state()
def manual_seed(seed):
    r"""Sets the seed for generating random numbers. Returns a
    `torch._C.Generator` object.
    Args:
        seed (int): The desired seed.
    """
    seed = int(seed)
    import torch.cuda
    # Also seed every CUDA device, except when this process is flagged as a
    # bad fork (torch.cuda._in_bad_fork) — then only the CPU generator is
    # reseeded.
    if not torch.cuda._in_bad_fork:
        torch.cuda.manual_seed_all(seed)
    return default_generator.manual_seed(seed)
def initial_seed():
    r"""Return, as a Python integer, the seed that the default CPU generator
    was most recently seeded with."""
    return default_generator.initial_seed()
# Module-level flag so the "many CUDA devices" warning fires at most once.
_fork_rng_warned_already = False


@contextlib.contextmanager
def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices"):
    """
    Forks the RNG, so that when you return, the RNG is reset
    to the state that it was previously in.

    Arguments:
        devices (iterable of CUDA IDs): CUDA devices for which to fork
            the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
            on all devices, but will emit a warning if your machine has a lot
            of devices, since this function will run very slowly in that case.
            If you explicitly specify devices, this warning will be suppressed.
        enabled (bool): if ``False``, the RNG is not forked. This is a convenience
            argument for easily disabling the context manager without having
            to reindent your Python code.

    (Fix: dataset-extraction residue fused onto the final line was removed;
    docstring typo "supressed" corrected.)
    """
    import torch.cuda
    global _fork_rng_warned_already
    # Internal arguments:
    #   _caller: the function which called fork_rng, which the user used
    #   _devices_kw: the devices keyword of _caller
    if not enabled:
        yield
        return
    if devices is None:
        num_devices = torch.cuda.device_count()
        if num_devices > 1 and not _fork_rng_warned_already:
            warnings.warn(
                ("CUDA reports that you have {num_devices} available devices, and you "
                 "have used {caller} without explicitly specifying which devices are being used. "
                 "For safety, we initialize *every* CUDA device by default, which "
                 "can be quite slow if you have a lot of GPUs. If you know that you are only "
                 "making use of a few CUDA devices, set the environment variable CUDA_VISIBLE_DEVICES "
                 "or the '{devices_kw}' keyword argument of {caller} with the set of devices "
                 "you are actually using. For example, if you are using CPU only, "
                 "set CUDA_VISIBLE_DEVICES= or devices=[]; if you are using "
                 "GPU 0 only, set CUDA_VISIBLE_DEVICES=0 or devices=[0]. To initialize "
                 "all devices and suppress this warning, set the '{devices_kw}' keyword argument "
                 "to `range(torch.cuda.device_count())`."
                 ).format(num_devices=num_devices, caller=_caller, devices_kw=_devices_kw))
            _fork_rng_warned_already = True
        devices = list(range(num_devices))
    else:
        # Protect against user passing us a generator; we need to traverse this
        # multiple times but a generator will be exhausted upon first traversal
        devices = list(devices)
    # Snapshot CPU state plus one state per requested CUDA device.
    cpu_rng_state = torch.get_rng_state()
    gpu_rng_states = []
    for device in devices:
        with torch.cuda.device(device):
            gpu_rng_states.append(torch.cuda.get_rng_state())
    try:
        yield
    finally:
        # Restore in the same order the snapshots were taken.
        torch.set_rng_state(cpu_rng_state)
        for device, gpu_rng_state in zip(devices, gpu_rng_states):
            with torch.cuda.device(device):
                torch.cuda.set_rng_state(gpu_rng_state)
import torch
from ._utils import _type, _cuda
class _StorageBase(object):
    # Mixin providing the shared Python-level API of every concrete Storage
    # class (FloatStorage, ByteStorage, ...).  The concrete classes supply
    # the C-backed primitives used below: __getitem__, size(), copy_(),
    # element_size(), the type(self)(n) constructor and the _share_* methods.
    is_cuda = False
    is_sparse = False
    def __str__(self):
        # One element per line, followed by a "[torch.XStorage of size N]" footer.
        content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
        return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
    def __repr__(self):
        return str(self)
    def __iter__(self):
        return iter(map(lambda i: self[i], range(self.size())))
    def __copy__(self):
        return self.clone()
    def __deepcopy__(self, memo):
        # Keyed on the underlying C data pointer so storages that alias each
        # other keep aliasing after a deepcopy of a containing object.
        memo = memo.setdefault('torch', {})
        if self._cdata in memo:
            return memo[self._cdata]
        new_storage = self.clone()
        memo[self._cdata] = new_storage
        return new_storage
    def __reduce__(self):
        # Pickle through a plain Python list of the elements.
        return type(self), (self.tolist(),)
    def __sizeof__(self):
        return super(_StorageBase, self).__sizeof__() + self.element_size() * self.size()
    def clone(self):
        """Returns a copy of this storage"""
        return type(self)(self.size()).copy_(self)
    def tolist(self):
        """Returns a list containing the elements of this storage"""
        return [v for v in self]
    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""
        return self.type(getattr(torch, self.__class__.__name__))
    def double(self):
        """Casts this storage to double type"""
        return self.type(type(self).__module__ + '.DoubleStorage')
    def float(self):
        """Casts this storage to float type"""
        return self.type(type(self).__module__ + '.FloatStorage')
    def half(self):
        """Casts this storage to half type"""
        return self.type(type(self).__module__ + '.HalfStorage')
    def long(self):
        """Casts this storage to long type"""
        return self.type(type(self).__module__ + '.LongStorage')
    def int(self):
        """Casts this storage to int type"""
        return self.type(type(self).__module__ + '.IntStorage')
    def short(self):
        """Casts this storage to short type"""
        return self.type(type(self).__module__ + '.ShortStorage')
    def char(self):
        """Casts this storage to char type"""
        return self.type(type(self).__module__ + '.CharStorage')
    def byte(self):
        """Casts this storage to byte type"""
        return self.type(type(self).__module__ + '.ByteStorage')
    def pin_memory(self):
        """Copies the storage to pinned memory, if it's not already pinned."""
        if self.is_cuda:
            raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
                            .format(self.type()))
        import torch.cuda
        # Allocating through the CUDA host allocator yields page-locked memory.
        allocator = torch.cuda._host_allocator()
        return type(self)(self.size(), allocator=allocator).copy_(self)
    def share_memory_(self):
        """Moves the storage to shared memory.
        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.
        Returns: self
        """
        from torch.multiprocessing import get_sharing_strategy
        if self.is_cuda:
            pass # CUDA doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_()
        else:
            self._share_fd_()
        return self
    @classmethod
    def _new_shared(cls, size):
        """Creates a new storage in shared memory with the same data type"""
        from torch.multiprocessing import get_sharing_strategy
        if cls.is_cuda:
            return cls(size)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename(size)
        else:
            return cls._new_using_fd(size)
# Attach the generic conversion helpers from torch._utils as methods of all
# storage classes.  (Fix: dataset-extraction residue fused onto the last
# line was removed.)
_StorageBase.type = _type
_StorageBase.cuda = _cuda
import sys
import torch
import torch._C as _C
from collections import OrderedDict
import torch.utils.hooks as hooks
import warnings
import weakref
from torch._six import imap
from torch._C import _add_docstr
class Tensor(torch._C._TensorBase):
    def __deepcopy__(self, memo):
        # Deepcopy support for leaf tensors only: clone the storage (via the
        # storage's own memo-aware __deepcopy__, so aliased tensors stay
        # aliased) and replicate the view metadata and requires_grad flag.
        if not self.is_leaf:
            raise RuntimeError("Only Tensors created explicitly by the user "
                               "(graph leaves) support the deepcopy protocol at the moment")
        if id(self) in memo:
            return memo[id(self)]
        with torch.no_grad():
            if self.is_sparse:
                new_tensor = self.clone()
            else:
                new_storage = self.storage().__deepcopy__(memo)
                new_tensor = self.new()
                new_tensor.set_(new_storage, self.storage_offset(), self.size(), self.stride())
            # Register in the memo before touching flags, guarding cycles.
            memo[id(self)] = new_tensor
            new_tensor.requires_grad = self.requires_grad
            return new_tensor
    def __reduce_ex__(self, proto):
        # Pickle as (rebuild_fn, args): the storage plus the view metadata
        # and autograd state needed to reconstruct this tensor.
        args = (self.storage(),
                self.storage_offset(),
                tuple(self.size()),
                self.stride(),
                self.requires_grad,
                self._backward_hooks)
        return (torch._utils._rebuild_tensor_v2, args)
    def __setstate__(self, state):
        # Restore from legacy pickle layouts (pre-_rebuild_tensor_v2).
        if not self.is_leaf:
            raise RuntimeError('__setstate__ can be only called on leaf Tensors')
        if len(state) == 4:
            # legacy serialization of Tensor
            self.set_(*state)
            return
        elif len(state) == 5:
            # legacy serialization of Variable
            self.data = state[0]
            # Keep only (requires_grad, _, _backward_hooks) for the fallthrough.
            state = (state[3], state[4], state[2])
        self.requires_grad, _, self._backward_hooks = state
def __repr__(self):
# All strings are unicode in Python 3, while we have to encode unicode
# strings in Python2. If we can't, let python decide the best
# characters to replace unicode characters with.
if sys.version_info > (3,):
return torch._tensor_str._str(self)
else:
if hasattr(sys.stdout, 'encoding'):
return torch._tensor_str._str(self).encode(
sys.stdout.encoding or 'UTF-8', 'replace')
else:
return torch._tensor_str._str(self).encode('UTF-8', 'replace')
    def backward(self, gradient=None, retain_graph=None, create_graph=False):
        r"""Computes the gradient of current tensor w.r.t. graph leaves.
        The graph is differentiated using the chain rule. If the tensor is
        non-scalar (i.e. its data has more than one element) and requires
        gradient, the function additionally requires specifying ``gradient``.
        It should be a tensor of matching type and location, that contains
        the gradient of the differentiated function w.r.t. ``self``.
        This function accumulates gradients in the leaves - you might need to
        zero them before calling it.
        Arguments:
            gradient (Tensor or None): Gradient w.r.t. the
                tensor. If it is a tensor, it will be automatically converted
                to a Tensor that does not require grad unless ``create_graph`` is True.
                None values can be specified for scalar Tensors or ones that
                don't require grad. If a None value would be acceptable then
                this argument is optional.
            retain_graph (bool, optional): If ``False``, the graph used to compute
                the grads will be freed. Note that in nearly all cases setting
                this option to True is not needed and often can be worked around
                in a much more efficient way. Defaults to the value of
                ``create_graph``.
            create_graph (bool, optional): If ``True``, graph of the derivative will
                be constructed, allowing to compute higher order derivative
                products. Defaults to ``False``.
        """
        # Thin wrapper: the graph traversal lives in torch.autograd.backward.
        torch.autograd.backward(self, gradient, retain_graph, create_graph)
    def register_hook(self, hook):
        r"""Registers a backward hook.
        The hook will be called every time a gradient with respect to the
        Tensor is computed. The hook should have the following signature::
            hook(grad) -> Tensor or None
        The hook should not modify its argument, but it can optionally return
        a new gradient which will be used in place of :attr:`grad`.
        This function returns a handle with a method ``handle.remove()``
        that removes the hook from the module.
        Example:
            >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
            >>> h = v.register_hook(lambda grad: grad * 2)  # double the gradient
            >>> v.backward(torch.tensor([1., 2., 3.]))
            >>> v.grad
             2
             4
             6
            [torch.FloatTensor of size (3,)]
            >>> h.remove()  # removes the hook
        """
        if not self.requires_grad:
            raise RuntimeError("cannot register a hook on a tensor that "
                               "doesn't require gradient")
        if self._backward_hooks is None:
            # Lazily create the hook dict; for non-leaf tensors also register
            # it with the producing grad_fn so hooks fire during backward.
            self._backward_hooks = OrderedDict()
            if self.grad_fn is not None:
                self.grad_fn._register_hook_dict(self)
        handle = hooks.RemovableHandle(self._backward_hooks)
        self._backward_hooks[handle.id] = hook
        return handle
    def reinforce(self, reward):
        # Removal stub: this method always raises, pointing users of the old
        # stochastic-node API at torch.distributions.
        def trim(str):
            return '\n'.join([line.strip() for line in str.split('\n')])
        raise RuntimeError(trim(r"""reinforce() was removed.
            Use torch.distributions instead.
            See http://pytorch.org/docs/master/distributions.html
            Instead of:
            probs = policy_network(state)
            action = probs.multinomial()
            next_state, reward = env.step(action)
            action.reinforce(reward)
            action.backward()
            Use:
            probs = policy_network(state)
            # NOTE: categorical is equivalent to what used to be called multinomial
            m = torch.distributions.Categorical(probs)
            action = m.sample()
            next_state, reward = env.step(action)
            loss = -m.log_prob(action) * reward
            loss.backward()
        """))
detach = _add_docstr(_C._TensorBase.detach, r"""
Returns a new Tensor, detached from the current graph.
The result will never require gradient.
.. note::
Returned Tensor uses the same data tensor as the original one.
In-place modifications on either of them will be seen, and may trigger
errors in correctness checks.
""")
detach_ = _add_docstr(_C._TensorBase.detach_, r"""
Detaches the Tensor from the graph that created it, making it a leaf.
Views cannot be detached in-place.
""")
    def retain_grad(self):
        r"""Enables .grad attribute for non-leaf Tensors."""
        if self.grad_fn is None: # no-op for leaves
            return
        if not self.requires_grad:
            raise RuntimeError("can't retain_grad on Tensor that has requires_grad=False")
        if hasattr(self, 'retains_grad'):
            return
        # Hold only a weak reference so the hook itself does not keep the
        # tensor (and transitively the autograd graph) alive.
        weak_self = weakref.ref(self)
        def retain_grad_hook(grad):
            var = weak_self()
            if var is None:
                return
            # Accumulate into ._grad exactly as leaves do.
            if var._grad is None:
                var._grad = grad.clone()
            else:
                var._grad = var._grad + grad
        self.register_hook(retain_grad_hook)
        self.retains_grad = True
    def is_pinned(self):
        r"""Returns true if this tensor resides in pinned memory"""
        # A tensor without a storage cannot be pinned.
        storage = self.storage()
        return storage.is_pinned() if storage else False
    def is_shared(self):
        r"""Checks if tensor is in shared memory.
        This is always ``True`` for CUDA tensors.
        """
        return self.storage().is_shared()
    def share_memory_(self):
        r"""Moves the underlying storage to shared memory.
        This is a no-op if the underlying storage is already in shared memory
        and for CUDA tensors. Tensors in shared memory cannot be resized.
        """
        self.storage().share_memory_()
        return self
    def view_as(self, tensor):
        r"""view_as(other) -> Tensor
        View this tensor as the same size as :attr:`other`.
        ``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
        Args:
            other (:class:`torch.Tensor`): The result tensor has the same size
                as :attr:`other.size()`.
        """
        return self.view(tensor.size())
    def argmax(self, dim=None, keepdim=False):
        r"""See :func:`torch.argmax`"""
        return torch.argmax(self, dim, keepdim)
    def argmin(self, dim=None, keepdim=False):
        r"""See :func:`torch.argmin`"""
        return torch.argmin(self, dim, keepdim)
def btrifact(self, info=None, pivot=True):
r"""See :func:`torch.btrifact`
"""
if info is not None:
warnings.warn("info option in btrifact is deprecated and will be removed in v0.4, "
"consider using btrifact_with_info instead", stacklevel=2)
factorization, pivots, _info = super(Tensor, self).btrifact_with_info(pivot=pivot)
if info.type() != _info.type():
raise ValueError('btrifact expects info to be an IntTenor')
info.resize_as_(_info).copy_(_info)
return factorization, pivots
else:
return super(Tensor, self).btrifact(pivot=pivot)
    def resize(self, *sizes):
        # Deprecated out-of-place resize kept for backward compatibility.
        warnings.warn("non-inplace resize is deprecated")
        from torch.autograd._functions import Resize
        return Resize.apply(self, sizes)
    def resize_as(self, tensor):
        # Deprecated out-of-place resize_as kept for backward compatibility.
        warnings.warn("non-inplace resize_as is deprecated")
        from torch.autograd._functions import Resize
        return Resize.apply(self, tensor.size())
def split(self, split_size, dim=0):
r"""See :func:`torch.split`
"""
if isinstance(split_size, int):
return super(Tensor, self).split(split_size, dim)
else:
return super(Tensor, self).split_with_sizes(split_size, dim)
    # Out-of-place variants of the in-place index/scatter operations: each
    # clones first, then applies the corresponding *_ method to the copy.
    def index_add(self, dim, index, tensor):
        return self.clone().index_add_(dim, index, tensor)
    def index_copy(self, dim, index, tensor):
        return self.clone().index_copy_(dim, index, tensor)
    def index_fill(self, dim, index, value):
        return self.clone().index_fill_(dim, index, value)
    def scatter(self, dim, index, source):
        return self.clone().scatter_(dim, index, source)
    def scatter_add(self, dim, index, source):
        return self.clone().scatter_add_(dim, index, source)
    # masked_copy/masked_copy_ are deprecated aliases for masked_scatter(_).
    def masked_copy(self, mask, tensor):
        warnings.warn("masked_copy is deprecated and renamed to masked_scatter, and will be removed in v0.3")
        return self.masked_scatter(mask, tensor)
    def masked_copy_(self, mask, tensor):
        warnings.warn("masked_copy_ is deprecated and renamed to masked_scatter_, and will be removed in v0.3")
        return self.masked_scatter_(mask, tensor)
    # Out-of-place variants built on clone + in-place op.
    def masked_scatter(self, mask, tensor):
        return self.clone().masked_scatter_(mask, tensor)
    def masked_fill(self, mask, value):
        return self.clone().masked_fill_(mask, value)
def unique(self, sorted=False, return_inverse=False):
r"""Returns the unique scalar elements of the tensor as a 1-D tensor.
See :func:`torch.unique`
"""
output, inverse_indices = self._unique(
sorted=sorted, return_inverse=return_inverse)
if return_inverse:
return output, inverse_indices
else:
return output
    def __rsub__(self, other):
        # other - self, expressed with tensor negation.
        return -self + other
    def __rdiv__(self, other):
        # other / self; integral tensors go through double to avoid
        # integer reciprocal truncation, then cast back.
        if self.dtype.is_floating_point:
            return self.reciprocal() * other
        else:
            return (self.double().reciprocal() * other).type_as(self)
    # Python 3 true-division aliases of the Python 2 div operators.
    __rtruediv__ = __rdiv__
    __itruediv__ = _C._TensorBase.__idiv__
    __pow__ = _C._TensorBase.pow
    def __format__(self, format_spec):
        # 0-d tensors format like their contained Python scalar; everything
        # else falls back to the default object formatting.
        if self.dim() == 0:
            return self.item().__format__(format_spec)
        return object.__format__(self, format_spec)
    def __ipow__(self, other):
        raise NotImplementedError("in-place pow not implemented")
    def __rpow__(self, other):
        # other ** self, with the scalar lifted into a 1-element tensor.
        return self.new([other]) ** self
def __floordiv__(self, other):
result = self / other
if result.dtype.is_floating_point:
result = result.trunc()
return result
def __rfloordiv__(self, other):
result = other / self
if result.dtype.is_floating_point:
result = result.trunc()
return result
__neg__ = _C._TensorBase.neg
__eq__ = _C._TensorBase.eq
__ne__ = _C._TensorBase.ne
__lt__ = _C._TensorBase.lt
__le__ = _C._TensorBase.le
__gt__ = _C._TensorBase.gt
__ge__ = _C._TensorBase.ge
__abs__ = _C._TensorBase.abs
    def __len__(self):
        # Length is the size of the leading dimension; 0-d tensors have none.
        if self.dim() == 0:
            raise TypeError("len() of a 0-d tensor")
        return self.shape[0]
    def __iter__(self):
        # NB: we use 'imap' and not 'map' here, so that in Python 2 we get a
        # generator and don't eagerly perform all the indexes. This could
        # save us work, and also helps keep trace ordering deterministic
        # (e.g., if you zip(*hiddens), the eager map will force all the
        # indexes of hiddens[0] before hiddens[1], while the generator
        # map will interleave them.)
        if self.dim() == 0:
            raise TypeError('iteration over a 0-d tensor')
        return iter(imap(lambda i: self[i], range(self.size(0))))
    def __hash__(self):
        # Hash by identity: __eq__ returns a tensor, so value hashing is not
        # meaningful here.
        return id(self)
    def __dir__(self):
        tensor_methods = dir(self.__class__)
        tensor_methods.remove('volatile')  # deprecated
        attrs = list(self.__dict__.keys())
        keys = tensor_methods + attrs
        return sorted(keys)
# Numpy array interface, to support `numpy.asarray(tensor) -> ndarray`
def __array__(self, dtype=None):
if dtype is None:
return self.cpu().numpy()
else:
return self.cpu().numpy().astype(dtype, copy=False)
    # Wrap Numpy array again in a suitable tensor when done, to support e.g.
    # `numpy.sin(tensor) -> tensor` or `numpy.greater(tensor, 0) -> ByteTensor`
    def __array_wrap__(self, array):
        if array.dtype == bool:
            # Workaround, torch has no built-in bool tensor
            array = array.astype('uint8')
        return torch.from_numpy(array)
__module__ = 'torch' | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/tensor.py | 0.77569 | 0.337204 | tensor.py | pypi |
import torch
import importlib
import warnings
from collections import defaultdict
def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.
    If this is already of the correct type, no copy is performed and the
    original object is returned.
    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking('type', non_blocking, kwargs)
    if dtype is None:
        # Query form: return the fully-qualified type name.
        return self.__module__ + '.' + self.__class__.__name__
    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self
    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        # Convert indices and values separately, using dense types from the
        # corresponding non-sparse module namespace.
        new_module_name = dtype.__module__.replace('.sparse', '')
        new_values_type_name = new_module_name + '.' + dtype.__name__
        new_values = self._values().type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + '.LongTensor'
        new_indices = self._indices().type(new_indices_type_name, non_blocking)
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)
def _cuda(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in CUDA memory.
    If this object is already in CUDA memory and on the correct device, then
    no copy is performed and the original object is returned.
    Args:
        device (int): The destination GPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking('cuda', non_blocking, kwargs)
    if self.is_cuda:
        # Already on a GPU: only copy when a different device was requested.
        if device is None:
            device = torch.cuda.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            device = -1
    with torch.cuda.device(device):
        if self.is_sparse:
            # Move indices and values separately and rebuild the sparse type.
            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
            indices = self._indices().cuda(device, non_blocking)
            values = self._values().cuda(device, non_blocking)
            return new_type(indices, values, self.size())
        else:
            new_type = getattr(torch.cuda, self.__class__.__name__)
            return new_type(self.size()).copy_(self, non_blocking)
def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
if not kwargs:
return non_blocking
if len(kwargs) != 1 or 'async' not in kwargs:
message = "{}() got an unexpected keyword argument '{}'"
argument = list(kwargs.keys()).pop()
raise TypeError(message.format(function_name, argument))
warnings.warn("'async' is deprecated; use 'non_blocking'")
return kwargs['async']
def _rebuild_tensor(storage, storage_offset, size, stride):
    # Reconstruct a tensor of the matching class from its storage and view
    # metadata (legacy unpickling path; class is derived from the storage
    # class name, e.g. FloatStorage -> FloatTensor).
    class_name = storage.__class__.__name__.replace('Storage', 'Tensor')
    module = importlib.import_module(storage.__module__)
    tensor_class = getattr(module, class_name)
    return tensor_class().set_(storage, storage_offset, size, stride)
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
    # v2 unpickling path: also restores the autograd flag and hook dict.
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    tensor._backward_hooks = backward_hooks
    return tensor
def _import_dotted_name(name):
components = name.split('.')
obj = __import__(components[0])
for component in components[1:]:
obj = getattr(obj, component)
return obj
# Taken from python 3.5 docs
def _accumulate(iterable, fn=lambda x, y: x + y):
'Return running totals'
# _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = fn(total, element)
yield total
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _flatten_sparse_tensors(tensors):
    """Flatten sparse tensors of one sparse type into two 1D buffers.

    Arguments:
        tensors (Iterable[Tensor]): sparse tensors to flatten.

    Returns:
        ``(flat_indices, flat_values)`` — contiguous 1D buffers holding all
        inputs' indices and all inputs' values, respectively.
    """
    indices = _flatten_dense_tensors([t._indices() for t in tensors])
    values = _flatten_dense_tensors([t._values() for t in tensors])
    return indices, values
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.
    Arguments:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
            tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
            unflatten flat.
    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    # Split the two buffers against the templates' indices/values shapes.
    indices = _unflatten_dense_tensors(flat_indices, [t._indices() for t in tensors])
    values = _unflatten_dense_tensors(flat_values, [t._values() for t in tensors])
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        # Legacy sparse constructor: template.new(indices, values, size).
        outputs.append(t.new(i, v, t.size()))
    return tuple(outputs)
def _reorder_tensors_as(tensors, ordered_tensors):
"""Assume that tensors are of same order as ordered_tensors within their
types, e.g., from _take_tensors. Reorder them to be of same order as
ordered_tensors.
Arguments:
tensors (Iterable[Tensor]): tensors to be reordered. They should be of
the same order as ordered_tensors within their own types.
ordered_tensors (Iterable[Tensor]): tensors whose order will be the
reference.
Returns:
Ordered tuple of tensors with contents from tensors and order of
ordered_tensors.
"""
type_dict = defaultdict(list)
for tensor in tensors:
type_dict[tensor.type()].append(tensor)
type_dict = {t: iter(coll) for t, coll in type_dict.items()}
return tuple(next(type_dict[tensor.type()]) for tensor in ordered_tensors)
def _take_tensors(tensors, size_limit):
"""Group tensors into chunks. This generator yields a chunk at each time,
each containing tensors of same type up to certain byte limit in total size.
Args:
tensors (Sequence): A sequence of tensors to be separated into chunks.
size_limit (int): The limit of each chunk in bytes.
Yields:
Blocks of tensors of same type and within size_limit. The yielded
tensors are only ordered as the original sequence within its types.
"""
buf_dict = defaultdict(lambda: [[], 0])
for tensor in tensors:
t = tensor.type()
if tensor.is_sparse:
indices = tensor._indices()
values = tensor._values()
size = indices.numel() * indices.element_size() + values.numel() * values.element_size()
else:
size = tensor.numel() * tensor.element_size()
buf_and_size = buf_dict[t]
if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
yield buf_and_size[0]
buf_and_size = buf_dict[t] = [[], 0]
buf_and_size[0].append(tensor)
buf_and_size[1] += size
for buf, _ in buf_dict.values():
if len(buf) > 0:
yield buf | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/_utils.py | 0.896206 | 0.358493 | _utils.py | pypi |
import warnings
import math
from operator import mul
from functools import reduce
import torch
from torch._C import _infer_size, _add_docstr
from . import _functions
from .modules import utils
from ._functions.padding import ConstantPadNd
from ._functions import vision
from ._functions.thnn.fold import Col2Im, Im2Col
from .modules.utils import _single, _pair, _triple
from . import grad
conv1d = _add_docstr(torch.conv1d, r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 1D convolution over an input signal composed of several input
planes.
See :class:`~torch.nn.Conv1d` for details and output shape.
Args:
input: input tensor of shape :math:`minibatch \times in\_channels \times iW`
weight: filters of shape :math:`out\_channels \times \frac{in\_channels}{groups} \times kW`
bias: optional bias of shape (:math:`out\_channels`). Default: ``None``
stride: the stride of the convolving kernel. Can be a single number or
a one-element tuple `(sW,)`. Default: 1
padding: implicit zero paddings on both sides of the input. Can be a
single number or a one-element tuple `(padW,)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a one-element tuple `(dW,)`. Default: 1
groups: split input into groups, :math:`in\_channels` should be divisible by
the number of groups. Default: 1
Examples::
>>> filters = torch.randn(33, 16, 3)
>>> inputs = torch.randn(20, 16, 50)
>>> F.conv1d(inputs, filters)
""")
conv2d = _add_docstr(torch.conv2d, r"""
conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 2D convolution over an input image composed of several input
planes.
See :class:`~torch.nn.Conv2d` for details and output shape.
Args:
input: input tensor of shape (:math:`minibatch \times in\_channels \times iH \times iW`)
weight: filters of shape (:math:`out\_channels \times \frac{in\_channels}{groups} \times kH \times kW`)
bias: optional bias tensor of shape (:math:`out\_channels`). Default: ``None``
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sH, sW)`. Default: 1
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dH, dW)`. Default: 1
groups: split input into groups, :math:`in\_channels` should be divisible by the
number of groups. Default: 1
Examples::
>>> # With square kernels and equal stride
>>> filters = torch.randn(8,4,3,3)
>>> inputs = torch.randn(1,4,5,5)
>>> F.conv2d(inputs, filters, padding=1)
""")
conv3d = _add_docstr(torch.conv3d, r"""
conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 3D convolution over an input image composed of several input
planes.
See :class:`~torch.nn.Conv3d` for details and output shape.
Args:
input: input tensor of shape (:math:`minibatch \times in\_channels \times iT \times iH \times iW`)
weight: filters of shape (:math:`out\_channels \times \frac{in\_channels}{groups} \times kT \times kH \times kW`)
bias: optional bias tensor of shape (:math:`out\_channels`). Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sT, sH, sW)`. Default: 1
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padT, padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dT, dH, dW)`. Default: 1
groups: split input into groups, :math:`in\_channels` should be divisible by
the number of groups. Default: 1
Examples::
>>> filters = torch.randn(33, 16, 3, 3, 3)
>>> inputs = torch.randn(20, 16, 50, 10, 20)
>>> F.conv3d(inputs, filters)
""")
conv_transpose1d = _add_docstr(torch.conv_transpose1d, r"""
conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 1D transposed convolution operator over an input signal
composed of several input planes, sometimes also called "deconvolution".
See :class:`~torch.nn.ConvTranspose1d` for details and output shape.
Args:
input: input tensor of shape (:math:`minibatch \times in\_channels \times iW`)
weight: filters of shape (:math:`in\_channels \times \frac{out\_channels}{groups} \times kW`)
bias: optional bias of shape (:math:`out\_channels`). Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sW,)`. Default: 1
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padW,)`. Default: 0
output_padding: implicit zero-paddings of :math:`0 \leq padding < stride` on both
sides of the output. Can be a single number or a tuple `(out_padW,)`.
Default: 0
groups: split input into groups, :math:`in\_channels` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dW,)`. Default: 1
Examples::
>>> inputs = torch.randn(20, 16, 50)
>>> weights = torch.randn(16, 33, 5)
>>> F.conv_transpose1d(inputs, weights)
""")
conv_transpose2d = _add_docstr(torch.conv_transpose2d, r"""
conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 2D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".
See :class:`~torch.nn.ConvTranspose2d` for details and output shape.
Args:
input: input tensor of shape (:math:`minibatch \times in\_channels \times iH \times iW`)
weight: filters of shape (:math:`in\_channels \times \frac{out\_channels}{groups} \times kH \times kW`)
bias: optional bias of shape (:math:`out\_channels`). Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sH, sW)`. Default: 1
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
output_padding: implicit zero-paddings of :math:`0 \leq padding < stride` on both
sides of the output. Can be a single number or a tuple
`(out_padH, out_padW)`. Default: 0
groups: split input into groups, :math:`in\_channels` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dH, dW)`. Default: 1
Examples::
>>> # With square kernels and equal stride
>>> inputs = torch.randn(1, 4, 5, 5)
>>> weights = torch.randn(4, 8, 3, 3)
>>> F.conv_transpose2d(inputs, weights, padding=1)
""")
conv_transpose3d = _add_docstr(torch.conv_transpose3d, r"""
conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 3D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution"
See :class:`~torch.nn.ConvTranspose3d` for details and output shape.
Args:
input: input tensor of shape (:math:`minibatch \times in\_channels \times iT \times iH \times iW`)
weight: filters of shape (:math:`in\_channels \times \frac{out\_channels}{groups} \times kT \times kH \times kW`)
bias: optional bias of shape (:math:`out\_channels`). Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sT, sH, sW)`. Default: 1
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padT, padH, padW)`. Default: 0
output_padding: implicit zero-paddings of `0 \leq padding < stride` on both
sides of the output. Can be a single number or a tuple
`(out_padT, out_padH, out_padW)`. Default: 0
groups: split input into groups, :math:`in\_channels` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dT, dH, dW)`. Default: 1
Examples::
>>> inputs = torch.randn(20, 16, 50, 10, 20)
>>> weights = torch.randn(16, 33, 3, 3, 3)
>>> F.conv_transpose3d(inputs, weights)
""")
def conv_tbc(input, weight, bias, pad=0):
    r"""Applies a 1-dimensional sequence convolution over an input sequence.
    Input and output dimensions are (Time, Batch, Channels) - hence TBC.
    Args:
        input: input tensor of shape (:math:`\text{sequence length} \times batch \times in\_channels`)
        weight: filter of shape (:math:`\text{kernel width} \times in\_channels \times out\_channels`)
        bias: bias of shape (:math:`out\_channels`)
        pad: number of timesteps to pad. Default: 0
    Returns:
        Tensor in the same (Time, Batch, Channels) layout.
    """
    # Thin wrapper: the whole operation is implemented by the native op.
    return input.conv_tbc(weight, bias, pad)
# Pooling
def avg_pool1d(input, kernel_size, stride=None, padding=0,
               ceil_mode=False, count_include_pad=True):
    r"""Applies a 1D average pooling over an input signal composed of several
    input planes.
    See :class:`~torch.nn.AvgPool1d` for details and output shape.
    Args:
        input: input tensor of shape (:math:`minibatch \times in\_channels \times iW`)
        kernel_size: the size of the window. Can be a single number or a
            tuple `(kW,)`
        stride: the stride of the window. Can be a single number or a tuple
            `(sW,)`. Default: :attr:`kernel_size`
        padding: implicit zero paddings on both sides of the input. Can be a
            single number or a tuple `(padW,)`. Default: 0
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the
            output shape. Default: ``False``
        count_include_pad: when True, will include the zero-padding in the
            averaging calculation. Default: ``True``
    Example::
        >>> # pool of square window of size=3, stride=2
        >>> F.avg_pool1d(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]), kernel_size=3, stride=2)
        tensor([[[ 2.,  4.,  6.]]])
    """
    if input.dim() != 3:
        raise ValueError('expected 3D input (got {} dimensions)'
                         .format(input.dim()))
    # Reduce to the 2D case by pooling over a unit-sized trailing axis.
    k2d = _single(kernel_size) + (1,)
    s2d = k2d if stride is None else _single(stride) + (1,)
    p2d = _single(padding) + (0,)
    pooled = avg_pool2d(input.unsqueeze(3), k2d, s2d, p2d,
                        ceil_mode, count_include_pad)
    return pooled.squeeze(3)
avg_pool2d = _add_docstr(torch._C._nn.avg_pool2d, r"""
avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor
Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
:math:`sH \times sW` steps. The number of output features is equal to the number of
input planes.
See :class:`~torch.nn.AvgPool2d` for details and output shape.
Args:
input: input tensor (:math:`minibatch \times in\_channels \times iH \times iW`)
kernel_size: size of the pooling region. Can be a single number or a
tuple (:math:`kH \times kW`)
stride: stride of the pooling operation. Can be a single number or a
tuple `(sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
""")
avg_pool3d = _add_docstr(torch._C._nn.avg_pool3d, r"""
avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor
Applies 3D average-pooling operation in :math:`kT \times kH \times kW` regions by step
size :math:`sT \times sH \times sW` steps. The number of output features is equal to
:math:`\lfloor\frac{\text{input planes}}{sT}\rfloor`.
See :class:`~torch.nn.AvgPool3d` for details and output shape.
Args:
input: input tensor (:math:`minibatch \times in\_channels \times iT \times iH \times iW`)
kernel_size: size of the pooling region. Can be a single number or a
tuple (:math:`kT \times kH \times kW`)
stride: stride of the pooling operation. Can be a single number or a
tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padT, padH, padW)`, Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape
count_include_pad: when True, will include the zero-padding in the
averaging calculation
""")
def fractional_max_pool2d(input, kernel_size, output_size=None,
                          output_ratio=None, return_indices=False,
                          _random_samples=None):
    r"""Applies 2D fractional max pooling over an input signal composed of several input planes.
    Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham.
    Max pooling is applied in :math:`kH \times kW` regions with stochastic step
    sizes chosen so that the result has the requested output size; the number of
    output features equals the number of input planes.
    Args:
        kernel_size: size of the pooling window, a single number :math:`k`
            (square window) or a tuple `(kH, kW)`
        output_size: target output size `(oH, oW)`, or a single number for a
            square :math:`oH \times oH` output
        output_ratio: alternatively, the output size as a fraction of the input
            size; a number or tuple with entries in (0, 1)
        return_indices: if ``True``, also return the argmax indices
            (useful for `max_unpool2d`)
    Examples::
        >>> input = torch.randn(20, 16, 50, 32)
        >>> F.fractional_max_pool2d(input, 3, output_size=(13, 12))
        >>> F.fractional_max_pool2d(input, 3, output_ratio=(0.5, 0.5))
    .. _Fractional MaxPooling:
        http://arxiv.org/abs/1412.6071
    """
    if output_size is None and output_ratio is None:
        raise ValueError("fractional_max_pool2d requires specifying either "
                         "an output_size, or a output_ratio")
    if output_size is None:
        # Derive the target size from the requested ratio.
        ratio_h, ratio_w = _pair(output_ratio)
        output_size = (int(input.size(2) * ratio_h),
                       int(input.size(3) * ratio_w))
    if _random_samples is None:
        # One (u, v) pair per (batch, channel) plane drives the stochastic grid.
        _random_samples = input.new(input.size(0), input.size(1), 2).uniform_()
    output, indices = torch._C._nn.fractional_max_pool2d(
        input, kernel_size, output_size, _random_samples)
    if return_indices:
        return output, indices
    return output
def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1,
               ceil_mode=False, return_indices=False):
    r"""Applies a 1D max pooling over an input signal composed of several input
    planes.
    See :class:`~torch.nn.MaxPool1d` for details.
    """
    pooled = torch.max_pool1d(input, kernel_size, stride, padding, dilation,
                              ceil_mode)
    if return_indices:
        return pooled
    return pooled[0]
def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
               ceil_mode=False, return_indices=False):
    r"""Applies a 2D max pooling over an input signal composed of several input
    planes.
    See :class:`~torch.nn.MaxPool2d` for details.
    """
    pooled = torch._C._nn.max_pool2d(input, kernel_size, stride, padding,
                                     dilation, ceil_mode)
    if return_indices:
        return pooled
    return pooled[0]
def max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1,
               ceil_mode=False, return_indices=False):
    r"""Applies a 3D max pooling over an input signal composed of several input
    planes.
    See :class:`~torch.nn.MaxPool3d` for details.
    """
    pooled = torch._C._nn.max_pool3d(input, kernel_size, stride, padding,
                                     dilation, ceil_mode)
    if return_indices:
        return pooled
    return pooled[0]
def _unpool_output_size(input, kernel_size, stride, padding, output_size):
input_size = input.size()
default_size = []
for d in range(len(kernel_size)):
default_size.append((input_size[d + 2] - 1) * stride[d] +
kernel_size[d] - 2 * padding[d])
if output_size is None:
return default_size
output_size = list(output_size)
if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
if len(output_size) != len(kernel_size):
raise ValueError("output_size should be a sequence containing "
"{} or {} elements, but it has a length of '{}'"
.format(len(kernel_size), len(kernel_size) + 2,
len(output_size)))
for d in range(len(kernel_size)):
min_size = default_size[d] - stride[d]
max_size = default_size[d] + stride[d]
if not (min_size < output_size[d] < max_size):
raise ValueError(
'invalid output_size "{}" (dim {} must be between {} and {})'
.format(output_size, d, min_size, max_size))
return output_size
def max_unpool1d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
r"""Computes a partial inverse of :class:`MaxPool1d`.
See :class:`~torch.nn.MaxUnpool1d` for details.
"""
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
output_size = _unpool_output_size(input, kernel_size, stride, padding,
output_size)
return torch._C._nn.max_unpool2d(input.unsqueeze(3), indices.unsqueeze(3), output_size + [1]).squeeze(3)
def max_unpool2d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
r"""Computes a partial inverse of :class:`MaxPool2d`.
See :class:`~torch.nn.MaxUnpool2d` for details.
"""
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
output_size = _unpool_output_size(input, kernel_size, stride, padding,
output_size)
return torch._C._nn.max_unpool2d(input, indices, output_size)
def max_unpool3d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
r"""Computes a partial inverse of :class:`MaxPool3d`.
See :class:`~torch.nn.MaxUnpool3d` for details.
"""
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
output_size = _unpool_output_size(input, kernel_size, stride, padding,
output_size)
return torch._C._nn.max_unpool3d(input, indices, output_size, stride, padding)
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
    r"""Applies a 2D power-average pooling over an input signal composed of
    several input planes. If the sum of all inputs to the power of `p` is
    zero, the gradient is set to zero as well.
    See :class:`~torch.nn.LPPool2d` for details.
    """
    kw, kh = utils._pair(kernel_size)
    # Mean of |x|^p over each window, then rescaled back to a sum and rooted.
    pooled = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
    signed = torch.sign(pooled) * relu(torch.abs(pooled))
    return signed.mul(kw * kh).pow(1. / norm_type)
def lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
    r"""Applies a 1D power-average pooling over an input signal composed of
    several input planes. If the sum of all inputs to the power of `p` is
    zero, the gradient is set to zero as well.
    See :class:`~torch.nn.LPPool1d` for details.
    """
    # Mean of |x|^p over each window, then rescaled back to a sum and rooted.
    pooled = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
    signed = torch.sign(pooled) * relu(torch.abs(pooled))
    return signed.mul(kernel_size).pow(1. / norm_type)
def adaptive_max_pool1d(input, output_size, return_indices=False):
    r"""Applies a 1D adaptive max pooling over an input signal composed of
    several input planes.
    See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
    Args:
        output_size: the target output size (single integer)
        return_indices: whether to return pooling indices. Default: ``False``
    """
    values_and_indices = torch.adaptive_max_pool1d(input, output_size)
    if return_indices:
        return values_and_indices
    return values_and_indices[0]
def adaptive_max_pool2d(input, output_size, return_indices=False):
    r"""Applies a 2D adaptive max pooling over an input signal composed of
    several input planes.
    See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.
    Args:
        output_size: the target output size (single integer or
            double-integer tuple)
        return_indices: whether to return pooling indices. Default: ``False``
    """
    values_and_indices = torch._C._nn.adaptive_max_pool2d(input, output_size)
    if return_indices:
        return values_and_indices
    return values_and_indices[0]
def adaptive_max_pool3d(input, output_size, return_indices=False):
    r"""Applies a 3D adaptive max pooling over an input signal composed of
    several input planes.
    See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape.
    Args:
        output_size: the target output size (single integer or
            triple-integer tuple)
        return_indices: whether to return pooling indices. Default: ``False``
    """
    values_and_indices = torch._C._nn.adaptive_max_pool3d(input, output_size)
    if return_indices:
        return values_and_indices
    return values_and_indices[0]
adaptive_avg_pool1d = _add_docstr(torch.adaptive_avg_pool1d, r"""
adaptive_avg_pool1d(input, output_size) -> Tensor
Applies a 1D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
""")
adaptive_avg_pool2d = _add_docstr(torch._C._nn.adaptive_avg_pool2d, r"""
adaptive_avg_pool2d(input, output_size) -> Tensor
Applies a 2D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
""")
adaptive_avg_pool3d = _add_docstr(torch._C._nn.adaptive_avg_pool3d, r"""
adaptive_avg_pool3d(input, output_size) -> Tensor
Applies a 3D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool3d` for details and output shape.
Args:
output_size: the target output size (single integer or
triple-integer tuple)
""")
# Activation functions
def dropout(input, p=0.5, training=False, inplace=False):
    r"""Randomly zeroes elements of ``input`` with probability :attr:`p`.
    Only active when ``training=True`` -- note the default here is ``False``,
    so callers must pass it explicitly. Delegates to the autograd Dropout
    function. See :class:`~torch.nn.Dropout` for details.
    """
    return _functions.dropout.Dropout.apply(input, p, training, inplace)
def alpha_dropout(input, p=0.5, training=False):
    r"""Applies alpha dropout to the input.
    See :class:`~torch.nn.AlphaDropout` for details.
    Args:
        p (float, optional): the drop probability. Default: 0.5
        training (bool, optional): switch between training and evaluation mode. Default: ``False``
    """
    if p < 0 or p > 1:
        raise ValueError("dropout probability has to be between 0 and 1, "
                         "but got {}".format(p))
    if p == 0 or not training:
        return input
    # Saturation value dropped units are set to: -scale*alpha of SELU
    # (1.0507009873554805 * 1.6732632423543772 = 1.7580993408473766).
    alpha = -1.7580993408473766
    keep_prob = 1 - p
    # TODO avoid casting to byte after resize
    # noise == 1 marks an element to drop (probability p); built on .data so
    # mask generation stays out of the autograd graph.
    noise = input.data.new().resize_(input.size())
    noise.bernoulli_(p)
    noise = noise.byte()
    output = input.masked_fill(noise, alpha)
    # Affine correction (a, b) chosen to restore the pre-dropout mean and
    # variance -- "Self-Normalizing Neural Networks" (Klambauer et al., 2017).
    a = (keep_prob + alpha ** 2 * keep_prob * (1 - keep_prob)) ** (-0.5)
    b = -a * alpha * (1 - keep_prob)
    return output.mul_(a).add_(b)
def dropout2d(input, p=0.5, training=False, inplace=False):
    r"""Channel-wise (feature-map) dropout; delegates to the autograd
    FeatureDropout function. See :class:`~torch.nn.Dropout2d` for details.
    """
    return _functions.dropout.FeatureDropout.apply(input, p, training, inplace)
def dropout3d(input, p=0.5, training=False, inplace=False):
    r"""Channel-wise (feature-map) dropout for volumetric input; delegates to
    the autograd FeatureDropout function. See :class:`~torch.nn.Dropout3d`.
    """
    return _functions.dropout.FeatureDropout.apply(input, p, training, inplace)
def threshold(input, threshold, value, inplace=False):
    r"""Thresholds each element of the input Tensor.
    See :class:`~torch.nn.Threshold` for more details.
    """
    op = torch._C._nn.threshold_ if inplace else torch._C._nn.threshold
    return op(input, threshold, value)
threshold_ = _add_docstr(torch._C._nn.threshold_, r"""
threshold_(input, threshold, value) -> Tensor
In-place version of :func:`~threshold`.
""")
def relu(input, inplace=False):
    r"""relu(input, inplace=False) -> Tensor
    Applies the rectified linear unit function element-wise. See
    :class:`~torch.nn.ReLU` for more details.
    """
    return torch.relu_(input) if inplace else torch.relu(input)
relu_ = _add_docstr(torch.relu_, r"""
relu_(input) -> Tensor
In-place version of :func:`~relu`.
""")
def glu(input, dim=-1):
    r"""
    glu(input, dim=-1) -> Tensor
    The gated linear unit. Computes:
    .. math ::
        H = A \times \sigma(B)
    where `input` is split in half along `dim` to form `A` and `B`.
    See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_.
    Args:
        input (Tensor): input tensor
        dim (int): dimension on which to split the input
    """
    # A 0-dim tensor cannot be halved along any dimension.
    if input.dim() == 0:
        raise RuntimeError("glu does not suppport scalars because halving size must be even")
    return torch._C._nn.glu(input, dim)
def hardtanh(input, min_val=-1., max_val=1., inplace=False):
    r"""
    hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor
    Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more
    details.
    """
    op = torch._C._nn.hardtanh_ if inplace else torch._C._nn.hardtanh
    return op(input, min_val, max_val)
hardtanh_ = _add_docstr(torch._C._nn.hardtanh_, r"""
hardtanh_(input, min_val=-1., max_val=1.) -> Tensor
In-place version of :func:`~hardtanh`.
""")
def relu6(input, inplace=False):
    r"""relu6(input, inplace=False) -> Tensor
    Applies the element-wise function :math:`\text{ReLU6}(x) = \min(\max(0,x), 6)`.
    See :class:`~torch.nn.ReLU6` for more details.
    """
    # ReLU6 is just a hardtanh clamped to the [0, 6] band.
    return hardtanh(input, 0., 6., inplace)
def elu(input, alpha=1., inplace=False):
    r"""Applies element-wise,
    :math:`\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))`.
    See :class:`~torch.nn.ELU` for more details.
    """
    op = torch._C._nn.elu_ if inplace else torch._C._nn.elu
    return op(input, alpha)
elu_ = _add_docstr(torch._C._nn.elu_, r"""
elu_(input, alpha=1.) -> Tensor
In-place version of :func:`~elu`.
""")
def selu(input, inplace=False):
    r"""selu(input, inplace=False) -> Tensor
    Applies element-wise,
    :math:`\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`,
    with :math:`\alpha=1.6732632423543772848170429916717` and
    :math:`scale=1.0507009873554804934193349852946`.
    See :class:`~torch.nn.SELU` for more details.
    """
    return torch.selu_(input) if inplace else torch.selu(input)
selu_ = _add_docstr(torch.selu_, r"""
selu_(input) -> Tensor
In-place version of :func:`~selu`.
""")
def leaky_relu(input, negative_slope=0.01, inplace=False):
    r"""
    leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor
    Applies element-wise,
    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative_slope} * \min(0, x)`
    See :class:`~torch.nn.LeakyReLU` for more details.
    """
    op = torch._C._nn.leaky_relu_ if inplace else torch._C._nn.leaky_relu
    return op(input, negative_slope)
leaky_relu_ = _add_docstr(torch._C._nn.leaky_relu_, r"""
leaky_relu_(input, negative_slope=0.01) -> Tensor
In-place version of :func:`~leaky_relu`.
""")
prelu = _add_docstr(torch._C._nn.prelu, r"""
prelu(input, weight) -> Tensor
Applies element-wise the function
:math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a
learnable parameter.
See :class:`~torch.nn.PReLU` for more details.
""")
def rrelu(input, lower=1. / 8, upper=1. / 3, training=False, inplace=False):
    r"""rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor
    Randomized leaky ReLU.
    See :class:`~torch.nn.RReLU` for more details.
    """
    op = torch.rrelu_ if inplace else torch.rrelu
    return op(input, lower, upper, training)
rrelu_ = _add_docstr(torch.rrelu_, r"""
rrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor
In-place version of :func:`~rrelu`.
""")
logsigmoid = _add_docstr(torch._C._nn.log_sigmoid, r"""
logsigmoid(input) -> Tensor
Applies element-wise :math:`\text{LogSigmoid}(x) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)`
See :class:`~torch.nn.LogSigmoid` for more details.
""")
hardshrink = _add_docstr(torch._C._nn.hardshrink, r"""
hardshrink(input, lambd=0.5) -> Tensor
Applies the hard shrinkage function element-wise
See :class:`~torch.nn.Hardshrink` for more details.
""")
def tanhshrink(input):
    r"""tanhshrink(input) -> Tensor
    Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)`
    See :class:`~torch.nn.Tanhshrink` for more details.
    """
    return input - torch.tanh(input)
def softsign(input):
    r"""softsign(input) -> Tensor
    Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`
    See :class:`~torch.nn.Softsign` for more details.
    """
    denominator = input.abs() + 1
    return input / denominator
softplus = _add_docstr(torch._C._nn.softplus, r"""
softplus(input, beta=1, threshold=20) -> Tensor
""")
def _get_softmax_dim(name, ndim, stacklevel):
warnings.warn("Implicit dimension choice for " + name + " has been deprecated. "
"Change the call to include dim=X as an argument.", stacklevel=stacklevel)
if ndim == 0 or ndim == 1 or ndim == 3:
return 0
else:
return 1
def softmin(input, dim=None, _stacklevel=3):
    r"""Applies a softmin function.
    Note that :math:`\text{Softmin}(x) = \text{Softmax}(-x)`. See softmax definition for mathematical formula.
    See :class:`~torch.nn.Softmin` for more details.
    Arguments:
        input (Tensor): input
        dim (int): A dimension along which softmin will be computed (so every slice
            along dim will sum to 1).
    """
    if dim is None:
        dim = _get_softmax_dim('softmin', input.dim(), _stacklevel)
    # BUGFIX: softmin is the softmax of the NEGATED input. The previous
    # `-input.softmax(dim)` negated the softmax result instead, producing
    # negative values that do not sum to 1.
    return (-input).softmax(dim)
def softmax(input, dim=None, _stacklevel=3):
    r"""Applies a softmax function.
    Softmax is defined as:
    :math:`\text{Softmax}(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)}`
    It is applied to all slices along dim, and will re-scale them so that the elements
    lie in the range `(0, 1)` and sum to 1.
    See :class:`~torch.nn.Softmax` for more details.
    Arguments:
        input (Tensor): input
        dim (int): A dimension along which softmax will be computed.
    .. note::
        This function doesn't work directly with NLLLoss,
        which expects the Log to be computed between the Softmax and itself.
        Use log_softmax instead (it's faster and has better numerical properties).
    """
    # Fall back to the deprecated implicit-dim rule only when dim is omitted.
    target_dim = dim if dim is not None else \
        _get_softmax_dim('softmax', input.dim(), _stacklevel)
    return input.softmax(target_dim)
def _sample_gumbel(shape, eps=1e-10, out=None):
"""
Sample from Gumbel(0, 1)
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
U = out.resize_(shape).uniform_() if out is not None else torch.rand(shape)
return - torch.log(eps - torch.log(U + eps))
def _gumbel_softmax_sample(logits, tau=1, eps=1e-10):
    """
    Draw a sample from the Gumbel-Softmax distribution
    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
    (MIT license)
    """
    last_dim = logits.dim() - 1
    noise = _sample_gumbel(logits.size(), eps=eps, out=logits.data.new())
    perturbed = logits + noise
    return softmax(perturbed / tau, last_dim)
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
    """
    Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
        logits: `[batch_size, n_class]` unnormalized log-probs
        tau: non-negative scalar temperature
        hard: if ``True``, take `argmax`, but differentiate w.r.t. soft sample y
    Returns:
        [batch_size, n_class] sample from the Gumbel-Softmax distribution.
        If hard=True, then the returned sample will be one-hot, otherwise it will
        be a probability distribution that sums to 1 across classes
    Constraints:
        - this implementation only works on batch_size x num_features tensor for now
    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
    (MIT license)
    """
    shape = logits.size()
    assert len(shape) == 2
    y_soft = _gumbel_softmax_sample(logits, tau=tau, eps=eps)
    if not hard:
        return y_soft
    # Discretize: one-hot at the soft sample's argmax
    # (https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5).
    winners = y_soft.max(-1)[1]
    y_hard = logits.new_zeros(*shape).scatter_(-1, winners.view(-1, 1), 1.0)
    # Straight-through trick: the forward value is exactly one-hot (y_soft
    # cancels), while the backward gradient flows through y_soft only.
    return y_hard - y_soft.detach() + y_soft
def log_softmax(input, dim=None, _stacklevel=3):
    r"""Applies a softmax followed by a logarithm.
    While mathematically equivalent to log(softmax(x)), doing these two
    operations separately is slower, and numerically unstable. This function
    uses an alternative formulation to compute the output and gradient correctly.
    See :class:`~torch.nn.LogSoftmax` for more details.
    Arguments:
        input (Tensor): input
        dim (int): A dimension along which log_softmax will be computed.
    """
    # Fall back to the deprecated implicit-dim rule only when dim is omitted.
    target_dim = dim if dim is not None else \
        _get_softmax_dim('log_softmax', input.dim(), _stacklevel)
    return input.log_softmax(target_dim)
softshrink = _add_docstr(torch._C._nn.softshrink, r"""
softshrink(input, lambd=0.5) -> Tensor
Applies the soft shrinkage function elementwise
See :class:`~torch.nn.Softshrink` for more details.
""")
def tanh(input):
    r"""tanh(input) -> Tensor
    Applies element-wise,
    :math:`\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}`
    See :class:`~torch.nn.Tanh` for more details.
    """
    return torch.tanh(input)
def sigmoid(input):
    r"""sigmoid(input) -> Tensor
    Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}`
    See :class:`~torch.nn.Sigmoid` for more details.
    """
    return torch.sigmoid(input)
# etc.
def linear(input, weight, bias=None):
    """
    Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
    Shape:
        - Input: :math:`(N, *, in\_features)` where `*` means any number of
          additional dimensions
        - Weight: :math:`(out\_features, in\_features)`
        - Bias: :math:`(out\_features)`
        - Output: :math:`(N, *, out\_features)`
    """
    if input.dim() == 2 and bias is not None:
        # 2D + bias: use the fused addmm kernel, marginally faster.
        return torch.addmm(bias, input, weight.t())
    projected = input.matmul(weight.t())
    if bias is None:
        return projected
    projected += bias
    return projected
def bilinear(input1, input2, weight, bias=None):
    r"""Applies a bilinear transformation to the incoming data:
    :math:`y = x_1 A x_2 + b`. Thin wrapper around :func:`torch.bilinear`;
    see :class:`~torch.nn.Bilinear` for shapes and details.
    """
    return torch.bilinear(input1, input2, weight, bias)
def embedding(input, weight, padding_idx=None, max_norm=None, norm_type=2,
              scale_grad_by_freq=False, sparse=False):
    r"""A simple lookup table that looks up embeddings in a fixed dictionary and size.
    This module is often used to retrieve word embeddings using indices:
    the input is a tensor of indices into ``weight``, and the output gathers the
    corresponding embedding rows.
    Args:
        input: tensor, containing indices into the embedding matrix
        weight: embedding matrix; number of rows should be the maximum possible
            index + 1, number of columns is the embedding size
        padding_idx (int, optional): entries at this index do not contribute to
            the gradient; may be negative (counted from the end)
        max_norm (float, optional): if given, renormalizes rows of ``weight``
            (in place) so their norm never exceeds this value
        norm_type (float, optional): the p of the p-norm used for ``max_norm``
        scale_grad_by_freq (boolean, optional): if given, scales gradients by the
            frequency of the words in the mini-batch
        sparse (boolean, optional): if ``True``, the gradient w.r.t. ``weight``
            is a sparse tensor; only use when ``weight`` is a leaf Tensor, and
            note only a limited set of optimizers supports sparse gradients
    Shape:
        - Input: LongTensor `(N, W)`, N = mini-batch, W = number of indices to extract per mini-batch
        - Weight: FloatTensor `(V, embedding_dim)`, V = maximum index + 1
        - Output: `(N, W, embedding_dim)`
    Examples::
        >>> input = torch.tensor([[1,2,4,5],[4,3,2,9]])
        >>> embedding_matrix = torch.rand(10, 3)
        >>> F.embedding(input, embedding_matrix)
    """
    input = input.contiguous()
    if padding_idx is None:
        # The native op uses -1 to mean "no padding index".
        padding_idx = -1
    elif padding_idx < 0:
        assert padding_idx >= -weight.size(0), 'Padding_idx must be within num_embeddings'
        padding_idx = weight.size(0) + padding_idx
    elif padding_idx > 0:
        assert padding_idx < weight.size(0), 'Padding_idx must be within num_embeddings'
    if max_norm is not None:
        # In-place renorm of the referenced rows; keep it out of autograd.
        with torch.no_grad():
            torch.embedding_renorm_(weight, input, max_norm, norm_type)
    return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
def embedding_bag(embedding_matrix, indices, offsets=None,
                  max_norm=None, norm_type=2, scale_grad_by_freq=False, mode='mean', sparse=False):
    r"""Computes sums, means or maxes of 'bags' of embeddings, without
    instantiating the intermediate embeddings.
    For bags of constant length,
        * :func:`embedding_bag` with `mode=sum` is equivalent to :func:`nn.functional.embedding` followed by
          ``torch.sum(dim=1)``
        * with `mode=mean` is equivalent to :func:`nn.functional.embedding` followed by ``torch.mean(dim=1)``
        * with `mode=max` is equivalent to :func:`nn.functional.embedding` followed by ``torch.max(dim=1)``
    However, :func:`embedding_bag` is much more time and memory efficient than using a chain of these
    operations.
    Args:
        embedding_matrix: FloatTensor `(V, embedding_dim)`; number of rows should be
            the maximum possible index + 1, columns the embedding size
        indices (N or BxN): LongTensor containing the indices of the embeddings
            to extract. A 1D tensor of length `N` requires `offsets`; a 2D
            `BxN` tensor is treated as B fixed-length bags of N indices each.
        offsets (B or None): LongTensor of the starting position of each bag in
            a 1D `indices` (i.e. the cumsum of bag lengths, starting at 0);
            must be None when `indices` is 2D
        max_norm (float, optional): if given, renormalizes rows of
            ``embedding_matrix`` (in place) so their norm never exceeds this value
        norm_type (float, optional): the p of the p-norm used for ``max_norm``
        scale_grad_by_freq (boolean, optional): if given, scales gradients by the
            frequency of the words in the dictionary; not supported with `mode=max`
        mode (string, optional): 'sum' | 'mean' | 'max'. Specifies the way to reduce the bag. Default: 'mean'
        sparse (boolean, optional): if ``True``, gradient w.r.t. the embedding
            matrix is a sparse tensor; not supported with `mode=max`
    Returns:
        Tensor of shape `(B, embedding_dim)`
    Examples::
        >>> embedding_matrix = torch.rand(10, 3)
        >>> input = torch.tensor([1,2,4,5,4,3,2,9])
        >>> offsets = torch.tensor([0,4])
        >>> F.embedding_bag(embedding_matrix, input, offsets)
    """
    if indices.dim() == 2:
        if offsets is not None:
            raise ValueError("if input is 2D, then offsets has to be None"
                             ", as input is treated is a mini-batch of"
                             " fixed length sequences. However, found "
                             "offsets of type {}".format(type(offsets)))
        else:
            # Synthesize offsets for B fixed-length bags, then flatten.
            offsets = torch.arange(0, indices.numel(), indices.size(1),
                                   dtype=torch.long, device=indices.device)
            indices = indices.view(-1)
    elif indices.dim() == 1:
        if offsets is None:
            raise ValueError("offsets has to be a 1D Tensor but got None")
        if offsets.dim() != 1:
            raise ValueError("offsets has to be a 1D Tensor")
        if offsets[0] != 0:
            raise ValueError("offsets[0] has to be 0, i.e. the first sequence"
                             " in the mini-batch has to start from position 0."
                             "However, got {}".format(offsets[0]))
        if offsets[-1] > indices.size(0):
            raise ValueError("offsets[-1] has to be smaller than indices's length"
                             " ({}), but got offsets[-1] of {}"
                             .format(indices.size(0), offsets[-1]))
    else:
        raise ValueError("input has to be 1D or 2D Tensor,"
                         " but got Tensor of dimension {}".format(indices.dim()))
    # Map the reduction mode to the native op's integer code.
    if mode == 'sum':
        mode = 0
    elif mode == 'mean':
        mode = 1
    elif mode == 'max':
        mode = 2
        if scale_grad_by_freq:
            raise ValueError("max mode does not support scaling the gradient by the frequency")
        if sparse:
            raise ValueError("max mode does not support sparse weights")
    else:
        # BUGFIX: the message previously omitted 'max' from the valid modes.
        raise ValueError("mode has to be one of sum, mean or max")
    if max_norm is not None:
        with torch.no_grad():
            # BUGFIX: this previously referenced undefined names `weight` and
            # `input` (raising NameError); renorm must act on this function's
            # actual arguments.
            torch.embedding_renorm_(embedding_matrix, indices, max_norm, norm_type)
    ret, _, _, _ = torch.embedding_bag(
        embedding_matrix,
        indices,
        offsets,
        scale_grad_by_freq,
        mode,
        sparse)
    return ret
def batch_norm(input, running_mean, running_var, weight=None, bias=None,
               training=False, momentum=0.1, eps=1e-5):
    r"""Apply Batch Normalization over each channel of a batched input.

    See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`,
    :class:`~torch.nn.BatchNorm3d` for details.
    """
    if training:
        # Per-channel statistics are undefined when only one value exists per
        # channel (batch * spatial elements == 1), so reject that case early.
        shape = list(input.size())
        values_per_channel = reduce(mul, shape[2:], shape[0])
        if values_per_channel == 1:
            raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(shape))
    # Delegate the actual normalization to the ATen kernel.
    return torch.batch_norm(
        input, weight, bias, running_mean, running_var,
        training, momentum, eps, torch.backends.cudnn.enabled
    )
def instance_norm(input, running_mean=None, running_var=None, weight=None,
                  bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    r"""Applies Instance Normalization for each channel in each data sample in a
    batch.

    See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`,
    :class:`~torch.nn.InstanceNorm3d` for details.

    Args:
        input (Tensor): input of shape :math:`(N, C, ...)`
        running_mean (Tensor, optional): per-channel running mean of size `C`;
            updated in place when provided
        running_var (Tensor, optional): per-channel running variance of size
            `C`; updated in place when provided
        weight (Tensor, optional): per-channel affine scale of size `C`
        bias (Tensor, optional): per-channel affine shift of size `C`
        use_input_stats (bool): if ``True``, normalize with statistics computed
            from the input itself; otherwise the running statistics are used
        momentum (float): momentum for the running-statistics update
        eps (float): value added to the variance for numerical stability
    """
    if not use_input_stats and (running_mean is None or running_var is None):
        raise ValueError('Expected running_mean and running_var to be not None when use_input_stats=False')
    b, c = input.size(0), input.size(1)
    # Instance norm is realized as batch norm over a (1, b*c, ...) reshape, so
    # the per-channel affine params are tiled across the batch dimension.
    if weight is not None:
        weight = weight.repeat(b)
    if bias is not None:
        bias = bias.repeat(b)
    import torch.onnx.symbolic

    # NOTE(review): this decorator appears to substitute an ONNX symbolic for
    # the whole helper during export, so the trace records a single
    # instance_norm op rather than the reshape + batch_norm below — confirm
    # against torch.onnx of this vintage.
    @torch.onnx.symbolic_override_first_arg_based(torch.onnx.symbolic.instance_norm)
    def _instance_norm(input, running_mean=None, running_var=None, weight=None,
                       bias=None, use_input_stats=None, momentum=None, eps=None):
        # Repeat stored stats and affine transform params if necessary
        if running_mean is not None:
            running_mean_orig = running_mean
            running_mean = running_mean_orig.repeat(b)
        if running_var is not None:
            running_var_orig = running_var
            running_var = running_var_orig.repeat(b)
        # Apply instance norm on the (1, b*c, ...) view: each (sample, channel)
        # pair becomes its own "channel" for batch_norm.
        input_reshaped = input.contiguous().view(1, b * c, *input.size()[2:])
        out = batch_norm(
            input_reshaped, running_mean, running_var, weight=weight, bias=bias,
            training=use_input_stats, momentum=momentum, eps=eps)
        # Reshape and copy back: the tiled running stats are averaged over the
        # batch before being written into the caller's original tensors.
        if running_mean is not None:
            running_mean_orig.copy_(running_mean.view(b, c).mean(0, keepdim=False))
        if running_var is not None:
            running_var_orig.copy_(running_var.view(b, c).mean(0, keepdim=False))
        return out.view(b, c, *input.size()[2:])
    return _instance_norm(input, running_mean=running_mean,
                          running_var=running_var, weight=weight, bias=bias,
                          use_input_stats=use_input_stats, momentum=momentum,
                          eps=eps)
def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5):
    r"""Normalize ``input`` over its trailing ``normalized_shape`` dimensions.

    See :class:`~torch.nn.LayerNorm` for details.
    """
    use_cudnn = torch.backends.cudnn.enabled
    return torch.layer_norm(
        input, normalized_shape, weight, bias, eps, use_cudnn)
def group_norm(input, num_groups, weight=None, bias=None, eps=1e-5):
    r"""Normalize ``input`` over groups of channels.

    See :class:`~torch.nn.GroupNorm` for details.
    """
    use_cudnn = torch.backends.cudnn.enabled
    return torch.group_norm(
        input, num_groups, weight, bias, eps, use_cudnn)
def local_response_norm(input, size, alpha=1e-4, beta=0.75, k=1):
    r"""Applies local response normalization over an input signal composed of
    several input planes, where channels occupy the second dimension.
    Applies normalization across channels.

    See :class:`~torch.nn.LocalResponseNorm` for details.

    Args:
        input (Tensor): input of shape :math:`(N, C, *)`, at least 3-D
        size (int): number of neighbouring channels used for normalization
        alpha (float): multiplicative factor. Default: 1e-4
        beta (float): exponent. Default: 0.75
        k (float): additive factor. Default: 1
    """
    dim = input.dim()
    if dim < 3:
        # Fixed: the message was previously built with a raw backslash line
        # continuation, which embedded a run of indentation spaces inside the
        # user-facing string.
        raise ValueError('Expected 3D or higher dimensionality '
                         'input (got {} dimensions)'.format(dim))
    # Average the squared activations over a window of `size` channels by
    # padding asymmetrically and average-pooling along the channel axis.
    div = input.mul(input).unsqueeze(1)
    if dim == 3:
        div = pad(div, (0, 0, size // 2, (size - 1) // 2))
        div = avg_pool2d(div, (size, 1), stride=1).squeeze(1)
    else:
        # Collapse trailing spatial dims so 3D pooling can slide over channels.
        sizes = input.size()
        div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
        div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2))
        div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1)
        div = div.view(sizes)
    div = div.mul(alpha).add(k).pow(beta)
    return input / div
# loss
def nll_loss(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
    r"""The negative log likelihood loss.

    See :class:`~torch.nn.NLLLoss` for details.

    Args:
        input: :math:`(N, C)` where `C = number of classes`, :math:`(N, C, H, W)`
            for 2D loss, or :math:`(N, C, d_1, ..., d_K)` (:math:`K > 1`) for
            K-dimensional loss.
        target: :math:`(N)` with values in :math:`[0, C-1]`, or
            :math:`(N, d_1, ..., d_K)` for K-dimensional loss.
        weight (Tensor, optional): manual per-class rescaling weight of size `C`.
        size_average (bool, optional): average (``True``) or sum (``False``) the
            losses over the minibatch. Default: ``True``
        ignore_index (int, optional): target value that contributes nothing to
            the loss or the input gradient. Default: -100
        reduce (bool, optional): when ``False``, return a per-element loss and
            ignore :attr:`size_average`. Default: ``True``

    Example::

        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.tensor([1, 0, 4])
        >>> output = F.nll_loss(F.log_softmax(input), target)
        >>> output.backward()
    """
    dim = input.dim()
    if dim < 2:
        raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))
    if input.size(0) != target.size(0):
        raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).'
                         .format(input.size(0), target.size(0)))
    if dim == 2:
        return torch._C._nn.nll_loss(input, target, weight, size_average, ignore_index, reduce)
    if dim == 4:
        return torch._C._nn.nll_loss2d(input, target, weight, size_average, ignore_index, reduce)
    # K-dimensional case (dim == 3 or dim > 4): fold every spatial dim into a
    # single trailing axis and reuse the 2d kernel.
    n, c = input.size(0), input.size(1)
    out_size = (n,) + input.size()[2:]
    if target.size()[1:] != input.size()[2:]:
        raise ValueError('Expected target size {}, got {}'.format(
            out_size, target.size()))
    flat_input = input.contiguous().view(n, c, 1, -1)
    flat_target = target.contiguous().view(n, 1, -1)
    result = torch._C._nn.nll_loss2d(flat_input, flat_target, weight,
                                     size_average, ignore_index, reduce)
    if reduce:
        return result
    # Restore the caller-visible spatial layout for the per-element loss.
    return result.view(out_size)
def poisson_nll_loss(input, target, log_input=True, full=False, size_average=True, eps=1e-8, reduce=True):
    r"""Poisson negative log likelihood loss.

    See :class:`~torch.nn.PoissonNLLLoss` for details.

    Args:
        input: expectation of the underlying Poisson distribution.
        target: random sample :math:`target \sim \text{Poisson}(input)`.
        log_input: if ``True`` the loss is
            :math:`\exp(\text{input}) - \text{target} * \text{input}`;
            otherwise
            :math:`\text{input} - \text{target} * \log(\text{input}+\text{eps})`.
            Default: ``True``
        full: whether to add the Stirling approximation term
            :math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`.
            Default: ``False``
        size_average: average (``True``) or sum (``False``) the losses over the
            minibatch. Default: ``True``
        eps (float, optional): small value avoiding :math:`\log(0)` when
            :attr:`log_input` is ``False``. Default: 1e-8
        reduce (bool, optional): when ``False``, return the per-element loss
            and ignore :attr:`size_average`. Default: ``True``
    """
    if log_input:
        loss = input.exp() - target * input
    else:
        loss = input - target * (input + eps).log()
    if full:
        # Stirling correction, applied only where target > 1 so the
        # log(target) factor stays finite.
        stirling = target * torch.log(target) - target + 0.5 * torch.log(2 * math.pi * target)
        big = target > 1
        loss[big] += stirling[big]
    if not reduce:
        return loss
    return loss.mean() if size_average else loss.sum()
kl_div = _add_docstr(torch._C._nn.kl_div, r"""
kl_div(input, target, size_average=True) -> Tensor
The `Kullback-Leibler divergence`_ Loss.
See :class:`~torch.nn.KLDivLoss` for details.
Args:
input: Tensor of arbitrary shape
target: Tensor of the same shape as input
size_average: if ``True`` the output is divided by the number of elements
in input tensor. Default: ``True``
reduce (bool, optional): By default, the losses are averaged
over observations for each minibatch, or summed, depending on
size_average. When reduce is ``False``, returns a loss per input/target
element instead and ignores :attr:`size_average`. Default: ``True``
""")
def cross_entropy(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
    r"""Combine :func:`log_softmax` and :func:`nll_loss` in a single call.

    See :class:`~torch.nn.CrossEntropyLoss` for details.

    Args:
        input (Tensor): :math:`(N, C)` where `C = number of classes`,
            :math:`(N, C, H, W)` for 2D loss, or :math:`(N, C, d_1, ..., d_K)`
            (:math:`K > 1`) for K-dimensional loss.
        target (Tensor): :math:`(N)` with values in :math:`[0, C-1]`, or
            :math:`(N, d_1, ..., d_K)` for K-dimensional loss.
        weight (Tensor, optional): manual per-class rescaling weight of size `C`.
        size_average (bool, optional): average (``True``) or sum (``False``)
            the losses over the minibatch; ignored when :attr:`reduce` is
            ``False``. Default: ``True``
        ignore_index (int, optional): target value that contributes nothing to
            the loss or the input gradient. Default: -100
        reduce (bool, optional): when ``False``, return a per-batch loss and
            ignore :attr:`size_average`. Default: ``True``

    Examples::

        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randint(5, (3,), dtype=torch.int64)
        >>> loss = F.cross_entropy(input, target)
        >>> loss.backward()
    """
    log_probs = log_softmax(input, 1)
    return nll_loss(log_probs, target, weight, size_average, ignore_index, reduce)
def binary_cross_entropy(input, target, weight=None, size_average=True, reduce=True):
    r"""Measure the Binary Cross Entropy between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Tensor of arbitrary shape
        target: Tensor of the same shape as input
        weight (Tensor, optional): manual rescaling weight, broadcast to the
            input shape when provided
        size_average (bool, optional): average (``True``) or sum (``False``)
            the losses over the minibatch. Default: ``True``
        reduce (bool, optional): when ``False``, return a per-element loss and
            ignore :attr:`size_average`. Default: ``True``

    Examples::

        >>> input = torch.randn((3, 2), requires_grad=True)
        >>> target = torch.rand((3, 2), requires_grad=False)
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """
    if target.size() != input.size():
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))
    if weight is not None:
        # Broadcast the weight to the target's shape before the C kernel runs.
        broadcast_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(broadcast_size)
    return torch._C._nn.binary_cross_entropy(input, target, weight, size_average, reduce)
def binary_cross_entropy_with_logits(input, target, weight=None, size_average=True, reduce=True):
    r"""Measure Binary Cross Entropy between target and output logits.

    See :class:`~torch.nn.BCEWithLogitsLoss` for details.

    Args:
        input: Tensor of arbitrary shape (raw logits)
        target: Tensor of the same shape as input
        weight (Tensor, optional): manual rescaling weight applied elementwise
        size_average (bool, optional): average (``True``) or sum (``False``)
            the losses over the minibatch. Default: ``True``
        reduce (bool, optional): when ``False``, return a per-element loss and
            ignore :attr:`size_average`. Default: ``True``

    Examples::

        >>> input = torch.randn(3, requires_grad=True)
        >>> target = torch.empty(3).random_(2)
        >>> loss = F.binary_cross_entropy_with_logits(input, target)
        >>> loss.backward()
    """
    if target.size() != input.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
    # Numerically stable formulation: shift by max(-x, 0) so that neither
    # exponential below can overflow.
    shift = (-input).clamp(min=0)
    loss = input - input * target + shift
    loss = loss + ((-shift).exp() + (-input - shift).exp()).log()
    if weight is not None:
        loss = loss * weight
    if not reduce:
        return loss
    return loss.mean() if size_average else loss.sum()
def _pointwise_loss(lambd, lambd_optimized, input, target, size_average=True, reduce=True):
if target.requires_grad:
d = lambd(input, target)
if not reduce:
return d
return torch.mean(d) if size_average else torch.sum(d)
else:
return lambd_optimized(input, target, size_average, reduce)
# `smooth_l1_loss` is implemented in C (torch._C._nn); `_add_docstr` attaches
# the Python-facing documentation to that bound function.
smooth_l1_loss = _add_docstr(torch._C._nn.smooth_l1_loss, r"""
smooth_l1_loss(input, target, size_average=True, reduce=True) -> Tensor
Function that uses a squared term if the absolute
element-wise error falls below 1 and an L1 term otherwise.
See :class:`~torch.nn.SmoothL1Loss` for details.
""")
def l1_loss(input, target, size_average=True, reduce=True):
    r"""l1_loss(input, target, size_average=True, reduce=True) -> Tensor

    Mean element-wise absolute value difference.

    See :class:`~torch.nn.L1Loss` for details.
    """
    def abs_error(a, b):
        # Autograd-friendly fallback used when `target` requires grad.
        return torch.abs(a - b)
    return _pointwise_loss(abs_error, torch._C._nn.l1_loss,
                           input, target, size_average, reduce)
def mse_loss(input, target, size_average=True, reduce=True):
    r"""mse_loss(input, target, size_average=True, reduce=True) -> Tensor

    Element-wise mean squared error.

    See :class:`~torch.nn.MSELoss` for details.
    """
    def squared_error(a, b):
        # Autograd-friendly fallback used when `target` requires grad.
        return (a - b) ** 2
    return _pointwise_loss(squared_error, torch._C._nn.mse_loss,
                           input, target, size_average, reduce)
def margin_ranking_loss(input1, input2, target, margin=0, size_average=True, reduce=True):
    r"""margin_ranking_loss(input1, input2, target, margin=0, size_average=True, reduce=True) -> Tensor

    See :class:`~torch.nn.MarginRankingLoss` for details.
    """
    # The ATen kernel does not accept 0-dim tensors; fail loudly instead.
    if min(input1.dim(), input2.dim(), target.dim()) == 0:
        raise RuntimeError(("margin_ranking_loss does not support scalars, got sizes: "
                            "input1: {}, input2: {}, target: {} ".format(input1.size(), input2.size(), target.size())))
    return torch.margin_ranking_loss(input1, input2, target, margin, size_average, reduce)
def hinge_embedding_loss(input, target, margin=1.0, size_average=True, reduce=True):
    r"""hinge_embedding_loss(input, target, margin=1.0, size_average=True, reduce=True) -> Tensor

    See :class:`~torch.nn.HingeEmbeddingLoss` for details.
    """
    # Thin wrapper: the whole computation lives in the ATen kernel.
    return torch.hinge_embedding_loss(
        input, target, margin, size_average, reduce)
# `multilabel_margin_loss` is implemented in C (torch._C._nn); `_add_docstr`
# attaches the Python-facing documentation to that bound function.
multilabel_margin_loss = _add_docstr(torch._C._nn.multilabel_margin_loss, r"""
multilabel_margin_loss(input, target, size_average=True, reduce=True) -> Tensor
See :class:`~torch.nn.MultiLabelMarginLoss` for details.
""")
# `soft_margin_loss` is implemented in C (torch._C._nn); `_add_docstr`
# attaches the Python-facing documentation to that bound function.
soft_margin_loss = _add_docstr(torch._C._nn.soft_margin_loss, r"""
soft_margin_loss(input, target, size_average=True, reduce=True) -> Tensor
See :class:`~torch.nn.SoftMarginLoss` for details.
""")
def multilabel_soft_margin_loss(input, target, weight=None, size_average=True, reduce=True):
    r"""multilabel_soft_margin_loss(input, target, weight=None, size_average=True) -> Tensor

    See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.
    """
    # Squash logits to probabilities, then reuse the BCE implementation.
    probs = torch.sigmoid(input)
    return binary_cross_entropy(probs, target, weight, size_average, reduce)
def cosine_embedding_loss(input1, input2, target, margin=0, size_average=True, reduce=True):
    r"""cosine_embedding_loss(input1, input2, target, margin=0, size_average=True, reduce=True) -> Tensor

    See :class:`~torch.nn.CosineEmbeddingLoss` for details.
    """
    # Thin wrapper: the whole computation lives in the ATen kernel.
    return torch.cosine_embedding_loss(
        input1, input2, target, margin, size_average, reduce)
def multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=True, reduce=True):
    r"""multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=True, reduce=True) -> Tensor

    See :class:`~torch.nn.MultiMarginLoss` for details.
    """
    # The backend only implements the L1 and L2 variants of the margin term.
    if p not in (1, 2):
        raise ValueError('only p == 1 and p == 2 supported')
    if weight is not None and weight.dim() != 1:
        raise ValueError('weight must be one-dimensional')
    return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, size_average, reduce)
def pixel_shuffle(input, upscale_factor):
    r"""Rearranges elements in a tensor of shape :math:`[*, C*r^2, H, W]` to a
    tensor of shape :math:`[*, C, H*r, W*r]`.

    See :class:`~torch.nn.PixelShuffle` for details.

    Args:
        input (Tensor): input of shape :math:`[N, C*r^2, H, W]`
        upscale_factor (int): factor to increase spatial resolution by

    Examples::

        >>> ps = nn.PixelShuffle(3)
        >>> input = torch.empty(1, 9, 4, 4)
        >>> output = ps(input)
        >>> print(output.size())
        torch.Size([1, 1, 12, 12])
    """
    batch_size, channels, in_height, in_width = input.size()
    # Every output channel is assembled from upscale_factor**2 input channels.
    channels //= upscale_factor ** 2
    out_height = in_height * upscale_factor
    out_width = in_width * upscale_factor
    input_view = input.contiguous().view(
        batch_size, channels, upscale_factor, upscale_factor,
        in_height, in_width)
    # Interleave the sub-channel grid with the spatial dims so each (H, r) and
    # (W, r) pair becomes an upscaled row/column of the output.
    shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous()
    return shuffle_out.view(batch_size, channels, out_height, out_width)
def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    r"""Upsamples the input to either the given :attr:`size` or the given
    :attr:`scale_factor`

    The algorithm used for upsampling is determined by :attr:`mode`.

    Currently temporal, spatial and volumetric upsampling are supported, i.e.
    expected inputs are 3-D, 4-D or 5-D in shape.

    The input dimensions are interpreted in the form:
    `mini-batch x channels x [optional depth] x [optional height] x width`.

    The modes available for upsampling are: `nearest`, `linear` (3D-only),
    `bilinear` (4D-only), `trilinear` (5D-only)

    Args:
        input (Tensor): the input tensor
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
            output spatial size.
        scale_factor (int): multiplier for spatial size. Has to be an integer.
        mode (string): algorithm used for upsampling:
            'nearest' | 'linear' | 'bilinear' | 'trilinear'. Default: 'nearest'
        align_corners (bool, optional): if True, the corner pixels of the input
            and output tensors are aligned, and thus preserving the values at
            those pixels. This only has effect when :attr:`mode` is `linear`,
            `bilinear`, or `trilinear`. Default: False

    .. warning::
        With ``align_corners = True``, the linearly interpolating modes
        (`linear`, `bilinear`, and `trilinear`) don't proportionally align the
        output and input pixels, and thus the output values can depend on the
        input size. This was the default behavior for these modes up to version
        0.3.1. Since then, the default behavior is ``align_corners = False``.
        See :class:`~torch.nn.Upsample` for concrete examples on how this
        affects the outputs.
    """
    from numbers import Integral
    from .modules.utils import _ntuple

    def _check_size_scale_factor():
        # Exactly one of `size` / `scale_factor` must be supplied.
        if size is None and scale_factor is None:
            raise ValueError('either size or scale_factor should be defined')
        if size is not None and scale_factor is not None:
            raise ValueError('only one of size or scale_factor should be defined')
        if scale_factor is not None and not isinstance(scale_factor, (Integral, tuple)):
            raise ValueError('scale_factor must be of integer type or a tuple of integer types')

    def _scale_factor(dim):
        # Resolve an integer scale factor for the nearest-neighbor kernels:
        # either given directly, or derived from `size`, which must then be an
        # exact integer multiple of the input size with a uniform ratio.
        _check_size_scale_factor()
        if scale_factor is not None and not isinstance(scale_factor, Integral):
            raise ValueError('scale_factor must be a single Integer value for nearest neighbor sampling')
        if scale_factor is not None:
            return scale_factor
        sizes = _ntuple(dim)(size)
        computed_scale_factor = sizes[0] // input.size(2)
        for d in range(dim):
            if sizes[d] % input.size(d + 2) != 0:
                raise RuntimeError("output size specified in UpsamplingNearest "
                                   "({}) has to be divisible by the input size, but got: "
                                   "{}".format('x'.join(map(str, sizes)),
                                               'x'.join(map(str, input.size()))))
            if sizes[d] // input.size(d + 2) != computed_scale_factor:
                raise RuntimeError("input aspect ratio doesn't match the output ratio")
        return computed_scale_factor

    def _output_size(dim):
        # Resolve the explicit output size for the interpolating kernels.
        _check_size_scale_factor()
        if size is not None:
            return size
        scale_factors = _ntuple(dim)(scale_factor)
        return [input.size(i + 2) * scale_factors[i] for i in range(dim)]

    if mode == 'nearest':
        if align_corners is not None:
            raise ValueError("align_corners option can only be set with the "
                             "interpolating modes: linear | bilinear | trilinear")
    else:
        if align_corners is None:
            # Behavior changed in 0.4.0; warn once, then use the new default.
            warnings.warn("Default upsampling behavior when mode={} is changed "
                          "to align_corners=False since 0.4.0. Please specify "
                          "align_corners=True if the old behavior is desired. "
                          "See the documentation of nn.Upsample for details.".format(mode))
            align_corners = False
    # Dispatch on (input dimensionality, mode); mismatched combinations raise
    # with a message naming the dimensionality the mode actually needs.
    if input.dim() == 3 and mode == 'nearest':
        return torch._C._nn.upsample_nearest1d(input, _scale_factor(1))
    elif input.dim() == 4 and mode == 'nearest':
        return torch._C._nn.upsample_nearest2d(input, _scale_factor(2))
    elif input.dim() == 5 and mode == 'nearest':
        return torch._C._nn.upsample_nearest3d(input, _scale_factor(3))
    elif input.dim() == 3 and mode == 'linear':
        return torch._C._nn.upsample_linear1d(input, _output_size(1), align_corners)
    elif input.dim() == 3 and mode == 'bilinear':
        raise NotImplementedError("Got 3D input, but bilinear mode needs 4D input")
    elif input.dim() == 3 and mode == 'trilinear':
        raise NotImplementedError("Got 3D input, but trilinear mode needs 5D input")
    elif input.dim() == 4 and mode == 'linear':
        raise NotImplementedError("Got 4D input, but linear mode needs 3D input")
    elif input.dim() == 4 and mode == 'bilinear':
        return torch._C._nn.upsample_bilinear2d(input, _output_size(2), align_corners)
    elif input.dim() == 4 and mode == 'trilinear':
        raise NotImplementedError("Got 4D input, but trilinear mode needs 5D input")
    elif input.dim() == 5 and mode == 'linear':
        raise NotImplementedError("Got 5D input, but linear mode needs 3D input")
    elif input.dim() == 5 and mode == 'bilinear':
        raise NotImplementedError("Got 5D input, but bilinear mode needs 4D input")
    elif input.dim() == 5 and mode == 'trilinear':
        return torch._C._nn.upsample_trilinear3d(input, _output_size(3), align_corners)
    else:
        raise NotImplementedError("Input Error: Only 3D, 4D and 5D input Tensors supported"
                                  " (got {}D) for the modes: nearest | linear | bilinear | trilinear"
                                  " (got {})".format(input.dim(), mode))
def upsample_nearest(input, size=None, scale_factor=None):
    r"""Upsamples the input, using nearest neighbours' pixel values.

    .. warning::
        This function is deprecated in favor of :func:`torch.nn.functional.upsample`.
        This is equivalent with ``nn.functional.upsample(..., mode='nearest')``.

    Currently spatial and volumetric upsampling are supported (i.e. expected
    inputs are 4 or 5 dimensional).

    Args:
        input (Tensor): input
        size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
            size.
        scale_factor (int): multiplier for spatial size. Has to be an integer.
    """
    # DeprecationWarning is ignored by default, so emit a plain UserWarning.
    warnings.warn("nn.functional.upsample_nearest is deprecated. Use nn.functional.upsample instead.")
    return upsample(input, size, scale_factor, mode='nearest')
def upsample_bilinear(input, size=None, scale_factor=None):
    r"""Upsamples the input, using bilinear upsampling.

    .. warning::
        This function is deprecated in favor of :func:`torch.nn.functional.upsample`.
        This is equivalent with
        ``nn.functional.upsample(..., mode='bilinear', align_corners=True)``.

    Expected inputs are spatial (4 dimensional). Use `upsample_trilinear` for
    volumetric (5 dimensional) inputs.

    Args:
        input (Tensor): input
        size (int or Tuple[int, int]): output spatial size.
        scale_factor (int or Tuple[int, int]): multiplier for spatial size
    """
    # DeprecationWarning is ignored by default, so emit a plain UserWarning.
    warnings.warn("nn.functional.upsample_bilinear is deprecated. Use nn.functional.upsample instead.")
    # align_corners=True preserves the pre-0.4.0 behavior of this helper.
    return upsample(input, size, scale_factor, mode='bilinear', align_corners=True)
def grid_sample(input, grid, mode='bilinear', padding_mode='zeros'):
    r"""Given an :attr:`input` and a flow-field :attr:`grid`, computes the
    `output` using input pixel locations from the grid.

    Uses bilinear interpolation to sample the input pixels.

    Currently, only spatial (4 dimensional) and volumetric (5 dimensional)
    inputs are supported.

    For each output location, :attr:`grid` has `x`, `y`
    input pixel locations which are used to compute output.
    In the case of 5D inputs, :attr:`grid` has `x`, `y`, `z` pixel locations.

    .. Note::
        To avoid confusion in notation, let's note that `x` corresponds to the `width` dimension `IW`,
        `y` corresponds to the height dimension `IH` and `z` corresponds to the `depth` dimension `ID`.

    :attr:`grid` has values in the range of `[-1, 1]`. This is because the
    pixel locations are normalized by the input height and width.

    For example, values: x: -1, y: -1 is the left-top pixel of the input, and
    values: x: 1, y: 1 is the right-bottom pixel of the input.

    If :attr:`grid` has values outside the range of `[-1, 1]`, those locations
    are handled as defined by `padding_mode`. Options are `zeros` or `border`,
    defining those locations to use 0 or image border values as contribution
    to the bilinear interpolation.

    .. Note:: This function is used in building Spatial Transformer Networks

    Args:
        input (Tensor): input batch (N x C x IH x IW) or (N x C x ID x IH x IW)
        grid (Tensor): flow-field of size (N x OH x OW x 2) or (N x OD x OH x OW x 3)
        mode (str): interpolation mode. NOTE(review): this argument is accepted
            but never forwarded to the backend call below, so only the default
            bilinear behavior is available — confirm before relying on it.
        padding_mode (str): padding mode for outside grid values
            'zeros' | 'border'. Default: 'zeros'

    Returns:
        output (Tensor): output Tensor
    """
    # `mode` is not passed through: the backend sampler receives only the
    # input, the grid and the padding mode (see docstring note).
    return vision.grid_sampler(input, grid, padding_mode)
def affine_grid(theta, size):
    r"""Generate a 2d flow field from a batch of affine matrices :attr:`theta`.

    Generally used together with :func:`grid_sample` to implement Spatial
    Transformer Networks.

    Args:
        theta (Tensor): input batch of affine matrices
            (:math:`N \times 2 \times 3`)
        size (torch.Size): the target output image size
            (:math:`N \times C \times H \times W`),
            e.g. ``torch.Size((32, 3, 24, 24))``

    Returns:
        output (Tensor): output Tensor of size
            (:math:`N \times H \times W \times 2`)
    """
    # The grid generation itself is handled by the vision backend.
    grid = vision.affine_grid_generator(theta, size)
    return grid
def pad(input, pad, mode='constant', value=0):
    r"""Pads tensor.

    `Nd` constant padding: the number of dimensions to pad is
    :math:`\left\lfloor\frac{len(padding)}{2}\right\rfloor`; padding starts at
    the last dimension and moves forward.

    `1D`, `2D` and `3D` "reflect" / "replicate" padding expect, respectively,
    a 3D input with `(padLeft, padRight)`, a 4D input with
    `(padLeft, padRight, padTop, padBottom)`, and a 5D input with
    `(padLeft, padRight, padTop, padBottom, padFront, padBack)` (no "reflect"
    implementation for 3D).

    See :class:`torch.nn.ConstantPad2d`, :class:`torch.nn.ReflectionPad2d`, and
    :class:`torch.nn.ReplicationPad2d` for concrete examples of how each
    padding mode works.

    Args:
        input (Tensor): `Nd` tensor
        pad (tuple): m-elem tuple, where :math:`\frac{m}{2} \leq` input
            dimensions and :math:`m` is even.
        mode: 'constant', 'reflect' or 'replicate'. Default: 'constant'
        value: fill value for 'constant' padding. Default: 0

    Examples::

        >>> t4d = torch.empty(3, 3, 4, 2)
        >>> out = F.pad(t4d, (1, 1), "constant", 0)  # pad last dim by 1 each side
        >>> print(out.data.size())
        torch.Size([3, 3, 4, 4])
    """
    assert len(pad) % 2 == 0, 'Padding length must be divisible by 2'
    assert len(pad) // 2 <= input.dim(), 'Padding length too large'
    if mode == 'constant':
        return ConstantPadNd.apply(input, pad, value)
    # Non-constant modes do not accept a fill value.
    assert value == 0, 'Padding mode "{}"" doesn\'t take in value argument'.format(mode)
    ndim = input.dim()
    if ndim == 3:
        assert len(pad) == 2, '3D tensors expect 2 values for padding'
        if mode == 'reflect':
            return torch._C._nn.reflection_pad1d(input, pad)
        if mode == 'replicate':
            return torch._C._nn.replication_pad1d(input, pad)
    elif ndim == 4:
        assert len(pad) == 4, '4D tensors expect 4 values for padding'
        if mode == 'reflect':
            return torch._C._nn.reflection_pad2d(input, pad)
        if mode == 'replicate':
            return torch._C._nn.replication_pad2d(input, pad)
    elif ndim == 5:
        assert len(pad) == 6, '5D tensors expect 6 values for padding'
        if mode == 'reflect':
            # 3D reflection padding has no backend implementation.
            raise NotImplementedError
        if mode == 'replicate':
            return torch._C._nn.replication_pad3d(input, pad)
    else:
        raise NotImplementedError("Only 3D, 4D, 5D padding with non-constant padding are supported for now")
# distance
def pairwise_distance(x1, x2, p=2, eps=1e-6, keepdim=False):
    r"""Compute the batchwise p-norm distance between ``x1`` and ``x2``.

    See :class:`torch.nn.PairwiseDistance` for details.
    """
    # Thin wrapper: the whole computation lives in the ATen kernel.
    return torch.pairwise_distance(
        x1, x2, p, eps, keepdim)
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Return the cosine similarity between ``x1`` and ``x2`` along ``dim``.

    .. math ::
        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}

    Args:
        x1 (Tensor): First input.
        x2 (Tensor): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero.
            Default: 1e-8

    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.

    Example::

        >>> input1 = torch.randn(100, 128)
        >>> input2 = torch.randn(100, 128)
        >>> output = F.cosine_similarity(input1, input2)
    """
    dot = (x1 * x2).sum(dim)
    norm1 = x1.norm(2, dim)
    norm2 = x2.norm(2, dim)
    # Clamp the denominator so zero vectors cannot divide by zero.
    return dot / (norm1 * norm2).clamp(min=eps)
def triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, size_average=True,
                        reduce=True):
    r"""Compute the triplet margin loss between anchor, positive and negative.

    See :class:`~torch.nn.TripletMarginLoss` for details.
    """
    # Thin wrapper: the whole computation lives in the ATen kernel.
    return torch.triplet_margin_loss(
        anchor, positive, negative, margin, p, eps, swap, size_average, reduce)
def normalize(input, p=2, dim=1, eps=1e-12):
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\lVert v \rVert_p` is not a matrix
    norm. With default arguments, normalizes over the second dimension with
    the Euclidean norm.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
        eps (float): small value to avoid division by zero. Default: 1e-12
    """
    # keepdim=True keeps the reduced axis so the norm can be expanded back.
    denom = input.norm(p, dim, True).clamp(min=eps)
    return input / denom.expand_as(input)
def assert_int_or_pair(arg, arg_name, message):
    # Accept a plain int or any length-2 sequence; anything else trips the
    # assertion with the caller-supplied message formatted with `arg_name`.
    ok = isinstance(arg, int) or len(arg) == 2
    assert ok, message.format(arg_name)
def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
    r"""See :class:`torch.nn.Unfold` for details.

    Extracts sliding local blocks from a 4D (batched image) input tensor.
    Only 4D inputs are supported.
    """
    if input is None or input.dim() != 4:
        raise NotImplementedError("Input Error: Only 4D input Tensors supported (got {}D)".format(input.dim()))
    msg = '{} must be int or 2-tuple for 4D input'
    # Validate every spatial argument before normalizing it to a pair.
    for value, name in ((kernel_size, 'kernel_size'), (dilation, 'dilation'),
                        (padding, 'padding'), (stride, 'stride')):
        assert_int_or_pair(value, name, msg)
    return Im2Col.apply(input, _pair(kernel_size), _pair(dilation),
                        _pair(padding), _pair(stride))
def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
    r"""See :class:`torch.nn.Fold` for details.

    Combines an array of sliding local blocks (a 3D tensor) back into a
    large containing tensor.  Only 3D inputs are supported.
    """
    if input is None or input.dim() != 3:
        raise NotImplementedError("Input Error: Only 3D input Tensors supported (got {}D)".format(input.dim()))
    msg = '{} must be int or 2-tuple for 3D input'
    # Validate every spatial argument before normalizing it to a pair.
    for value, name in ((output_size, 'output_size'), (kernel_size, 'kernel_size'),
                        (dilation, 'dilation'), (padding, 'padding'),
                        (stride, 'stride')):
        assert_int_or_pair(value, name, msg)
    return Col2Im.apply(input, _pair(output_size), _pair(kernel_size),
                        _pair(dilation), _pair(padding), _pair(stride))
import torch
from .modules.utils import _single, _pair, _triple
def _grad_input_padding(grad_output, input_size, stride, padding, kernel_size):
input_size = list(input_size)
k = grad_output.dim() - 2
if len(input_size) == k + 2:
input_size = input_size[-k:]
if len(input_size) != k:
raise ValueError("input_size must have {} elements (got {})"
.format(k + 2, len(input_size)))
def dim_size(d):
return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] +
kernel_size[d])
min_sizes = [dim_size(d) for d in range(k)]
max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]
for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):
if size < min_size or size > max_size:
raise ValueError(
("requested an input grad size of {}, but valid sizes range "
"from {} to {} (for a grad_output of {})").format(
input_size, min_sizes, max_sizes,
grad_output.size()[2:]))
return tuple(input_size[d] - min_sizes[d] for d in range(k))
def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):
    r"""Computes the gradient of conv1d with respect to the input of the convolution.

    Under the hood this is a 1D transposed convolution; the shape of the
    gradient w.r.t. the input must be given explicitly because several input
    sizes can map to the same output size.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weight tensor (out_channels x in_channels/groups x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias: optional bias tensor (out_channels). Default: None

    Examples::

        >>> input = torch.randn(1,1,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1, requires_grad=True)
        >>> output = F.conv1d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> F.grad.conv1d_input(input.shape, weight, grad_output)
    """
    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)
    kernel_size = [weight.shape[2]]
    if input_size is None:
        raise ValueError("grad.conv1d_input requires specifying an input_size")
    # Extra padding needed so the transposed conv hits input_size exactly.
    out_padding = _grad_input_padding(grad_output, input_size, stride,
                                      padding, kernel_size)
    return torch.conv_transpose1d(grad_output, weight, bias, stride, padding,
                                  out_padding, groups, dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):
    r"""Computes the gradient of conv1d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias: optional bias tensor (out_channels). Default: None

    Examples::

        >>> input = torch.randn(1,1,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1, requires_grad=True)
        >>> output = F.conv1d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> F.grad.conv1d_weight(input, weight.shape, grad_output)
    """
    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)
    batch = input.shape[0]
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    # Replicate the output gradient across the grouped input channels and
    # fold (batch, channel) into the filter dimension so that one grouped
    # convolution produces every per-channel correlation at once.
    grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])
    input = input.contiguous().view(1, input.shape[0] * input.shape[1],
                                    input.shape[2])
    # Note stride and dilation swap roles: correlating the input with the
    # output gradient at the output's spacing yields the weight gradient.
    grad_weight = torch.conv1d(input, grad_output, bias, dilation, padding,
                               stride, in_channels * batch)
    grad_weight = grad_weight.contiguous().view(
        batch, grad_weight.shape[1] // batch, grad_weight.shape[2])
    grad_weight = grad_weight.sum(dim=0)
    grad_weight = grad_weight.view(in_channels // groups, out_channels,
                                   grad_weight.shape[1])
    return grad_weight.transpose(0, 1).narrow(2, 0, weight_size[2])
def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):
    r"""Computes the gradient of conv2d with respect to the input of the convolution.

    Under the hood this is a 2D transposed convolution; the shape of the
    gradient w.r.t. the input must be given explicitly because several input
    sizes can map to the same output size.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weight tensor (out_channels x in_channels/groups x kH x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias: optional bias tensor (out_channels). Default: None

    Examples::

        >>> input = torch.randn(1,1,3,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1,2, requires_grad=True)
        >>> output = F.conv2d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> F.grad.conv2d_input(input.shape, weight, grad_output)
    """
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)
    kernel_size = (weight.shape[2], weight.shape[3])
    if input_size is None:
        raise ValueError("grad.conv2d_input requires specifying an input_size")
    # Extra padding needed so the transposed conv hits input_size exactly.
    out_padding = _grad_input_padding(grad_output, input_size, stride,
                                      padding, kernel_size)
    return torch.conv_transpose2d(grad_output, weight, bias, stride, padding,
                                  out_padding, groups, dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):
    r"""Computes the gradient of conv2d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iH x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias: optional bias tensor (out_channels). Default: None

    Examples::

        >>> input = torch.randn(1,1,3,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1,2, requires_grad=True)
        >>> output = F.conv2d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> F.grad.conv2d_weight(input, weight.shape, grad_output)
    """
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)
    batch = input.shape[0]
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    # Replicate the output gradient across the grouped input channels and
    # fold (batch, channel) into the filter dimension so that one grouped
    # convolution produces every per-channel correlation at once.
    grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,
                                                  1)
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
        grad_output.shape[3])
    input = input.contiguous().view(1, input.shape[0] * input.shape[1],
                                    input.shape[2], input.shape[3])
    # Note stride and dilation swap roles: correlating the input with the
    # output gradient at the output's spacing yields the weight gradient.
    grad_weight = torch.conv2d(input, grad_output, bias, dilation, padding,
                               stride, in_channels * batch)
    grad_weight = grad_weight.contiguous().view(
        batch, grad_weight.shape[1] // batch, grad_weight.shape[2],
        grad_weight.shape[3])
    grad_weight = grad_weight.sum(dim=0)
    grad_weight = grad_weight.view(in_channels // groups, out_channels,
                                   grad_weight.shape[1], grad_weight.shape[2])
    return grad_weight.transpose(0, 1).narrow(
        2, 0, weight_size[2]).narrow(3, 0, weight_size[3])
def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):
    r"""Computes the gradient of conv3d with respect to the input of the convolution.

    Under the hood this is a 3D transposed convolution; the shape of the
    gradient w.r.t. the input must be given explicitly because several input
    sizes can map to the same output size.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias: optional bias tensor (out_channels). Default: None

    Examples::

        >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
        >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
        >>> output = F.conv3d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> F.grad.conv3d_input(input.shape, weight, grad_output)
    """
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)
    kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])
    if input_size is None:
        raise ValueError("grad.conv3d_input requires specifying an input_size")
    # Extra padding needed so the transposed conv hits input_size exactly.
    out_padding = _grad_input_padding(grad_output, input_size, stride,
                                      padding, kernel_size)
    return torch.conv_transpose3d(grad_output, weight, bias, stride, padding,
                                  out_padding, groups, dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):
    r"""Computes the gradient of conv3d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias: optional bias tensor (out_channels). Default: None

    Examples::

        >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
        >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
        >>> output = F.conv3d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> F.grad.conv3d_weight(input, weight.shape, grad_output)
    """
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)
    batch = input.shape[0]
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    # Replicate the output gradient across the grouped input channels and
    # fold (batch, channel) into the filter dimension so that one grouped
    # convolution produces every per-channel correlation at once.
    grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
        grad_output.shape[3], grad_output.shape[4])
    input = input.contiguous().view(1, input.shape[0] * input.shape[1],
                                    input.shape[2], input.shape[3],
                                    input.shape[4])
    # Note stride and dilation swap roles: correlating the input with the
    # output gradient at the output's spacing yields the weight gradient.
    grad_weight = torch.conv3d(input, grad_output, bias, dilation, padding,
                               stride, in_channels * batch)
    grad_weight = grad_weight.contiguous().view(
        batch, grad_weight.shape[1] // batch, grad_weight.shape[2],
        grad_weight.shape[3], grad_weight.shape[4])
    grad_weight = grad_weight.sum(dim=0)
    grad_weight = grad_weight.view(in_channels // groups, out_channels,
                                   grad_weight.shape[1], grad_weight.shape[2],
                                   grad_weight.shape[3])
    return grad_weight.transpose(0, 1).narrow(2, 0, weight_size[2]).narrow(
        3, 0, weight_size[3]).narrow(4, 0, weight_size[4])
import math
import torch
from torch.nn.parameter import Parameter
from .. import functional as F
from .module import Module
from .utils import _single, _pair, _triple
class _ConvNd(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
class Conv1d(_ConvNd):
    r"""Applies a 1D convolution over an input signal composed of several
    input planes.

    For an input of shape :math:`(N, C_{in}, L)` the output
    :math:`(N, C_{out}, L_{out})` is a sum of valid cross-correlations
    between each output channel's filters and the input channels, plus an
    optional bias:

    .. math::
        \text{out}(N_i, C_{out_j}) = \text{bias}(C_{out_j}) +
        \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{out_j}, k) \star \text{input}(N_i, k)

    * :attr:`stride` controls the stride of the cross-correlation.
    * :attr:`padding` controls the implicit zero-padding added to both
      sides of the input.
    * :attr:`dilation` controls the spacing between kernel points.
    * :attr:`groups` splits the channels into independently-convolved
      groups; ``in_channels`` and ``out_channels`` must both be divisible
      by it.  With ``groups == in_channels`` each input channel gets its
      own set of filters (depthwise convolution).

    Note that for a valid (not full) cross-correlation, trailing input
    columns may be unused unless suitable padding is added.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides
            of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    Shape:
        - Input: :math:`(N, C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` where

          .. math::
              L_{out} = \left\lfloor\frac{L_{in} + 2 * \text{padding} - \text{dilation}
                        * (\text{kernel_size} - 1) - 1}{\text{stride}} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (out_channels, in_channels, kernel_size)
        bias (Tensor): the learnable bias of the module of shape
            (out_channels)

    Examples::

        >>> m = nn.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # Normalize every spatial argument to a 1-element tuple before
        # delegating to the shared _ConvNd constructor.
        super(Conv1d, self).__init__(
            in_channels, out_channels, _single(kernel_size), _single(stride),
            _single(padding), _single(dilation), False, _single(0),
            groups, bias)

    def forward(self, input):
        return F.conv1d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class Conv2d(_ConvNd):
    r"""Applies a 2D convolution over an input signal composed of several
    input planes.

    For an input of shape :math:`(N, C_{in}, H, W)` the output
    :math:`(N, C_{out}, H_{out}, W_{out})` is a sum of valid 2D
    cross-correlations between each output channel's filters and the input
    channels, plus an optional bias:

    .. math::
        \text{out}(N_i, C_{out_j}) = \text{bias}(C_{out_j}) +
        \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{out_j}, k) \star \text{input}(N_i, k)

    * :attr:`stride` controls the stride of the cross-correlation.
    * :attr:`padding` controls the implicit zero-padding added on each side
      of each dimension.
    * :attr:`dilation` controls the spacing between kernel points.
    * :attr:`groups` splits the channels into independently-convolved
      groups; ``in_channels`` and ``out_channels`` must both be divisible
      by it.  With ``groups == in_channels`` each input channel gets its
      own set of filters (depthwise convolution).

    :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and
    :attr:`dilation` may each be a single ``int`` (used for both height and
    width) or a tuple of two ints (height, width).

    Note that for a valid (not full) cross-correlation, trailing input
    columns may be unused unless suitable padding is added.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides
            of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding}[0] - \text{dilation}[0]
                        * (\text{kernel_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

              W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding}[1] - \text{dilation}[1]
                        * (\text{kernel_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (out_channels, in_channels, kernel_size[0], kernel_size[1])
        bias (Tensor): the learnable bias of the module of shape
            (out_channels)

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # Normalize every spatial argument to a 2-element tuple before
        # delegating to the shared _ConvNd constructor.
        super(Conv2d, self).__init__(
            in_channels, out_channels, _pair(kernel_size), _pair(stride),
            _pair(padding), _pair(dilation), False, _pair(0),
            groups, bias)

    def forward(self, input):
        return F.conv2d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class Conv3d(_ConvNd):
    r"""Applies a 3D convolution over an input signal composed of several
    input planes.

    For an input of shape :math:`(N, C_{in}, D, H, W)` the output
    :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` is a sum of valid 3D
    cross-correlations between each output channel's filters and the input
    channels, plus an optional bias:

    .. math::
        \text{out}(N_i, C_{out_j}) = \text{bias}(C_{out_j}) +
        \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{out_j}, k) \star \text{input}(N_i, k)

    * :attr:`stride` controls the stride of the cross-correlation.
    * :attr:`padding` controls the implicit zero-padding added on each side
      of each dimension.
    * :attr:`dilation` controls the spacing between kernel points.
    * :attr:`groups` splits the channels into independently-convolved
      groups; ``in_channels`` and ``out_channels`` must both be divisible
      by it.  With ``groups == in_channels`` each input channel gets its
      own set of filters (depthwise convolution).

    :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and
    :attr:`dilation` may each be a single ``int`` (used for depth, height
    and width alike) or a tuple of three ints (depth, height, width).

    Note that for a valid (not full) cross-correlation, trailing input
    columns may be unused unless suitable padding is added.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to all three
            sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where

          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 * \text{padding}[0] - \text{dilation}[0]
                        * (\text{kernel_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

              H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding}[1] - \text{dilation}[1]
                        * (\text{kernel_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

              W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding}[2] - \text{dilation}[2]
                        * (\text{kernel_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (out_channels, in_channels, kernel_size[0], kernel_size[1], kernel_size[2])
        bias (Tensor): the learnable bias of the module of shape
            (out_channels)

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        >>> input = torch.randn(20, 16, 10, 50, 100)
        >>> output = m(input)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # Normalize every spatial argument to a 3-element tuple before
        # delegating to the shared _ConvNd constructor.
        super(Conv3d, self).__init__(
            in_channels, out_channels, _triple(kernel_size), _triple(stride),
            _triple(padding), _triple(dilation), False, _triple(0),
            groups, bias)

    def forward(self, input):
        return F.conv3d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class _ConvTransposeMixin(object):
def forward(self, input, output_size=None):
output_padding = self._output_padding(input, output_size)
func = self._backend.ConvNd(
self.stride, self.padding, self.dilation, self.transposed,
output_padding, self.groups)
if self.bias is None:
return func(input, self.weight)
else:
return func(input, self.weight, self.bias)
def _output_padding(self, input, output_size):
if output_size is None:
return self.output_padding
output_size = list(output_size)
k = input.dim() - 2
if len(output_size) == k + 2:
output_size = output_size[-2:]
if len(output_size) != k:
raise ValueError(
"output_size must have {} or {} elements (got {})"
.format(k, k + 2, len(output_size)))
def dim_size(d):
return ((input.size(d + 2) - 1) * self.stride[d] -
2 * self.padding[d] + self.kernel_size[d])
min_sizes = [dim_size(d) for d in range(k)]
max_sizes = [min_sizes[d] + self.stride[d] - 1 for d in range(k)]
for size, min_size, max_size in zip(output_size, min_sizes, max_sizes):
if size < min_size or size > max_size:
raise ValueError((
"requested an output size of {}, but valid sizes range "
"from {} to {} (for an input of {})").format(
output_size, min_sizes, max_sizes, input.size()[2:]))
return tuple([output_size[d] - min_sizes[d] for d in range(k)])
class ConvTranspose1d(_ConvTransposeMixin, _ConvNd):
    r"""Applies a 1D transposed convolution operator over an input image
    composed of several input planes.

    This module can be seen as the gradient of Conv1d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation).

    * :attr:`stride` controls the stride for the cross-correlation.
    * :attr:`padding` controls the amount of implicit zero-paddings on both
      sides for :attr:`padding` number of points.
    * :attr:`output_padding` controls the amount of implicit zero-paddings on
      both sides of the output for :attr:`output_padding` number of points.
    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
    * :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,

        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels,
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\left\lfloor\frac{\text{out_channels}}{\text{in_channels}}\right\rfloor`).

    .. note::
        Depending of the size of your kernel, several (of the last)
        columns of the input might be lost, because it is a valid `cross-correlation`_,
        and not a full `cross-correlation`_.
        It is up to the user to add proper padding.

    .. note::
        The :attr:`padding` argument effectively adds ``kernel_size - 1 - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv1d` and a :class:`~torch.nn.ConvTranspose1d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when :attr:`stride` ``>1``,
        :class:`~torch.nn.Conv1d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``kernel_size - 1 - padding`` zero-padding
            will be added to both sides of the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` where

          .. math::
              L_{out} = (L_{in} - 1) * \text{stride} - 2 * \text{padding} + \text{kernel_size} + \text{output_padding}

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (in_channels, out_channels, kernel_size[0])
        bias (Tensor):   the learnable bias of the module of shape (out_channels)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1):
        # Normalize scalar-or-tuple arguments to 1-tuples.
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        output_padding = _single(output_padding)
        # transposed=True marks this as a transposed convolution.
        super(ConvTranspose1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias)

    def forward(self, input, output_size=None):
        # output_size (optional) is resolved into an output_padding by the mixin.
        output_padding = self._output_padding(input, output_size)
        return F.conv_transpose1d(
            input, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
class ConvTranspose2d(_ConvTransposeMixin, _ConvNd):
    r"""Applies a 2D transposed convolution operator over an input image
    composed of several input planes.

    This module can be seen as the gradient of Conv2d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation).

    * :attr:`stride` controls the stride for the cross-correlation.
    * :attr:`padding` controls the amount of implicit zero-paddings on both
      sides for :attr:`padding` number of points for each dimension.
    * :attr:`output_padding` controls the amount of implicit zero-paddings on
      both sides of the output for :attr:`output_padding` number of points for
      each dimension.
    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
    * :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,

        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels,
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\left\lfloor\frac{\text{out_channels}}{\text{in_channels}}\right\rfloor`).

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
    can either be:

        - a single ``int`` -- in which case the same value is used for the height and width dimensions
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension

    .. note::
        Depending of the size of your kernel, several (of the last)
        columns of the input might be lost, because it is a valid `cross-correlation`_,
        and not a full `cross-correlation`_.
        It is up to the user to add proper padding.

    .. note::
        The :attr:`padding` argument effectively adds ``kernel_size - 1 - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when :attr:`stride` ``>1``,
        :class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``kernel_size - 1 - padding`` zero-padding
            will be added to both sides of each dimension in the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where

          .. math::
              H_{out} = (H_{in} - 1) * \text{stride}[0] - 2 * \text{padding}[0]
                    + \text{kernel_size}[0] + \text{output_padding}[0]

              W_{out} = (W_{in} - 1) * \text{stride}[1] - 2 * \text{padding}[1]
                    + \text{kernel_size}[1] + \text{output_padding}[1]

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (in_channels, out_channels, kernel_size[0], kernel_size[1])
        bias (Tensor):   the learnable bias of the module of shape (out_channels)

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)
        >>> # exact output size can be also specified as an argument
        >>> input = torch.randn(1, 16, 12, 12)
        >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1):
        # Normalize scalar-or-tuple arguments to 2-tuples (height, width).
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        output_padding = _pair(output_padding)
        # transposed=True marks this as a transposed convolution.
        super(ConvTranspose2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias)

    def forward(self, input, output_size=None):
        # output_size (optional) is resolved into an output_padding by the mixin.
        output_padding = self._output_padding(input, output_size)
        return F.conv_transpose2d(
            input, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
class ConvTranspose3d(_ConvTransposeMixin, _ConvNd):
    r"""Applies a 3D transposed convolution operator over an input image composed of several input
    planes.
    The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
    and sums over the outputs from all input feature planes.

    This module can be seen as the gradient of Conv3d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation).

    * :attr:`stride` controls the stride for the cross-correlation.
    * :attr:`padding` controls the amount of implicit zero-paddings on both
      sides for :attr:`padding` number of points for each dimension.
    * :attr:`output_padding` controls the amount of implicit zero-paddings on
      both sides of the output for :attr:`output_padding` number of points for
      each dimension.
    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
    * :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,

        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels,
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\left\lfloor\frac{\text{out_channels}}{\text{in_channels}}\right\rfloor`).

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
    can either be:

        - a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
          the second `int` for the height dimension and the third `int` for the width dimension

    .. note::
        Depending of the size of your kernel, several (of the last)
        columns of the input might be lost, because it is a valid `cross-correlation`_,
        and not a full `cross-correlation`_.
        It is up to the user to add proper padding.

    .. note::
        The :attr:`padding` argument effectively adds ``kernel_size - 1 - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when :attr:`stride` ``>1``,
        :class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``kernel_size - 1 - padding`` zero-padding
            will be added to both sides of each dimension in the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where

          .. math::
              D_{out} = (D_{in} - 1) * \text{stride}[0] - 2 * \text{padding}[0]
                    + \text{kernel_size}[0] + \text{output_padding}[0]

              H_{out} = (H_{in} - 1) * \text{stride}[1] - 2 * \text{padding}[1]
                    + \text{kernel_size}[1] + \text{output_padding}[1]

              W_{out} = (W_{in} - 1) * \text{stride}[2] - 2 * \text{padding}[2]
                    + \text{kernel_size}[2] + \text{output_padding}[2]

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (in_channels, out_channels, kernel_size[0], kernel_size[1], kernel_size[2])
        bias (Tensor):   the learnable bias of the module of shape (out_channels)

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
        >>> input = torch.randn(20, 16, 10, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1):
        # Normalize scalar-or-tuple arguments to 3-tuples (depth, height, width).
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        output_padding = _triple(output_padding)
        # transposed=True marks this as a transposed convolution.
        super(ConvTranspose3d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias)

    def forward(self, input, output_size=None):
        # output_size (optional) is resolved into an output_padding by the mixin.
        output_padding = self._output_padding(input, output_size)
        return F.conv_transpose3d(
            input, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
# TODO: Conv2dLocal
# TODO: Conv2dMap
# TODO: ConvTranspose2dMap | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/conv.py | 0.951363 | 0.417568 | conv.py | pypi |
from .module import Module
from .. import functional as F
class Fold(Module):
    """
    De-interleaves vectors of length :math:`\prod(kernel_size)` from the "channel"
    dimension of the input tensor to generate blocks of size :math:`kernel_size`
    of the output. These blocks populate the "spatial" dimensions [2:]
    of the output via a sliding window with positions determined by the
    padding, stride and dilation values. The "channel" dimension 1 of the output
    is determined by the vectors interleaved position in the "channel" dimension
    of the input.

    Each element of the output batch dimension 0 has :math:`C / \prod(kernel_size)`
    channels (dimension 1) and spatial dimensions [2:] of shape :math:`output_size`.

    | If :attr:`padding` is non-zero, then the input is implicitly
      zero-padded on both sides by :attr:`padding` number of points
    | :attr:`dilation` controls the internal spacing between the kernel points in the output.
      It is harder to describe, but this `link`_ has a nice visualization of what
      dilation does.

    Args:
        output_size (int or tuple): the shape of the spatial dimensions [2:] of the output
        kernel_size (int or tuple): the size of the sliding blocks to convert
            to columns.
        stride (int or tuple): the stride of the sliding blocks in the input
            spatial dimensions. Default: 1
        padding (int or tuple, optional): implicit zero padding to be added on
            both sides of input. Default: 0
        dilation (int or tuple, optional): a parameter that controls the
            stride of elements within the
            neighborhood. Default: 1

    | If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`,
      :attr:`padding` or :attr:`stride` is of length 1 then
      their value will be replicated across all spatial dimensions

    | For the case of two output spatial dimensions this operation is sometimes called col2im

    Shape:
        - Input: :math:`(N, C, L_{in})`
        - Output: :math:`(N * C * \prod(kernel_size), L_{out},)` where
          :math:`L_{out} = floor((L_{in} + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)`

    Examples::

        >>> # output_size (3, 3) kernel_size (2, 2), dilation (1, 1), padding (0, 0), stride (1, 1)
        >>> fold = nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1))
        >>> input = torch.randn(1, 36, 1)
        >>> output = fold(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
        super(Fold, self).__init__()
        # Stored verbatim; normalization to tuples happens inside F.fold.
        self.output_size = output_size
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.padding = padding
        self.stride = stride

    def forward(self, input):
        # Delegate to the functional col2im-style implementation.
        return F.fold(input, self.output_size, self.kernel_size, self.dilation,
                      self.padding, self.stride)

    def extra_repr(self):
        return 'output_size={output_size}, kernel_size={kernel_size}, ' \
            'dilation={dilation}, padding={padding}, stride={stride}'.format(
                **self.__dict__
            )
class Unfold(Module):
    """
    Converts each sliding :math:`kernel_size` block of the "spatial" dimensions [2:]
    of the input tensor into a column of the output. These columns are interleaved
    with the "channel" dimension 1 such that in the output the channel dimension combines
    both the spatial position of the block within the input and the original
    channel position. We denote size of the "batch" dimension 0 as :math:`N`.

    Each element of the output batch dimension 0 has :math:`C * \prod(kernel_size)`
    rows and contains as many columns as there are :math:`kernel_size` neighborhoods
    of the input according to the padding, stride and dilation values.

    | If :attr:`padding` is non-zero, then the input is implicitly
      zero-padded on both sides by :attr:`padding` number of points before reshaping
    | :attr:`dilation` controls the internal spacing between the kernel points.
      It is harder to describe, but this `link`_ has a nice visualization of what
      dilation does.

    Args:
        kernel_size (int or tuple): the size of the sliding blocks to convert
            to columns.
        stride (int or tuple, optional): the stride of the sliding blocks in the input
            spatial dimensions. Default: 1
        padding (int or tuple, optional): implicit zero padding to be added on
            both sides of input. Default: 0
        dilation (int or tuple, optional): a parameter that controls the
            stride of elements within the
            neighborhood. Default: 1

    | If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or :attr:`stride`
      is of length 1 then their value will be replicated across all spatial dimensions

    | For the case of two input spatial dimensions this operation is sometimes called im2col

    Shape:
        - Input: :math:`(N, C, L_{in})`
        - Output: :math:`(N, C * \prod(kernel_size), L_{out},)` where
          :math:`L_{out} = floor((L_{in} + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)`

    Examples::

        >>> # kernel_size (3, 3), dilation (1, 1), padding (0, 0), stride (1, 1)
        >>> unfold = nn.Unfold((3, 3), (1, 1), (0, 0), (1, 1))
        >>> input = torch.randn(2, 4, 3, 3)
        >>> output = unfold(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(self, kernel_size, dilation=1, padding=0, stride=1):
        super(Unfold, self).__init__()
        # Stored verbatim; normalization to tuples happens inside F.unfold.
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.padding = padding
        self.stride = stride

    def forward(self, input):
        # Delegate to the functional im2col-style implementation.
        return F.unfold(input, self.kernel_size, self.dilation,
                        self.padding, self.stride)

    def extra_repr(self):
        return 'kernel_size={kernel_size}, dilation={dilation}, padding={padding},' \
            ' stride={stride}'.format(**self.__dict__)
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
# TODO: grad_output size asserts in THNN
class _ConstantPadNd(Module):
def __init__(self, value):
super(_ConstantPadNd, self).__init__()
self.value = value
def forward(self, input):
return F.pad(input, self.padding, 'constant', self.value)
def extra_repr(self):
return 'padding={}, value={}'.format(self.padding, self.value)
class ConstantPad1d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in both boundaries. If a 2-`tuple`, uses
            (`paddingLeft`, `paddingRight`)

    Shape:
        - Input: :math:`(N, C, W_{in})`
        - Output: :math:`(N, C, W_{out})` where
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> output = m(torch.randn(1, 2, 4))
        >>> # using different paddings on each side
        >>> m = nn.ConstantPad1d((3, 1), 3.5)
    """

    def __init__(self, padding, value):
        # A scalar padding is broadcast to (left, right).
        super(ConstantPad1d, self).__init__(value)
        self.padding = _pair(padding)
class ConstantPad2d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 4-`tuple`, uses
            (`paddingLeft`, `paddingRight`, `paddingTop`, `paddingBottom`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + \textit{paddingTop} + \textit{paddingBottom}`
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> output = m(torch.randn(1, 2, 2))
        >>> # using different paddings on each side
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
    """

    def __init__(self, padding, value):
        # A scalar padding is broadcast to (left, right, top, bottom).
        super(ConstantPad2d, self).__init__(value)
        self.padding = _quadruple(padding)
class ConstantPad3d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 6-`tuple`, uses
            (`paddingLeft`, `paddingRight`, `paddingTop`, `paddingBottom`,
            `paddingFront`, `paddingBack`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where
          :math:`D_{out} = D_{in} + \textit{paddingFront} + \textit{paddingBack}`
          :math:`H_{out} = H_{in} + \textit{paddingTop} + \textit{paddingBottom}`
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ConstantPad3d(3, 3.5)
        >>> output = m(torch.randn(16, 3, 10, 20, 30))
        >>> # using different paddings on each side
        >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
    """

    def __init__(self, padding, value):
        # A scalar padding is broadcast to all six boundaries.
        super(ConstantPad3d, self).__init__(value)
        self.padding = _ntuple(6)(padding)
class _ReflectionPadNd(Module):
def forward(self, input):
return F.pad(input, self.padding, 'reflect')
def extra_repr(self):
return '{}'.format(self.padding)
class ReflectionPad1d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 2-`tuple`, uses
            (`paddingLeft`, `paddingRight`)

    Shape:
        - Input: :math:`(N, C, W_{in})`
        - Output: :math:`(N, C, W_{out})` where
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ReflectionPad1d(2)
        >>> output = m(torch.arange(8).reshape(1, 2, 4))
        >>> # using different paddings on each side
        >>> m = nn.ReflectionPad1d((3, 1))
    """

    def __init__(self, padding):
        # A scalar padding is broadcast to (left, right).
        super(ReflectionPad1d, self).__init__()
        self.padding = _pair(padding)
class ReflectionPad2d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 4-`tuple`, uses
            (`paddingLeft`, `paddingRight`, `paddingTop`, `paddingBottom`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + \textit{paddingTop} + \textit{paddingBottom}`
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ReflectionPad2d(2)
        >>> output = m(torch.arange(9).reshape(1, 1, 3, 3))
        >>> # using different paddings on each side
        >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
    """

    def __init__(self, padding):
        # A scalar padding is broadcast to (left, right, top, bottom).
        super(ReflectionPad2d, self).__init__()
        self.padding = _quadruple(padding)
class _ReplicationPadNd(Module):
def forward(self, input):
return F.pad(input, self.padding, 'replicate')
def extra_repr(self):
return '{}'.format(self.padding)
class ReplicationPad1d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 2-`tuple`, uses
            (`paddingLeft`, `paddingRight`)

    Shape:
        - Input: :math:`(N, C, W_{in})`
        - Output: :math:`(N, C, W_{out})` where
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ReplicationPad1d(2)
        >>> output = m(torch.arange(8).reshape(1, 2, 4))
        >>> # using different paddings on each side
        >>> m = nn.ReplicationPad1d((3, 1))
    """

    def __init__(self, padding):
        # A scalar padding is broadcast to (left, right).
        super(ReplicationPad1d, self).__init__()
        self.padding = _pair(padding)
class ReplicationPad2d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 4-`tuple`, uses
            (`paddingLeft`, `paddingRight`, `paddingTop`, `paddingBottom`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + \textit{paddingTop} + \textit{paddingBottom}`
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ReplicationPad2d(2)
        >>> output = m(torch.arange(9).reshape(1, 1, 3, 3))
        >>> # using different paddings on each side
        >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
    """

    def __init__(self, padding):
        # A scalar padding is broadcast to (left, right, top, bottom).
        super(ReplicationPad2d, self).__init__()
        self.padding = _quadruple(padding)
class ReplicationPad3d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 6-`tuple`, uses
            (`paddingLeft`, `paddingRight`, `paddingTop`, `paddingBottom`,
            `paddingFront`, `paddingBack`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where
          :math:`D_{out} = D_{in} + \textit{paddingFront} + \textit{paddingBack}`
          :math:`H_{out} = H_{in} + \textit{paddingTop} + \textit{paddingBottom}`
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ReplicationPad3d(3)
        >>> output = m(torch.randn(16, 3, 8, 320, 480))
        >>> # using different paddings on each side
        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
    """

    def __init__(self, padding):
        # A scalar padding is broadcast to all six boundaries.
        super(ReplicationPad3d, self).__init__()
        self.padding = _ntuple(6)(padding)
class ZeroPad2d(ConstantPad2d):
    r"""Pads the input tensor boundaries with zero.

    For `N`d-padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in all boundaries. If a 4-`tuple`, uses
            (`paddingLeft`, `paddingRight`, `paddingTop`, `paddingBottom`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = H_{in} + \textit{paddingTop} + \textit{paddingBottom}`
          :math:`W_{out} = W_{in} + \textit{paddingLeft} + \textit{paddingRight}`

    Examples::

        >>> m = nn.ZeroPad2d(2)
        >>> output = m(torch.randn(1, 1, 3, 3))
        >>> # using different paddings on each side
        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
    """

    def __init__(self, padding):
        # Zero padding is constant padding with a fill value of 0.
        super(ZeroPad2d, self).__init__(padding, 0)
import warnings
import torch
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
class Threshold(Module):
r"""Thresholds each element of the input Tensor
Threshold is defined as:
.. math::
y =
\begin{cases}
x, &\text{ if } x > \text{threshold} \\
\text{value}, &\text{ otherwise }
\end{cases}
Args:
threshold: The value to threshold at
value: The value to replace with
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.Threshold(0.1, 20)
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, threshold, value, inplace=False):
super(Threshold, self).__init__()
self.threshold = threshold
self.value = value
self.inplace = inplace
# TODO: check in THNN (if inplace == True, then assert value <= threshold)
def forward(self, input):
return F.threshold(input, self.threshold, self.value, self.inplace)
def extra_repr(self):
inplace_str = ', inplace' if self.inplace else ''
return 'threshold={}, value={}{}'.format(
self.threshold, self.value, inplace_str
)
class ReLU(Threshold):
r"""Applies the rectified linear unit function element-wise
:math:`\text{ReLU}(x)= \max(0, x)`
.. image:: scripts/activation_images/ReLU.png
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.ReLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace=False):
super(ReLU, self).__init__(0, 0, inplace)
def extra_repr(self):
inplace_str = 'inplace' if self.inplace else ''
return inplace_str
class RReLU(Module):
r"""Applies the randomized leaky rectified liner unit function element-wise
described in the paper
`Empirical Evaluation of Rectified Activations in Convolutional Network`_.
The function is defined as:
.. math::
\text{RReLU}(x) = \begin{cases}
x & \text{if } x \geq 0 \\
ax & \text{ otherwise }
\end{cases},
where :math:`a` is randomly sampled from uniform distribution
:math:`\mathcal{U}(\text{lower}, \text{upper})`.
See: https://arxiv.org/pdf/1505.00853.pdf
Args:
lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`
upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.RReLU(0.1, 0.3)
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Empirical Evaluation of Rectified Activations in Convolutional Network`:
https://arxiv.org/abs/1505.00853
"""
def __init__(self, lower=1. / 8, upper=1. / 3, inplace=False):
super(RReLU, self).__init__()
self.lower = lower
self.upper = upper
self.inplace = inplace
def forward(self, input):
return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)
def extra_repr(self):
inplace_str = ', inplace' if self.inplace else ''
return 'lower={}, upper={}{}'.format(self.lower, self.upper, inplace_str)
class Hardtanh(Module):
r"""Applies the HardTanh function element-wise
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
The range of the linear region :math:`[-1, 1]` can be adjusted using
:attr:`min_val` and :attr:`max_val`.
.. image:: scripts/activation_images/Hardtanh.png
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.Hardtanh(-2, 2)
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, min_val=-1, max_val=1, inplace=False, min_value=None, max_value=None):
super(Hardtanh, self).__init__()
if min_value is not None:
warnings.warn("keyword argument min_value is deprecated and renamed to min_val")
min_val = min_value
if max_value is not None:
warnings.warn("keyword argument max_value is deprecated and renamed to max_val")
max_val = max_value
self.min_val = min_val
self.max_val = max_val
self.inplace = inplace
assert self.max_val > self.min_val
def forward(self, input):
return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
def extra_repr(self):
inplace_str = ', inplace' if self.inplace else ''
return 'min_val={}, max_val={}{}'.format(
self.min_val, self.max_val, inplace_str
)
class ReLU6(Hardtanh):
r"""Applies the element-wise function :math:`\text{ReLU6}(x) = \min(\max(0,x), 6)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.ReLU6()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace=False):
super(ReLU6, self).__init__(0, 6, inplace)
def extra_repr(self):
inplace_str = 'inplace' if self.inplace else ''
return inplace_str
class Sigmoid(Module):
r"""Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}`
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Sigmoid.png
Examples::
>>> m = nn.Sigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return torch.sigmoid(input)
class Tanh(Module):
r"""Applies element-wise,
:math:`\text{Tanh}(x) = \tanh(x) = \frac{e^x - e^{-x}} {e^x + e^{-x}}`
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Tanh.png
Examples::
>>> m = nn.Tanh()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return torch.tanh(input)
class ELU(Module):
r"""Applies element-wise,
:math:`\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))`
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/ELU.png
Examples::
>>> m = nn.ELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, alpha=1., inplace=False):
super(ELU, self).__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input):
return F.elu(input, self.alpha, self.inplace)
def extra_repr(self):
inplace_str = ', inplace' if self.inplace else ''
return 'alpha={}{}'.format(self.alpha, inplace_str)
class SELU(Module):
r"""Applies element-wise,
:math:`\text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`,
with :math:`\alpha = 1.6732632423543772848170429916717` and
:math:`\text{scale} = 1.0507009873554804934193349852946`.
.. image:: scripts/activation_images/SELU.png
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.SELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
def __init__(self, inplace=False):
super(SELU, self).__init__()
self.inplace = inplace
def forward(self, input):
return F.selu(input, self.inplace)
def extra_repr(self):
inplace_str = 'inplace' if self.inplace else ''
return inplace_str
class GLU(Module):
r"""Applies the gated linear unit function
:math:`{GLU}(a, b)= a \otimes \sigma(b)` where `a` is the first half of
the input vector and `b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(*, N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(*, N / 2, *)`
Examples::
>>> m = nn.GLU()
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
def __init__(self, dim=-1):
super(GLU, self).__init__()
self.dim = dim
def forward(self, input):
return F.glu(input, self.dim)
def extra_repr(self):
return 'dim={}'.format(self.dim)
class Hardshrink(Module):
r"""Applies the hard shrinkage function element-wise
Hardshrink is defined as:
.. math::
\text{HardShrink}(x) =
\begin{cases}
x, & \text{ if } x > \lambda \\
x, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Hardshrink.png
Examples::
>>> m = nn.Hardshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, lambd=0.5):
super(Hardshrink, self).__init__()
self.lambd = lambd
def forward(self, input):
return F.hardshrink(input, self.lambd)
def extra_repr(self):
return '{}'.format(self.lambd)
class LeakyReLU(Module):
r"""Applies element-wise,
:math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative_slope} * \min(0, x)` or
.. math::
\text{LeakyRELU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/LeakyReLU.png
Examples::
>>> m = nn.LeakyReLU(0.1)
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, negative_slope=1e-2, inplace=False):
super(LeakyReLU, self).__init__()
self.negative_slope = negative_slope
self.inplace = inplace
def forward(self, input):
return F.leaky_relu(input, self.negative_slope, self.inplace)
def extra_repr(self):
inplace_str = ', inplace' if self.inplace else ''
return 'negative_slope={}{}'.format(self.negative_slope, inplace_str)
class LogSigmoid(Module):
r"""Applies element-wise :math:`\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)`
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/LogSigmoid.png
Examples::
>>> m = nn.LogSigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.logsigmoid(input)
class Softplus(Module):
r"""Applies element-wise :math:`\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))`
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
for inputs above a certain value.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Softplus.png
Examples::
>>> m = nn.Softplus()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, beta=1, threshold=20):
super(Softplus, self).__init__()
self.beta = beta
self.threshold = threshold
def forward(self, input):
return F.softplus(input, self.beta, self.threshold)
def extra_repr(self):
return 'beta={}, threshold={}'.format(self.beta, self.threshold)
class Softshrink(Module):
r"""Applies the soft shrinkage function elementwise
SoftShrinkage function is defined as:
.. math::
\text{SoftShrinkage}(x) =
\begin{cases}
x - \lambda, & \text{ if } x > \lambda \\
x + \lambda, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` value for the Softshrink formulation. Default: 0.5
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Softshrink.png
Examples::
>>> m = nn.Softshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, lambd=0.5):
super(Softshrink, self).__init__()
self.lambd = lambd
def forward(self, input):
return F.softshrink(input, self.lambd)
def extra_repr(self):
return str(self.lambd)
class PReLU(Module):
r"""Applies element-wise the function
:math:`\text{PReLU}(x) = \max(0,x) + a * \min(0,x)` or
.. math::
\text{PReLU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
ax, & \text{ otherwise }
\end{cases}
Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single
parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,
a separate :math:`a` is used for each input channel.
.. note::
weight decay should not be used when learning :math:`a` for good performance.
Args:
num_parameters: number of :math:`a` to learn. Default: 1
init: the initial value of :math:`a`. Default: 0.25
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/PReLU.png
Examples::
>>> m = nn.PReLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, num_parameters=1, init=0.25):
self.num_parameters = num_parameters
super(PReLU, self).__init__()
self.weight = Parameter(torch.Tensor(num_parameters).fill_(init))
def forward(self, input):
return F.prelu(input, self.weight)
def extra_repr(self):
return 'num_parameters={}'.format(self.num_parameters)
class Softsign(Module):
r"""Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{ 1 + |x|}`
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Softsign.png
Examples::
>>> m = nn.Softsign()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.softsign(input)
class Tanhshrink(Module):
r"""Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)`
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Tanhshrink.png
Examples::
>>> m = nn.Tanhshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.tanhshrink(input)
class Softmin(Module):
r"""Applies the Softmin function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range `(0, 1)` and sum to 1
:math:`\text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}`
Shape:
- Input: any shape
- Output: same as input
Arguments:
dim (int): A dimension along which Softmin will be computed (so every slice
along dim will sum to 1).
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1]
Examples::
>>> m = nn.Softmin()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
def __init__(self, dim=None):
super(Softmin, self).__init__()
self.dim = dim
def forward(self, input):
return F.softmin(input, self.dim, _stacklevel=5)
class Softmax(Module):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range (0,1) and sum to 1
Softmax is defined as
:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
Shape:
- Input: any shape
- Output: same as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
Examples::
>>> m = nn.Softmax()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
def __init__(self, dim=None):
super(Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return F.softmax(input, self.dim, _stacklevel=5)
class Softmax2d(Module):
r"""Applies SoftMax over features to each spatial location.
When given an image of ``Channels x Height x Width``, it will
apply `Softmax` to each location :math:`(Channels, h_i, w_j)`
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Examples::
>>> m = nn.Softmax2d()
>>> # you softmax over the 2nd dimension
>>> input = torch.randn(2, 3, 12, 13)
>>> output = m(input)
"""
def forward(self, input):
assert input.dim() == 4, 'Softmax2d requires a 4D tensor as input'
return F.softmax(input, 1, _stacklevel=5)
class LogSoftmax(Module):
r"""Applies the `Log(Softmax(x))` function to an n-dimensional input Tensor.
The LogSoftmax formulation can be simplified as
:math:`\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)`
Shape:
- Input: any shape
- Output: same as input
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
Examples::
>>> m = nn.LogSoftmax()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
def __init__(self, dim=None):
super(LogSoftmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return F.log_softmax(input, self.dim, _stacklevel=5) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/activation.py | 0.921759 | 0.674225 | activation.py | pypi |
from .module import Module
from .. import functional as F
class _DropoutNd(Module):
def __init__(self, p=0.5, inplace=False):
super(_DropoutNd, self).__init__()
if p < 0 or p > 1:
raise ValueError("dropout probability has to be between 0 and 1, "
"but got {}".format(p))
self.p = p
self.inplace = inplace
def extra_repr(self):
inplace_str = ', inplace' if self.inplace else ''
return 'p={}{}'.format(self.p, inplace_str)
class Dropout(_DropoutNd):
r"""During training, randomly zeroes some of the elements of the input
tensor with probability :attr:`p` using samples from a Bernoulli
distribution. The elements to zero are randomized on every forward call.
This has proven to be an effective technique for regularization and
preventing the co-adaptation of neurons as described in the paper
`Improving neural networks by preventing co-adaptation of feature
detectors`_ .
Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}` during
training. This means that during evaluation the module simply computes an
identity function.
Args:
p: probability of an element to be zeroed. Default: 0.5
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
Shape:
- Input: `Any`. Input can be of any shape
- Output: `Same`. Output is of the same shape as input
Examples::
>>> m = nn.Dropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Improving neural networks by preventing co-adaptation of feature
detectors: https://arxiv.org/abs/1207.0580
"""
def forward(self, input):
return F.dropout(input, self.p, self.training, self.inplace)
class Dropout2d(_DropoutNd):
r"""Randomly zeroes whole channels of the input tensor.
The channels to zero-out are randomized on every forward call.
Usually the input comes from :class:`nn.Conv2d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout2d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zero-ed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> m = nn.Dropout2d(p=0.2)
>>> input = torch.randn(20, 16, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
http://arxiv.org/abs/1411.4280
"""
def forward(self, input):
return F.dropout2d(input, self.p, self.training, self.inplace)
class Dropout3d(_DropoutNd):
r"""Randomly zeroes whole channels of the input tensor.
The channels to zero are randomized on every forward call.
Usually the input comes from :class:`nn.Conv3d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout3d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> m = nn.Dropout3d(p=0.2)
>>> input = torch.randn(20, 16, 4, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
http://arxiv.org/abs/1411.4280
"""
def forward(self, input):
return F.dropout3d(input, self.p, self.training, self.inplace)
class AlphaDropout(Module):
r"""Applies Alpha Dropout over the input.
Alpha Dropout is a type of Dropout that maintains the self-normalizing
property.
For an input with zero mean and unit standard deviation, the output of
Alpha Dropout maintains the original mean and standard deviation of the
input.
Alpha Dropout goes hand-in-hand with SELU activation function, which ensures
that the outputs have zero mean and unit standard deviation.
During training, it randomly masks some of the elements of the input
tensor with probability *p* using samples from a bernoulli distribution.
The elements to masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit standard deviation.
During evaluation the module simply computes an identity function.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
p (float): probability of an element to be dropped. Default: 0.5
Shape:
- Input: `Any`. Input can be of any shape
- Output: `Same`. Output is of the same shape as input
Examples::
>>> m = nn.AlphaDropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
def __init__(self, p=0.5):
super(AlphaDropout, self).__init__()
if p < 0 or p > 1:
raise ValueError("dropout probability has to be between 0 and 1, "
"but got {}".format(p))
self.p = p
def forward(self, input):
return F.alpha_dropout(input, self.p, self.training)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'p=' + str(self.p) + ')' | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/dropout.py | 0.940817 | 0.775732 | dropout.py | pypi |
import torch
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from .batchnorm import _BatchNorm
from .. import functional as F
class LocalResponseNorm(Module):
r"""Applies local response normalization over an input signal composed
of several input planes, where channels occupy the second dimension.
Applies normalization across channels.
.. math::
b_{c} = a_{c}\left(k + \frac{\alpha}{n}
\sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta}
Args:
size: amount of neighbouring channels used for normalization
alpha: multiplicative factor. Default: 0.0001
beta: exponent. Default: 0.75
k: additive factor. Default: 1
Shape:
- Input: :math:`(N, C, ...)`
- Output: :math:`(N, C, ...)` (same shape as input)
Examples::
>>> lrn = nn.LocalResponseNorm(2)
>>> signal_2d = torch.randn(32, 5, 24, 24)
>>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7)
>>> output_2d = lrn(signal_2d)
>>> output_4d = lrn(signal_4d)
"""
def __init__(self, size, alpha=1e-4, beta=0.75, k=1):
super(LocalResponseNorm, self).__init__()
self.size = size
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input):
return F.local_response_norm(input, self.size, self.alpha, self.beta,
self.k)
def extra_repr(self):
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
class CrossMapLRN2d(Module):
def __init__(self, size, alpha=1e-4, beta=0.75, k=1):
super(CrossMapLRN2d, self).__init__()
self.size = size
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input):
return self._backend.CrossMapLRN2d(self.size, self.alpha, self.beta,
self.k)(input)
def extra_repr(self):
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
class LayerNorm(Module):
r"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
The mean and standard-deviation are calculated separately over the last
certain number dimensions with shape specified by :attr:`normalized_shape`.
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
Unlike Batch Normalization and Instance Normalization, which applies
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
\times \ldots \times \text{normalized_shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension with that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = nn.LayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = nn.LayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
self.weight.data.fill_(1)
self.bias.data.zero_()
def forward(self, input):
return F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
class GroupNorm(Module):
r"""Applies Group Normalization over a mini-batch of inputs as described in
the paper `Group Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
The input channels are separated into :attr:`num_groups` groups, each containing
``num_channels / num_groups`` channels. The mean and standard-deviation are calculated
separately over the each group. :math:`\gamma` and :math:`\beta` are learnable
per-channel affine transform parameter vectorss of size :attr:`num_channels` if
:attr:`affine` is ``True``.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, num\_channels, *)`
- Output: :math:`(N, num\_channels, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = nn.GroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = nn.GroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = nn.GroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super(GroupNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.Tensor(num_channels))
self.bias = Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
self.weight.data.fill_(1)
self.bias.data.zero_()
def forward(self, input):
return F.group_norm(
input, self.num_groups, self.weight, self.bias, self.eps)
def extra_repr(self):
return '{num_groups}, {num_channels}, eps={eps}, ' \
'affine={affine}'.format(**self.__dict__)
# TODO: ContrastiveNorm2d
# TODO: DivisiveNorm2d
# TODO: SubtractiveNorm2d
import torch
from .module import Module
from .. import functional as F
class PairwiseDistance(Module):
    r"""
    Computes the batchwise pairwise distance between vectors :math:`v_1`,:math:`v_2` using the p-norm:
    .. math ::
        \Vert x \Vert _p := \left( \sum_{i=1}^n  \vert x_i \vert ^ p \right) ^ {1/p}
    Args:
        p (real): the norm degree. Default: 2
        eps (float, optional): Small value to avoid division by zero.
            Default: 1e-6
        keepdim (bool, optional): Determines whether or not to keep the batch dimension.
            Default: False
    Shape:
        - Input1: :math:`(N, D)` where `D = vector dimension`
        - Input2: :math:`(N, D)`, same shape as the Input1
        - Output: :math:`(N)`. If :attr:`keepdim` is ``True``, then :math:`(N, 1)`.
    Examples::
        >>> pdist = nn.PairwiseDistance(p=2)
        >>> input1 = torch.randn(100, 128)
        >>> input2 = torch.randn(100, 128)
        >>> output = pdist(input1, input2)
    """
    def __init__(self, p=2, eps=1e-6, keepdim=False):
        super(PairwiseDistance, self).__init__()
        # Norm degree, numerical-stability epsilon, and reduced-dim retention.
        self.norm = p
        self.eps = eps
        self.keepdim = keepdim

    def forward(self, x1, x2):
        """Return the batched p-norm distance between `x1` and `x2`."""
        return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
class CosineSimilarity(Module):
    r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along `dim`.

    .. math ::
        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}

    Args:
        dim (int, optional): Dimension along which the similarity is computed. Default: 1
        eps (float, optional): Small value to avoid division by zero. Default: 1e-8
    Shape:
        - Input1: :math:`(\ast_1, D, \ast_2)` where `D` is at position `dim`
        - Input2: :math:`(\ast_1, D, \ast_2)`, same shape as Input1
        - Output: :math:`(\ast_1, \ast_2)`
    Examples::
        >>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
        >>> output = cos(torch.randn(100, 128), torch.randn(100, 128))
    """
    def __init__(self, dim=1, eps=1e-8):
        super(CosineSimilarity, self).__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, x1, x2):
        # Delegate to the functional implementation with the stored settings.
        return F.cosine_similarity(x1, x2, self.dim, self.eps)
from numbers import Integral
import warnings
from .module import Module
from .. import functional as F
class Upsample(Module):
    r"""Upsamples a multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) input.

    The input is expected to be of the form
    `minibatch x channels x [optional depth] x [optional height] x width`,
    i.e. a 3D, 4D or 5D tensor. The available algorithms are nearest
    neighbor and linear, bilinear and trilinear interpolation for 3D, 4D
    and 5D inputs respectively.

    Exactly one of :attr:`size` or :attr:`scale_factor` should be given to
    determine the output size (giving both is ambiguous).

    Args:
        size (tuple, optional): target output size
            `([optional D_out], [optional H_out], W_out)`
        scale_factor (int / tuple of ints, optional): the multiplier for the
            image height / width / depth
        mode (string, optional): the upsampling algorithm: one of `nearest`,
            `linear`, `bilinear` and `trilinear`. Default: `nearest`
        align_corners (bool, optional): if True, the corner pixels of the
            input and output tensors are aligned, preserving their values.
            Only has an effect when :attr:`mode` is `linear`, `bilinear`, or
            `trilinear`. Default: False

    Shape:
        - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or
          :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: each spatial dimension is either
          :math:`\left\lfloor \text{in} \times \text{scale_factor} \right\rfloor`
          or the corresponding entry of :attr:`size`.

    .. warning::
        With ``align_corners = True``, the linearly interpolating modes do
        not scale proportionally with the input size, so output values can
        depend on it. This was the default behavior up to version 0.3.1;
        since then the default is ``align_corners = False``.

    Examples::
        >>> input = torch.arange(1, 5).view(1, 1, 2, 2)
        >>> m = nn.Upsample(scale_factor=2, mode='nearest')
        >>> m(input)
        tensor([[[[ 1.,  1.,  2.,  2.],
                  [ 1.,  1.,  2.,  2.],
                  [ 3.,  3.,  4.,  4.],
                  [ 3.,  3.,  4.,  4.]]]])
    """
    def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None):
        super(Upsample, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, input):
        # All resizing logic lives in the functional interface.
        return F.upsample(input, self.size, self.scale_factor, self.mode,
                          self.align_corners)

    def extra_repr(self):
        # Report whichever sizing argument is actually in use.
        if self.scale_factor is None:
            head = 'size=' + str(self.size)
        else:
            head = 'scale_factor=' + str(self.scale_factor)
        return head + ', mode=' + self.mode
class UpsamplingNearest2d(Upsample):
    r"""Applies 2D nearest-neighbor upsampling to a multi-channel input.

    Either :attr:`size` (the output size `(h, w)`) or :attr:`scale_factor`
    is given as the constructor argument.

    Args:
        size (tuple, optional): a tuple of ints `(H_out, W_out)` output sizes
        scale_factor (int, optional): the multiplier for the image height or width

    .. warning::
        This class is deprecated in favor of :class:`~nn.Upsample`.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = \left\lfloor H_{in} \times \text{scale_factor} \right\rfloor`
          and likewise for the width.
    """
    def __init__(self, size=None, scale_factor=None):
        # Fixed mode; everything else is handled by the Upsample base class.
        super(UpsamplingNearest2d, self).__init__(size, scale_factor, mode='nearest')

    def forward(self, input):
        # Emit the deprecation notice on every call, then defer to the base.
        warnings.warn("nn.UpsamplingNearest2d is deprecated. Use nn.Upsample instead.")
        return super(UpsamplingNearest2d, self).forward(input)
class UpsamplingBilinear2d(Upsample):
    r"""Applies 2D bilinear upsampling to a multi-channel input.

    Either :attr:`size` (the output size `(h, w)`) or :attr:`scale_factor`
    is given as the constructor argument.

    Args:
        size (tuple, optional): a tuple of ints `(H_out, W_out)` output sizes
        scale_factor (int, optional): the multiplier for the image height or width

    .. warning::
        This class is deprecated in favor of :class:`~nn.Upsample`. It is
        equivalent to ``nn.Upsample(..., mode='bilinear', align_corners=True)``.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = \left\lfloor H_{in} \times \text{scale_factor} \right\rfloor`
          and likewise for the width.
    """
    def __init__(self, size=None, scale_factor=None):
        # Fixed mode with corner alignment (the historical behavior).
        super(UpsamplingBilinear2d, self).__init__(size, scale_factor, mode='bilinear',
                                                   align_corners=True)

    def forward(self, input):
        # Emit the deprecation notice on every call, then defer to the base.
        warnings.warn("nn.UpsamplingBilinear2d is deprecated. Use nn.Upsample instead.")
        return super(UpsamplingBilinear2d, self).forward(input)
import torch
from .module import Module
from .utils import _single, _pair, _triple
from .. import functional as F
class _MaxPoolNd(Module):
    """Shared constructor and repr formatting for the N-dimensional max-pooling modules."""

    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
                 return_indices=False, ceil_mode=False):
        super(_MaxPoolNd, self).__init__()
        self.kernel_size = kernel_size
        # A missing stride falls back to the kernel size (non-overlapping windows).
        self.stride = stride or kernel_size
        self.padding = padding
        self.dilation = dilation
        self.return_indices = return_indices
        self.ceil_mode = ceil_mode

    def extra_repr(self):
        return ('kernel_size=%(kernel_size)s, stride=%(stride)s, '
                'padding=%(padding)s, dilation=%(dilation)s, '
                'ceil_mode=%(ceil_mode)s' % self.__dict__)
class MaxPool1d(_MaxPoolNd):
    r"""Applies a 1D max pooling over an input signal composed of several input
    planes.
    In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
    and output :math:`(N, C, L_{out})` can be precisely described as:
    .. math::
        \begin{equation*}
        \text{out}(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel_size}-1}
                \text{input}(N_i, C_j, \text{stride} * k + m)
        \end{equation*}
    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
    for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
    It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        dilation: a parameter that controls the stride of elements in the window
        return_indices: if ``True``, will return the max indices along with the outputs.
                        Useful when Unpooling later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
    Shape:
        - Input: :math:`(N, C, L_{in})`
        - Output: :math:`(N, C, L_{out})` where
          .. math::
              L_{out} = \left\lfloor \frac{L_{in} + 2 * \text{padding} - \text{dilation}
                    * (\text{kernel_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
    Examples::
        >>> # pool of size=3, stride=2
        >>> m = nn.MaxPool1d(3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)
    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def forward(self, input):
        return F.max_pool1d(input, self.kernel_size, self.stride,
                            self.padding, self.dilation, self.ceil_mode,
                            self.return_indices)

    # NOTE: no extra_repr override here -- the previous one duplicated
    # _MaxPoolNd.extra_repr verbatim, so the inherited method is used instead.
class MaxPool2d(_MaxPoolNd):
    r"""Applies a 2D max pooling over an input signal composed of several input
    planes.

    For an input of size :math:`(N, C, H, W)` and :attr:`kernel_size`
    :math:`(kH, kW)`, each output element is the maximum over the
    corresponding :math:`kH \times kW` window of the input:

    .. math::
        \begin{equation*}
        \text{out}(N_i, C_j, h, w) = \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1}
                \text{input}(N_i, C_j, \text{stride}[0] * h + m, \text{stride}[1] * w + n)
        \end{equation*}

    If :attr:`padding` is non-zero, the input is implicitly zero-padded on
    both sides for :attr:`padding` points. :attr:`dilation` controls the
    spacing between the kernel points (see the `link`_ for a visualization).
    :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and :attr:`dilation`
    may each be a single ``int`` (used for both dimensions) or a tuple of two
    ints ``(height, width)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        dilation: a parameter that controls the stride of elements in the window
        return_indices: if ``True``, will return the max indices along with the outputs.
                        Useful when Unpooling later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})`, each spatial extent given by
          :math:`\left\lfloor\frac{\text{in} + 2 * \text{padding} - \text{dilation}
          * (\text{kernel_size} - 1) - 1}{\text{stride}} + 1\right\rfloor`

    Examples::
        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
        >>> output = m(torch.randn(20, 16, 50, 32))

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """
    def forward(self, input):
        # All pooling parameters were validated/stored by _MaxPoolNd.
        return F.max_pool2d(input, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)
class MaxPool3d(_MaxPoolNd):
    r"""Applies a 3D max pooling over an input signal composed of several input
    planes.

    For an input of size :math:`(N, C, D, H, W)` and :attr:`kernel_size`
    :math:`(kD, kH, kW)`, each output element is the maximum over the
    corresponding :math:`kD \times kH \times kW` window of the input:

    .. math::
        \begin{align*}
        \text{out}(N_i, C_j, d, h, w) &= \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1}
                \text{input}(N_i, C_j, \text{stride}[0] * k + d,\\ &\text{stride}[1] * h + m, \text{stride}[2] * w + n)
        \end{align*}

    If :attr:`padding` is non-zero, the input is implicitly zero-padded on
    all sides for :attr:`padding` points. :attr:`dilation` controls the
    spacing between the kernel points (see the `link`_ for a visualization).
    :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and :attr:`dilation`
    may each be a single ``int`` (used for all three dimensions) or a tuple
    of three ints ``(depth, height, width)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on all three sides
        dilation: a parameter that controls the stride of elements in the window
        return_indices: if ``True``, will return the max indices along with the outputs.
                        Useful when Unpooling later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, each spatial extent given by
          :math:`\left\lfloor\frac{\text{in} + 2 * \text{padding} - \text{dilation} *
          (\text{kernel_size} - 1) - 1}{\text{stride}} + 1\right\rfloor`

    Examples::
        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool3d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> output = m(torch.randn(20, 16, 50, 44, 31))

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """
    def forward(self, input):
        # All pooling parameters were validated/stored by _MaxPoolNd.
        return F.max_pool3d(input, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)
class _MaxUnpoolNd(Module):
    """Shared repr formatting for the N-dimensional max-unpooling modules."""

    def extra_repr(self):
        return 'kernel_size=%s, stride=%s, padding=%s' % (
            self.kernel_size, self.stride, self.padding)
class MaxUnpool1d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool1d`.

    :class:`MaxPool1d` is not fully invertible, since the non-maximal values
    are lost. :class:`MaxUnpool1d` takes the output of :class:`MaxPool1d`
    together with the indices of the maxima and produces an output in which
    all non-maximal values are set to zero.

    .. note:: `MaxPool1d` can map several input sizes to the same output
              size, so the inversion is ambiguous. Pass the desired output
              size via the optional `output_size` argument of the forward
              call to disambiguate.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to ``kernel_size`` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by `MaxPool1d`
        - `output_size` (optional) : a `torch.Size` that specifies the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in})`
        - Output: :math:`(N, C, H_{out})` where
          :math:`H_{out} = (H_{in} - 1) * \text{stride}[0] - 2 * \text{padding}[0] + \text{kernel_size}[0]`,
          or as given by :attr:`output_size` in the call operator

    Example::
        >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool1d(2, stride=2)
        >>> output, indices = pool(torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]]))
        >>> unpool(output, indices)
        tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0., 8.]]])
    """
    def __init__(self, kernel_size, stride=None, padding=0):
        super(MaxUnpool1d, self).__init__()
        # Geometry is normalized to one-element tuples up front.
        self.stride = _single(stride or kernel_size)
        self.kernel_size = _single(kernel_size)
        self.padding = _single(padding)

    def forward(self, input, indices, output_size=None):
        return F.max_unpool1d(input, indices, self.kernel_size,
                              self.stride, self.padding, output_size)
class MaxUnpool2d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool2d`.

    :class:`MaxPool2d` is not fully invertible, since the non-maximal values
    are lost. :class:`MaxUnpool2d` takes the output of :class:`MaxPool2d`
    together with the indices of the maxima and produces an output in which
    all non-maximal values are set to zero.

    .. note:: `MaxPool2d` can map several input sizes to the same output
              size, so the inversion is ambiguous. Pass the desired output
              size via the optional `output_size` argument of the forward
              call to disambiguate.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to ``kernel_size`` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by `MaxPool2d`
        - `output_size` (optional) : a `torch.Size` that specifies the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
          :math:`H_{out} = (H_{in} - 1) * \text{stride}[0] - 2 * \text{padding}[0] + \text{kernel_size}[0]`
          and :math:`W_{out} = (W_{in} - 1) * \text{stride}[1] - 2 * \text{padding}[1] + \text{kernel_size}[1]`,
          or as given by :attr:`output_size` in the call operator

    Example::
        >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool2d(2, stride=2)
        >>> output, indices = pool(torch.arange(1., 17).view(1, 1, 4, 4))
        >>> unpool(output, indices)
        tensor([[[[  0.,   0.,   0.,   0.],
                  [  0.,   6.,   0.,   8.],
                  [  0.,   0.,   0.,   0.],
                  [  0.,  14.,   0.,  16.]]]])
    """
    def __init__(self, kernel_size, stride=None, padding=0):
        super(MaxUnpool2d, self).__init__()
        # Geometry is normalized to 2-tuples up front.
        self.stride = _pair(stride or kernel_size)
        self.kernel_size = _pair(kernel_size)
        self.padding = _pair(padding)

    def forward(self, input, indices, output_size=None):
        return F.max_unpool2d(input, indices, self.kernel_size,
                              self.stride, self.padding, output_size)
class MaxUnpool3d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool3d`.

    :class:`MaxPool3d` is not fully invertible, since the non-maximal values
    are lost. :class:`MaxUnpool3d` takes the output of :class:`MaxPool3d`
    together with the indices of the maxima and produces an output in which
    all non-maximal values are set to zero.

    .. note:: `MaxPool3d` can map several input sizes to the same output
              size, so the inversion is ambiguous. Pass the desired output
              size via the optional `output_size` argument of the forward
              call to disambiguate.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to ``kernel_size`` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by `MaxPool3d`
        - `output_size` (optional) : a `torch.Size` that specifies the targeted output size

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, each spatial extent given by
          :math:`(\text{in} - 1) * \text{stride} - 2 * \text{padding} + \text{kernel_size}`,
          or as given by :attr:`output_size` in the call operator

    Example::
        >>> # pool of square window of size=3, stride=2
        >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool3d(3, stride=2)
        >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
        >>> unpool(output, indices).size()
        torch.Size([20, 16, 51, 33, 15])
    """
    def __init__(self, kernel_size, stride=None, padding=0):
        super(MaxUnpool3d, self).__init__()
        # Geometry is normalized to 3-tuples up front.
        self.stride = _triple(stride or kernel_size)
        self.kernel_size = _triple(kernel_size)
        self.padding = _triple(padding)

    def forward(self, input, indices, output_size=None):
        return F.max_unpool3d(input, indices, self.kernel_size,
                              self.stride, self.padding, output_size)
class _AvgPoolNd(Module):
    """Shared repr formatting for the N-dimensional average-pooling modules."""

    def extra_repr(self):
        return 'kernel_size=%s, stride=%s, padding=%s' % (
            self.kernel_size, self.stride, self.padding)
class AvgPool1d(_AvgPoolNd):
    r"""Applies a 1D average pooling over an input signal composed of several
    input planes.

    For an input of size :math:`(N, C, L)` and :attr:`kernel_size` :math:`k`,
    each output element is the mean of the corresponding window of the input:

    .. math::
        \begin{equation*}
        \text{out}(N_i, C_j, l)  = \frac{1}{k} \sum_{m=0}^{k}
                               \text{input}(N_i, C_j, \text{stride} * l + m)
        \end{equation*}

    If :attr:`padding` is non-zero, the input is implicitly zero-padded on
    both sides for :attr:`padding` points. :attr:`kernel_size`,
    :attr:`stride` and :attr:`padding` can each be an ``int`` or a
    one-element tuple.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation

    Shape:
        - Input: :math:`(N, C, L_{in})`
        - Output: :math:`(N, C, L_{out})` where
          :math:`L_{out} = \left\lfloor \frac{L_{in} +
          2 * \text{padding} - \text{kernel_size}}{\text{stride}} + 1\right\rfloor`

    Examples::
        >>> # pool with window of size=3, stride=2
        >>> m = nn.AvgPool1d(3, stride=2)
        >>> m(torch.tensor([[[1.,2,3,4,5,6,7]]]))
        tensor([[[ 2.,  4.,  6.]]])
    """
    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True):
        super(AvgPool1d, self).__init__()
        # 1D pooling stores all geometry as one-element tuples.
        self.kernel_size = _single(kernel_size)
        self.stride = _single(kernel_size if stride is None else stride)
        self.padding = _single(padding)
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad

    def forward(self, input):
        return F.avg_pool1d(input, self.kernel_size, self.stride,
                            self.padding, self.ceil_mode,
                            self.count_include_pad)
class AvgPool2d(_AvgPoolNd):
    r"""Applies a 2D average pooling over an input signal composed of several input
    planes.

    For an input of size :math:`(N, C, H, W)` and :attr:`kernel_size`
    :math:`(kH, kW)`, each output element is the mean of the corresponding
    :math:`kH \times kW` window of the input:

    .. math::
        \begin{equation*}
        \text{out}(N_i, C_j, h, w)  = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
                               \text{input}(N_i, C_j, \text{stride}[0] * h + m, \text{stride}[1] * w + n)
        \end{equation*}

    If :attr:`padding` is non-zero, the input is implicitly zero-padded on
    both sides for :attr:`padding` points. :attr:`kernel_size`,
    :attr:`stride` and :attr:`padding` may each be a single ``int`` (used
    for both dimensions) or a tuple of two ints ``(height, width)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})`, each spatial extent given by
          :math:`\left\lfloor\frac{\text{in} + 2 * \text{padding} -
          \text{kernel_size}}{\text{stride}} + 1\right\rfloor`

    Examples::
        >>> # pool of square window of size=3, stride=2
        >>> m = nn.AvgPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
        >>> output = m(torch.randn(20, 16, 50, 32))
    """
    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True):
        super(AvgPool2d, self).__init__()
        self.kernel_size = kernel_size
        # A missing stride falls back to the kernel size (non-overlapping windows).
        self.stride = stride or kernel_size
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad

    def forward(self, input):
        return F.avg_pool2d(input, self.kernel_size, self.stride, self.padding,
                            self.ceil_mode, self.count_include_pad)
class AvgPool3d(_AvgPoolNd):
    r"""Applies a 3D average pooling over an input signal composed of several input
    planes.

    For an input of size :math:`(N, C, D, H, W)` and :attr:`kernel_size`
    :math:`(kD, kH, kW)`, each output element is the mean of the
    corresponding :math:`kD \times kH \times kW` window of the input:

    .. math::
        \begin{equation*}
        \text{out}(N_i, C_j, d, h, w)  = \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
                               \frac{\text{input}(N_i, C_j, \text{stride}[0] * d + k, \text{stride}[1] * h + m,
                                      \text{stride}[2] * w + n)}
                                     {kD * kH * kW}
        \end{equation*}

    If :attr:`padding` is non-zero, the input is implicitly zero-padded on
    all three sides for :attr:`padding` points. :attr:`kernel_size` and
    :attr:`stride` may each be a single ``int`` (used for all three
    dimensions) or a tuple of three ints ``(depth, height, width)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on all three sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, each spatial extent given by
          :math:`\left\lfloor\frac{\text{in} + 2 * \text{padding} -
          \text{kernel_size}}{\text{stride}} + 1\right\rfloor`

    Examples::
        >>> # pool of square window of size=3, stride=2
        >>> m = nn.AvgPool3d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> output = m(torch.randn(20, 16, 50, 44, 31))
    """
    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True):
        super(AvgPool3d, self).__init__()
        self.kernel_size = kernel_size
        # A missing stride falls back to the kernel size (non-overlapping windows).
        self.stride = stride or kernel_size
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad

    def forward(self, input):
        return F.avg_pool3d(input, self.kernel_size, self.stride, self.padding,
                            self.ceil_mode, self.count_include_pad)

    def __setstate__(self, d):
        super(AvgPool3d, self).__setstate__(d)
        # Instances serialized by older versions may lack these attributes.
        for name, default in (('padding', 0), ('ceil_mode', False),
                              ('count_include_pad', True)):
            self.__dict__.setdefault(name, default)
class FractionalMaxPool2d(Module):
    r"""Applies 2D fractional max pooling over an input of several input planes.

    Fractional max pooling, described in `Fractional MaxPooling`_ by Ben
    Graham, pools :math:`kH \times kW` regions using stochastic step sizes
    chosen so that the output matches the requested target size. The number
    of output features equals the number of input planes.

    Args:
        kernel_size: size of the pooling window; a single number k (a square
            k x k kernel) or a tuple `(kh, kw)`
        output_size: target output size `(oH, oW)`; a single number oH gives a
            square `oH x oH` output
        output_ratio: alternatively, the output size as a ratio of the input
            size; a number or tuple with entries in the range (0, 1)
        return_indices: if ``True``, also return the pooling indices, which
            can be passed to :meth:`nn.MaxUnpool2d`. Default: ``False``

    Examples:
        >>> # pool of square window of size=3, and target output size 13x12
        >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
        >>> # pool of square window and target output size being half of input image size
        >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)

    .. _Fractional MaxPooling:
        http://arxiv.org/abs/1412.6071
    """

    def __init__(self, kernel_size, output_size=None, output_ratio=None,
                 return_indices=False, _random_samples=None):
        super(FractionalMaxPool2d, self).__init__()
        self.kernel_size = _pair(kernel_size)
        self.return_indices = return_indices
        # Buffer so user-supplied random samples move with the module (cuda(), etc.).
        self.register_buffer('_random_samples', _random_samples)
        self.output_size = None if output_size is None else _pair(output_size)
        self.output_ratio = None if output_ratio is None else _pair(output_ratio)
        # Exactly one of output_size / output_ratio must be given.
        if output_size is None and output_ratio is None:
            raise ValueError("FractionalMaxPool2d requires specifying either "
                             "an output size, or a pooling ratio")
        if output_size is not None and output_ratio is not None:
            raise ValueError("only one of output_size and output_ratio may be specified")
        if self.output_ratio is not None:
            ratio_ok = (0 < self.output_ratio[0] < 1
                        and 0 < self.output_ratio[1] < 1)
            if not ratio_ok:
                raise ValueError("output_ratio must be between 0 and 1 (got {})"
                                 .format(output_ratio))

    def forward(self, input):
        # self._random_samples is either None or the registered sample buffer.
        return F.fractional_max_pool2d(
            input, self.kernel_size, self.output_size, self.output_ratio,
            self.return_indices, _random_samples=self._random_samples)
class _LPPoolNd(Module):
def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False):
super(_LPPoolNd, self).__init__()
self.norm_type = norm_type
self.kernel_size = kernel_size
self.stride = stride
self.ceil_mode = ceil_mode
def extra_repr(self):
return 'norm_type={norm_type}, kernel_size{kernel_size}, stride={stride}, ' \
'ceil_mode={ceil_mode}'.format(**self.__dict__)
class LPPool1d(_LPPoolNd):
    r"""Applies a 1D power-average pooling over an input signal composed of
    several input planes.

    Each window is reduced to :math:`f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}`.

    - At :math:`p = \infty` this is equivalent to max pooling
    - At :math:`p = 1` this is sum pooling (proportional to average pooling)

    .. note:: If the sum to the power of `p` is zero, the gradient of this
        function is not defined. This implementation will set the gradient to
        zero in this case.

    Args:
        kernel_size: a single int, the size of the window
        stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, L_{in})`
        - Output: :math:`(N, C, L_{out})` where

          .. math::
              L_{out} = \left\lfloor\frac{L_{in} - \text{kernel_size}}{\text{stride}} + 1\right\rfloor

    Examples::
        >>> # power-2 pool of window of length 3, with stride 2.
        >>> m = nn.LPPool1d(2, 3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)
    """

    def forward(self, input):
        p, window = self.norm_type, self.kernel_size
        return F.lp_pool1d(input, p, window, self.stride, self.ceil_mode)
class LPPool2d(_LPPoolNd):
    r"""Applies a 2D power-average pooling over an input signal composed of
    several input planes.

    Each window is reduced to :math:`f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}`.

    - At :math:`p = \infty` this is equivalent to max pooling
    - At :math:`p = 1` this is sum pooling (proportional to average pooling)

    The parameters :attr:`kernel_size`, :attr:`stride` can either be:

    - a single ``int`` -- in which case the same value is used for the height and width dimension
    - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
      and the second `int` for the width dimension

    .. note:: If the sum to the power of `p` is zero, the gradient of this
        function is not defined. This implementation will set the gradient to
        zero in this case.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} - \text{kernel_size}[0]}{\text{stride}[0]} + 1\right\rfloor

              W_{out} = \left\lfloor\frac{W_{in} - \text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor

    Examples::
        >>> # power-2 pool of square window of size=3, stride=2
        >>> m = nn.LPPool2d(2, 3, stride=2)
        >>> # pool of non-square window of power 1.2
        >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)
    """

    def forward(self, input):
        p, window = self.norm_type, self.kernel_size
        return F.lp_pool2d(input, p, window, self.stride, self.ceil_mode)
class _AdaptiveMaxPoolNd(Module):
def __init__(self, output_size, return_indices=False):
super(_AdaptiveMaxPoolNd, self).__init__()
self.output_size = output_size
self.return_indices = return_indices
def extra_repr(self):
return 'output_size={}'.format(self.output_size)
class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
    r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.

    The output size is H, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size H
        return_indices: if ``True``, will return the indices along with the outputs.
                        Useful to pass to nn.MaxUnpool1d. Default: ``False``

    Examples:
        >>> # target output size of 5
        >>> m = nn.AdaptiveMaxPool1d(5)
        >>> input = torch.randn(1, 64, 8)
        >>> output = m(input)
    """

    def forward(self, input):
        return F.adaptive_max_pool1d(input, self.output_size,
                                     return_indices=self.return_indices)
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
    r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.

    The output is of size H x W, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form H x W.
            Can be a tuple (H, W) or a single H for a square image H x H.
            H and W can be either a ``int``, or ``None`` which means the size
            will be the same as that of the input.
        return_indices: if ``True``, will return the indices along with the outputs.
            Useful to pass to nn.MaxUnpool2d. Default: ``False``

    Examples:
        >>> # target output size of 5x7
        >>> m = nn.AdaptiveMaxPool2d((5,7))
        >>> input = torch.randn(1, 64, 8, 9)
        >>> output = m(input)
        >>> # target output size of 7x7 (square)
        >>> m = nn.AdaptiveMaxPool2d(7)
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
        >>> # target output size of 10x7
        >>> m = nn.AdaptiveMaxPool2d((None, 7))
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
    """

    def forward(self, input):
        return F.adaptive_max_pool2d(input, self.output_size,
                                     return_indices=self.return_indices)
class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
    r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.

    The output is of size D x H x W, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form D x H x W.
            Can be a tuple (D, H, W) or a single D for a cube D x D x D.
            D, H and W can be either a ``int``, or ``None`` which means the
            size will be the same as that of the input.
        return_indices: if ``True``, will return the indices along with the outputs.
            Useful to pass to nn.MaxUnpool3d. Default: ``False``

    Examples:
        >>> # target output size of 5x7x9
        >>> m = nn.AdaptiveMaxPool3d((5,7,9))
        >>> input = torch.randn(1, 64, 8, 9, 10)
        >>> output = m(input)
        >>> # target output size of 7x7x7 (cube)
        >>> m = nn.AdaptiveMaxPool3d(7)
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
        >>> # target output size of 7x9x8
        >>> m = nn.AdaptiveMaxPool3d((7, None, None))
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
    """

    def forward(self, input):
        return F.adaptive_max_pool3d(input, self.output_size,
                                     return_indices=self.return_indices)
class _AdaptiveAvgPoolNd(Module):
def __init__(self, output_size):
super(_AdaptiveAvgPoolNd, self).__init__()
self.output_size = output_size
def extra_repr(self):
return 'output_size={}'.format(self.output_size)
class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
    r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.

    The output size is H, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size H

    Examples:
        >>> # target output size of 5
        >>> m = nn.AdaptiveAvgPool1d(5)
        >>> input = torch.randn(1, 64, 8)
        >>> output = m(input)
    """

    def forward(self, input):
        target = self.output_size
        return F.adaptive_avg_pool1d(input, target)
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
    r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.

    The output is of size H x W, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form H x W.
            Can be a tuple (H, W) or a single H for a square image H x H
            H and W can be either a ``int``, or ``None`` which means the size will
            be the same as that of the input.

    Examples:
        >>> # target output size of 5x7
        >>> m = nn.AdaptiveAvgPool2d((5,7))
        >>> input = torch.randn(1, 64, 8, 9)
        >>> output = m(input)
        >>> # target output size of 7x7 (square)
        >>> m = nn.AdaptiveAvgPool2d(7)
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
        >>> # target output size of 10x7
        >>> m = nn.AdaptiveAvgPool2d((None, 7))
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
    """

    def forward(self, input):
        return F.adaptive_avg_pool2d(input, self.output_size)
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
    r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.

    The output is of size D x H x W, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the form D x H x W.
            Can be a tuple (D, H, W) or a single number D for a cube D x D x D
            D, H and W can be either a ``int``, or ``None`` which means the size will
            be the same as that of the input.

    Examples:
        >>> # target output size of 5x7x9
        >>> m = nn.AdaptiveAvgPool3d((5,7,9))
        >>> input = torch.randn(1, 64, 8, 9, 10)
        >>> output = m(input)
        >>> # target output size of 7x7x7 (cube)
        >>> m = nn.AdaptiveAvgPool3d(7)
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
        >>> # target output size of 7x9x8
        >>> m = nn.AdaptiveAvgPool3d((7, None, None))
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
    """

    def forward(self, input):
        return F.adaptive_avg_pool3d(input, self.output_size)
import torch
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
class Embedding(Module):
    r"""A lookup table mapping integer indices to fixed-size embedding vectors.

    Commonly used to store word embeddings: the input is a tensor of indices
    and the output contains the corresponding rows of the weight matrix.

    Args:
        num_embeddings (int): size of the dictionary of embeddings
        embedding_dim (int): the size of each embedding vector
        padding_idx (int, optional): If given, pads the output with the embedding vector at :attr:`padding_idx`
                                     (initialized to zeros) whenever it encounters the index.
        max_norm (float, optional): If given, will renormalize the embeddings to always have a norm lesser than this
        norm_type (float, optional): The p of the p-norm to compute for the max_norm option
        scale_grad_by_freq (bool, optional): if given, this will scale gradients by the frequency of
                                             the words in the mini-batch.
        sparse (bool, optional): if ``True``, gradient w.r.t. weight matrix will be a sparse tensor.
                                 Only a limited set of optimizers supports sparse gradients.

    Attributes:
        weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)

    Shape:
        - Input: LongTensor of arbitrary shape containing the indices to extract
        - Output: `(*, embedding_dim)`, where `*` is the input shape

    .. note::
        With :attr:`padding_idx` set, the embedding vector at that index is
        initialized to zeros and its gradient is always zero; the vector may
        still be overwritten manually afterwards (e.g. by a custom
        initialization), which changes the padding output.

    Examples::

        >>> # an Embedding module containing 10 tensors of size 3
        >>> embedding = nn.Embedding(10, 3)
        >>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]])
        >>> embedding(input).shape
        torch.Size([2, 4, 3])

        >>> # example with padding_idx
        >>> embedding = nn.Embedding(10, 3, padding_idx=0)
        >>> embedding(torch.LongTensor([[0,2,0,5]]))[0, 0]
        tensor([ 0.,  0.,  0.])
    """

    def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
                 max_norm=None, norm_type=2, scale_grad_by_freq=False,
                 sparse=False, _weight=None):
        super(Embedding, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            # Bounds-check the padding index and normalize a negative one.
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
            elif padding_idx < 0:
                assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if _weight is not None:
            # Pre-built weight path, used by from_pretrained().
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            self.weight = Parameter(_weight)
        else:
            self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))
            self.reset_parameters()
        self.sparse = sparse

    def reset_parameters(self):
        # Standard-normal init; the padding row (if any) is zeroed out.
        self.weight.data.normal_(0, 1)
        if self.padding_idx is not None:
            self.weight.data[self.padding_idx].fill_(0)

    def forward(self, input):
        return F.embedding(
            input, self.weight, self.padding_idx, self.max_norm,
            self.norm_type, self.scale_grad_by_freq, self.sparse)

    def extra_repr(self):
        pieces = ['{num_embeddings}, {embedding_dim}']
        if self.padding_idx is not None:
            pieces.append('padding_idx={padding_idx}')
        if self.max_norm is not None:
            pieces.append('max_norm={max_norm}')
        if self.norm_type != 2:
            pieces.append('norm_type={norm_type}')
        if self.scale_grad_by_freq is not False:
            pieces.append('scale_grad_by_freq={scale_grad_by_freq}')
        if self.sparse is not False:
            pieces.append('sparse=True')
        return ', '.join(pieces).format(**self.__dict__)

    @classmethod
    def from_pretrained(cls, embeddings, freeze=True, sparse=False):
        r"""Creates Embedding instance from given 2-dimensional FloatTensor.

        Args:
            embeddings (Tensor): FloatTensor containing weights for the Embedding.
                First dimension is passed to Embedding as 'num_embeddings', second as 'embedding_dim'.
            freeze (boolean, optional): If ``True``, the tensor does not get updated in the learning process.
                Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True``
            sparse (bool, optional): if ``True``, gradient w.r.t. weight matrix will be a sparse tensor.

        Examples::

            >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
            >>> embedding = nn.Embedding.from_pretrained(weight)
            >>> embedding(torch.LongTensor([1]))
            tensor([[ 4.0000,  5.1000,  6.3000]])
        """
        assert embeddings.dim() == 2, \
            'Embeddings parameter is expected to be 2-dimensional'
        num, dim = embeddings.shape
        instance = cls(
            num_embeddings=num,
            embedding_dim=dim,
            _weight=embeddings,
            sparse=sparse,
        )
        instance.weight.requires_grad = not freeze
        return instance
class EmbeddingBag(Module):
    r"""Computes sums or means of 'bags' of embeddings, without instantiating
    the intermediate embeddings.

    For bags of constant length, this is equivalent to :class:`nn.Embedding`
    followed by ``torch.sum(dim=1)`` (``mode='sum'``), ``torch.mean(dim=1)``
    (``mode='mean'``) or ``torch.max(dim=1)`` (``mode='max'``), but is much
    more time and memory efficient than chaining those operations.

    Args:
        num_embeddings (int): size of the dictionary of embeddings
        embedding_dim (int): the size of each embedding vector
        max_norm (float, optional): If given, will renormalize the embeddings to always have a norm lesser than this
        norm_type (float, optional): The p of the p-norm to compute for the max_norm option
        scale_grad_by_freq (bool, optional): if given, this will scale gradients by the frequency of
                                             the words in the dictionary. Note: this option is not
                                             supported when using max mode.
        mode (string, optional): 'sum' | 'mean' | 'max'. Specifies the way to reduce the bag. Default: 'mean'
        sparse (bool, optional): if ``True``, gradient w.r.t. weight matrix will be a sparse tensor.
                                 Note: this option is not supported when using max mode.

    Attributes:
        weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)

    Inputs: input, offsets
        - **input** (``N`` or ``B x N``): LongTensor of indices to extract.
          A 1D input of length ``N`` requires an ``offsets`` tensor holding
          the starting position of each bag; a 2D ``B x N`` input is treated
          as ``B`` fixed-length bags and needs no offsets.
        - **offsets** (``B`` or ``None``): LongTensor with the starting
          position of each bag in a 1D ``input`` (i.e. the cumsum of the bag
          lengths).

    Shape:
        - Output: `(B, embedding_dim)`

    Examples::

        >>> # an Embedding module containing 10 tensors of size 3
        >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
        >>> input = torch.LongTensor([1,2,4,5,4,3,2,9])
        >>> offsets = torch.LongTensor([0,4])
        >>> embedding_sum(input, offsets).shape
        torch.Size([2, 3])
    """

    def __init__(self, num_embeddings, embedding_dim,
                 max_norm=None, norm_type=2, scale_grad_by_freq=False,
                 mode='mean', sparse=False):
        super(EmbeddingBag, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))
        self.mode = mode
        self.sparse = sparse
        self.reset_parameters()

    def reset_parameters(self):
        # Standard-normal initialization, matching nn.Embedding.
        self.weight.data.normal_(0, 1)

    def forward(self, input, offsets=None):
        # NOTE: this (historical) functional signature takes the weight first.
        return F.embedding_bag(self.weight, input, offsets,
                               self.max_norm, self.norm_type,
                               self.scale_grad_by_freq, self.mode, self.sparse)

    def extra_repr(self):
        pieces = ['{num_embeddings}, {embedding_dim}']
        if self.max_norm is not None:
            pieces.append('max_norm={max_norm}')
        if self.norm_type != 2:
            pieces.append('norm_type={norm_type}')
        if self.scale_grad_by_freq is not False:
            pieces.append('scale_grad_by_freq={scale_grad_by_freq}')
        pieces.append('mode={mode}')
        return ', '.join(pieces).format(**self.__dict__)
# TODO: SparseLinear | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/sparse.py | 0.964187 | 0.76819 | sparse.py | pypi |
import warnings
from collections import OrderedDict, Iterable
from itertools import islice
import operator
import torch
from .module import Module
class Container(Module):
    """Deprecated grouping container; subclass :class:`nn.Module` directly instead.

    Any keyword arguments are registered as child modules under their keyword
    name.
    """

    def __init__(self, **kwargs):
        super(Container, self).__init__()
        # DeprecationWarning is ignored by default <sigh>
        # Fixed grammar in the user-visible message: "it's" -> "its".
        warnings.warn("nn.Container is deprecated. All of its functionality "
                      "is now implemented in nn.Module. Subclass that instead.")
        for key, value in kwargs.items():
            self.add_module(key, value)
class Sequential(Module):
    r"""A container that chains modules in the order they were added.

    Modules may be given positionally or as a single OrderedDict; calling the
    container feeds the input through each child module in turn.

    To make it easier to understand, here is a small example::

        # Example of using Sequential
        model = nn.Sequential(
                  nn.Conv2d(1,20,5),
                  nn.ReLU(),
                  nn.Conv2d(20,64,5),
                  nn.ReLU()
                )

        # Example of using Sequential with OrderedDict
        model = nn.Sequential(OrderedDict([
                  ('conv1', nn.Conv2d(1,20,5)),
                  ('relu1', nn.ReLU()),
                  ('conv2', nn.Conv2d(20,64,5)),
                  ('relu2', nn.ReLU())
                ]))
    """

    def __init__(self, *args):
        super(Sequential, self).__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            for name, module in args[0].items():
                self.add_module(name, module)
        else:
            # Positional modules are keyed by their position: '0', '1', ...
            for position, module in enumerate(args):
                self.add_module(str(position), module)

    def _get_item_by_idx(self, iterator, idx):
        """Return the idx-th item of the iterator, supporting negative idx."""
        length = len(self)
        idx = operator.index(idx)
        if not -length <= idx < length:
            raise IndexError('index {} is out of range'.format(idx))
        idx %= length
        return next(islice(iterator, idx, None))

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            # Slicing yields a new Sequential over the selected (name, module) pairs.
            return Sequential(OrderedDict(list(self._modules.items())[idx]))
        return self._get_item_by_idx(self._modules.values(), idx)

    def __setitem__(self, idx, module):
        name = self._get_item_by_idx(self._modules.keys(), idx)
        return setattr(self, name, module)

    def __delitem__(self, idx):
        if isinstance(idx, slice):
            for name in list(self._modules.keys())[idx]:
                delattr(self, name)
        else:
            delattr(self, self._get_item_by_idx(self._modules.keys(), idx))

    def __len__(self):
        return len(self._modules)

    def __dir__(self):
        # Hide the purely numeric child names from dir().
        return [name for name in super(Sequential, self).__dir__()
                if not name.isdigit()]

    def forward(self, input):
        for child in self._modules.values():
            input = child(input)
        return input
class ModuleList(Module):
    r"""Holds submodules in a list.

    Behaves like a regular Python list for indexing, but the contained modules
    are registered as children and therefore visible to all Module methods.

    Arguments:
        modules (iterable, optional): an iterable of modules to add

    Example::

        class MyModule(nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])

            def forward(self, x):
                # ModuleList can act as an iterable, or be indexed using ints
                for i, l in enumerate(self.linears):
                    x = self.linears[i // 2](x) + l(x)
                return x
    """

    def __init__(self, modules=None):
        super(ModuleList, self).__init__()
        if modules is not None:
            self += modules

    def _get_abs_string_index(self, idx):
        """Normalize idx to the non-negative string key used in _modules."""
        idx = operator.index(idx)
        length = len(self)
        if not -length <= idx < length:
            raise IndexError('index {} is out of range'.format(idx))
        return str(idx if idx >= 0 else idx + length)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            return ModuleList(list(self._modules.values())[idx])
        return self._modules[self._get_abs_string_index(idx)]

    def __setitem__(self, idx, module):
        return setattr(self, str(operator.index(idx)), module)

    def __delitem__(self, idx):
        if isinstance(idx, slice):
            for k in range(len(self._modules))[idx]:
                delattr(self, str(k))
        else:
            delattr(self, self._get_abs_string_index(idx))
        # Renumber the surviving children so the keys stay 0..N-1.
        survivors = list(self._modules.values())
        self._modules = OrderedDict(
            (str(i), module) for i, module in enumerate(survivors))

    def __len__(self):
        return len(self._modules)

    def __iter__(self):
        return iter(self._modules.values())

    def __iadd__(self, modules):
        return self.extend(modules)

    def __dir__(self):
        return [key for key in super(ModuleList, self).__dir__()
                if not key.isdigit()]

    def append(self, module):
        r"""Appends a given module to the end of the list.

        Arguments:
            module (nn.Module): module to append
        """
        self.add_module(str(len(self)), module)
        return self

    def extend(self, modules):
        r"""Appends modules from a Python iterable to the end of the list.

        Arguments:
            modules (iterable): iterable of modules to append
        """
        if not isinstance(modules, Iterable):
            raise TypeError("ModuleList.extend should be called with an "
                            "iterable, but got " + type(modules).__name__)
        start = len(self)
        for i, module in enumerate(modules):
            self.add_module(str(start + i), module)
        return self
class ParameterList(Module):
    r"""Holds parameters in a list.

    ParameterList can be indexed like a regular Python list, but parameters it
    contains are properly registered, and will be visible by all Module methods.

    Arguments:
        parameters (iterable, optional): an iterable of :class:`~torch.nn.Parameter` to add

    Example::

        class MyModule(nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])

            def forward(self, x):
                # ParameterList can act as an iterable, or be indexed using ints
                for i, p in enumerate(self.params):
                    x = self.params[i // 2].mm(x) + p.mm(x)
                return x
    """

    def __init__(self, parameters=None):
        super(ParameterList, self).__init__()
        if parameters is not None:
            self += parameters

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            return ParameterList(list(self._parameters.values())[idx])
        idx = operator.index(idx)
        if not (-len(self) <= idx < len(self)):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        return self._parameters[str(idx)]

    def __setitem__(self, idx, param):
        idx = operator.index(idx)
        return self.register_parameter(str(idx), param)

    def __len__(self):
        return len(self._parameters)

    def __iter__(self):
        return iter(self._parameters.values())

    def __iadd__(self, parameters):
        return self.extend(parameters)

    def __dir__(self):
        keys = super(ParameterList, self).__dir__()
        return [key for key in keys if not key.isdigit()]

    def append(self, parameter):
        """Appends a given parameter at the end of the list.

        Arguments:
            parameter (nn.Parameter): parameter to append
        """
        self.register_parameter(str(len(self)), parameter)
        return self

    def extend(self, parameters):
        """Appends parameters from a Python iterable to the end of the list.

        Arguments:
            parameters (iterable): iterable of parameters to append
        """
        if not isinstance(parameters, Iterable):
            raise TypeError("ParameterList.extend should be called with an "
                            "iterable, but got " + type(parameters).__name__)
        offset = len(self)
        for i, param in enumerate(parameters):
            self.register_parameter(str(offset + i), param)
        return self

    def extra_repr(self):
        # One line per parameter: "(key): Parameter containing: [Type of size AxB (GPU d)]"
        # Fixed: the final return line was corrupted by fused metadata text.
        tmpstr = ''
        for k, p in self._parameters.items():
            size_str = 'x'.join(str(size) for size in p.size())
            device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
            parastr = 'Parameter containing: [{} of size {}{}]'.format(
                torch.typename(p.data), size_str, device_str)
            tmpstr = tmpstr + ' (' + k + '): ' + parastr + '\n'
        return tmpstr
import warnings
import torch
from .module import Module
from .container import Sequential
from .activation import LogSoftmax
from .. import functional as F
def _assert_no_grad(tensor):
assert not tensor.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these tensors as not requiring gradients"
class _Loss(Module):
def __init__(self, size_average=True, reduce=True):
super(_Loss, self).__init__()
self.size_average = size_average
self.reduce = reduce
class _WeightedLoss(_Loss):
def __init__(self, weight=None, size_average=True, reduce=True):
super(_WeightedLoss, self).__init__(size_average, reduce)
self.register_buffer('weight', weight)
class L1Loss(_Loss):
r"""Creates a criterion that measures the mean absolute value of the
element-wise difference between input `x` and target `y`:
The loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = \left| x_n - y_n \right|,
where :math:`N` is the batch size. If reduce is ``True``, then:
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if}\; \text{size_average} = \text{True},\\
\operatorname{sum}(L), & \text{if}\; \text{size_average} = \text{False}.
\end{cases}
`x` and `y` arbitrary shapes with a total of `n` elements each.
The sum operation still operates over all the elements, and divides by `n`.
The division by `n` can be avoided if one sets the constructor argument
`size_average=False`.
Args:
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
size_average is set to ``False``, the losses are instead summed for
each minibatch. Ignored when reduce is ``False``. Default: ``True``
reduce (bool, optional): By default, the losses are averaged or summed
for each minibatch. When reduce is ``False``, the loss function returns
a loss per input/target element instead and ignores size_average.
Default: ``True``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Target: :math:`(N, *)`, same shape as the input
- Output: scalar. If reduce is ``False``, then
:math:`(N, *)`, same shape as the input
Examples::
>>> loss = nn.L1Loss()
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, size_average=True, reduce=True):
super(L1Loss, self).__init__(size_average, reduce)
def forward(self, input, target):
_assert_no_grad(target)
return F.l1_loss(input, target, size_average=self.size_average,
reduce=self.reduce)
class NLLLoss(_WeightedLoss):
    r"""The negative log likelihood loss, useful for training a
    classification problem with `C` classes.

    `input` must contain *log-probabilities* of each class — a Tensor of
    size :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, ..., d_K)`
    with :math:`K \geq 2` for the `K`-dimensional case (e.g. per-pixel
    loss for images). Adding a `LogSoftmax` layer as the last network
    layer produces such input; use `CrossEntropyLoss` to fold that layer
    into the criterion instead. `target` contains class indices in
    `[0, C-1]`.

    The unreduced loss is

    .. math::
        l_n = - w_{y_n} x_{n,y_n}, \quad
        w_{c} = \text{weight}[c] \cdot
            \mathbb{1}\{c \not= \text{ignore_index}\},

    and with :attr:`reduce` set to ``True`` (default) the losses are
    summed, divided by the total weight when ``size_average=True``.

    Args:
        weight (Tensor, optional): manual per-class rescaling weights; a
            Tensor of size `C` if given, otherwise all ones.
        size_average (bool, optional): average the (weighted) losses over
            the minibatch instead of summing. Ignored when :attr:`reduce`
            is ``False``. Default: ``True``
        ignore_index (int, optional): target value that is ignored and
            does not contribute to the input gradient; averaged losses
            skip ignored targets.
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` where `C = number of classes`, or
          :math:`(N, C, d_1, ..., d_K)` with :math:`K \geq 2` in the case
          of `K`-dimensional loss.
        - Target: :math:`(N)` where each value is
          :math:`0 \leq \text{targets}[i] \leq C-1`, or
          :math:`(N, d_1, ..., d_K)` for `K`-dimensional loss.
        - Output: scalar. If reduce is ``False``, same size as the target.

    Examples::

        >>> m = nn.LogSoftmax(dim=1)
        >>> loss = nn.NLLLoss()
        >>> # input is of size N x C = 3 x 5
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> # each element in target has to have 0 <= value < C
        >>> target = torch.tensor([1, 0, 4])
        >>> output = loss(m(input), target)
        >>> output.backward()
        >>>
        >>>
        >>> # 2D loss example (used, for example, with image inputs)
        >>> N, C = 5, 4
        >>> loss = nn.NLLLoss()
        >>> # input is of size N x C x height x width
        >>> data = torch.randn(N, 16, 10, 10)
        >>> m = nn.Conv2d(16, C, (3, 3))
        >>> # each element in target has to have 0 <= value < C
        >>> target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
        >>> output = loss(m(data), target)
        >>> output.backward()
    """

    def __init__(self, weight=None, size_average=True, ignore_index=-100, reduce=True):
        super(NLLLoss, self).__init__(weight, size_average, reduce)
        self.ignore_index = ignore_index

    def forward(self, input, target):
        # Target indices are constants; gradients flow only into `input`.
        _assert_no_grad(target)
        return F.nll_loss(input, target, self.weight, self.size_average,
                          self.ignore_index, self.reduce)
class NLLLoss2d(NLLLoss):
    """Deprecated alias of :class:`NLLLoss`.

    :class:`NLLLoss` now handles higher-dimensional (e.g. image) inputs
    directly, so this subclass only emits a deprecation warning and
    forwards all constructor arguments unchanged.
    """
    def __init__(self, weight=None, size_average=True, ignore_index=-100, reduce=True):
        # Warn at construction time, once per instance created.
        warnings.warn("NLLLoss2d has been deprecated. "
                      "Please use NLLLoss instead as a drop-in replacement and see "
                      "http://pytorch.org/docs/master/nn.html#torch.nn.NLLLoss for more details.")
        super(NLLLoss2d, self).__init__(weight, size_average, ignore_index, reduce)
class PoissonNLLLoss(_Loss):
    r"""Negative log likelihood loss for a target assumed to follow a
    Poisson distribution parameterized by the input.

    .. math::
        \text{target} \sim \mathrm{Poisson}(\text{input})

        \text{loss}(\text{input}, \text{target}) =
            \text{input} - \text{target} * \log(\text{input})
            + \log(\text{target!})

    The last term can be omitted, or approximated with the Stirling
    formula (applied to targets greater than 1; zeros are added to the
    loss for targets less or equal to 1).

    Args:
        log_input (bool, optional): if ``True`` the loss is computed as
            :math:`\exp(\text{input}) - \text{target}*\text{input}`;
            if ``False`` it is
            :math:`\text{input} - \text{target}*\log(\text{input}+\text{eps})`.
        full (bool, optional): include the Stirling approximation term
            :math:`\text{target}*\log(\text{target}) - \text{target}
            + 0.5 * \log(2\pi\text{target})`.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        eps (float, optional): small value avoiding :math:`\log(0)` when
            :attr:`log_input == False`. Default: 1e-8
        reduce (bool, optional): when ``False`` return a loss per
            input/target element and ignore `size_average`.
            Default: ``True``

    Examples::

        >>> loss = nn.PoissonNLLLoss()
        >>> log_input = torch.randn(5, 2, requires_grad=True)
        >>> target = torch.randn(5, 2)
        >>> output = loss(log_input, target)
        >>> output.backward()
    """

    def __init__(self, log_input=True, full=False, size_average=True, eps=1e-8, reduce=True):
        super(PoissonNLLLoss, self).__init__(size_average, reduce)
        # `log_input` (bool flag) is distinct from the `log_input` tensor
        # argument of forward().
        self.log_input = log_input
        self.eps = eps
        self.full = full

    def forward(self, log_input, target):
        _assert_no_grad(target)
        return F.poisson_nll_loss(log_input, target, self.log_input, self.full,
                                  self.size_average, self.eps, self.reduce)
class KLDivLoss(_Loss):
    r"""The `Kullback-Leibler divergence`_ loss.

    A useful distance measure for (discretely sampled) continuous output
    distributions. As with :class:`~torch.nn.NLLLoss`, `input` is
    expected to contain *log-probabilities*, but it is not restricted to
    a 2D Tensor because the criterion is applied element-wise. `target`
    is given as *probabilities* (no logarithm) and must have the same
    shape as `input`. The unreduced loss is

    .. math::
        l_n = y_n \cdot \left( \log y_n - x_n \right)

    With :attr:`reduce` set to ``True`` (the default) the element-wise
    losses are averaged (``size_average=True``) or summed
    (``size_average=False``) over *all* elements — observations **and**
    dimensions alike.

    .. _Kullback-Leibler divergence:
        https://en.wikipedia.org/wiki/Kullback-Leibler_divergence

    .. note:: Because of the element-wise averaging the default result is
        actually **not** the KL divergence (terms are already probability
        weighted). To get the real KL divergence, use
        ``size_average=False`` and divide the output by the batch size.
        A future release of PyTorch may move the default closer to the
        mathematical definition.

    Example::

        >>> loss = nn.KLDivLoss(size_average=False)
        >>> batch_size = 5
        >>> log_probs1 = F.log_softmax(torch.randn(batch_size, 10), 1)
        >>> probs2 = F.softmax(torch.randn(batch_size, 10), 1)
        >>> loss(log_probs1, probs2) / batch_size
        tensor(0.7142)

    Args:
        size_average (bool, optional): average over observations **and**
            dimensions when reducing; otherwise sum. Default: ``True``
        reduce (bool, optional): when ``False`` return the per-element
            loss and ignore `size_average`. Default: ``True``

    Shape:
        - input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - target: :math:`(N, *)`, same shape as the input
        - output: scalar by default; :math:`(N, *)` when `reduce` is
          ``False``
    """

    def __init__(self, size_average=True, reduce=True):
        super(KLDivLoss, self).__init__(size_average, reduce)

    def forward(self, input, target):
        # The target distribution is treated as a constant.
        _assert_no_grad(target)
        return F.kl_div(input, target, self.size_average, self.reduce)
class MSELoss(_Loss):
    r"""Mean squared error criterion.

    Measures the element-wise squared difference between input `x` and
    target `y`:

    .. math::
        l_n = \left( x_n - y_n \right)^2

    When :attr:`reduce` is ``True`` the per-element losses are averaged
    over all `n` elements (``size_average=True``) or summed
    (``size_average=False``); when ``False`` a loss per element is
    returned and neither averaging nor summing is applied.

    Args:
        size_average (bool, optional): average instead of sum when
            reducing. Only applies when `reduce` is ``True``.
            Default: ``True``
        reduce (bool, optional): reduce to a scalar instead of returning
            the per-element loss. Default: ``True``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = nn.MSELoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, size_average=True, reduce=True):
        super(MSELoss, self).__init__(size_average, reduce)

    def forward(self, input, target):
        # Targets are constants: gradients flow only into `input`.
        _assert_no_grad(target)
        return F.mse_loss(input, target, self.size_average, self.reduce)
class BCELoss(_WeightedLoss):
    r"""Binary Cross Entropy between the target and the output.

    The unreduced loss is

    .. math::
        l_n = - w_n \left[ y_n \cdot \log x_n
              + (1 - y_n) \cdot \log (1 - x_n) \right],

    averaged (``size_average=True``) or summed over the batch when
    :attr:`reduce` is ``True``. Used, for example, to measure the
    reconstruction error of an auto-encoder. Targets `y` should be
    numbers between 0 and 1.

    Args:
        weight (Tensor, optional): manual rescaling weight for the loss
            of each batch element; a Tensor of size "nbatch" if given.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per
            input/target element and ignore `size_average`.
            Default: ``True``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar, or `(N, *)` when `reduce` is ``False``

    Examples::

        >>> m = nn.Sigmoid()
        >>> loss = nn.BCELoss()
        >>> input = torch.randn(3, requires_grad=True)
        >>> target = torch.empty(3).random_(2)
        >>> output = loss(m(input), target)
        >>> output.backward()
    """

    def __init__(self, weight=None, size_average=True, reduce=True):
        super(BCELoss, self).__init__(weight, size_average, reduce)

    def forward(self, input, target):
        # Targets are constants: gradients flow only into `input`.
        _assert_no_grad(target)
        return F.binary_cross_entropy(input, target, self.weight,
                                      self.size_average, self.reduce)
class BCEWithLogitsLoss(_Loss):
    r"""Combines a `Sigmoid` layer and the `BCELoss` in one single class.

    This version is more numerically stable than a plain `Sigmoid`
    followed by a `BCELoss`: combining the operations into one layer
    allows the log-sum-exp trick to be used. The unreduced loss is

    .. math::
        l_n = - w_n \left[ t_n \cdot \log \sigma(x_n)
              + (1 - t_n) \cdot \log (1 - \sigma(x_n)) \right],

    averaged (``size_average=True``) or summed over the batch when
    :attr:`reduce` is ``True``. Targets `t[i]` should be numbers between
    0 and 1.

    Args:
        weight (Tensor, optional): manual rescaling weight for the loss
            of each batch element; a Tensor of size "nbatch" if given.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per
            input/target element and ignore `size_average`.
            Default: ``True``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = nn.BCEWithLogitsLoss()
        >>> input = torch.randn(3, requires_grad=True)
        >>> target = torch.empty(3).random_(2)
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, weight=None, size_average=True, reduce=True):
        super(BCEWithLogitsLoss, self).__init__(size_average, reduce)
        # Buffer (not Parameter): follows the module across devices and
        # is saved in the state dict, but is not trained.
        self.register_buffer('weight', weight)

    def forward(self, input, target):
        # F.binary_cross_entropy_with_logits already treats weight=None as
        # "no rescaling", so a single call covers both cases; the previous
        # if/else merely duplicated the call.
        return F.binary_cross_entropy_with_logits(input, target, self.weight,
                                                  size_average=self.size_average,
                                                  reduce=self.reduce)
class HingeEmbeddingLoss(_Loss):
    r"""Measures the loss given an input tensor `x` and a labels tensor
    `y` containing values (`1` or `-1`).

    Usually used to measure whether two inputs are similar or dissimilar,
    e.g. using the L1 pairwise distance as `x`, and typically used for
    learning nonlinear embeddings or semi-supervised learning. For the
    :math:`n`-th sample in the mini-batch:

    .. math::
        l_n = \begin{cases}
            x_n, & \text{if}\; y_n = 1,\\
            \max \{0, \Delta - x_n\}, & \text{if}\; y_n = -1,
        \end{cases}

    and the total loss is the mean (``size_average=True``) or sum of
    :math:`L = \{l_1,\dots,l_N\}^\top` when :attr:`reduce` is ``True``.

    Args:
        margin (float, optional): the :math:`\Delta` above. Default: `1`.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``

    Shape:
        - Input: Tensor of arbitrary shape. The sum operation operates
          over all the elements.
        - Target: Same shape as input.
        - Output: scalar, or the input's shape when reduce is ``False``
    """

    def __init__(self, margin=1.0, size_average=True, reduce=True):
        super(HingeEmbeddingLoss, self).__init__(size_average, reduce)
        self.margin = margin

    def forward(self, input, target):
        return F.hinge_embedding_loss(input, target, self.margin,
                                      self.size_average, self.reduce)
class MultiLabelMarginLoss(_Loss):
    r"""Multi-class multi-classification hinge (margin-based) loss
    between input `x` (a 2D mini-batch `Tensor`) and output `y` (a 2D
    `Tensor` of target class indices). For each mini-batch sample:

    .. math::
        \text{loss}(x, y) = \sum_{ij}
            \frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}

    where `i == 0` to `x.size(0)`, `j == 0` to `y.size(0)`,
    :math:`y[j] \geq 0`, and :math:`i \neq y[j]` for all `i` and `j`.
    `y` and `x` must have the same size. Only the contiguous block of
    non-negative targets at the front is considered, which lets different
    samples carry variable numbers of target classes.

    Args:
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``

    Shape:
        - Input: :math:`(C)` or :math:`(N, C)` where `N` is the batch
          size and `C` is the number of classes.
        - Target: :math:`(C)` or :math:`(N, C)`, same shape as the input.
        - Output: scalar, or `(N)` when `reduce` is ``False``.
    """

    def __init__(self, size_average=True, reduce=True):
        super(MultiLabelMarginLoss, self).__init__(size_average, reduce)

    def forward(self, input, target):
        # Target indices are constants; gradients flow only into `input`.
        _assert_no_grad(target)
        return F.multilabel_margin_loss(input, target,
                                        self.size_average, self.reduce)
class SmoothL1Loss(_Loss):
    r"""Uses a squared term if the absolute element-wise error falls
    below 1 and an L1 term otherwise (the Huber loss).

    Less sensitive to outliers than `MSELoss`, and in some cases prevents
    exploding gradients (e.g. see the "Fast R-CNN" paper by Ross
    Girshick):

    .. math::
        \text{loss}(x, y) = \frac{1}{n} \sum_{i} z_{i}, \quad
        z_{i} =
        \begin{cases}
        0.5 (x_i - y_i)^2, & \text{if } |x_i - y_i| < 1 \\
        |x_i - y_i| - 0.5, & \text{otherwise }
        \end{cases}

    The sum runs over all `n` elements; the division by `n` is skipped
    when :attr:`size_average` is ``False``.

    Args:
        size_average (bool, optional): average over all elements instead
            of summing. Ignored when reduce is ``False``.
            Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per
            input/target element and ignore size_average.
            Default: ``True``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar, or :math:`(N, *)` when reduce is ``False``
    """

    def __init__(self, size_average=True, reduce=True):
        super(SmoothL1Loss, self).__init__(size_average, reduce)

    def forward(self, input, target):
        # Targets are constants: gradients flow only into `input`.
        _assert_no_grad(target)
        return F.smooth_l1_loss(input, target, self.size_average, self.reduce)
class SoftMarginLoss(_Loss):
    r"""Two-class classification logistic loss between input tensor `x`
    and target tensor `y` (containing 1 or -1).

    .. math::
        \text{loss}(x, y) = \sum_i
            \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}

    Args:
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``

    Shape:
        - Input: Tensor of arbitrary shape.
        - Target: Same shape as input.
        - Output: scalar, or the input's shape when reduce is ``False``
    """

    def __init__(self, size_average=True, reduce=True):
        super(SoftMarginLoss, self).__init__(size_average, reduce)

    def forward(self, input, target):
        # Targets are constants: gradients flow only into `input`.
        _assert_no_grad(target)
        return F.soft_margin_loss(input, target, self.size_average, self.reduce)
class CrossEntropyLoss(_WeightedLoss):
    r"""Combines :func:`nn.LogSoftmax` and :func:`nn.NLLLoss` in one
    single class, useful for training a classification problem with `C`
    classes.

    `input` is expected to contain raw scores for each class — a Tensor
    of size :math:`(minibatch, C)` or
    :math:`(minibatch, C, d_1, ..., d_K)` with :math:`K \geq 2` for the
    `K`-dimensional case (e.g. per-pixel loss for 2D images). `target`
    holds a class index (0 to `C-1`) for each value:

    .. math::
        \text{loss}(x, class) = -x[class]
            + \log\left(\sum_j \exp(x[j])\right)

    or, with a per-class `weight` tensor,

    .. math::
        \text{loss}(x, class) = weight[class]
            \left(-x[class] + \log\left(\sum_j \exp(x[j])\right)\right)

    The losses are averaged across observations for each minibatch.

    Args:
        weight (Tensor, optional): manual per-class rescaling weights; a
            Tensor of size `C` if given.
        size_average (bool, optional): average instead of sum over the
            minibatch when reducing. Ignored if reduce is ``False``.
        ignore_index (int, optional): target value that is ignored and
            does not contribute to the input gradient; averaged losses
            skip ignored targets.
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore size_average. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` where `C = number of classes`, or
          :math:`(N, C, d_1, ..., d_K)` with :math:`K \geq 2` for
          `K`-dimensional loss.
        - Target: :math:`(N)` with values in :math:`[0, C-1]`, or
          :math:`(N, d_1, ..., d_K)` for K-dimensional loss.
        - Output: scalar. If reduce is ``False``, same size as the target.

    Examples::

        >>> loss = nn.CrossEntropyLoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.empty(3, dtype=torch.long).random_(5)
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, weight=None, size_average=True, ignore_index=-100, reduce=True):
        super(CrossEntropyLoss, self).__init__(weight, size_average, reduce)
        self.ignore_index = ignore_index

    def forward(self, input, target):
        # Target indices are constants; gradients flow only into `input`.
        _assert_no_grad(target)
        return F.cross_entropy(input, target, self.weight, self.size_average,
                               self.ignore_index, self.reduce)
class MultiLabelSoftMarginLoss(_WeightedLoss):
    r"""Multi-label one-versus-all loss based on max-entropy, between
    input `x` and target `y` of size `(N, C)`. For each sample in the
    minibatch:

    .. math::
        loss(x, y) = - \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1})
            + (1-y[i]) *
            \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right)

    where `i == 0` to `x.nElement()-1` and `y[i] in {0,1}`.

    Args:
        weight (Tensor, optional): manual per-class rescaling weights; a
            Tensor of size `C` if given, otherwise all ones.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` where `N` is the batch size and `C` is
          the number of classes.
        - Target: :math:`(N, C)`, same shape as the input.
        - Output: scalar, or `(N)` when `reduce` is ``False``.
    """

    def __init__(self, weight=None, size_average=True, reduce=True):
        super(MultiLabelSoftMarginLoss, self).__init__(weight, size_average, reduce)

    def forward(self, input, target):
        return F.multilabel_soft_margin_loss(input, target, self.weight,
                                             self.size_average, self.reduce)
class CosineEmbeddingLoss(_Loss):
    r"""Measures the loss given input tensors :math:`x_1`, :math:`x_2`
    and a `Tensor` label `y` with values 1 or -1.

    Used to measure whether two inputs are similar or dissimilar via the
    cosine distance; typically used for learning nonlinear embeddings or
    semi-supervised learning. The per-sample loss is:

    .. math::
        \text{loss}(x, y) =
        \begin{cases}
        1 - \cos(x_1, x_2), & \text{if } y == 1 \\
        \max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y == -1
        \end{cases}

    Args:
        margin (float, optional): should be a number from `-1` to `1`;
            `0` to `0.5` is suggested. Default: `0`.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``
    """

    def __init__(self, margin=0, size_average=True, reduce=True):
        super(CosineEmbeddingLoss, self).__init__(size_average, reduce)
        self.margin = margin

    def forward(self, input1, input2, target):
        return F.cosine_embedding_loss(input1, input2, target, self.margin,
                                       self.size_average, self.reduce)
class MarginRankingLoss(_Loss):
    r"""Measures the loss given inputs `x1`, `x2` (two 1D mini-batch
    `Tensor`s) and a label 1D mini-batch tensor `y` with values `1` or
    `-1`.

    If `y == 1` the first input is assumed to rank higher (have a larger
    value) than the second, and vice-versa for `y == -1`. The per-sample
    loss is:

    .. math::
        \text{loss}(x, y) = \max(0, -y * (x1 - x2) + \text{margin})

    Args:
        margin (float, optional): Default: `0`.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``

    Shape:
        - Input: :math:`(N, D)` where `N` is the batch size and `D` is
          the size of a sample.
        - Target: :math:`(N)`
        - Output: scalar, or `(N)` when `reduce` is ``False``.
    """

    def __init__(self, margin=0, size_average=True, reduce=True):
        super(MarginRankingLoss, self).__init__(size_average, reduce)
        self.margin = margin

    def forward(self, input1, input2, target):
        return F.margin_ranking_loss(input1, input2, target, self.margin,
                                     self.size_average, self.reduce)
class MultiMarginLoss(_WeightedLoss):
    r"""Multi-class classification hinge (margin-based) loss between
    input `x` (a 2D mini-batch `Tensor`) and output `y` (a 1D tensor of
    target class indices, :math:`0 \leq y \leq \text{x.size}(1)`).

    For each mini-batch sample, in terms of the 1D input `x` and scalar
    output `y`:

    .. math::
        \text{loss}(x, y) = \frac{\sum_i \max(0,
            \text{margin} - x[y] + x[i]))^p}{\text{x.size}(0)}

    where `i == 0` to `x.size(0)` and :math:`i \neq y`. With an optional
    1D `weight` tensor the loss becomes:

    .. math::
        \text{loss}(x, y) = \frac{\sum_i \max(0,
            w[y] * (\text{margin} - x[y] + x[i]))^p)}{\text{x.size}(0)}

    Args:
        p (int, optional): `1` and `2` are the only supported values.
            Default: `1`.
        margin (float, optional): Default: `1`.
        weight (Tensor, optional): manual per-class rescaling weights; a
            1D Tensor of size `C` if given, otherwise all ones.
        size_average (bool, optional): average instead of sum when
            reducing. Default: ``True``
        reduce (bool, optional): when ``False`` return a loss per batch
            element and ignore :attr:`size_average`. Default: ``True``

    Raises:
        ValueError: if `p` is not 1 or 2, or `weight` is not 1D.
    """

    def __init__(self, p=1, margin=1, weight=None, size_average=True, reduce=True):
        super(MultiMarginLoss, self).__init__(weight, size_average, reduce)
        if p != 1 and p != 2:
            raise ValueError("only p == 1 and p == 2 supported")
        # Previously a bare `assert`, which is silently stripped under
        # `python -O`; raise explicitly so invalid weights always fail fast.
        if weight is not None and weight.dim() != 1:
            raise ValueError("weight must be a 1D tensor")
        self.p = p
        self.margin = margin

    def forward(self, input, target):
        return F.multi_margin_loss(input, target, self.p, self.margin,
                                   self.weight, self.size_average, self.reduce)
class TripletMarginLoss(_Loss):
    r"""Creates a criterion that measures the triplet loss given an input
    tensors x1, x2, x3 and a margin with a value greater than 0.
    This is used for measuring a relative similarity between samples. A triplet
    is composed by `a`, `p` and `n`: anchor, positive examples and negative
    example respectively. The shapes of all input tensors should be
    :math:`(N, D)`.
    The distance swap is described in detail in the paper `Learning shallow
    convolutional feature descriptors with triplet losses`_ by
    V. Balntas, E. Riba et al.
    The loss function for each sample in the mini-batch is:
    .. math::
        L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
    where :math:`d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p`.
    Args:
        margin (float, optional): Default: `1`.
        p (int, optional): The norm degree for pairwise distance. Default: `2`.
        eps (float, optional): Small constant added for numerical stability of
            the distance computation. Default: `1e-6`.
        swap (bool, optional): The distance swap is described in detail in the paper
            `Learning shallow convolutional feature descriptors with triplet losses` by
            V. Balntas, E. Riba et al. Default: ``False``.
        size_average (bool, optional): By default, the losses are averaged over
            observations for each minibatch. However, if the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch.
            Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed over
            observations for each minibatch depending on :attr:`size_average`. When
            :attr:`reduce` is ``False``, returns a loss per batch element instead and
            ignores :attr:`size_average`. Default: ``True``
    Shape:
        - Input: :math:`(N, D)` where `D` is the vector dimension.
        - Output: scalar. If `reduce` is False, then `(N)`.
    >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
    >>> input1 = torch.randn(100, 128, requires_grad=True)
    >>> input2 = torch.randn(100, 128, requires_grad=True)
    >>> input3 = torch.randn(100, 128, requires_grad=True)
    >>> output = triplet_loss(input1, input2, input3)
    >>> output.backward()
    .. _Learning shallow convolutional feature descriptors with triplet losses:
        http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf
    """
    def __init__(self, margin=1.0, p=2, eps=1e-6, swap=False, size_average=True, reduce=True):
        super(TripletMarginLoss, self).__init__(size_average, reduce)
        self.margin = margin
        self.p = p
        self.eps = eps
        self.swap = swap
    def forward(self, anchor, positive, negative):
        # Delegates to the functional form with the stored hyper-parameters.
        return F.triplet_margin_loss(anchor, positive, negative, self.margin, self.p,
                                     self.eps, self.swap, self.size_average, self.reduce)
# TODO: L1HingeEmbeddingCriterion
# TODO: MSECriterion weight
# TODO: ClassSimplexCriterion
from collections import OrderedDict
import functools
import itertools
import torch
from ..backends.thnn import backend as thnn_backend
from ..parameter import Parameter
import torch.utils.hooks as hooks
def _addindent(s_, numSpaces):
s = s_.split('\n')
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
class Module(object):
r"""Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing to nest them in
a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their
parameters converted too when you call `.cuda()`, etc.
"""
dump_patches = False
r"""This allows better BC support for :meth:`load_state_dict`. In
:meth:`state_dict`, the version number will be saved as in the attribute
`_metadata` of the returned state dict, and thus pickled. `_metadata` is a
dictionary with keys follow the naming convention of state dict. See
``_load_from_state_dict`` on how to use this information in loading.
If new parameters/buffers are added/removed from a module, this number shall
be bumped, and the module's `_load_from_state_dict` method can compare the
version number and do appropriate changes if the state dict is from before
the change."""
_version = 1
def __init__(self):
self._backend = thnn_backend
self._parameters = OrderedDict()
self._buffers = OrderedDict()
self._backward_hooks = OrderedDict()
self._forward_hooks = OrderedDict()
self._forward_pre_hooks = OrderedDict()
self._modules = OrderedDict()
self.training = True
def forward(self, *input):
r"""Defines the computation performed at every call.
Should be overridden by all subclasses.
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
registered hooks while the latter silently ignores them.
"""
raise NotImplementedError
def register_buffer(self, name, tensor):
r"""Adds a persistent buffer to the module.
This is typically used to register a buffer that should not to be
considered a model parameter. For example, BatchNorm's ``running_mean``
is not a parameter, but is part of the persistent state.
Buffers can be accessed as attributes using given names.
Args:
name (string): name of the buffer. The buffer can be accessed
from this module using the given name
tensor (Tensor): buffer to be registered.
Example::
>>> self.register_buffer('running_mean', torch.zeros(num_features))
"""
if hasattr(self, name) and name not in self._buffers:
raise KeyError("attribute '{}' already exists".format(name))
elif '.' in name:
raise KeyError("buffer name can't contain \".\"")
elif name == '':
raise KeyError("buffer name can't be empty string \"\"")
elif tensor is not None and not isinstance(tensor, torch.Tensor):
raise TypeError("cannot assign '{}' object to buffer '{}' "
"(torch Tensor or None required)"
.format(torch.typename(tensor), name))
else:
self._buffers[name] = tensor
def register_parameter(self, name, param):
r"""Adds a parameter to the module.
The parameter can be accessed as an attribute using given name.
Args:
name (string): name of the parameter. The parameter can be accessed
from this module using the given name
parameter (Parameter): parameter to be added to the module.
"""
if '_parameters' not in self.__dict__:
raise AttributeError(
"cannot assign parameter before Module.__init__() call")
elif hasattr(self, name) and name not in self._parameters:
raise KeyError("attribute '{}' already exists".format(name))
elif '.' in name:
raise KeyError("parameter name can't contain \".\"")
elif name == '':
raise KeyError("parameter name can't be empty string \"\"")
if param is None:
self._parameters[name] = None
elif not isinstance(param, Parameter):
raise TypeError("cannot assign '{}' object to parameter '{}' "
"(torch.nn.Parameter or None required)"
.format(torch.typename(param), name))
elif param.grad_fn:
raise ValueError(
"Cannot assign non-leaf Tensor to parameter '{0}'. Model "
"parameters must be created explicitly. To express '{0}' "
"as a function of another Tensor, compute the value in "
"the forward() method.".format(name))
else:
self._parameters[name] = param
def add_module(self, name, module):
r"""Adds a child module to the current module.
The module can be accessed as an attribute using the given name.
Args:
name (string): name of the child module. The child module can be
accessed from this module using the given name
parameter (Module): child module to be added to the module.
"""
if not isinstance(module, Module) and module is not None:
raise TypeError("{} is not a Module subclass".format(
torch.typename(module)))
elif hasattr(self, name) and name not in self._modules:
raise KeyError("attribute '{}' already exists".format(name))
elif '.' in name:
raise KeyError("module name can't contain \".\"")
elif name == '':
raise KeyError("module name can't be empty string \"\"")
self._modules[name] = module
def _apply(self, fn):
for module in self.children():
module._apply(fn)
for param in self._parameters.values():
if param is not None:
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for key, buf in self._buffers.items():
if buf is not None:
self._buffers[key] = fn(buf)
return self
def apply(self, fn):
r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)
as well as self. Typical use includes initializing the parameters of a model
(see also :ref:`torch-nn-init`).
Args:
fn (:class:`Module` -> None): function to be applied to each submodule
Returns:
Module: self
Example::
>>> def init_weights(m):
print(m)
if type(m) == nn.Linear:
m.weight.data.fill_(1.0)
print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1., 1.],
[ 1., 1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1., 1.],
[ 1., 1.]])
Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
)
"""
for module in self.children():
module.apply(fn)
fn(self)
return self
def cuda(self, device=None):
r"""Moves all model parameters and buffers to the GPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on GPU while being optimized.
Arguments:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
return self._apply(lambda t: t.cuda(device))
def cpu(self):
r"""Moves all model parameters and buffers to the CPU.
Returns:
Module: self
"""
return self._apply(lambda t: t.cpu())
def type(self, dst_type):
r"""Casts all parameters and buffers to :attr:`dst_type`.
Arguments:
dst_type (type or string): the desired type
Returns:
Module: self
"""
return self._apply(lambda t: t.type(dst_type))
def float(self):
r"""Casts all floating point parameters and buffers to float datatype.
Returns:
Module: self
"""
return self._apply(lambda t: t.float() if t.is_floating_point() else t)
def double(self):
r"""Casts all floating point parameters and buffers to ``double`` datatype.
Returns:
Module: self
"""
return self._apply(lambda t: t.double() if t.is_floating_point() else t)
def half(self):
r"""Casts all floating point parameters and buffers to ``half`` datatype.
Returns:
Module: self
"""
return self._apply(lambda t: t.half() if t.is_floating_point() else t)
def to(self, *args, **kwargs):
r"""Moves and/or casts the parameters and buffers.
This can be called as
.. function:: to(device)
.. function:: to(dtype)
.. function:: to(device, dtype)
It has similar signature as :meth:`torch.Tensor.to`, but does not take
a Tensor and only takes in floating point :attr:`dtype` s. In
particular, this method will only cast the floating point parameters and
buffers to :attr:`dtype`. It will still move the integral parameters and
buffers to :attr:`device`, if that is given. See below for examples.
.. note::
This method modifies the module in-place.
Args:
device (:class:`torch.device`): the desired device of the parameters
and buffers in this module
dtype (:class:`torch.dtype`): the desired floating point type of
the floating point parameters and buffers in this module
Returns:
Module: self
Example::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
[-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
[-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
[-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
[-0.5112, -0.2324]], dtype=torch.float16)
"""
def arg_error():
arg_reprs = list(repr(arg) for arg in args)
for key, val in kwargs.items():
arg_reprs.append("{}={}".format(key, val))
return ValueError('module.to expects .to(device), .to(dtype) or '
'.to(device, dtype), where dtype is a floating '
'point type, but got .to({})'
.format(", ".join(arg_reprs)))
nargs = len(args) + len(kwargs)
device = dtype = None
if nargs < 1 or nargs > 2:
raise arg_error()
else:
for key, val in kwargs.items():
if key == 'dtype':
dtype = kwargs['dtype']
elif 'device' in kwargs:
device = kwargs['device']
else:
raise arg_error()
for arg in args:
if isinstance(arg, torch.dtype):
if dtype is not None:
raise arg_error()
dtype = arg
else:
if device is not None:
raise arg_error()
device = arg
if dtype is not None:
if not dtype.is_floating_point:
raise arg_error()
if device is None:
return self._apply(lambda t: t.to(dtype) if t.is_floating_point() else t)
else:
return self._apply(lambda t: t.to(device, dtype) if t.is_floating_point() else t.to(device))
else:
return self._apply(lambda t: t.to(device))
def register_backward_hook(self, hook):
r"""Registers a backward hook on the module.
The hook will be called every time the gradients with respect to module
inputs are computed. The hook should have the following signature::
hook(module, grad_input, grad_output) -> Tensor or None
The :attr:`grad_input` and :attr:`grad_output` may be tuples if the
module has multiple inputs or outputs. The hook should not modify its
arguments, but it can optionally return a new gradient with respect to
input that will be used in place of :attr:`grad_input` in subsequent
computations.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._backward_hooks)
self._backward_hooks[handle.id] = hook
return handle
def register_forward_pre_hook(self, hook):
r"""Registers a forward pre-hook on the module.
The hook will be called every time before :func:`forward` is invoked.
It should have the following signature::
hook(module, input) -> None
The hook should not modify the input.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._forward_pre_hooks)
self._forward_pre_hooks[handle.id] = hook
return handle
def register_forward_hook(self, hook):
r"""Registers a forward hook on the module.
The hook will be called every time after :func:`forward` has computed an output.
It should have the following signature::
hook(module, input, output) -> None
The hook should not modify the input or output.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._forward_hooks)
self._forward_hooks[handle.id] = hook
return handle
def _tracing_name(self, tracing_state):
if not tracing_state._traced_module_stack:
return None
module = tracing_state._traced_module_stack[-1]
for name, child in module.named_children():
if child is self:
return name
return None
def _slow_forward(self, *input, **kwargs):
input_vars = tuple(torch.autograd.function._iter_tensors(input))
tracing_state = torch.jit.get_tracing_state(input_vars)
if not tracing_state:
return self.forward(*input, **kwargs)
if not hasattr(tracing_state, '_traced_module_stack'):
tracing_state._traced_module_stack = []
name = self._tracing_name(tracing_state)
if name:
tracing_state.push_scope('%s[%s]' % (self.__class__.__name__, name))
else:
tracing_state.push_scope(self.__class__.__name__)
tracing_state._traced_module_stack.append(self)
try:
result = self.forward(*input, **kwargs)
finally:
tracing_state.pop_scope()
tracing_state._traced_module_stack.pop()
return result
def __call__(self, *input, **kwargs):
for hook in self._forward_pre_hooks.values():
hook(self, input)
if torch.jit._tracing:
result = self._slow_forward(*input, **kwargs)
else:
result = self.forward(*input, **kwargs)
for hook in self._forward_hooks.values():
hook_result = hook(self, input, result)
if hook_result is not None:
raise RuntimeError(
"forward hooks should never return any values, but '{}'"
"didn't return None".format(hook))
if len(self._backward_hooks) > 0:
var = result
while not isinstance(var, torch.Tensor):
if isinstance(var, dict):
var = next((v for v in var.values() if isinstance(v, torch.Tensor)))
else:
var = var[0]
grad_fn = var.grad_fn
if grad_fn is not None:
for hook in self._backward_hooks.values():
wrapper = functools.partial(hook, self)
functools.update_wrapper(wrapper, hook)
grad_fn.register_hook(wrapper)
return result
def __setstate__(self, state):
self.__dict__.update(state)
if '_forward_pre_hooks' not in self.__dict__:
self._forward_pre_hooks = OrderedDict()
def __getattr__(self, name):
if '_parameters' in self.__dict__:
_parameters = self.__dict__['_parameters']
if name in _parameters:
return _parameters[name]
if '_buffers' in self.__dict__:
_buffers = self.__dict__['_buffers']
if name in _buffers:
return _buffers[name]
if '_modules' in self.__dict__:
modules = self.__dict__['_modules']
if name in modules:
return modules[name]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name))
def __setattr__(self, name, value):
def remove_from(*dicts):
for d in dicts:
if name in d:
del d[name]
params = self.__dict__.get('_parameters')
if isinstance(value, Parameter):
if params is None:
raise AttributeError(
"cannot assign parameters before Module.__init__() call")
remove_from(self.__dict__, self._buffers, self._modules)
self.register_parameter(name, value)
elif params is not None and name in params:
if value is not None:
raise TypeError("cannot assign '{}' as parameter '{}' "
"(torch.nn.Parameter or None expected)"
.format(torch.typename(value), name))
self.register_parameter(name, value)
else:
modules = self.__dict__.get('_modules')
if isinstance(value, Module):
if modules is None:
raise AttributeError(
"cannot assign module before Module.__init__() call")
remove_from(self.__dict__, self._parameters, self._buffers)
modules[name] = value
elif modules is not None and name in modules:
if value is not None:
raise TypeError("cannot assign '{}' as child module '{}' "
"(torch.nn.Module or None expected)"
.format(torch.typename(value), name))
modules[name] = value
else:
buffers = self.__dict__.get('_buffers')
if buffers is not None and name in buffers:
if value is not None and not isinstance(value, torch.Tensor):
raise TypeError("cannot assign '{}' as buffer '{}' "
"(torch.Tensor or None expected)"
.format(torch.typename(value), name))
buffers[name] = value
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in self._parameters:
del self._parameters[name]
elif name in self._buffers:
del self._buffers[name]
elif name in self._modules:
del self._modules[name]
else:
object.__delattr__(self, name)
def state_dict(self, destination=None, prefix='', keep_vars=False):
r"""Returns a dictionary containing a whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
Returns:
dict:
a dictionary containing a whole state of the module
Example::
>>> module.state_dict().keys()
['bias', 'weight']
"""
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination._metadata[prefix[:-1]] = dict(version=self._version)
for name, param in self._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.data
for name, buf in self._buffers.items():
if buf is not None:
destination[prefix + name] = buf
for name, module in self._modules.items():
if module is not None:
module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)
return destination
def _load_from_state_dict(self, state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs):
r"""Copies parameters and buffers from :attr:`state_dict` into only
this module, but not its descendants. This is called on every submodule
in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
module in input :attr:`state_dict` is at ``state_dict._metadata[prefix]``.
Subclasses can achieve class-specific backward compatible loading using
the version number at ``state_dict._metadata[prefix]["version"]``.
.. note::
:attr:`state_dict` is not the same object as the input
:attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
it can be modified.
Arguments:
state_dict (dict): a dict containing parameters and
persistent buffers.
prefix (str): the prefix for parameters and buffers used in this
module
strict (bool): whether to strictly enforce that the keys in
:attr:`state_dict` with :attr:`prefix` match the names of
parameters and buffers in this module
missing_keys (list of str): if ``strict=False``, add missing keys to
this list
unexpected_keys (list of str): if ``strict=False``, add unexpected
keys to this list
error_msgs (list of str): error messages should be added to this
list, and will be reported together in
:meth:`~torch.nn.Module.load_state_dict`
"""
local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
local_state = {k: v.data for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if key in state_dict:
input_param = state_dict[key]
if isinstance(input_param, Parameter):
# backwards compatibility for serialized parameters
input_param = input_param.data
try:
param.copy_(input_param)
except Exception:
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(key, param.size(), input_param.size()))
elif strict:
missing_keys.append(key)
if strict:
for key, input_param in state_dict.items():
if key.startswith(prefix):
input_name = key[len(prefix):]
input_name = input_name.split('.', 1)[0] # get the name of param/buffer/child
if input_name not in self._modules and input_name not in local_state:
unexpected_keys.append(key)
def load_state_dict(self, state_dict, strict=True):
r"""Copies parameters and buffers from :attr:`state_dict` into
this module and its descendants. If :attr:`strict` is ``True``, then
the keys of :attr:`state_dict` must exactly match the keys returned
by this module's :meth:`~torch.nn.Module.state_dict` function.
Arguments:
state_dict (dict): a dict containing parameters and
persistent buffers.
strict (bool, optional): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
"""
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
module._load_from_state_dict(
state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(self)
if strict:
error_msg = ''
if len(unexpected_keys) > 0:
error_msgs.insert(
0, 'Unexpected key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in unexpected_keys)))
if len(missing_keys) > 0:
error_msgs.insert(
0, 'Missing key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in missing_keys)))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
self.__class__.__name__, "\n\t".join(error_msgs)))
def parameters(self):
r"""Returns an iterator over module parameters.
This is typically passed to an optimizer.
Yields:
Parameter: module parameter
Example::
>>> for param in model.parameters():
>>> print(type(param.data), param.size())
<class 'torch.FloatTensor'> (20L,)
<class 'torch.FloatTensor'> (20L, 1L, 5L, 5L)
"""
for name, param in self.named_parameters():
yield param
def named_parameters(self, memo=None, prefix=''):
r"""Returns an iterator over module parameters, yielding both the
name of the parameter as well as the parameter itself
Yields:
(string, Parameter): Tuple containing the name and parameter
Example::
>>> for name, param in self.named_parameters():
>>> if name in ['bias']:
>>> print(param.size())
"""
if memo is None:
memo = set()
for name, p in self._parameters.items():
if p is not None and p not in memo:
memo.add(p)
yield prefix + ('.' if prefix else '') + name, p
for mname, module in self.named_children():
submodule_prefix = prefix + ('.' if prefix else '') + mname
for name, p in module.named_parameters(memo, submodule_prefix):
yield name, p
def _all_buffers(self, memo=None):
if memo is None:
memo = set()
for name, b in self._buffers.items():
if b is not None and b not in memo:
memo.add(b)
yield b
for module in self.children():
for b in module._all_buffers(memo):
yield b
def children(self):
r"""Returns an iterator over immediate children modules.
Yields:
Module: a child module
"""
for name, module in self.named_children():
yield module
def named_children(self):
r"""Returns an iterator over immediate children modules, yielding both
the name of the module as well as the module itself.
Yields:
(string, Module): Tuple containing a name and child module
Example::
>>> for name, module in model.named_children():
>>> if name in ['conv4', 'conv5']:
>>> print(module)
"""
memo = set()
for name, module in self._modules.items():
if module is not None and module not in memo:
memo.add(module)
yield name, module
def modules(self):
r"""Returns an iterator over all modules in the network.
Yields:
Module: a module in the network
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
Example::
>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
print(idx, '->', m)
0 -> Sequential (
(0): Linear (2 -> 2)
(1): Linear (2 -> 2)
)
1 -> Linear (2 -> 2)
"""
for name, module in self.named_modules():
yield module
def named_modules(self, memo=None, prefix=''):
r"""Returns an iterator over all modules in the network, yielding
both the name of the module as well as the module itself.
Yields:
(string, Module): Tuple of name and module
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
Example::
>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
print(idx, '->', m)
0 -> ('', Sequential (
(0): Linear (2 -> 2)
(1): Linear (2 -> 2)
))
1 -> ('0', Linear (2 -> 2))
"""
if memo is None:
memo = set()
if self not in memo:
memo.add(self)
yield prefix, self
for name, module in self._modules.items():
if module is None:
continue
submodule_prefix = prefix + ('.' if prefix else '') + name
for m in module.named_modules(memo, submodule_prefix):
yield m
def train(self, mode=True):
r"""Sets the module in training mode.
This has any effect only on certain modules. See documentations of
particular modules for details of their behaviors in training/evaluation
mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
etc.
Returns:
Module: self
"""
self.training = mode
for module in self.children():
module.train(mode)
return self
def eval(self):
r"""Sets the module in evaluation mode.
This has any effect only on certain modules. See documentations of
particular modules for details of their behaviors in training/evaluation
mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
etc.
"""
return self.train(False)
def zero_grad(self):
r"""Sets gradients of all model parameters to zero."""
for p in self.parameters():
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def share_memory(self):
return self._apply(lambda t: t.share_memory_())
def _get_name(self):
return self.__class__.__name__
def extra_repr(self):
r"""Set the extra representation of the module
To print customized extra information, you should reimplement
this method in your own modules. Both single-line and multi-line
strings are acceptable.
"""
return ''
def __repr__(self):
# We treat the extra repr like the sub-module, one item per line
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split('\n')
child_lines = []
for key, module in self._modules.items():
mod_str = repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append('(' + key + '): ' + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + '('
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += '\n ' + '\n '.join(lines) + '\n'
main_str += ')'
return main_str
def __dir__(self):
module_attrs = dir(self.__class__)
attrs = list(self.__dict__.keys())
parameters = list(self._parameters.keys())
modules = list(self._modules.keys())
buffers = list(self._buffers.keys())
keys = module_attrs + attrs + parameters + modules + buffers
# Eliminate attrs that are not legal Python variable names
keys = [key for key in keys if not key[0].isdigit()]
return sorted(keys) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/module.py | 0.898522 | 0.305477 | module.py | pypi |
import torch
from .module import Module
from torch.nn.parameter import Parameter
from .. import functional as F
# TODO: check contiguous in THNN
# TODO: use separate backend functions?
class _BatchNorm(Module):
    r"""Common base class for the BatchNorm1d/2d/3d modules.

    Holds the optional affine parameters (``weight``/``bias``) and, when
    ``track_running_stats`` is True, the ``running_mean``/``running_var``
    buffers plus a ``num_batches_tracked`` counter that enables cumulative
    (rather than exponential) averaging when ``momentum`` is None.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # Learnable per-channel scale (gamma) and shift (beta).
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            # Registered as None parameters so attribute access still resolves.
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()
    def reset_running_stats(self):
        # Reset running statistics to the identity normalization (mean 0, var 1).
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()
    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()
    def _check_input_dim(self, input):
        # Subclasses enforce the expected input rank (e.g. 2D/3D for BatchNorm1d).
        raise NotImplementedError
    def forward(self, input):
        self._check_input_dim(input)
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:  # use cumulative moving average
                exponential_average_factor = 1.0 / self.num_batches_tracked.item()
            else:  # use exponential moving average
                exponential_average_factor = self.momentum
        return F.batch_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats,
            exponential_average_factor, self.eps)
    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
            'track_running_stats={track_running_stats}'.format(**self.__dict__)
    def _load_from_state_dict(self,
                              state_dict, prefix, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Version-aware loading: checkpoints saved before `num_batches_tracked`
        # existed carry no version metadata; patch in a zero counter so strict
        # loading does not report it as a missing key.
        try:
            version = state_dict._metadata[prefix[:-1]]['version']
        except (AttributeError, KeyError):
            version = None
        if version is None and self.track_running_stats:
            num_batches_tracked_key = prefix + 'num_batches_tracked'
            if num_batches_tracked_key not in state_dict:
                # Add the missing num_batches_tracked counter
                state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
        super(_BatchNorm, self)._load_from_state_dict(
            state_dict, prefix, strict,
            missing_keys, unexpected_keys, error_msgs)
class BatchNorm1d(_BatchNorm):
    r"""Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs
    with an optional channel dimension), as described in
    `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard deviation are computed per channel over the
    mini-batch; :math:`\gamma` and :math:`\beta` are learnable vectors of
    size `C` (the input size). By default the layer also maintains running
    estimates of mean and variance during training (with a default
    :attr:`momentum` of 0.1), and uses them for normalization in evaluation
    mode. When :attr:`track_running_stats` is ``False`` no running estimates
    are kept and batch statistics are used at evaluation time as well.

    .. note::
        This :attr:`momentum` differs from the optimizer notion of momentum.
        The running-statistics update rule is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the running estimate and :math:`x_t` the
        newly observed value.

    Because normalization is done over `C`, computing statistics on `(N, L)`
    slices, this is commonly called Temporal Batch Normalization.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: value used for the running_mean and running_var
            computation. ``None`` selects a cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: when ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: when ``True``, running mean and variance are
            tracked; when ``False``, batch statistics are always used, in
            both training and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples::

        >>> m = nn.BatchNorm1d(100)                  # with learnable parameters
        >>> m = nn.BatchNorm1d(100, affine=False)    # without
        >>> output = m(torch.randn(20, 100))

    .. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
        https://arxiv.org/abs/1502.03167
    """

    def _check_input_dim(self, input):
        # Accept rank-2 (N, C) or rank-3 (N, C, L) inputs only.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
class BatchNorm2d(_BatchNorm):
    r"""Batch Normalization over a 4D input (a mini-batch of 2D inputs with an
    additional channel dimension), as described in
    `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard deviation are computed per channel over the
    mini-batch; :math:`\gamma` and :math:`\beta` are learnable vectors of
    size `C` (the input size). By default the layer also maintains running
    estimates of mean and variance during training (with a default
    :attr:`momentum` of 0.1), and uses them for normalization in evaluation
    mode. When :attr:`track_running_stats` is ``False`` no running estimates
    are kept and batch statistics are used at evaluation time as well.

    .. note::
        This :attr:`momentum` differs from the optimizer notion of momentum.
        The running-statistics update rule is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the running estimate and :math:`x_t` the
        newly observed value.

    Because normalization is done over `C`, computing statistics on
    `(N, H, W)` slices, this is commonly called Spatial Batch Normalization.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)`
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: value used for the running_mean and running_var
            computation. ``None`` selects a cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: when ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: when ``True``, running mean and variance are
            tracked; when ``False``, batch statistics are always used, in
            both training and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples::

        >>> m = nn.BatchNorm2d(100)                  # with learnable parameters
        >>> m = nn.BatchNorm2d(100, affine=False)    # without
        >>> output = m(torch.randn(20, 100, 35, 45))

    .. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
        https://arxiv.org/abs/1502.03167
    """

    def _check_input_dim(self, input):
        # Accept rank-4 (N, C, H, W) inputs only.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
class BatchNorm3d(_BatchNorm):
    r"""Batch Normalization over a 5D input (a mini-batch of 3D inputs with an
    additional channel dimension), as described in
    `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard deviation are computed per channel over the
    mini-batch; :math:`\gamma` and :math:`\beta` are learnable vectors of
    size `C` (the input size). By default the layer also maintains running
    estimates of mean and variance during training (with a default
    :attr:`momentum` of 0.1), and uses them for normalization in evaluation
    mode. When :attr:`track_running_stats` is ``False`` no running estimates
    are kept and batch statistics are used at evaluation time as well.

    .. note::
        This :attr:`momentum` differs from the optimizer notion of momentum.
        The running-statistics update rule is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the running estimate and :math:`x_t` the
        newly observed value.

    Because normalization is done over `C`, computing statistics on
    `(N, D, H, W)` slices, this is commonly called Volumetric Batch
    Normalization or Spatio-temporal Batch Normalization.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)`
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: value used for the running_mean and running_var
            computation. ``None`` selects a cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: when ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: when ``True``, running mean and variance are
            tracked; when ``False``, batch statistics are always used, in
            both training and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples::

        >>> m = nn.BatchNorm3d(100)                  # with learnable parameters
        >>> m = nn.BatchNorm3d(100, affine=False)    # without
        >>> output = m(torch.randn(20, 100, 35, 45, 10))

    .. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
        https://arxiv.org/abs/1502.03167
    """

    def _check_input_dim(self, input):
        # Accept rank-5 (N, C, D, H, W) inputs only.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
import math
import torch
import warnings
import itertools
import numbers
from .module import Module
from ..parameter import Parameter
from ..utils.rnn import PackedSequence
class RNNBase(Module):
    """Base class for the RNN, LSTM and GRU modules.

    Handles per-layer/per-direction parameter creation, optional cuDNN
    weight flattening, input/hidden-state validation and dispatch to the
    autograd backend; subclasses mostly just select the ``mode`` string
    ('RNN_TANH', 'RNN_RELU', 'LSTM' or 'GRU').
    """

    def __init__(self, mode, input_size, hidden_size,
                 num_layers=1, bias=True, batch_first=False,
                 dropout=0, bidirectional=False):
        super(RNNBase, self).__init__()
        self.mode = mode
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.dropout_state = {}  # per-device cuDNN dropout descriptor cache
        self.bidirectional = bidirectional
        num_directions = 2 if bidirectional else 1

        # bool is a numbers.Number subclass, so it must be rejected explicitly.
        if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
                isinstance(dropout, bool):
            raise ValueError("dropout should be a number in range [0, 1] "
                             "representing the probability of an element being "
                             "zeroed")
        if dropout > 0 and num_layers == 1:
            warnings.warn("dropout option adds dropout after all but last "
                          "recurrent layer, so non-zero dropout expects "
                          "num_layers greater than 1, but got dropout={} and "
                          "num_layers={}".format(dropout, num_layers))

        # Gate weights are stacked along dim 0: 4 gates for LSTM, 3 for GRU,
        # a single transform for the plain RNN modes.
        if mode == 'LSTM':
            gate_size = 4 * hidden_size
        elif mode == 'GRU':
            gate_size = 3 * hidden_size
        else:
            gate_size = hidden_size

        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                # Layers after the first consume the (possibly concatenated
                # bidirectional) output of the previous layer.
                layer_input_size = input_size if layer == 0 else hidden_size * num_directions

                w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
                w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
                b_ih = Parameter(torch.Tensor(gate_size))
                b_hh = Parameter(torch.Tensor(gate_size))
                layer_params = (w_ih, w_hh, b_ih, b_hh)

                suffix = '_reverse' if direction == 1 else ''
                param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
                if bias:
                    param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
                param_names = [x.format(layer, suffix) for x in param_names]

                # zip truncates to param_names, so when bias=False the two
                # bias tensors created above are simply never registered.
                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._all_weights.append(param_names)

        self.flatten_parameters()
        self.reset_parameters()

    def flatten_parameters(self):
        """Resets parameter data pointer so that they can use faster code paths.

        Right now, this works only if the module is on the GPU and cuDNN is enabled.
        Otherwise, it's a no-op.
        """
        any_param = next(self.parameters()).data
        if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param):
            # CPU or cuDNN-unsupported tensors: record "not flattened".
            self._data_ptrs = []
            return

        # If any parameters alias, we fall back to the slower, copying code path. This is
        # a sufficient check, because overlapping parameter buffers that don't completely
        # alias would break the assumptions of the uniqueness check in
        # Module.named_parameters().
        unique_data_ptrs = set(p.data_ptr() for l in self.all_weights for p in l)
        if len(unique_data_ptrs) != sum(len(l) for l in self.all_weights):
            self._data_ptrs = []
            return

        with torch.cuda.device_of(any_param):
            import torch.backends.cudnn.rnn as rnn

            weight_arr = list(itertools.chain.from_iterable(self.all_weights))
            weight_stride0 = len(self.all_weights[0])

            # NB: This is a temporary hack while we still don't have Tensor
            # bindings for ATen functions
            with torch.no_grad():
                # NB: this is an INPLACE function on weight_arr, that's why the
                # no_grad() is necessary.
                weight_buf = torch._cudnn_rnn_flatten_weight(
                    weight_arr, weight_stride0,
                    self.input_size, rnn.get_cudnn_mode(self.mode), self.hidden_size, self.num_layers,
                    self.batch_first, bool(self.bidirectional))

            # Remember where the flattened parameters live; forward() compares
            # against these pointers to decide whether the fast path is valid.
            self._param_buf_size = weight_buf.size(0)
            self._data_ptrs = list(p.data.data_ptr() for p in self.parameters())

    def _apply(self, fn):
        # Any dtype/device move invalidates the flat buffer, so re-flatten.
        ret = super(RNNBase, self)._apply(fn)
        self.flatten_parameters()
        return ret

    def reset_parameters(self):
        # All weights and biases ~ U(-1/sqrt(hidden_size), 1/sqrt(hidden_size)).
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def check_forward_args(self, input, hidden, batch_sizes):
        """Validate input rank/feature size and the hidden state shape(s).

        Raises RuntimeError on any mismatch; `batch_sizes` is not None only
        for packed-sequence inputs.
        """
        is_input_packed = batch_sizes is not None
        expected_input_dim = 2 if is_input_packed else 3
        if input.dim() != expected_input_dim:
            raise RuntimeError(
                'input must have {} dimensions, got {}'.format(
                    expected_input_dim, input.dim()))
        if self.input_size != input.size(-1):
            raise RuntimeError(
                'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
                    self.input_size, input.size(-1)))

        if is_input_packed:
            mini_batch = int(batch_sizes[0])
        else:
            mini_batch = input.size(0) if self.batch_first else input.size(1)

        num_directions = 2 if self.bidirectional else 1
        expected_hidden_size = (self.num_layers * num_directions,
                                mini_batch, self.hidden_size)

        def check_hidden_size(hx, expected_hidden_size, msg='Expected hidden size {}, got {}'):
            if tuple(hx.size()) != expected_hidden_size:
                raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))

        if self.mode == 'LSTM':
            # LSTM hidden state is an (h, c) pair; check both tensors.
            check_hidden_size(hidden[0], expected_hidden_size,
                              'Expected hidden[0] size {}, got {}')
            check_hidden_size(hidden[1], expected_hidden_size,
                              'Expected hidden[1] size {}, got {}')
        else:
            check_hidden_size(hidden, expected_hidden_size)

    def forward(self, input, hx=None):
        is_packed = isinstance(input, PackedSequence)
        if is_packed:
            input, batch_sizes = input
            max_batch_size = int(batch_sizes[0])
        else:
            batch_sizes = None
            max_batch_size = input.size(0) if self.batch_first else input.size(1)

        if hx is None:
            # Default initial hidden state: zeros (an (h, c) pair for LSTM).
            num_directions = 2 if self.bidirectional else 1
            hx = input.new_zeros(self.num_layers * num_directions,
                                 max_batch_size, self.hidden_size,
                                 requires_grad=False)
            if self.mode == 'LSTM':
                hx = (hx, hx)

        # The cuDNN fast path is only valid while the parameters still live at
        # the pointers recorded by flatten_parameters().
        has_flat_weights = list(p.data.data_ptr() for p in self.parameters()) == self._data_ptrs
        if has_flat_weights:
            first_data = next(self.parameters()).data
            assert first_data.storage().size() == self._param_buf_size
            # View over the shared flat storage holding all parameters.
            flat_weight = first_data.new().set_(first_data.storage(), 0, torch.Size([self._param_buf_size]))
        else:
            flat_weight = None

        self.check_forward_args(input, hx, batch_sizes)
        func = self._backend.RNN(
            self.mode,
            self.input_size,
            self.hidden_size,
            num_layers=self.num_layers,
            batch_first=self.batch_first,
            dropout=self.dropout,
            train=self.training,
            bidirectional=self.bidirectional,
            dropout_state=self.dropout_state,
            variable_length=is_packed,
            flat_weight=flat_weight
        )
        output, hidden = func(input, self.all_weights, hx, batch_sizes)
        if is_packed:
            # Re-wrap the raw output so callers get a PackedSequence back.
            output = PackedSequence(output, batch_sizes)
        return output, hidden

    def extra_repr(self):
        # Only show options that differ from their defaults.
        s = '{input_size}, {hidden_size}'
        if self.num_layers != 1:
            s += ', num_layers={num_layers}'
        if self.bias is not True:
            s += ', bias={bias}'
        if self.batch_first is not False:
            s += ', batch_first={batch_first}'
        if self.dropout != 0:
            s += ', dropout={dropout}'
        if self.bidirectional is not False:
            s += ', bidirectional={bidirectional}'
        return s.format(**self.__dict__)

    def __setstate__(self, d):
        super(RNNBase, self).__setstate__(d)
        self.__dict__.setdefault('_data_ptrs', [])
        if 'all_weights' in d:
            self._all_weights = d['all_weights']
        if isinstance(self._all_weights[0][0], str):
            # Already the modern name-based layout; nothing to rebuild.
            return
        # Old checkpoints stored parameter objects instead of names:
        # reconstruct the name lists from the module configuration.
        num_layers = self.num_layers
        num_directions = 2 if self.bidirectional else 1
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                suffix = '_reverse' if direction == 1 else ''
                weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
                weights = [x.format(layer, suffix) for x in weights]
                if self.bias:
                    self._all_weights += [weights]
                else:
                    self._all_weights += [weights[:2]]

    @property
    def all_weights(self):
        # Resolve the stored names into the live Parameter objects.
        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
class RNN(RNNBase):
    r"""Multi-layer Elman RNN with `tanh` or `ReLU` non-linearity, applied to
    an input sequence.

    For each element of the sequence, every layer computes

    .. math::

        h_t = \tanh(w_{ih} x_t + b_{ih} + w_{hh} h_{(t-1)} + b_{hh})

    where :math:`h_t` is the hidden state at time `t`, :math:`x_t` the input
    at time `t`, and :math:`h_{(t-1)}` the previous hidden state (or the
    initial hidden state at time `0`). With :attr:`nonlinearity`='relu',
    `ReLU` replaces `tanh`.

    Args:
        input_size: number of expected features in the input `x`
        hidden_size: number of features in the hidden state `h`
        num_layers: number of stacked recurrent layers. Default: 1
        nonlinearity: 'tanh' or 'relu'. Default: 'tanh'
        bias: if ``False``, the layer does not use `b_ih` and `b_hh`.
            Default: ``True``
        batch_first: if ``True``, input and output tensors are provided as
            `(batch, seq, feature)`
        dropout: if non-zero, applies dropout on the outputs of every RNN
            layer except the last, with this probability. Default: 0
        bidirectional: if ``True``, becomes a bidirectional RNN.
            Default: ``False``

    Inputs: input, h_0
        - **input** `(seq_len, batch, input_size)`: input features; may also
          be a packed variable-length sequence (see
          :func:`torch.nn.utils.rnn.pack_padded_sequence` or
          :func:`torch.nn.utils.rnn.pack_sequence`).
        - **h_0** `(num_layers * num_directions, batch, hidden_size)`:
          initial hidden state; defaults to zeros when omitted.

    Outputs: output, h_n
        - **output** `(seq_len, batch, num_directions * hidden_size)`: the
          last layer's hidden states for every step (packed if the input was
          packed). Directions can be separated with
          ``output.view(seq_len, batch, num_directions, hidden_size)``.
        - **h_n** `(num_layers * num_directions, batch, hidden_size)`: hidden
          state for `k = seq_len`; layers can be separated with
          ``h_n.view(num_layers, num_directions, batch, hidden_size)``.

    Attributes:
        weight_ih_l[k]: input-hidden weights of layer k, shape
            `(hidden_size * input_size)` for `k = 0`, otherwise
            `(hidden_size * hidden_size)`
        weight_hh_l[k]: hidden-hidden weights of layer k, shape
            `(hidden_size * hidden_size)`
        bias_ih_l[k]: input-hidden bias of layer k, shape `(hidden_size)`
        bias_hh_l[k]: hidden-hidden bias of layer k, shape `(hidden_size)`

    Examples::

        >>> rnn = nn.RNN(10, 20, 2)
        >>> input = torch.randn(5, 3, 10)
        >>> h0 = torch.randn(2, 3, 20)
        >>> output, hn = rnn(input, h0)
    """

    def __init__(self, *args, **kwargs):
        # Translate the user-facing `nonlinearity` keyword into the internal
        # mode string understood by RNNBase; tanh is the default.
        nonlinearity = kwargs.pop('nonlinearity', 'tanh')
        if nonlinearity == 'tanh':
            mode = 'RNN_TANH'
        elif nonlinearity == 'relu':
            mode = 'RNN_RELU'
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(nonlinearity))
        super(RNN, self).__init__(mode, *args, **kwargs)
class LSTM(RNNBase):
    r"""Multi-layer long short-term memory (LSTM) RNN applied to an input
    sequence.

    For each element of the sequence, every layer computes

    .. math::

        \begin{array}{ll}
        i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\
        f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\
        g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{(t-1)} + b_{hg}) \\
        o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\
        c_t = f_t c_{(t-1)} + i_t g_t \\
        h_t = o_t \tanh(c_t)
        \end{array}

    where :math:`h_t` and :math:`c_t` are the hidden and cell states at time
    `t`, :math:`x_t` the input at time `t`, :math:`h_{(t-1)}` the previous
    hidden state, and :math:`i_t, f_t, g_t, o_t` the input, forget, cell and
    output gates; :math:`\sigma` is the sigmoid function.

    Args:
        input_size: number of expected features in the input `x`
        hidden_size: number of features in the hidden state `h`
        num_layers: number of stacked recurrent layers. Default: 1
        bias: if ``False``, the layer does not use `b_ih` and `b_hh`.
            Default: ``True``
        batch_first: if ``True``, input and output tensors are provided as
            `(batch, seq, feature)`
        dropout: if non-zero, applies dropout on the outputs of every LSTM
            layer except the last, with this probability. Default: 0
        bidirectional: if ``True``, becomes a bidirectional LSTM.
            Default: ``False``

    Inputs: input, (h_0, c_0)
        - **input** `(seq_len, batch, input_size)`: input features; may also
          be a packed variable-length sequence (see
          :func:`torch.nn.utils.rnn.pack_padded_sequence` or
          :func:`torch.nn.utils.rnn.pack_sequence`).
        - **h_0** `(num_layers * num_directions, batch, hidden_size)`:
          initial hidden state.
        - **c_0** `(num_layers * num_directions, batch, hidden_size)`:
          initial cell state. Both default to zeros when omitted.

    Outputs: output, (h_n, c_n)
        - **output** `(seq_len, batch, num_directions * hidden_size)`: the
          last layer's hidden states for every step (packed if the input was
          packed). Directions can be separated with
          ``output.view(seq_len, batch, num_directions, hidden_size)``.
        - **h_n** `(num_layers * num_directions, batch, hidden_size)`: hidden
          state for `t = seq_len`; layers can be separated with
          ``h_n.view(num_layers, num_directions, batch, hidden_size)``
          (similarly for *c_n*).
        - **c_n** `(num_layers * num_directions, batch, hidden_size)`: cell
          state for `t = seq_len`.

    Attributes:
        weight_ih_l[k]: input-hidden weights of layer k
            `(W_ii|W_if|W_ig|W_io)`, shape `(4*hidden_size x input_size)`
        weight_hh_l[k]: hidden-hidden weights of layer k
            `(W_hi|W_hf|W_hg|W_ho)`, shape `(4*hidden_size x hidden_size)`
        bias_ih_l[k]: input-hidden bias of layer k
            `(b_ii|b_if|b_ig|b_io)`, shape `(4*hidden_size)`
        bias_hh_l[k]: hidden-hidden bias of layer k
            `(b_hi|b_hf|b_hg|b_ho)`, shape `(4*hidden_size)`

    Examples::

        >>> rnn = nn.LSTM(10, 20, 2)
        >>> input = torch.randn(5, 3, 10)
        >>> h0 = torch.randn(2, 3, 20)
        >>> c0 = torch.randn(2, 3, 20)
        >>> output, (hn, cn) = rnn(input, (h0, c0))
    """

    def __init__(self, *args, **kwargs):
        # All behaviour lives in RNNBase; LSTM merely fixes the mode string.
        super().__init__('LSTM', *args, **kwargs)
class GRU(RNNBase):
    r"""Multi-layer gated recurrent unit (GRU) RNN applied to an input
    sequence.

    For each element of the sequence, every layer computes

    .. math::

        \begin{array}{ll}
        r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
        z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
        n_t = \tanh(W_{in} x_t + b_{in} + r_t (W_{hn} h_{(t-1)}+ b_{hn})) \\
        h_t = (1 - z_t) n_t + z_t h_{(t-1)} \\
        \end{array}

    where :math:`h_t` is the hidden state at time `t`, :math:`x_t` the input
    at time `t`, :math:`h_{(t-1)}` the previous hidden state, and
    :math:`r_t, z_t, n_t` the reset, update and new gates;
    :math:`\sigma` is the sigmoid function.

    Args:
        input_size: number of expected features in the input `x`
        hidden_size: number of features in the hidden state `h`
        num_layers: number of stacked recurrent layers. Default: 1
        bias: if ``False``, the layer does not use `b_ih` and `b_hh`.
            Default: ``True``
        batch_first: if ``True``, input and output tensors are provided as
            `(batch, seq, feature)`
        dropout: if non-zero, applies dropout on the outputs of every GRU
            layer except the last, with this probability. Default: 0
        bidirectional: if ``True``, becomes a bidirectional GRU.
            Default: ``False``

    Inputs: input, h_0
        - **input** `(seq_len, batch, input_size)`: input features; may also
          be a packed variable-length sequence (see
          :func:`torch.nn.utils.rnn.pack_padded_sequence`).
        - **h_0** `(num_layers * num_directions, batch, hidden_size)`:
          initial hidden state; defaults to zeros when omitted.

    Outputs: output, h_n
        - **output** `(seq_len, batch, num_directions * hidden_size)`: the
          last layer's hidden states for every step (packed if the input was
          packed). Directions can be separated with
          ``output.view(seq_len, batch, num_directions, hidden_size)``.
        - **h_n** `(num_layers * num_directions, batch, hidden_size)`: hidden
          state for `t = seq_len`; layers can be separated with
          ``h_n.view(num_layers, num_directions, batch, hidden_size)``.

    Attributes:
        weight_ih_l[k]: input-hidden weights of layer k
            (W_ir|W_iz|W_in), shape `(3*hidden_size x input_size)`
        weight_hh_l[k]: hidden-hidden weights of layer k
            (W_hr|W_hz|W_hn), shape `(3*hidden_size x hidden_size)`
        bias_ih_l[k]: input-hidden bias of layer k
            (b_ir|b_iz|b_in), shape `(3*hidden_size)`
        bias_hh_l[k]: hidden-hidden bias of layer k
            (b_hr|b_hz|b_hn), shape `(3*hidden_size)`

    Examples::

        >>> rnn = nn.GRU(10, 20, 2)
        >>> input = torch.randn(5, 3, 10)
        >>> h0 = torch.randn(2, 3, 20)
        >>> output, hn = rnn(input, h0)
    """

    def __init__(self, *args, **kwargs):
        # All behaviour lives in RNNBase; GRU merely fixes the mode string.
        super().__init__('GRU', *args, **kwargs)
class RNNCellBase(Module):
    """Common base for the single-step recurrent cells.

    Provides the repr rendering and the input/hidden shape validation shared
    by RNNCell, LSTMCell and GRUCell.
    """

    def extra_repr(self):
        # Only options that differ from their defaults are rendered.
        parts = ['{input_size}, {hidden_size}']
        if 'bias' in self.__dict__ and self.bias is not True:
            parts.append('bias={bias}')
        if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
            parts.append('nonlinearity={nonlinearity}')
        return ', '.join(parts).format(**self.__dict__)

    def check_forward_input(self, input):
        # The feature dimension of `input` must match input_size.
        if input.size(1) != self.input_size:
            raise RuntimeError(
                "input has inconsistent input_size: got {}, expected {}".format(
                    input.size(1), self.input_size))

    def check_forward_hidden(self, input, hx, hidden_label=''):
        # Batch sizes of input and hidden state must agree...
        if input.size(0) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden{} batch size {}".format(
                    input.size(0), hidden_label, hx.size(0)))
        # ...and the hidden feature dimension must match hidden_size.
        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden{} has inconsistent hidden_size: got {}, expected {}".format(
                    hidden_label, hx.size(1), self.hidden_size))
class RNNCell(RNNCellBase):
    r"""A single-step Elman RNN cell with tanh or ReLU non-linearity.

    .. math::

        h' = \tanh(w_{ih} x + b_{ih} + w_{hh} h + b_{hh})

    With :attr:`nonlinearity`='relu', ReLU is used in place of tanh.

    Args:
        input_size: number of expected features in the input `x`
        hidden_size: number of features in the hidden state `h`
        bias: if ``False``, the layer does not use `b_ih` and `b_hh`.
            Default: ``True``
        nonlinearity: 'tanh' or 'relu'. Default: 'tanh'

    Inputs: input, hidden
        - **input** `(batch, input_size)`: input features
        - **hidden** `(batch, hidden_size)`: initial hidden state per batch
          element; defaults to zero when not provided.

    Outputs: h'
        - **h'** `(batch, hidden_size)`: next hidden state per batch element

    Attributes:
        weight_ih: input-hidden weights, shape `(input_size x hidden_size)`
        weight_hh: hidden-hidden weights, shape `(hidden_size x hidden_size)`
        bias_ih: input-hidden bias, shape `(hidden_size)`
        bias_hh: hidden-hidden bias, shape `(hidden_size)`

    Examples::

        >>> rnn = nn.RNNCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
                hx = rnn(input[i], hx)
                output.append(hx)
    """

    def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh"):
        super(RNNCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.nonlinearity = nonlinearity
        self.weight_ih = Parameter(torch.Tensor(hidden_size, input_size))
        self.weight_hh = Parameter(torch.Tensor(hidden_size, hidden_size))
        if bias:
            self.bias_ih = Parameter(torch.Tensor(hidden_size))
            self.bias_hh = Parameter(torch.Tensor(hidden_size))
        else:
            # Register as None so attribute access stays uniform without bias.
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)
        self.reset_parameters()

    def reset_parameters(self):
        # All weights and biases ~ U(-1/sqrt(hidden_size), 1/sqrt(hidden_size)).
        bound = 1.0 / math.sqrt(self.hidden_size)
        for param in self.parameters():
            param.data.uniform_(-bound, bound)

    def forward(self, input, hx):
        self.check_forward_input(input)
        self.check_forward_hidden(input, hx)
        # Resolve the backend kernel lazily so only the attribute for the
        # selected non-linearity is ever touched.
        if self.nonlinearity == "tanh":
            cell_fn = self._backend.RNNTanhCell
        elif self.nonlinearity == "relu":
            cell_fn = self._backend.RNNReLUCell
        else:
            raise RuntimeError(
                "Unknown nonlinearity: {}".format(self.nonlinearity))
        return cell_fn(
            input, hx,
            self.weight_ih, self.weight_hh,
            self.bias_ih, self.bias_hh,
        )
class LSTMCell(RNNCellBase):
    r"""A single-step long short-term memory (LSTM) cell.

    For one time step it computes

    .. math::

        \begin{array}{ll}
        i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
        f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
        g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
        o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
        c' = f * c + i * g \\
        h' = o \tanh(c') \\
        \end{array}

    where :math:`\sigma` is the sigmoid function.

    Args:
        input_size: number of expected features in the input `x`
        hidden_size: number of features in the hidden state `h`
        bias: if ``False``, the layer does not use the bias weights
            `b_ih` and `b_hh`. Default: ``True``

    Inputs: input, (h_0, c_0)
        - **input** `(batch, input_size)`: input features
        - **h_0** `(batch, hidden_size)`: initial hidden state
        - **c_0** `(batch, hidden_size)`: initial cell state
          If `(h_0, c_0)` is not provided, both default to zero.

    Outputs: h_1, c_1
        - **h_1** `(batch, hidden_size)`: next hidden state
        - **c_1** `(batch, hidden_size)`: next cell state

    Attributes:
        weight_ih: input-hidden weights, of shape `(4*hidden_size x input_size)`
        weight_hh: hidden-hidden weights, of shape `(4*hidden_size x hidden_size)`
        bias_ih: input-hidden bias, of shape `(4*hidden_size)`
        bias_hh: hidden-hidden bias, of shape `(4*hidden_size)`

    Examples::

        >>> rnn = nn.LSTMCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> cx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
                hx, cx = rnn(input[i], (hx, cx))
                output.append(hx)
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(LSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        gate_size = 4 * hidden_size  # i, f, g, o gate rows stacked
        self.weight_ih = Parameter(torch.Tensor(gate_size, input_size))
        self.weight_hh = Parameter(torch.Tensor(gate_size, hidden_size))
        if not bias:
            # Keep the attributes present (as None) so the parameter layout
            # does not depend on the `bias` flag.
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)
        else:
            self.bias_ih = Parameter(torch.Tensor(gate_size))
            self.bias_hh = Parameter(torch.Tensor(gate_size))
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in (-k, k), k = 1/sqrt(hidden_size), for every parameter.
        bound = 1.0 / math.sqrt(self.hidden_size)
        for param in self.parameters():
            param.data.uniform_(-bound, bound)

    def forward(self, input, hx):
        self.check_forward_input(input)
        self.check_forward_hidden(input, hx[0], '[0]')
        self.check_forward_hidden(input, hx[1], '[1]')
        return self._backend.LSTMCell(
            input, hx,
            self.weight_ih, self.weight_hh,
            self.bias_ih, self.bias_hh,
        )
class GRUCell(RNNCellBase):
    r"""A single-step gated recurrent unit (GRU) cell.

    For one time step it computes

    .. math::

        \begin{array}{ll}
        r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
        z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
        n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\
        h' = (1 - z) * n + z * h
        \end{array}

    where :math:`\sigma` is the sigmoid function.

    Args:
        input_size: number of expected features in the input `x`
        hidden_size: number of features in the hidden state `h`
        bias: if ``False``, the layer does not use the bias weights
            `b_ih` and `b_hh`. Default: ``True``

    Inputs: input, hidden
        - **input** `(batch, input_size)`: input features
        - **hidden** `(batch, hidden_size)`: initial hidden state
          (defaults to zero if not provided)

    Outputs: h'
        - **h'** `(batch, hidden_size)`: next hidden state

    Attributes:
        weight_ih: input-hidden weights, of shape `(3*hidden_size x input_size)`
        weight_hh: hidden-hidden weights, of shape `(3*hidden_size x hidden_size)`
        bias_ih: input-hidden bias, of shape `(3*hidden_size)`
        bias_hh: hidden-hidden bias, of shape `(3*hidden_size)`

    Examples::

        >>> rnn = nn.GRUCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
                hx = rnn(input[i], hx)
                output.append(hx)
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(GRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        gate_size = 3 * hidden_size  # r, z, n gate rows stacked
        self.weight_ih = Parameter(torch.Tensor(gate_size, input_size))
        self.weight_hh = Parameter(torch.Tensor(gate_size, hidden_size))
        if not bias:
            # Keep the attributes present (as None) so the parameter layout
            # does not depend on the `bias` flag.
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)
        else:
            self.bias_ih = Parameter(torch.Tensor(gate_size))
            self.bias_hh = Parameter(torch.Tensor(gate_size))
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in (-k, k), k = 1/sqrt(hidden_size), for every parameter.
        bound = 1.0 / math.sqrt(self.hidden_size)
        for param in self.parameters():
            param.data.uniform_(-bound, bound)

    def forward(self, input, hx):
        self.check_forward_input(input)
        self.check_forward_hidden(input, hx)
        return self._backend.GRUCell(
            input, hx,
            self.weight_ih, self.weight_hh,
            self.bias_ih, self.bias_hh,
        )
from .module import Module
from .linear import Linear, Bilinear
from .conv import Conv1d, Conv2d, Conv3d, \
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
Softmax, Softmax2d, LogSoftmax, ELU, SELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
CosineEmbeddingLoss, HingeEmbeddingLoss, MarginRankingLoss, \
MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, \
SmoothL1Loss, SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, PoissonNLLLoss
from .container import Container, Sequential, ModuleList, ParameterList
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, LPPool1d, LPPool2d, AdaptiveMaxPool1d, \
AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReplicationPad1d, ReplicationPad2d, \
ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
from .sparse import Embedding, EmbeddingBag
from .rnn import RNNBase, RNN, LSTM, GRU, \
RNNCell, LSTMCell, GRUCell
from .pixelshuffle import PixelShuffle
from .upsampling import UpsamplingNearest2d, UpsamplingBilinear2d, Upsample
from .distance import PairwiseDistance, CosineSimilarity
from .fold import Fold, Unfold
# Public API of torch.nn.modules: names re-exported by
# ``from torch.nn.modules import *``. Keep in sync with the imports above.
__all__ = [
    'Module', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
    'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
    'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'GLU', 'Hardshrink',
    'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'PReLU', 'Softsign', 'Softmin',
    'Tanhshrink', 'RReLU', 'L1Loss', 'NLLLoss', 'KLDivLoss', 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss',
    'NLLLoss2d', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'HingeEmbeddingLoss', 'MarginRankingLoss',
    'MultiLabelMarginLoss', 'MultiLabelSoftMarginLoss', 'MultiMarginLoss', 'SmoothL1Loss',
    'SoftMarginLoss', 'CrossEntropyLoss', 'Container', 'Sequential', 'ModuleList',
    'ParameterList', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
    'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d',
    'LPPool1d', 'LPPool2d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d',
    'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'Dropout', 'Dropout2d', 'Dropout3d', 'AlphaDropout',
    'ReflectionPad1d', 'ReflectionPad2d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
    'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCell', 'LSTMCell', 'GRUCell',
    'PixelShuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d', 'PairwiseDistance',
    'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d', 'AdaptiveAvgPool2d',
    'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad2d', 'ConstantPad1d', 'ConstantPad2d',
    'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
] | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/__init__.py | 0.819135 | 0.434641 | __init__.py | pypi |
from .batchnorm import _BatchNorm
from .. import functional as F
class _InstanceNorm(_BatchNorm):
    """Shared base of InstanceNorm1d/2d/3d.

    Reuses _BatchNorm's parameter/buffer layout, but with instance-norm
    defaults: no affine parameters and no running statistics unless enabled.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=False,
                 track_running_stats=False):
        super(_InstanceNorm, self).__init__(
            num_features, eps, momentum, affine, track_running_stats)
    def _check_input_dim(self, input):
        # Implemented by the concrete 1d/2d/3d subclass.
        raise NotImplementedError
    def _load_from_state_dict(self, state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs):
        # Read the serialization version recorded for this submodule, if any;
        # checkpoints written before the versioning scheme have no _metadata.
        try:
            version = state_dict._metadata[prefix[:-1]]["version"]
        except (AttributeError, KeyError):
            version = None
        # at version 1: removed running_mean and running_var when
        # track_running_stats=False (default)
        if version is None and not self.track_running_stats:
            # Old checkpoints may carry running stats this module no longer
            # owns; collect them so they can be reported and dropped.
            running_stats_keys = []
            for name in ('running_mean', 'running_var'):
                key = prefix + name
                if key in state_dict:
                    running_stats_keys.append(key)
            if len(running_stats_keys) > 0:
                error_msgs.append(
                    'Unexpected running stats buffer(s) {names} for {klass} '
                    'with track_running_stats=False. If state_dict is a '
                    'checkpoint saved before 0.4.0, this may be expected '
                    'because {klass} does not track running stats by default '
                    'since 0.4.0. Please remove these keys from state_dict. If '
                    'the running stats are actually needed, instead set '
                    'track_running_stats=True in {klass} to enable them. See '
                    'the documentation of {klass} for details.'
                    .format(names=" and ".join('"{}"'.format(k) for k in running_stats_keys),
                            klass=self.__class__.__name__))
                # Drop the stale keys so the base loader does not see them.
                for key in running_stats_keys:
                    state_dict.pop(key)
        super(_InstanceNorm, self)._load_from_state_dict(
            state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs)
    def forward(self, input):
        self._check_input_dim(input)
        # Instance statistics are used whenever training, or always when
        # running stats are not tracked (the boolean argument below).
        return F.instance_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats, self.momentum, self.eps)
class InstanceNorm1d(_InstanceNorm):
    r"""Applies Instance Normalization over a 2D or 3D input (a mini-batch of 1D
    inputs with optional additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
    .. math::
        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.
    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.
    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.
    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``False``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``
    Shape:
        - Input: :math:`(N, C, L)`
        - Output: :math:`(N, C, L)` (same shape as input)
    Examples::
        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm1d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm1d(100, affine=True)
        >>> input = torch.randn(20, 100, 40)
        >>> output = m(input)
    .. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
        https://arxiv.org/abs/1607.08022
    """
    def _check_input_dim(self, input):
        # Expect (N, C, L); anything else is a usage error.
        if input.dim() != 3:
            raise ValueError('expected 3D input (got {}D input)'
                             .format(input.dim()))
class InstanceNorm2d(_InstanceNorm):
    r"""Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs
    with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
    .. math::
        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.
    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.
    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.
    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``False``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``
    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    Examples::
        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm2d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm2d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
    .. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
        https://arxiv.org/abs/1607.08022
    """
    def _check_input_dim(self, input):
        # Expect (N, C, H, W); anything else is a usage error.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
class InstanceNorm3d(_InstanceNorm):
    r"""Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs
    with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
    .. math::
        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size C (where C is the input size) if :attr:`affine` is ``True``.
    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.
    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.
    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.
    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``False``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``
    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)
    Examples::
        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm3d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm3d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45, 10)
        >>> output = m(input)
    .. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
        https://arxiv.org/abs/1607.08022
    """
    def _check_input_dim(self, input):
        # Expect (N, C, D, H, W); anything else is a usage error.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
import math
import torch
from torch.nn.parameter import Parameter
from .. import functional as F
from .module import Module
class Linear(Module):
    r"""Applies a linear transformation to the incoming data: :math:`y = Ax + b`

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: if set to ``False``, no additive bias is learned.
            Default: ``True``

    Shape:
        - Input: :math:`(N, *, in\_features)` where :math:`*` means any number
          of additional dimensions
        - Output: :math:`(N, *, out\_features)` where all but the last
          dimension match the input shape.

    Attributes:
        weight: learnable weights of shape `(out_features x in_features)`
        bias: learnable bias of shape `(out_features)`

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
    """

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if not bias:
            # Register None so `self.bias` always exists, with or without bias.
            self.register_parameter('bias', None)
        else:
            self.bias = Parameter(torch.Tensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Fan-in based uniform init: U(-1/sqrt(in_features), 1/sqrt(in_features)).
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input):
        return F.linear(input, self.weight, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
class Bilinear(Module):
    r"""Applies a bilinear transformation to the incoming data:
    :math:`y = x_1 A x_2 + b`

    Args:
        in1_features: size of each first input sample
        in2_features: size of each second input sample
        out_features: size of each output sample
        bias: if set to ``False``, no additive bias is learned.
            Default: ``True``

    Shape:
        - Input: :math:`(N, *, \text{in1_features})`, :math:`(N, *, \text{in2_features})`
          where :math:`*` means any number of additional dimensions; all but
          the last dimension of the two inputs must match.
        - Output: :math:`(N, *, \text{out_features})` where all but the last
          dimension match the input shape.

    Attributes:
        weight: learnable weights of shape
            `(out_features x in1_features x in2_features)`
        bias: learnable bias of shape `(out_features)`

    Examples::

        >>> m = nn.Bilinear(20, 30, 40)
        >>> input1 = torch.randn(128, 20)
        >>> input2 = torch.randn(128, 30)
        >>> output = m(input1, input2)
        >>> print(output.size())
    """

    def __init__(self, in1_features, in2_features, out_features, bias=True):
        super(Bilinear, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in1_features, in2_features))
        if not bias:
            # Register None so `self.bias` always exists, with or without bias.
            self.register_parameter('bias', None)
        else:
            self.bias = Parameter(torch.Tensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init with a bound derived from the first input's fan-in.
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input1, input2):
        return F.bilinear(input1, input2, self.weight, self.bias)

    def extra_repr(self):
        return 'in1_features={}, in2_features={}, out_features={}, bias={}'.format(
            self.in1_features, self.in2_features, self.out_features, self.bias is not None
        )
# TODO: PartialLinear - maybe in sparse? | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/modules/linear.py | 0.911024 | 0.799638 | linear.py | pypi |
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch._thnn import type2backend
from .thnn.auto import function_by_name
import torch.backends.cudnn as cudnn
MODE_ZEROS = 0
MODE_BORDER = 1
def grid_sampler(input, grid, padding_mode):
    """Bilinearly sample `input` at the locations given by `grid`.

    Uses the fused cuDNN kernel when the tensor is cuDNN-acceptable, the
    padding mode is 'zeros' and the input is 4-D; otherwise falls back to
    the autograd GridSampler function.
    """
    # is_acceptable is evaluated unconditionally, mirroring the original
    # short-circuit order.
    cudnn_ok = cudnn.is_acceptable(input.data)
    if cudnn_ok and padding_mode == 'zeros' and input.dim() == 4:
        return torch.cudnn_grid_sampler(input, grid)
    return GridSampler.apply(input, grid, padding_mode)
def affine_grid_generator(theta, size):
    """Build an (N, H, W, 2) sampling grid from batched 2x3 affine matrices.

    CPU thetas go through the autograd AffineGridGenerator function; CUDA
    thetas require cuDNN and use the fused kernel.
    """
    if not theta.data.is_cuda:
        return AffineGridGenerator.apply(theta, size)
    if not cudnn.enabled:
        raise RuntimeError("AffineGridGenerator needs CuDNN for "
                           "processing CUDA inputs, but CuDNN is not enabled")
    if not cudnn.is_acceptable(theta.data):
        raise RuntimeError("AffineGridGenerator generator theta not acceptable for CuDNN")
    N, C, H, W = size
    return torch.cudnn_affine_grid_generator(theta, N, C, H, W)
# TODO: Port these completely into C++
class GridSampler(Function):
    """Autograd function for bilinear sampling of `input` at `grid` locations.

    Dispatches to the THNN Spatial (4-D input) / Volumetric (5-D input)
    bilinear grid sampler kernels; the padding mode travels on ctx as the
    integer constants MODE_ZEROS / MODE_BORDER.
    """
    @staticmethod
    def forward(ctx, input, grid, padding_mode='zeros'):
        # Both tensors are needed again in backward().
        ctx.save_for_backward(input, grid)
        if padding_mode == 'zeros':
            ctx.padding_mode = MODE_ZEROS
        elif padding_mode == 'border':
            ctx.padding_mode = MODE_BORDER
        else:
            raise ValueError("padding_mode needs to be 'zeros' or 'border', but got {}".format(padding_mode))
        grid_sz = grid.size()
        # Select the CPU/CUDA, dtype-specific THNN backend from the tensor type.
        backend = type2backend[input.type()]
        if input.dim() == 4:
            # Output spatial shape follows the grid: (N, C, grid_H, grid_W).
            output = input.new(grid_sz[0], input.size(1), grid_sz[1], grid_sz[2])
            backend.SpatialGridSamplerBilinear_updateOutput(backend.library_state, input, grid,
                                                            output, ctx.padding_mode)
        elif input.dim() == 5:
            output = input.new(grid_sz[0], input.size(1), grid_sz[1], grid_sz[2], grid_sz[3])
            backend.VolumetricGridSamplerBilinear_updateOutput(backend.library_state, input, grid,
                                                               output, ctx.padding_mode)
        else:
            raise ValueError("input has to be 4d or 5d but got input of shape: {}".format(input.shape))
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, grid = ctx.saved_tensors
        padding_mode = ctx.padding_mode
        backend = type2backend[input.type()]
        # The THNN kernels fill these preallocated gradients in place.
        grad_input = input.new(input.size())
        grad_grid = grid.new(grid.size())
        if input.dim() == 4:
            backend.SpatialGridSamplerBilinear_updateGradInput(
                backend.library_state, input, grad_input,
                grid, grad_grid, grad_output, padding_mode)
        elif input.dim() == 5:
            backend.VolumetricGridSamplerBilinear_updateGradInput(
                backend.library_state, input, grad_input,
                grid, grad_grid, grad_output, padding_mode)
        else:
            raise ValueError("input has to be 4d or 5d but got input of shape: {}".format(input.shape))
        # No gradient for the padding_mode argument.
        return grad_input, grad_grid, None
class AffineGridGenerator(Function):
    """Autograd function building an (N, H, W, 2) bilinear sampling grid from
    batched 2x3 affine matrices `theta` (CPU path).

    NOTE(review): CUDA thetas appear to be expected to go through
    torch.cudnn_affine_grid_generator in affine_grid_generator() above; the
    CUDA branches here end in `assert False` as an unreachable guard.
    """
    @staticmethod
    def _enforce_cudnn(input):
        if not cudnn.enabled:
            raise RuntimeError("AffineGridGenerator needs CuDNN for "
                               "processing CUDA inputs, but CuDNN is not enabled")
        assert cudnn.is_acceptable(input)
    @staticmethod
    def forward(ctx, theta, size):
        assert type(size) == torch.Size
        N, C, H, W = size
        ctx.size = size
        if theta.is_cuda:
            AffineGridGenerator._enforce_cudnn(theta)
            # Guard: the CUDA case is handled by the cuDNN op, not here.
            assert False
        ctx.is_cuda = False
        # Homogeneous base grid: x and y coordinates in [-1, 1] plus a
        # constant-1 channel so theta's translation column applies.
        base_grid = theta.new(N, H, W, 3)
        linear_points = torch.linspace(-1, 1, W) if W > 1 else torch.Tensor([-1])
        base_grid[:, :, :, 0] = torch.ger(torch.ones(H), linear_points).expand_as(base_grid[:, :, :, 0])
        linear_points = torch.linspace(-1, 1, H) if H > 1 else torch.Tensor([-1])
        base_grid[:, :, :, 1] = torch.ger(linear_points, torch.ones(W)).expand_as(base_grid[:, :, :, 1])
        base_grid[:, :, :, 2] = 1
        ctx.base_grid = base_grid
        # grid = base_grid @ theta^T, batched over N.
        grid = torch.bmm(base_grid.view(N, H * W, 3), theta.transpose(1, 2))
        grid = grid.view(N, H, W, 2)
        return grid
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_grid):
        N, C, H, W = ctx.size
        assert grad_grid.size() == torch.Size([N, H, W, 2])
        assert ctx.is_cuda == grad_grid.is_cuda
        if grad_grid.is_cuda:
            AffineGridGenerator._enforce_cudnn(grad_grid)
            assert False
        base_grid = ctx.base_grid
        # d(theta) = base_grid^T @ d(grid), batched; mirrors forward's bmm.
        grad_theta = torch.bmm(
            base_grid.view(N, H * W, 3).transpose(1, 2),
            grad_grid.view(N, H * W, 2))
        grad_theta = grad_theta.transpose(1, 2)
        # `size` is non-differentiable.
        return grad_theta, None
from torch.autograd import Function, Variable
from torch.autograd._functions.utils import prepare_onnx_paddings
class ConstantPadNd(Function):
    """Autograd function padding the trailing ``len(pad)//2`` dimensions of a
    tensor with a constant value; negative pad entries crop instead of pad.

    ``pad`` follows the F.pad convention: (left_last, right_last,
    left_second_last, ...), i.e. it starts at the last dimension and works
    backwards.
    """
    @staticmethod
    def symbolic(g, input, pad, value=0):
        # ONNX wants per-dimension (begin, end) pads in dimension order.
        paddings = prepare_onnx_paddings(len(input.type().sizes()), pad)
        return g.op("Pad", input, pads_i=paddings, mode_s="constant", value_f=value)
    @staticmethod
    def forward(ctx, input, pad, value=0):
        ctx.pad = pad
        ctx.value = value
        ctx.input_size = input.size()
        ctx.l_inp = len(input.size())
        # Regroup the flat pad list into (before, after) pairs, reversed so
        # the pairs line up with the trailing input dimensions in order.
        ctx.pad_tup = tuple([(a, b) for a, b in zip(pad[:-1:2], pad[1::2])][::-1])
        ctx.l_pad = len(ctx.pad_tup)
        ctx.l_diff = ctx.l_inp - ctx.l_pad
        assert ctx.l_inp >= ctx.l_pad
        # New size of each padded dimension (negative pads shrink it).
        new_dim = tuple([sum((d,) + ctx.pad_tup[i]) for i, d in enumerate(input.size()[-ctx.l_pad:])])
        assert all([d > 0 for d in new_dim]), 'input is too small'
        # crop input if necessary
        output = input.new(input.size()[:(ctx.l_diff)] + new_dim).fill_(ctx.value)
        c_input = input
        # Negative pads: keep only the surviving slice of the input.
        for i, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] < 0:
                c_input = c_input.narrow(i, -p[0], c_input.size(i) + p[0])
            if p[1] < 0:
                c_input = c_input.narrow(i, 0, c_input.size(i) + p[1])
        # crop output if necessary
        c_output = output
        # Positive pads: view the interior region of the output that receives
        # the (possibly cropped) input data; the border keeps `value`.
        for i, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] > 0:
                c_output = c_output.narrow(i, p[0], c_output.size(i) - p[0])
            if p[1] > 0:
                c_output = c_output.narrow(i, 0, c_output.size(i) - p[1])
        c_output.copy_(c_input)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # Gradient is grad_output with the padded border stripped; regions the
        # forward pass cropped away keep zero gradient.
        grad_input = Variable(grad_output.data.new(ctx.input_size).zero_())
        grad_input_slices = [slice(0, x,) for x in ctx.input_size]
        def narrow_slice(dim, start, length):
            # Shrink the recorded slice for `dim` to [start, start + length).
            grad_input_slices[dim] = (slice(grad_input_slices[dim].start + start,
                                            grad_input_slices[dim].start + start + length))
        def slice_length(dim):
            return grad_input_slices[dim].stop - grad_input_slices[dim].start
        # crop grad_input if necessary
        for i, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] < 0:
                narrow_slice(i, -p[0], slice_length(i) + p[0])
            if p[1] < 0:
                narrow_slice(i, 0, slice_length(i) + p[1])
        # crop grad_output if necessary
        cg_output = grad_output
        for i_s, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] > 0:
                cg_output = cg_output.narrow(i_s, p[0], cg_output.size(i_s) - p[0])
            if p[1] > 0:
                cg_output = cg_output.narrow(i_s, 0, cg_output.size(i_s) - p[1])
        gis = tuple(grad_input_slices)
        grad_input[gis] = cg_output
        # `pad` and `value` receive no gradient.
        return grad_input, None, None
import torch
from torch.autograd.function import InplaceFunction
from itertools import repeat
class Dropout(InplaceFunction):
    """Autograd function implementing inverted dropout.

    In training mode each element is zeroed with probability ``p`` and the
    survivors are rescaled by ``1 / (1 - p)`` so the expected activation is
    unchanged; in eval mode (or with ``p == 0``) the input passes through.
    """

    @staticmethod
    def _make_noise(input):
        # Uninitialized tensor with input's shape; filled in forward().
        return input.new().resize_as_(input)

    @staticmethod
    def symbolic(g, input, p=0.5, train=False, inplace=False):
        # See Note [Export inplace]
        r, _ = g.op("Dropout", input, ratio_f=p, is_test_i=not train, outputs=2)
        return r

    @classmethod
    def forward(cls, ctx, input, p=0.5, train=False, inplace=False):
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        ctx.p = p
        ctx.train = train
        ctx.inplace = inplace
        # Identity in eval mode or when nothing can be dropped.
        if not ctx.train or ctx.p == 0:
            return input
        if ctx.inplace:
            ctx.mark_dirty(input)
            output = input
        else:
            output = input.clone()
        noise = cls._make_noise(input)
        if ctx.p == 1:
            # Everything is dropped; also avoids dividing by (1 - p) == 0.
            noise.fill_(0)
        else:
            # Inverted dropout: keep with prob (1 - p), rescale by 1 / (1 - p).
            noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)
        ctx.noise = noise.expand_as(input)
        output.mul_(ctx.noise)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # Gradient flows only through kept units; p/train/inplace get None.
        if ctx.p > 0 and ctx.train:
            return grad_output * ctx.noise, None, None, None
        return grad_output, None, None, None
class FeatureDropout(Dropout):
    """Channel-wise dropout: one Bernoulli draw per (sample, channel).

    The noise tensor has shape (N, C, 1, 1, ...) and broadcasts over the
    remaining dimensions, so whole feature maps are kept or zeroed together.
    """

    @staticmethod
    def symbolic(g, input, p=0.5, train=False, inplace=False):
        # See Note [Export inplace]
        # NB: In inference mode, FeatureDropout is exported as an identity op.
        from torch.onnx.symbolic import _unimplemented
        if train:
            return _unimplemented("FeatureDropout", "training mode")
        return input

    @staticmethod
    def _make_noise(input):
        # Singleton trailing dims make the noise broadcast across space.
        noise_shape = (input.size(0), input.size(1)) + (1,) * (input.dim() - 2)
        return input.new().resize_(*noise_shape)
import itertools
import warnings
from functools import partial

import torch
import torch.backends.cudnn as cudnn
from torch.autograd import NestedIOFunction

from .. import functional as F
from .thnn import rnnFusedPointwise as fusedBackend

try:
    import torch.backends.cudnn.rnn
except ImportError:
    pass
def RNNReLUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """One Elman RNN step with ReLU activation.

    h' = relu(W_ih x + b_ih + W_hh h + b_hh)
    """
    pre_activation = F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh)
    return F.relu(pre_activation)
def RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """One Elman RNN step with tanh activation.

    h' = tanh(W_ih x + b_ih + W_hh h + b_hh)
    """
    pre_activation = F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh)
    return F.tanh(pre_activation)
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """One LSTM step. `hidden` is the (h, c) pair; returns (h', c').

    On CUDA the pointwise gate math is dispatched to a fused kernel.
    """
    if input.is_cuda:
        igates = F.linear(input, w_ih)
        hgates = F.linear(hidden[0], w_hh)
        state = fusedBackend.LSTMFused.apply
        if b_ih is None:
            return state(igates, hgates, hidden[1])
        return state(igates, hgates, hidden[1], b_ih, b_hh)

    hx, cx = hidden
    # All four gate pre-activations in one pair of GEMMs; the rows of the
    # weight matrices are ordered i, f, g, o.
    i, f, g, o = (F.linear(input, w_ih, b_ih) +
                  F.linear(hx, w_hh, b_hh)).chunk(4, 1)
    i = F.sigmoid(i)
    f = F.sigmoid(f)
    g = F.tanh(g)
    o = F.sigmoid(o)
    cy = (f * cx) + (i * g)
    hy = o * F.tanh(cy)
    return hy, cy
def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """One GRU step; returns the next hidden state h'.

    On CUDA the pointwise gate math is dispatched to a fused kernel.
    """
    if input.is_cuda:
        gi = F.linear(input, w_ih)
        gh = F.linear(hidden, w_hh)
        state = fusedBackend.GRUFused.apply
        if b_ih is None:
            return state(gi, gh, hidden)
        return state(gi, gh, hidden, b_ih, b_hh)

    # Gate pre-activations; the weight-matrix rows are ordered r, z, n.
    i_r, i_z, i_n = F.linear(input, w_ih, b_ih).chunk(3, 1)
    h_r, h_z, h_n = F.linear(hidden, w_hh, b_hh).chunk(3, 1)
    reset = F.sigmoid(i_r + h_r)
    update = F.sigmoid(i_z + h_z)
    candidate = F.tanh(i_n + reset * h_n)
    # h' = (1 - z) * n + z * h, written as n + z * (h - n).
    return candidate + update * (hidden - candidate)
def StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):
    """Stack ``num_layers`` recurrent layers built from the per-direction
    sequence runners in ``inners`` (one entry for a unidirectional stack,
    two — forward and reverse — for bidirectional).

    Returns a ``forward(input, hidden, weight, batch_sizes)`` closure that
    yields ``(next_hidden, output)``.
    """
    num_directions = len(inners)
    total_layers = num_layers * num_directions

    def forward(input, hidden, weight, batch_sizes):
        assert(len(weight) == total_layers)
        next_hidden = []

        if lstm:
            # Regroup the ((all h), (all c)) pair into one (h, c) per layer.
            hidden = list(zip(*hidden))

        for i in range(num_layers):
            all_output = []
            for j, inner in enumerate(inners):
                # Flat index of this (layer, direction) slot.
                l = i * num_directions + j

                hy, output = inner(input, hidden[l], weight[l], batch_sizes)
                next_hidden.append(hy)
                all_output.append(output)

            # Concatenate the per-direction outputs along the feature dim;
            # the result feeds the next layer.
            input = torch.cat(all_output, input.dim() - 1)

            # Inter-layer dropout only — never after the last layer.
            if dropout != 0 and i < num_layers - 1:
                input = F.dropout(input, p=dropout, training=train, inplace=False)

        if lstm:
            # Re-stack per-layer (h, c) pairs into two (total_layers, ...) tensors.
            next_h, next_c = zip(*next_hidden)
            next_hidden = (
                torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),
                torch.cat(next_c, 0).view(total_layers, *next_c[0].size())
            )
        else:
            next_hidden = torch.cat(next_hidden, 0).view(
                total_layers, *next_hidden[0].size())

        return next_hidden, input

    return forward
def Recurrent(inner, reverse=False):
    """Wrap the step function ``inner`` into a loop over the time dimension.

    Returns ``forward(input, hidden, weight, batch_sizes) -> (hidden, output)``
    for fixed-length sequences; ``batch_sizes`` is ignored.  With
    ``reverse=True`` the sequence is processed back-to-front but the output
    is returned in original time order.
    """
    def forward(input, hidden, weight, batch_sizes):
        seq_len = input.size(0)
        time_steps = range(seq_len - 1, -1, -1) if reverse else range(seq_len)
        outputs = []
        for t in time_steps:
            hidden = inner(input[t], hidden, *weight)
            # LSTM steps return (h, c); only h goes into the output sequence.
            outputs.append(hidden[0] if isinstance(hidden, tuple) else hidden)
        if reverse:
            outputs.reverse()
        stacked = torch.cat(outputs, 0).view(seq_len, *outputs[0].size())
        return hidden, stacked

    return forward
def variable_recurrent_factory(inner, reverse=False):
    """Variable-length (packed-sequence) counterpart of :func:`Recurrent`."""
    return VariableRecurrentReverse(inner) if reverse else VariableRecurrent(inner)
def VariableRecurrent(inner):
    """Run the step function ``inner`` forward over a packed sequence.

    ``batch_sizes[t]`` is the number of still-active sequences at step ``t``
    (non-increasing).  As sequences end, their hidden-state rows are split
    off and stashed so the final hidden state covers every sequence in the
    batch.  Returns ``forward(input, hidden, weight, batch_sizes) ->
    (hidden, output)``.
    """
    def forward(input, hidden, weight, batch_sizes):
        output = []
        input_offset = 0
        last_batch_size = batch_sizes[0]
        hiddens = []
        # Non-LSTM cells carry a single tensor; normalize to a 1-tuple so the
        # slicing logic below is uniform.
        flat_hidden = not isinstance(hidden, tuple)
        if flat_hidden:
            hidden = (hidden,)
        for batch_size in batch_sizes:
            step_input = input[input_offset:input_offset + batch_size]
            input_offset += batch_size

            dec = last_batch_size - batch_size
            if dec > 0:
                # `dec` sequences just finished: save their final hidden rows
                # and shrink the working hidden state accordingly.
                hiddens.append(tuple(h[-dec:] for h in hidden))
                hidden = tuple(h[:-dec] for h in hidden)
            last_batch_size = batch_size

            if flat_hidden:
                hidden = (inner(step_input, hidden[0], *weight),)
            else:
                hidden = inner(step_input, hidden, *weight)

            output.append(hidden[0])
        hiddens.append(hidden)
        # Stashed pieces were collected longest-sequence-last; reverse so the
        # concatenation restores the original batch order.
        hiddens.reverse()

        hidden = tuple(torch.cat(h, 0) for h in zip(*hiddens))
        assert hidden[0].size(0) == batch_sizes[0]
        if flat_hidden:
            hidden = hidden[0]
        output = torch.cat(output, 0)

        return hidden, output

    return forward
def VariableRecurrentReverse(inner):
    """Run the step function ``inner`` backward over a packed sequence.

    Mirror image of :func:`VariableRecurrent`: iteration starts from the last
    time step, and rows of the *initial* hidden state are re-attached as
    earlier (longer-batch) time steps come into play.  Returns
    ``forward(input, hidden, weight, batch_sizes) -> (hidden, output)``.
    """
    def forward(input, hidden, weight, batch_sizes):
        output = []
        input_offset = input.size(0)
        last_batch_size = batch_sizes[-1]
        initial_hidden = hidden
        # Non-LSTM cells carry a single tensor; normalize to a 1-tuple.
        flat_hidden = not isinstance(hidden, tuple)
        if flat_hidden:
            hidden = (hidden,)
            initial_hidden = (initial_hidden,)
        # Start from only the sequences active at the final time step.
        hidden = tuple(h[:batch_sizes[-1]] for h in hidden)
        for i in reversed(range(len(batch_sizes))):
            batch_size = batch_sizes[i]
            inc = batch_size - last_batch_size
            if inc > 0:
                # More sequences are active at this (earlier) step: pull their
                # starting rows back in from the initial hidden state.
                hidden = tuple(torch.cat((h, ih[last_batch_size:batch_size]), 0)
                               for h, ih in zip(hidden, initial_hidden))
            last_batch_size = batch_size
            step_input = input[input_offset - batch_size:input_offset]
            input_offset -= batch_size

            if flat_hidden:
                hidden = (inner(step_input, hidden[0], *weight),)
            else:
                hidden = inner(step_input, hidden, *weight)
            output.append(hidden[0])

        # Outputs were produced back-to-front; restore time order.
        output.reverse()
        output = torch.cat(output, 0)
        if flat_hidden:
            hidden = hidden[0]
        return hidden, output

    return forward
def AutogradRNN(mode, input_size, hidden_size, num_layers=1, batch_first=False,
                dropout=0, train=True, bidirectional=False, variable_length=False,
                dropout_state=None, flat_weight=None):
    """Pure-autograd RNN driver used when cuDNN is unavailable.

    Builds a stacked (optionally bidirectional, optionally packed-sequence)
    runner around the per-step cell selected by ``mode`` and returns a
    ``forward(input, weight, hidden, batch_sizes)`` closure.
    """
    cells = {
        'RNN_RELU': RNNReLUCell,
        'RNN_TANH': RNNTanhCell,
        'LSTM': LSTMCell,
        'GRU': GRUCell,
    }
    if mode not in cells:
        raise Exception('Unknown mode: {}'.format(mode))
    cell = cells[mode]

    rec_factory = variable_recurrent_factory if variable_length else Recurrent

    if bidirectional:
        layer = (rec_factory(cell), rec_factory(cell, reverse=True))
    else:
        layer = (rec_factory(cell),)

    func = StackedRNN(layer,
                      num_layers,
                      (mode == 'LSTM'),
                      dropout=dropout,
                      train=train)

    def forward(input, weight, hidden, batch_sizes):
        # Packed sequences are always time-major; only transpose dense input.
        needs_transpose = batch_first and not variable_length
        if needs_transpose:
            input = input.transpose(0, 1)

        nexth, output = func(input, hidden, weight, batch_sizes)

        if needs_transpose:
            output = output.transpose(0, 1)

        return output, nexth

    return forward
def CudnnRNN(mode, input_size, hidden_size, num_layers=1,
             batch_first=False, dropout=0, train=True, bidirectional=False,
             variable_length=False, dropout_state=None, flat_weight=None):
    """cuDNN-backed RNN driver; returns ``forward(input, weight, hx,
    batch_sizes)``.

    Same call interface as :func:`AutogradRNN`, but dispatches to the fused
    ``torch._cudnn_rnn`` kernel.  ``flat_weight`` is the single contiguous
    weight buffer produced by ``flatten_parameters()``; passing ``None``
    still works but forces a compaction on every call.
    """
    if dropout_state is None:
        dropout_state = {}
    mode = cudnn.rnn.get_cudnn_mode(mode)
    # TODO: This is really goofy way of using the Torch RNG to get a random number
    dropout_seed = int(torch.IntTensor(1).random_())
    if flat_weight is None:
        warnings.warn("RNN module weights are not part of single contiguous "
                      "chunk of memory. This means they need to be compacted "
                      "at every call, possibly greatly increasing memory usage. "
                      "To compact weights again call flatten_parameters().", stacklevel=5)

    def forward(input, weight, hx, batch_sizes):
        # LSTM hidden state is an (h, c) pair; other modes use a single tensor.
        if mode == cudnn.CUDNN_LSTM:
            hx, cx = hx
        else:
            cx = None

        # NOTE(review): `handle` is acquired but never used below — presumably
        # kept for its side effect of initializing cuDNN; confirm.
        handle = cudnn.get_handle()
        with torch.cuda.device(input.get_device()):
            dropout_ts = cudnn.rnn.init_dropout_state(torch.uint8, torch.device('cuda'), dropout,
                                                      train, dropout_seed, dropout_state)

            weight_arr = list(itertools.chain.from_iterable(weight))
            weight_stride0 = len(weight[0])

            output, hy, cy, reserve, new_weight_buf = torch._cudnn_rnn(
                input, weight_arr, weight_stride0,
                flat_weight,
                hx, cx,
                mode, hidden_size, num_layers,
                batch_first, dropout, train, bool(bidirectional),
                list(batch_sizes.data) if variable_length else (),
                dropout_ts)

        if cx is not None:
            return (output, (hy, cy))
        else:
            return (output, hy)

    return forward
def RNN(*args, **kwargs):
    """Dispatching RNN entry point.

    Returns a ``forward`` that, per call, picks the cuDNN implementation when
    ``cudnn.is_acceptable(input)`` holds and the autograd fallback otherwise,
    and installs an ONNX symbolic override when the JIT tracer is active.
    """
    def forward(input, *fargs, **fkwargs):
        if cudnn.is_acceptable(input.data):
            func = CudnnRNN(*args, **kwargs)
        else:
            func = AutogradRNN(*args, **kwargs)

        # Hack for the tracer that allows us to represent RNNs as single
        # nodes and export them to ONNX in this form
        # Check the first argument explicitly to reduce the overhead of creating
        # the lambda. We need special handling here because the forward()
        # function gets reconstructed each and every time when RNN() is invoked
        # and we don't want to pay the cost of decorator invocation
        import torch
        if torch._C._jit_is_tracing(input):
            import torch.onnx.symbolic
            sym = torch.onnx.symbolic.RNN_symbolic_builder(*args, **kwargs)
            cell_type = args[0]
            bound_symbolic = partial(torch.onnx.symbolic.rnn_trace_override_symbolic,
                                     cell_type, func, sym)
            decorator = torch.onnx.symbolic_override_first_arg_based(bound_symbolic)
            func = decorator(func)

        return func(input, *fargs, **fkwargs)

    return forward
from torch.autograd.function import Function, once_differentiable
from torch._thnn import type2backend
from . import _all_functions
class Col2Im(Function):
    """THNN-backed col2im (the inverse of im2col / ``F.unfold``): folds
    columns of sliding local blocks back into an image of ``output_size``.
    Overlapping values are summed."""

    @staticmethod
    def forward(ctx, input, output_size, kernel_size, dilation, padding, stride):
        # Stash the geometry; the backward pass is im2col with the same
        # parameters.
        ctx.output_size = output_size
        ctx.kernel_size = kernel_size
        ctx.dilation = dilation
        ctx.padding = padding
        ctx.stride = stride
        ctx._backend = type2backend[input.type()]
        output = input.new()
        ctx._backend.Col2Im_updateOutput(ctx._backend.library_state,
                                         input, output,
                                         output_size[0], output_size[1],
                                         kernel_size[0], kernel_size[1],
                                         dilation[0], dilation[1],
                                         padding[0], padding[1],
                                         stride[0], stride[1])
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_input = grad_output.new()
        ctx._backend.Col2Im_updateGradInput(ctx._backend.library_state,
                                            grad_output,
                                            grad_input,
                                            ctx.kernel_size[0], ctx.kernel_size[1],
                                            ctx.dilation[0], ctx.dilation[1],
                                            ctx.padding[0], ctx.padding[1],
                                            ctx.stride[0], ctx.stride[1])
        # One gradient per forward argument; the geometry args get None.
        return grad_input, None, None, None, None, None
class Im2Col(Function):
    """THNN-backed im2col (``F.unfold``): extracts sliding local blocks from
    a 4-D (N, C, H, W) input into columns."""

    @staticmethod
    def forward(ctx, input, kernel_size, dilation, padding, stride):
        assert input.dim() == 4
        # Stash the geometry plus the spatial size, which backward's col2im
        # needs to reconstruct the input shape.
        ctx.kernel_size = kernel_size
        ctx.dilation = dilation
        ctx.padding = padding
        ctx.stride = stride
        ctx.input_size = (input.size(2), input.size(3))
        ctx._backend = type2backend[input.type()]
        output = input.new()
        ctx._backend.Im2Col_updateOutput(ctx._backend.library_state,
                                         input, output,
                                         kernel_size[0], kernel_size[1],
                                         dilation[0], dilation[1],
                                         padding[0], padding[1],
                                         stride[0], stride[1])
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_input = grad_output.new()
        ctx._backend.Im2Col_updateGradInput(ctx._backend.library_state,
                                            grad_output,
                                            grad_input,
                                            ctx.input_size[0], ctx.input_size[1],
                                            ctx.kernel_size[0], ctx.kernel_size[1],
                                            ctx.dilation[0], ctx.dilation[1],
                                            ctx.padding[0], ctx.padding[1],
                                            ctx.stride[0], ctx.stride[1])
        # One gradient per forward argument; the geometry args get None.
        return grad_input, None, None, None, None
import torch
from torch.autograd.function import Function, InplaceFunction, once_differentiable
from torch._thnn import type2backend
class GRUFused(Function):
    """Wrapper around the fused GRU pointwise THNN kernel.

    Takes the pre-computed input-side (``input_gate``) and hidden-side
    (``hidden_gate``) projections plus the previous hidden state ``hx`` and
    applies the GRU gate math in a single kernel call.
    """

    @staticmethod
    def forward(ctx, input_gate, hidden_gate, hx, ibias=None, hbias=None):
        ctx.backend = type2backend[input_gate.type()]

        hy = input_gate.new()
        # Scratch buffer for intermediate gate values the kernel writes for
        # use in backward (5 values per hidden element).
        workspace = input_gate.new(hx.numel() * 5)

        ctx.has_bias = False
        if ibias is not None:
            ctx.has_bias = True
            # Promote 1-D biases to (1, N) so they broadcast over the batch.
            if ibias.dim() == 1:
                ibias = ibias.unsqueeze(0)
            if hbias.dim() == 1:
                hbias = hbias.unsqueeze(0)

        ctx.backend.GRUFused_updateOutput(
            ctx.backend.library_state,
            input_gate, hidden_gate, ibias, hbias, hx, hy, workspace)

        ctx.workspace = workspace
        ctx.igate_size = input_gate.size()
        ctx.hgate_size = hidden_gate.size()

        return hy

    @staticmethod
    @once_differentiable
    def backward(ctx, gradOutput):
        ctx.backend = type2backend[gradOutput.type()]

        gradInputHx = gradOutput.new()
        gradInInput = gradOutput.new(*ctx.igate_size)
        gradInHidden = gradOutput.new(*ctx.hgate_size)

        ctx.backend.GRUFused_updateGradInput(
            ctx.backend.library_state,
            gradInInput, gradInHidden, gradOutput, gradInputHx, ctx.workspace)

        gb1 = gb2 = None
        if ctx.has_bias:
            # Bias gradients: sum the gate gradients over the batch dim.
            gb1 = gradInInput.sum(0, keepdim=False)
            gb2 = gradInHidden.sum(0, keepdim=False)

        return gradInInput, gradInHidden, gradInputHx, gb1, gb2
class LSTMFused(Function):
    """Wrapper around the fused LSTM pointwise THNN kernel.

    Takes the pre-computed input-side and hidden-side gate projections plus
    the previous cell state ``cx`` and returns ``(hy, cy)``.
    """

    @staticmethod
    def forward(ctx, input_gate, hidden_gate, cx, ibias=None, hbias=None):
        ctx.backend = type2backend[input_gate.type()]

        hy = input_gate.new()
        cy = input_gate.new()

        ctx.has_bias = False
        if ibias is not None:
            ctx.has_bias = True
            # Promote 1-D biases to (1, N) so they broadcast over the batch.
            if ibias.dim() == 1:
                ibias = ibias.unsqueeze(0)
            if hbias.dim() == 1:
                hbias = hbias.unsqueeze(0)

        # input_gate gets overwritten with some intermediate values to use in backwards
        ctx.backend.LSTMFused_updateOutput(
            ctx.backend.library_state,
            input_gate, hidden_gate,
            ibias, hbias,
            cx, hy, cy)

        ctx.hgate_size = hidden_gate.size()
        ctx.save_for_backward(input_gate, cx, cy)

        return hy, cy

    @staticmethod
    @once_differentiable
    def backward(ctx, *gradOutput):
        ctx.backend = type2backend[gradOutput[0].type()]

        gradInputCx = gradOutput[0].new()
        gradInGates = gradOutput[0].new(*ctx.hgate_size)

        # saved_tens holds the intermediates the kernel wrote into input_gate.
        saved_tens, cx, cy = ctx.saved_tensors
        ctx.backend.LSTMFused_updateGradInput(
            ctx.backend.library_state,
            saved_tens, gradInGates, cx, cy,
            gradOutput[0], gradOutput[1], gradInputCx)

        gb1 = gb2 = None
        if ctx.has_bias:
            # NOTE(review): a single gradInGates buffer is returned for both
            # gate inputs and summed for both biases — presumably because the
            # kernel adds igates + hgates + biases, so all four share the same
            # gradient; confirm against the THNN kernel.
            gb1 = gradInGates.sum(0, keepdim=False)
            gb2 = gradInGates.sum(0, keepdim=False)

        return gradInGates, gradInGates, gradInputCx, gb1, gb2
import torch
from torch.autograd.function import Function
from torch._thnn import type2backend
from . import _all_functions
class CrossMapLRN2d(Function):
    """Local response normalization across channels (AlexNet-style LRN).

    Old-style autograd Function: each instance is single-use and carries the
    ``scale`` buffer (the per-element normalization denominator) from forward
    to backward.  Uses the THNN ``SpatialCrossMapLRN`` kernels when the
    backend provides them, otherwise falls back to a manual implementation.
    """

    def __init__(self, size, alpha=1e-4, beta=0.75, k=1):
        super(CrossMapLRN2d, self).__init__()
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k
        self._backend = None
        # Per-element denominator (k + alpha/size * local sum of squares),
        # filled in forward and reused in backward.
        self.scale = None

    def forward(self, input):
        assert input.dim() == 4

        self.scale = self.scale or input.new()
        output = input.new()

        backend = type2backend[input.type()]
        if backend is not None:
            try:
                # Probe for the fused kernel; fall back if it is missing.
                backend.SpatialCrossMapLRN_updateOutput
                self._backend = backend
            except NotImplementedError:
                pass

        if self._backend is not None:
            self._backend.SpatialCrossMapLRN_updateOutput(
                self._backend.library_state,
                input,
                output,
                self.scale,
                self.size,
                self.alpha,
                self.beta,
                self.k
            )
        else:
            batch_size = input.size(0)
            channels = input.size(1)
            input_height = input.size(2)
            input_width = input.size(3)

            output.resize_as_(input)
            self.scale.resize_as_(input)

            # use output storage as temporary buffer
            input_square = output
            torch.pow(input, 2, out=input_square)

            pre_pad = int((self.size - 1) / 2 + 1)
            pre_pad_crop = channels if pre_pad > channels else pre_pad

            scale_first = self.scale.select(1, 0)
            scale_first.zero_()
            # compute first feature map normalization
            for c in range(pre_pad_crop):
                scale_first.add_(input_square.select(1, c))

            # reuse computations for next feature maps normalization
            # by adding the next feature map and removing the previous
            for c in range(1, channels):
                scale_previous = self.scale.select(1, c - 1)
                scale_current = self.scale.select(1, c)
                scale_current.copy_(scale_previous)
                if c < channels - pre_pad + 1:
                    square_next = input_square.select(1, c + pre_pad - 1)
                    scale_current.add_(1, square_next)

                if c > pre_pad:
                    square_previous = input_square.select(1, c - pre_pad)
                    scale_current.add_(-1, square_previous)

            # scale = k + alpha/size * (windowed sum of squares)
            self.scale.mul_(self.alpha / self.size).add_(self.k)

            # output = input * scale^(-beta)
            torch.pow(self.scale, -self.beta, out=output)
            output.mul_(input)

        self.save_for_backward(input, output)
        return output

    def backward(self, grad_output):
        input, output = self.saved_tensors
        grad_input = grad_output.new()

        if self._backend is not None:
            self._backend.SpatialCrossMapLRN_updateGradInput(
                self._backend.library_state,
                input,
                grad_output,
                grad_input,
                self.scale,
                output,
                self.size,
                self.alpha,
                self.beta,
                self.k
            )
        else:
            batch_size = input.size(0)
            channels = input.size(1)
            input_height = input.size(2)
            input_width = input.size(3)

            # Channel-padded buffer for the output/scale ratio, plus a running
            # window accumulator shared across channels.
            paddded_ratio = input.new(channels + self.size - 1, input_height,
                                      input_width)
            accum_ratio = input.new(input_height, input_width)

            cache_ratio_value = 2 * self.alpha * self.beta / self.size
            inversePrePad = int(self.size - (self.size - 1) / 2)

            grad_input.resize_as_(input)
            # First term: grad_output * scale^(-beta).
            torch.pow(self.scale, -self.beta, out=grad_input).mul_(grad_output)

            paddded_ratio.zero_()
            padded_ratio_center = paddded_ratio.narrow(0, inversePrePad,
                                                       channels)
            for n in range(batch_size):
                torch.mul(grad_output[n], output[n], out=padded_ratio_center)
                padded_ratio_center.div_(self.scale[n])
                torch.sum(
                    paddded_ratio.narrow(0, 0, self.size - 1), 0, keepdim=False, out=accum_ratio)
                for c in range(channels):
                    # Slide the window forward and subtract the cross-channel
                    # correction term for this channel.
                    accum_ratio.add_(paddded_ratio[c + self.size - 1])
                    grad_input[n][c].addcmul_(-cache_ratio_value, input[n][c],
                                              accum_ratio)
                    accum_ratio.add_(-1, paddded_ratio[c])

        return grad_input
_all_functions.append(CrossMapLRN2d) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/_functions/thnn/normalization.py | 0.811303 | 0.325253 | normalization.py | pypi |
import torch
from torch.autograd.function import Function
from torch._thnn import type2backend
from torch.autograd.function import once_differentiable
from . import _all_functions
# Reduction modes for EmbeddingBag: add up the embeddings within each bag
# (MODE_SUM) or average them (MODE_MEAN).
MODE_SUM = 0
MODE_MEAN = 1
class EmbeddingBag(Function):
    """Autograd Function computing per-bag sums or means of embeddings.

    Equivalent to an Embedding lookup followed by a per-bag sum/mean, without
    materializing the intermediate lookup result.  ``indices`` is a flat 1-D
    index tensor and ``offsets`` marks where each bag starts.
    """

    @staticmethod
    def _renorm(ctx, indices, weight, max_norm, norm_type):
        # clone indices since LookupTable_renorm modifies it in-place
        ctx._backend.LookupTable_renorm(
            ctx._backend.library_state,
            indices.clone().view(-1),
            weight,
            max_norm,
            norm_type
        )

    @classmethod
    def forward(cls, ctx, weight, indices, offsets,
                max_norm, norm_type, scale_grad_by_freq, mode):
        ctx.max_norm = max_norm
        ctx.norm_type = norm_type
        ctx.scale_grad_by_freq = scale_grad_by_freq

        if mode == 'sum':
            ctx.mode = MODE_SUM
        elif mode == 'mean':
            ctx.mode = MODE_MEAN
        else:
            raise ValueError("mode needs to be 'sum' or 'mean', but got {}"
                             .format(mode))

        assert not ctx.needs_input_grad[1], "EmbeddingBag doesn't " \
            "compute the gradient w.r.t. the indices"

        assert not ctx.needs_input_grad[2], "EmbeddingBag doesn't " \
            "compute the gradient w.r.t. the offsets"

        assert indices.dim() == 1
        if offsets.dim() != 1:
            raise ValueError("offsets has to be a 1D Tensor")

        if offsets[0] != 0:
            raise ValueError("offsets[0] has to be 0, i.e. the first sequence"
                             " in the mini-batch has to start from position 0."
                             "However, got {}".format(offsets[0]))
        if offsets[-1] > indices.size(0):
            raise ValueError("offsets[-1] has to be smaller than indices's length"
                             " ({}), but got offsets[-1] of {}"
                             .format(indices.size(0), offsets[-1]))

        ctx._backend = type2backend[weight.type()]
        ctx._weight_size = weight.size()
        # Maps each flat index position to the bag it belongs to; filled by
        # the kernel (CUDA) or computed via cumsum below (CPU).
        ctx._offset2bag = offsets.new()

        ctx.save_for_backward(indices)

        indices = indices.contiguous().view(-1)
        output = weight.new()
        if ctx.max_norm is not None:
            # Renormalize rows of `weight` in place before the lookup.
            cls._renorm(ctx, indices, weight, max_norm=max_norm, norm_type=norm_type)

        if weight.is_cuda:
            if ctx.mode == MODE_MEAN:
                ctx.bag_size = offsets.new().resize_(offsets.size())
            else:
                ctx.bag_size = None

            ctx._backend.LookupTableBag_updateOutput(
                ctx._backend.library_state,
                indices,
                offsets,
                weight,
                output,
                ctx._offset2bag,
                ctx.mode,
                ctx.bag_size
            )
        else:
            # slow CPU implementation
            index_output = torch.index_select(weight, 0, indices)
            # indices = [1, 2, 30, 100, 12], offsets = [0, 2, 3]
            ctx._offset2bag.resize_(indices.size(0)).zero_()  # offset2bag = [0 0 0 0 0]
            ctx._offset2bag.index_fill_(0, offsets, 1)  # offset2bag = [1 0 1 0 1]
            ctx._offset2bag[0] = 0  # offset2bag = [0 0 1 0 1]
            ctx._offset2bag = ctx._offset2bag.cumsum(0)  # offset2bag = [0 0 1 1 2]
            output.resize_(offsets.size(0), weight.size(1)).zero_()
            # Sum each looked-up row into its bag's output row.
            output.index_add_(0, ctx._offset2bag, index_output)
            if ctx.mode == MODE_MEAN:
                if offsets.size(0) == 1:
                    # Single bag: its size is just the total index count.
                    ctx.bag_size = indices.size(0)
                else:
                    # Per-bag sizes from consecutive offset differences.
                    ctx.bag_size = weight.new().resize_(offsets.size())
                    ctx.bag_size[:-1] = offsets[1:] - offsets[:-1]
                    ctx.bag_size[-1] = indices.size(0) - offsets[-1]
                    ctx.bag_size = ctx.bag_size[:, None].expand_as(output)
                output /= ctx.bag_size
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        indices, = ctx.saved_tensors
        indices = indices.contiguous().view(-1)
        grad_output = grad_output.contiguous()

        with torch.cuda.device_of(grad_output):
            if grad_output.is_cuda:
                # Scratch tensors the CUDA kernel uses for index sorting.
                _sorted = torch.cuda.LongTensor()
                _indices = torch.cuda.LongTensor()
                _count = torch.cuda.LongTensor()
            else:
                _count = torch.IntTensor()
                _sorted = _indices = None

            grad_weight = grad_output.new(ctx._weight_size).zero_()

            if grad_output.is_cuda:
                ctx._backend.LookupTableBag_accGradParameters(
                    ctx._backend.library_state,
                    indices,
                    grad_output,
                    grad_weight,
                    ctx._offset2bag,
                    _count,
                    _sorted,
                    _indices,
                    ctx.scale_grad_by_freq,
                    ctx.mode,
                    ctx.bag_size,
                    1
                )
            else:
                # slow CPU implementation
                if ctx.mode == MODE_MEAN:
                    # divide by average count
                    grad_output = grad_output / ctx.bag_size
                # Broadcast each bag's gradient back to its index positions.
                index_grad_output = grad_output.index_select(0, ctx._offset2bag)
                ctx._backend.LookupTable_accGradParameters(
                    ctx._backend.library_state,
                    indices,
                    index_grad_output,
                    grad_weight,
                    _count,
                    _sorted,
                    _indices,
                    ctx.scale_grad_by_freq,
                    -1,
                    1
                )

        # Only `weight` receives a gradient.
        return grad_weight, None, None, None, None, None, None
_all_functions.append(EmbeddingBag) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/_functions/thnn/sparse.py | 0.87456 | 0.433682 | sparse.py | pypi |
import torch
def elu_double_backwards(ctx, ggI):
    """Double backward for ELU.

    Uses saved (input, grad_output) and ``alpha`` from the forward call;
    returns (gI, ggO, None, None, None, None).
    """
    saved = ctx.saved_tensors
    input, grad_output = saved[0], saved[1]
    alpha = ctx.additional_args[0]

    neg_region = (input < 0).type_as(ggI)
    pos_region = (input >= 0).type_as(ggI)
    # First derivative in the negative region: alpha * exp(x).
    alpha_exp = input.exp() * alpha * neg_region

    gI = ggI * grad_output * alpha_exp
    ggO = ggI * (alpha_exp + pos_region)
    return gI, ggO, None, None, None, None
def gatedlinear_double_backwards(ctx, ggI):
    """Double backward for the gated linear unit (GLU).

    The input splits along ``dim`` into a linear half ``a`` and a gating half
    ``b``; forward computed a * sigmoid(b).  Returns
    (gI, ggO, None, None, None).
    """
    x, gO = ctx.saved_tensors
    dim = ctx.additional_args[0]
    half = x.size(dim) // 2

    a = x.narrow(dim, 0, half)         # linear half
    b = x.narrow(dim, half, half)      # gating half
    s = b.sigmoid()                    # sigma(b)
    s_comp = 1 - s
    s_prime = s * s_comp               # sigma'(b)

    gg_a = ggI.narrow(dim, 0, half)
    gg_b = ggI.narrow(dim, half, half)
    gg_b_times_a = gg_b * a

    # sigma''(b) = sigma'(b) * (1 - sigma(b)) - sigma(b) * sigma'(b)
    s_second = s_prime * s_comp - s * s_prime

    gI_a = gg_b * gO * s_prime
    gI_b = gg_b_times_a * gO * s_second + gg_a * gO * s_prime
    gI = torch.cat((gI_a, gI_b), dim)

    ggO = gg_a * s + gg_b_times_a * s_prime
    return gI, ggO, None, None, None
def hardshrink_double_backwards(ctx, ggI):
    """Double backward for Hardshrink.

    Hardshrink is the identity where |input| > lambd and zero elsewhere, so
    the second derivative w.r.t. the input is zero and ggO is ggI restricted
    to the pass-through region.
    """
    input = ctx.saved_tensors[0]
    lambd = ctx.additional_args[0]

    # 1 where |input| > lambd (strictly), 0 on [-lambd, lambd].
    pass_through = torch.ones_like(input).masked_fill_(
        (input >= -lambd) & (input <= lambd), 0)

    gI = None
    ggO = ggI * pass_through
    return gI, ggO, None, None, None
def hardtanh_double_backwards(ctx, ggI):
    """Double backward for Hardtanh.

    Hardtanh is linear on (min_val, max_val] and constant outside, so the
    second derivative w.r.t. the input is zero; ggO is ggI restricted to the
    linear region.
    """
    t = ctx.saved_tensors
    input, grad_output = t[0], t[1]
    min_val, max_val = ctx.additional_args[0:2]

    # Fix: the original computed (input <= max_val) - (input <= min_val),
    # subtracting comparison masks arithmetically.  On modern PyTorch,
    # comparisons yield bool tensors, for which `-` is unsupported.  The
    # logical conjunction below selects the identical region
    # min_val < input <= max_val and works for both bool and byte masks.
    linear_region = ((input > min_val) & (input <= max_val)).type_as(grad_output)

    gI = torch.zeros_like(ggI)
    ggO = ggI * linear_region
    return gI, ggO, None, None, None
def leakyrelu_double_backwards(ctx, ggI):
    """Double backward for LeakyReLU.

    LeakyReLU is piecewise linear, so the second derivative w.r.t. the input
    is zero; ggO is ggI scaled by the per-element first derivative
    (negative_slope below zero, 1 at or above).
    """
    input = ctx.saved_tensors[0]
    negative_slope = ctx.additional_args[0]

    below_zero = (input < 0).type_as(ggI)
    at_or_above = (input >= 0).type_as(ggI)
    slope_mask = below_zero * negative_slope + at_or_above

    gI = torch.zeros_like(ggI)
    ggO = ggI * slope_mask
    return gI, ggO, None, None, None
def logsigmoid_double_backwards(ctx, ggI):
    """Double backward for LogSigmoid.

    d/dx logsigmoid(x) = 1 / (1 + e^x) and the second derivative is
    -e^x / (1 + e^x)^2; returns (gI, ggO, None, None, None, None).
    """
    # maybe more efficient in terms of output, but save_output is False
    saved = ctx.saved_tensors
    input, gO = saved[0], saved[1]

    exp_x = input.exp()
    denom = exp_x + 1

    gI = ggI * gO * -1 * exp_x / denom.pow(2)
    ggO = ggI / denom
    return gI, ggO, None, None, None, None
def softplus_double_backwards(ctx, ggI):
    """Double backward for Softplus.

    Above ``threshold`` (in units of beta*input) softplus is treated as the
    identity; below it the first derivative is recovered from the saved
    output via (e^(beta*out) - 1) / e^(beta*out) = sigmoid(beta*input).
    """
    input, gO, output = ctx.saved_tensors
    beta, threshold = ctx.additional_args[0], ctx.additional_args[1]

    scaled_input = input * beta
    above = torch.zeros_like(ggI).masked_fill_(scaled_input > threshold, 1)
    below = 1 - above

    exp_scaled_output = (output * beta).exp()
    first_deriv = (exp_scaled_output - 1) / exp_scaled_output
    first_deriv_below = first_deriv * below

    gI = ggI * gO * first_deriv_below * beta / exp_scaled_output
    ggO = ggI * (above + first_deriv_below)
    return gI, ggO, None, None, None, None
def softshrink_double_backwards(ctx, ggI):
    """Double backward for Softshrink.

    Softshrink's first derivative is the same 0/1 pass-through mask as
    Hardshrink's (identity where |input| > lambd), so the computation is
    delegated wholesale.
    """
    return hardshrink_double_backwards(ctx, ggI)
def threshold_double_backwards(ctx, ggI):
    """Double backward for Threshold.

    Threshold is the identity above ``threshold`` and a constant below, so
    the second derivative w.r.t. the input is zero and ggO is ggI restricted
    to the pass-through region.
    """
    input = ctx.saved_tensors[0]
    threshold, value = ctx.additional_args[0:2]

    pass_through = (input > threshold).type_as(ggI)

    gI = torch.zeros_like(ggI)
    ggO = ggI * pass_through
    return gI, ggO, None, None, None
def klddivloss_double_backwards(ctx, ggI):
    """Double backward for KLDivLoss.

    The loss is linear in its input (gradient is -target, optionally
    averaged), so gI is None and ggO is the matching contraction of ggI.
    """
    input, target, gO = ctx.saved_tensors
    size_average = ctx.additional_args[0]
    denom = input.nelement() if size_average else 1

    gI = None
    ggO = -(ggI * target).sum() / denom
    return gI, None, ggO, None, None
def l1loss_double_backwards(ctx, ggI):
    """Double backward for L1Loss.

    The first derivative is sign(input - target) almost everywhere, so the
    second derivative w.r.t. the input is zero and ggO contracts ggI against
    that sign pattern (averaged when size_average is set).
    """
    input, target, grad_output = ctx.saved_tensors
    size_average = ctx.additional_args[0]

    sign = (input > target).type_as(ggI) - (input < target).type_as(ggI)

    gI = torch.zeros_like(ggI)
    ggO = (ggI * sign).sum()
    if size_average:
        ggO = ggO / input.nelement()
    return gI, None, ggO, None, None
def mseloss_double_backwards(ctx, ggI):
    """Double backward for MSELoss.

    The first derivative is 2*(input - target)/div, so the second derivative
    is the constant 2/div; the division applies only when both size_average
    and reduce are set.
    """
    input, target, gO = ctx.saved_tensors
    size_average = ctx.additional_args[0]
    reduce = ctx.additional_args[1]
    denom = input.nelement() if size_average and reduce else 1

    gI = ggI * (gO * 2. / denom).expand_as(input)
    diff = input - target
    if reduce:
        ggO = (ggI * diff).sum() * (2. / denom)
    else:
        ggO = (ggI * diff) * 2.
    return gI, None, ggO, None, None
def nllloss_double_backwards(ctx, ggI):
    """Double backward for NLLLoss (and NLLLoss2d).

    NLLLoss is linear in its input, so the second derivative w.r.t. the
    input is zero (gI is None); ggO is produced by applying the same
    negative, class-weighted, optionally averaged selection mask to ggI.
    """
    t = ctx.saved_tensors
    target = t[1]
    weights = ctx.additional_args[1]
    size_average = ctx.additional_args[0]
    ignore_index = ctx.additional_args[3]
    reduce = ctx.additional_args[4]

    gI = None

    # can't scatter/gather on indices outside of range, let's just put them in range
    # and 0 out the weights later (so it doesn't matter where in range we put them)
    target_mask = target == ignore_index
    safe_target = target.clone()
    safe_target.masked_fill_(target_mask, 0)

    if weights.dim() == 0:
        # No per-class weights: every (non-ignored) target counts as 1.
        weights_to_scatter = torch.ones_like(safe_target)
    else:
        # Broadcast the 1-D class-weight vector to the target's shape, then
        # pick each element's weight by its target class.
        weights_maybe_resized = weights
        while weights_maybe_resized.dim() < target.dim():
            weights_maybe_resized = weights_maybe_resized.unsqueeze(1)

        weights_maybe_resized = weights_maybe_resized.expand(weights.size()[0:1] + target.size()[1:])
        weights_to_scatter = weights_maybe_resized.gather(0, safe_target)

    weights_to_scatter.masked_fill_(target_mask, 0)
    # Averaging divides by the total weight of the non-ignored targets.
    divisor = weights_to_scatter.sum() if size_average and reduce else 1
    weights_to_scatter = -1 * weights_to_scatter / divisor
    zeros = torch.zeros_like(ggI)
    mask = zeros.scatter_(1, safe_target.unsqueeze(1), weights_to_scatter.unsqueeze(1))

    if reduce:
        ggO = (ggI * mask).sum()
    else:
        ggO = (ggI * mask).sum(dim=1)

    return gI, None, ggO, None, None, None
def smoothl1loss_double_backwards(ctx, ggI):
    """Double backward for SmoothL1Loss.

    Inside the quadratic region (|input - target| < 1) the second derivative
    is 1/div; outside it the loss is linear with slope ±1, contributing only
    to ggO.
    """
    size_average = ctx.additional_args[0]
    input, target, gO = ctx.saved_tensors
    div_factor = input.nelement() if size_average else 1

    diff = input - target
    # Fix: the original combined comparison masks with `+` and tested
    # `== 2` to express logical AND, which relies on integer arithmetic on
    # byte masks.  On modern PyTorch comparisons produce bool tensors, where
    # `+` acts as OR and the `== 2` test can never be true.  Express the
    # region intersections with `&`, which is valid for bool and byte alike.
    small_error_mask = diff.abs() < 1
    large_error_mask = small_error_mask == 0
    large_error_pos_mask = ((diff > 0) & large_error_mask).type_as(ggI)
    large_error_neg_mask = ((diff <= 0) & large_error_mask).type_as(ggI)
    small_error_mask = small_error_mask.type_as(ggI)

    gI = small_error_mask * ggI * gO / div_factor
    ggO = (ggI * (diff * small_error_mask + large_error_pos_mask - large_error_neg_mask)).sum() / div_factor

    return gI, None, ggO, None, None, None
def softmarginloss_double_backwards(ctx, ggI):
    """Double backward for SoftMarginLoss.

    With t0 = 1/(1 + e^{-t*x}) and t1 = -t * e^{-t*x}, the first derivative
    of the per-element loss is t0 * t1; the second derivative works out to
    first_deriv^2 + first_deriv * target.
    """
    input, target, gO = ctx.saved_tensors
    size_average = ctx.additional_args[0]
    denom = input.nelement() if size_average else 1

    exp_term = (-target * input).exp()
    t0 = (1 + exp_term).pow(-1)
    t1 = -target * exp_term
    first_deriv = t0 * t1

    gI = -1 * gO * ggI / denom * (first_deriv.pow(2) + first_deriv * target)
    ggO = (ggI * first_deriv).sum() / denom
    return gI, None, ggO, None, None, None
# Dispatch table mapping legacy THNN function names to their hand-written
# double-backward (gradgrad) implementations.  NLLLoss and NLLLoss2d share
# one implementation, as do Hardshrink and Softshrink.
double_backwards_fns = {
    'ELU': elu_double_backwards,
    'GatedLinear': gatedlinear_double_backwards,
    'Hardshrink': hardshrink_double_backwards,
    'Hardtanh': hardtanh_double_backwards,
    'LeakyReLU': leakyrelu_double_backwards,
    'LogSigmoid': logsigmoid_double_backwards,
    'Softplus': softplus_double_backwards,
    'Softshrink': softshrink_double_backwards,
    'Threshold': threshold_double_backwards,
    'KLDivLoss': klddivloss_double_backwards,
    'L1Loss': l1loss_double_backwards,
    'MSELoss': mseloss_double_backwards,
    'NLLLoss': nllloss_double_backwards,
    'NLLLoss2d': nllloss_double_backwards,
    'SmoothL1Loss': smoothl1loss_double_backwards,
    'SoftMarginLoss': softmarginloss_double_backwards,
}
import operator
import torch
import warnings
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
def _check_balance(device_ids):
    """Warn when the CUDA devices in ``device_ids`` are badly mismatched.

    A warning is emitted (at most once, for the first failing property) when
    the weakest device has less than 75% of the memory or multiprocessor
    count of the strongest one.
    """
    imbalance_warn = """
There is an imbalance between your GPUs. You may want to exclude GPU {} which
has less than 75% of the memory or cores of GPU {}. You can do so by setting
the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
environment variable."""

    dev_props = [torch.cuda.get_device_properties(i) for i in device_ids]

    def warn_imbalance(get_prop):
        values = [get_prop(props) for props in dev_props]
        min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
        max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
        if min_val / max_val < 0.75:
            warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
            return True
        return False

    # Check memory first; only fall through to core count if memory is balanced.
    if not warn_imbalance(lambda props: props.total_memory):
        warn_imbalance(lambda props: props.multi_processor_count)
class DataParallel(Module):
    r"""Implements data parallelism at the module level.

    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. In the forward pass, the module is replicated on each device,
    and each replica handles a portion of the input. During the backwards
    pass, gradients from each replica are summed into the original module.

    The batch size should be larger than the number of GPUs used.

    See also: :ref:`cuda-nn-dataparallel-instead`

    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel EXCEPT Tensors. All tensors will be scattered on dim
    specified (default 0). Primitive types will be broadcasted, but all
    other types will be a shallow copy and can be corrupted if written to in
    the model's forward pass.

    .. warning::
        Forward and backward hooks defined on :attr:`module` and its submodules
        will be invoked ``len(device_ids)`` times, each with inputs located on
        a particular device. Particularly, the hooks are only guaranteed to be
        executed in correct order with respect to operations on corresponding
        devices. For example, it is not guaranteed that hooks set via
        :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
        `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
        that each such hook be executed before the corresponding
        :meth:`~torch.nn.Module.forward` call of that device.

    .. note::
        There is a subtlety in using the
        ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
        :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
        See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
        details.


    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)
        output_device: device location of output (default: device_ids[0])

    Attributes:
        module (Module): the module to be parallelized

    Example::

        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well

    def __init__(self, module, device_ids=None, output_device=None, dim=0):
        super(DataParallel, self).__init__()

        # Without CUDA, behave as a transparent wrapper around `module`.
        if not torch.cuda.is_available():
            self.module = module
            self.device_ids = []
            return

        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        if output_device is None:
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        self.output_device = output_device

        # Warn if the devices have very unequal memory / core counts.
        _check_balance(self.device_ids)

        if len(self.device_ids) == 1:
            self.module.cuda(device_ids[0])

    def forward(self, *inputs, **kwargs):
        # CPU-only fallback: run the wrapped module directly.
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # Split the batch across devices, replicate the module, run each
        # replica on its shard, and gather the results on the output device.
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def replicate(self, module, device_ids):
        return replicate(module, device_ids)

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs, output_device):
        return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output  Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Tensor containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))
    if output_device is None:
        output_device = device_ids[0]

    # One (args, kwargs) pair per device.
    scattered, per_device_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    if len(device_ids) == 1:
        return module(*scattered[0], **per_device_kwargs[0])

    # Only replicate onto devices that actually received an input chunk.
    active_devices = device_ids[:len(scattered)]
    replicas = replicate(module, active_devices)
    outputs = parallel_apply(replicas, scattered, per_device_kwargs, active_devices)
    return gather(outputs, output_device, dim)
import torch
from ._functions import Scatter, Gather
def scatter(inputs, target_gpus, dim=0):
    r"""
    Slices tensors into approximately equal chunks along ``dim`` and
    distributes them across the given GPUs.  Objects that are not tensors
    (and are not tuples/lists/dicts of them) are duplicated by reference,
    once per target GPU.
    """
    def scatter_map(obj):
        # Tensors are chunked along `dim` via the autograd-aware Scatter op.
        if isinstance(obj, torch.Tensor):
            return Scatter.apply(target_gpus, None, dim, obj)
        # Containers are scattered element-wise, then transposed so the
        # result is one container per GPU (preserving the container type).
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Anything else: same reference handed to every GPU.
        return [obj for targets in target_gpus]
    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    r"""Scatter positional and keyword arguments together, padding the
    shorter of the two with empty tuples / dicts so that both tuples have
    one entry per target device."""
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad so zip(scattered_inputs, scattered_kwargs) is well defined.
    while len(scattered_inputs) < len(scattered_kwargs):
        scattered_inputs.append(())
    while len(scattered_kwargs) < len(scattered_inputs):
        scattered_kwargs.append({})
    return tuple(scattered_inputs), tuple(scattered_kwargs)
def gather(outputs, target_device, dim=0):
    r"""
    Gathers tensors from different GPUs on a specified device
    (-1 means the CPU).
    """
    def gather_map(parts):
        first = parts[0]
        # Tensors are concatenated on the target device along `dim`.
        if isinstance(first, torch.Tensor):
            return Gather.apply(target_device, dim, *parts)
        if first is None:
            return None
        if isinstance(first, dict):
            if any(len(d) != len(first) for d in parts):
                raise ValueError('All dicts must have the same number of keys')
            # Gather value-wise, preserving the dict subclass and its keys.
            return type(first)(((key, gather_map([d[key] for d in parts]))
                                for key in first))
        # Sequences: gather element-wise, preserving the container type.
        return type(first)(map(gather_map, zip(*parts)))

    # gather_map is recursive through its closure cell; drop the reference
    # afterwards so the resulting reference cycle does not linger.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None
import torch
import torch.cuda.comm as comm
from torch.autograd import Function
class Broadcast(Function):
    # Autograd-aware broadcast: forward copies each input tensor to every
    # target GPU; backward reduce-adds the gradients back onto the source.

    @staticmethod
    def forward(ctx, target_gpus, *inputs):
        if not all(input.is_cuda for input in inputs):
            raise TypeError('Broadcast function not implemented for CPU tensors')
        ctx.target_gpus = target_gpus
        if len(inputs) == 0:
            return tuple()
        ctx.num_inputs = len(inputs)
        # Device holding the originals; backward accumulates gradients there.
        ctx.input_device = inputs[0].get_device()
        # outputs[i] is the list of copies living on target_gpus[i].
        outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
        non_differentiables = []
        # needs_input_grad[0] corresponds to target_gpus, so entry idx+1
        # matches inputs[idx]; mark all copies of non-grad inputs.
        for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):
            if not input_requires_grad:
                for output in outputs:
                    non_differentiables.append(output[idx])
        ctx.mark_non_differentiable(*non_differentiables)
        # Flatten to (gpu0_t0, gpu0_t1, ..., gpu1_t0, ...).
        return tuple([t for tensors in outputs for t in tensors])

    @staticmethod
    def backward(ctx, *grad_outputs):
        # Leading None is the gradient slot for the non-tensor target_gpus arg.
        return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)
class ReduceAddCoalesced(Function):
    # Sums per-GPU gradient groups onto one destination device; the exact
    # adjoint of Broadcast (and vice versa in backward).

    @staticmethod
    def forward(ctx, destination, num_inputs, *grads):
        # `grads` is the flattened (gpu0_g0, ..., gpu0_gN, gpu1_g0, ...)
        # layout produced by Broadcast.forward; recover one device per group.
        ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]
        # Regroup into per-GPU lists of num_inputs gradients each.
        grads = [grads[i:i + num_inputs]
                 for i in range(0, len(grads), num_inputs)]
        return comm.reduce_add_coalesced(grads, destination)

    @staticmethod
    def backward(ctx, *grad_outputs):
        # Two Nones for the non-tensor destination/num_inputs arguments.
        return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
class Gather(Function):
    # Concatenates per-GPU tensors onto one device along `dim`; backward
    # scatters the gradient back into the original per-GPU chunks.

    @staticmethod
    def forward(ctx, target_device, dim, *inputs):
        assert all(map(lambda i: i.is_cuda, inputs))
        ctx.target_device = target_device
        ctx.dim = dim
        # Remember each chunk's device and its extent along `dim` so the
        # backward pass can split the gradient identically.
        ctx.input_gpus = tuple(map(lambda i: i.get_device(), inputs))
        ctx.input_sizes = tuple(map(lambda i: i.size(ctx.dim), inputs))
        return comm.gather(inputs, ctx.dim, ctx.target_device)

    @staticmethod
    def backward(ctx, grad_output):
        # Two Nones for the non-tensor target_device/dim arguments.
        return (None, None) + Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
class Scatter(Function):
    # Splits one tensor along `dim` into chunks placed on the target GPUs;
    # backward gathers the per-chunk gradients back onto the source device.

    @staticmethod
    def forward(ctx, target_gpus, chunk_sizes, dim, input):
        ctx.target_gpus = target_gpus
        ctx.chunk_sizes = chunk_sizes
        ctx.dim = dim
        # -1 encodes "CPU source" for the backward Gather.
        ctx.input_device = input.get_device() if input.is_cuda else -1
        streams = None
        if ctx.input_device == -1:
            # Perform CPU to GPU copies in a background stream
            streams = [_get_stream(device) for device in ctx.target_gpus]
        outputs = comm.scatter(input, ctx.target_gpus, ctx.chunk_sizes, ctx.dim, streams)
        # Synchronize with the copy stream
        if streams is not None:
            for i, output in enumerate(outputs):
                with torch.cuda.device(ctx.target_gpus[i]):
                    main_stream = torch.cuda.current_stream()
                    # Make the compute stream wait for the copy, and keep the
                    # output's memory alive until the main stream is done.
                    main_stream.wait_stream(streams[i])
                    output.record_stream(main_stream)
        return outputs

    @staticmethod
    def backward(ctx, *grad_output):
        # Three Nones for the non-tensor target_gpus/chunk_sizes/dim args.
        return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
# background streams used for copying
_streams = None
def _get_stream(device):
"""Gets a background stream for copying between CPU and GPU"""
global _streams
if device == -1:
return None
if _streams is None:
_streams = [None] * torch.cuda.device_count()
if _streams[device] is None:
_streams[device] = torch.cuda.Stream(device)
return _streams[device] | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/parallel/_functions.py | 0.853898 | 0.395105 | _functions.py | pypi |
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed as dist
from torch.nn.modules import Module
from collections import defaultdict
from torch.autograd import Variable
class DistributedDataParallelCPU(Module):
    r"""Implements distributed data parallelism for CPU at the module level.

    Supports the ``mpi``, ``gloo`` and ``tcp`` backends.  The module is
    replicated on every machine; each replica handles a slice of the input
    (pair this with :class:`torch.utils.data.distributed.DistributedSampler`
    and scale the per-node batch size down accordingly).  During the backward
    pass gradients are averaged across all nodes.

    Requires the distributed package to be initialized in process-group mode
    (see :func:`torch.distributed.init_process_group`) before construction.

    .. warning::
        Construction, forward, and differentiation of the output are
        distributed synchronization points — keep the nodes in lockstep.

    .. warning::
        All parameters must be registered before construction; none may be
        added or removed afterwards.  All gradients are assumed dense, and
        gradients must be accumulated into ``.grad`` (``torch.autograd.grad``
        is not supported).

    .. note::
        Parameters are broadcast from rank 0 in ``__init__``; the optimizer
        on every node is assumed to apply identical updates.

    .. warning::
        Forward and backward hooks defined on :attr:`module` and its
        submodules won't be invoked anymore, unless the hooks are
        initialized in the :meth:`forward` method.

    Args:
        module: module to be parallelized

    Example::
        >>> torch.distributed.init_process_group(world_size=4, init_method='...')
        >>> net = torch.nn.DistributedDataParallelCPU(model)
    """

    def __init__(self, module):
        super(DistributedDataParallelCPU, self).__init__()
        self.module = module
        self.sync_parameters()

        def reduce_gradients():
            # Runs at most once per backward pass, guarded by the flag
            # that forward() arms.
            if not self.needs_reduction:
                return
            self.needs_reduction = False
            # Bucket parameters by storage type so each bucket can be
            # flattened into a single all-reduce.
            buckets = defaultdict(list)
            for p in self.module.parameters():
                if p.requires_grad and p.grad is not None:
                    buckets[type(p.data)].append(p)
            for bucket in buckets.values():
                grads = [p.grad.data for p in bucket]
                coalesced = _flatten_dense_tensors(grads)
                dist.all_reduce(coalesced)
                coalesced /= dist.get_world_size()
                for grad, averaged in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
                    grad.copy_(averaged)

        # Register a hook on every trainable parameter; the first hook that
        # fires schedules exactly one reduction at the end of backward.
        for p in list(self.module.parameters()):
            def schedule_reduction(*unused):
                Variable._execution_engine.queue_callback(reduce_gradients)
            if p.requires_grad:
                p.register_hook(schedule_reduction)

    def sync_parameters(self):
        # Make rank 0's parameters authoritative on every process.
        for p in self.module.parameters():
            dist.broadcast(p.data, 0)

    def forward(self, *inputs, **kwargs):
        # Arm the flag so the next backward performs one gradient reduction.
        self.needs_reduction = True
        return self.module(*inputs, **kwargs)
import threading
import torch
def get_a_var(obj):
    """Return the first tensor found in obj (searching through tuples,
    lists and dict items, depth-first), or None when there is none."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        children = obj
    elif isinstance(obj, dict):
        children = obj.items()
    else:
        return None
    for child in children:
        found = get_a_var(child)
        if isinstance(found, torch.Tensor):
            return found
    return None
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):
    r"""Apply each module to the corresponding input (and kwargs), using
    one thread per module when there is more than one.  Exceptions raised
    inside a worker are captured and re-raised in the calling thread."""
    assert len(modules) == len(inputs)
    if kwargs_tup is None:
        kwargs_tup = ({},) * len(modules)
    else:
        assert len(modules) == len(kwargs_tup)
    if devices is None:
        devices = [None] * len(modules)
    else:
        assert len(modules) == len(devices)

    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, kwargs, device=None):
        # Worker threads do not inherit the caller's grad mode; restore it.
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            # Infer the device from the first tensor found in the input.
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                output = module(*input, **kwargs)
                with lock:
                    results[i] = output
        except Exception as e:
            # Store the exception; it is re-raised in the main thread below.
            with lock:
                results[i] = e

    if len(modules) > 1:
        workers = [threading.Thread(target=_worker, args=job)
                   for job in zip(range(len(modules)), modules, inputs,
                                  kwargs_tup, devices)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
    else:
        # Single module: run inline, no thread overhead.
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])

    outputs = []
    for i in range(len(inputs)):
        result = results[i]
        if isinstance(result, Exception):
            raise result
        outputs.append(result)
    return outputs
r"""
Weight Normalization from https://arxiv.org/abs/1602.07868
"""
from torch.nn.parameter import Parameter
def _norm(p, dim):
"""Computes the norm over all dimensions except dim"""
if dim is None:
return p.norm()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)
else:
return _norm(p.transpose(0, dim), 0).transpose(0, dim)
class WeightNorm(object):
    # Forward-pre-hook object that recomputes the parameter `name` as
    # g * v / ||v|| from the magnitude (`name_g`) and direction (`name_v`)
    # parameters before every forward pass.

    def __init__(self, name, dim):
        self.name = name
        self.dim = dim

    def compute_weight(self, module):
        # w = v * (g / ||v||), norm taken over all dims except self.dim.
        g = getattr(module, self.name + '_g')
        v = getattr(module, self.name + '_v')
        return v * (g / _norm(v, self.dim))

    @staticmethod
    def apply(module, name, dim):
        fn = WeightNorm(name, dim)
        weight = getattr(module, name)
        # remove w from parameter list
        del module._parameters[name]
        # add g and v as new parameters and express w as g/||v|| * v
        module.register_parameter(name + '_g', Parameter(_norm(weight, dim).data))
        module.register_parameter(name + '_v', Parameter(weight.data))
        setattr(module, name, fn.compute_weight(module))
        # recompute weight before every forward()
        module.register_forward_pre_hook(fn)
        return fn

    def remove(self, module):
        # Materialize the current weight and restore it as a plain Parameter.
        weight = self.compute_weight(module)
        delattr(module, self.name)
        del module._parameters[self.name + '_g']
        del module._parameters[self.name + '_v']
        module.register_parameter(self.name, Parameter(weight.data))

    def __call__(self, module, inputs):
        # Forward-pre-hook entry point: refresh the derived weight.
        setattr(module, self.name, self.compute_weight(module))
def weight_norm(module, name='weight', dim=0):
    r"""Applies weight normalization to a parameter in the given module.

    .. math::
         \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}

    Weight normalization decouples the magnitude of a weight tensor from its
    direction: the parameter named `name` (e.g. "weight") is replaced by two
    parameters, a magnitude (e.g. "weight_g") and a direction
    (e.g. "weight_v").  A forward-pre-hook recomputes the weight from the
    two before every :meth:`~Module.forward` call.

    With the default `dim=0` the norm is computed independently per output
    channel/plane; pass `dim=None` to normalize over the entire tensor.

    See https://arxiv.org/abs/1602.07868

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        dim (int, optional): dimension over which to compute the norm

    Returns:
        The original module with the weight norm hook

    Example::
        >>> m = weight_norm(nn.Linear(20, 40), name='weight')
        Linear (20 -> 40)
        >>> m.weight_g.size()
        torch.Size([40, 1])
        >>> m.weight_v.size()
        torch.Size([40, 20])
    """
    WeightNorm.apply(module, name, dim)
    return module
def remove_weight_norm(module, name='weight'):
    r"""Removes the weight normalization reparameterization from a module.

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter

    Example:
        >>> m = weight_norm(nn.Linear(20, 40))
        >>> remove_weight_norm(m)
    """
    # Locate the hook installed by weight_norm(), undo it, and unregister it.
    for key, hook in module._forward_pre_hooks.items():
        if isinstance(hook, WeightNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[key]
            return module
    raise ValueError("weight_norm of '{}' not found in {}"
                     .format(name, module))
import torch
from torch.nn.functional import normalize
from torch.nn.parameter import Parameter
class SpectralNorm(object):
    """Forward-pre-hook object implementing spectral normalization.

    The unnormalized weight is kept in the parameter ``<name>_org`` and the
    running estimate of the first left singular vector in the buffer
    ``<name>_u``.  Before each forward pass the hook runs
    ``n_power_iterations`` steps of the power method, rescales the weight by
    the estimated spectral norm, and persists the updated ``u``.
    """

    def __init__(self, name='weight', n_power_iterations=1, eps=1e-12):
        self.name = name
        self.n_power_iterations = n_power_iterations
        self.eps = eps

    def compute_weight(self, module):
        """Return ``(weight / sigma, updated_u)`` without mutating the module."""
        weight = getattr(module, self.name + '_org')
        u = getattr(module, self.name + '_u')
        height = weight.size(0)
        # Power iteration works on a 2D view: (out_features, everything else).
        weight_mat = weight.view(height, -1)
        with torch.no_grad():
            for _ in range(self.n_power_iterations):
                # u/v approximate the first left/right singular vectors.
                v = normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
                u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
        # sigma = u^T W v approximates the spectral norm of W.
        sigma = torch.dot(u, torch.matmul(weight_mat, v))
        weight = weight / sigma
        return weight, u

    def remove(self, module):
        # Restore the raw weight as a plain Parameter and drop the buffers.
        weight = module._parameters[self.name + '_org']
        delattr(module, self.name)
        delattr(module, self.name + '_u')
        delattr(module, self.name + '_org')
        module.register_parameter(self.name, weight)

    def __call__(self, module, inputs):
        weight, u = self.compute_weight(module)
        setattr(module, self.name, weight)
        # BUG FIX: persist the power-iteration state.  The previous code
        # copied `weight` onto itself and never wrote `u` back, so the
        # `<name>_u` buffer stayed at its random initialization and the
        # power iteration could not converge across forward passes.
        with torch.no_grad():
            getattr(module, self.name + '_u').copy_(u)

    @staticmethod
    def apply(module, name, n_power_iterations, eps):
        fn = SpectralNorm(name, n_power_iterations, eps)
        weight = module._parameters[name]
        height = weight.size(0)
        # Random unit vector seeding the power iteration.
        u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter(fn.name + "_org", weight)
        # `name` becomes a buffer holding the normalized weight; it is
        # refreshed by the hook before every forward call.
        module.register_buffer(fn.name, weight)
        module.register_buffer(fn.name + "_u", u)
        module.register_forward_pre_hook(fn)
        return fn
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12):
    r"""Applies spectral normalization to a parameter in the given module.

    .. math::
        \mathbf{W} &= \dfrac{\mathbf{W}}{\sigma(\mathbf{W})} \\
        \sigma(\mathbf{W}) &= \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}

    Rescales the weight tensor by its spectral norm :math:`\sigma`,
    estimated with the power-iteration method (weights with more than two
    dimensions are viewed as 2D for the iteration).  This stabilizes
    discriminator (critic) training in GANs.  Implemented as a
    forward-pre-hook that recomputes the rescaled weight before every
    :meth:`~Module.forward` call.

    See `Spectral Normalization for Generative Adversarial Networks`_ .

    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        n_power_iterations (int, optional): number of power iterations to
            calculate spectral norm
        eps (float, optional): epsilon for numerical stability in
            calculating norms

    Returns:
        The original module with the spectral norm hook

    Example::
        >>> m = spectral_norm(nn.Linear(20, 40))
        Linear (20 -> 40)
        >>> m.weight_u.size()
        torch.Size([20])
    """
    SpectralNorm.apply(module, name, n_power_iterations, eps)
    return module
def remove_spectral_norm(module, name='weight'):
    r"""Removes the spectral normalization reparameterization from a module.

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter

    Example:
        >>> m = spectral_norm(nn.Linear(40, 10))
        >>> remove_spectral_norm(m)
    """
    # Locate the hook installed by spectral_norm(), undo it, unregister it.
    for key, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[key]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(
        name, module))
import warnings
def clip_grad_norm_(parameters, max_norm, norm_type=2):
    r"""Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.

    Arguments:
        parameters (Iterable[Tensor]): an iterable of Tensors that will have
            gradients normalized
        max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.

    Returns:
        Total norm of the parameters (viewed as a single vector), or ``0.``
        when no parameter has a gradient.
    """
    # Materialize once so generator inputs are supported (we iterate twice).
    parameters = [p for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    # Guard the empty case: max() below raises on an empty sequence, and
    # there is nothing to clip anyway.
    if len(parameters) == 0:
        return 0.
    if norm_type == float('inf'):
        total_norm = max(p.grad.data.abs().max() for p in parameters)
    else:
        total_norm = 0
        for p in parameters:
            param_norm = p.grad.data.norm(norm_type)
            total_norm += param_norm ** norm_type
        total_norm = total_norm ** (1. / norm_type)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for p in parameters:
            # mul_ accepts a 0-dim tensor directly; the old `.item()` call
            # was unnecessary and crashed if total_norm was a plain number.
            p.grad.data.mul_(clip_coef)
    return total_norm
def clip_grad_norm(parameters, max_norm, norm_type=2):
    r"""Deprecated alias; see :func:`torch.nn.utils.clip_grad_norm_`.

    .. warning::
        This method is now deprecated in favor of
        :func:`torch.nn.utils.clip_grad_norm_`.
    """
    warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor "
                  "of torch.nn.utils.clip_grad_norm_.", stacklevel=2)
    # Delegate to the in-place-suffixed implementation.
    return clip_grad_norm_(parameters, max_norm, norm_type)
def clip_grad_value_(parameters, clip_value):
    r"""Clips gradient of an iterable of parameters at specified value.

    Gradients are modified in-place.

    Arguments:
        parameters (Iterable[Tensor]): an iterable of Tensors that will have
            gradients clipped
        clip_value (float or int): maximum allowed magnitude of the
            gradients; each gradient is clamped into
            ``[-clip_value, clip_value]``
    """
    bound = float(clip_value)
    for param in parameters:
        if param.grad is not None:
            param.grad.data.clamp_(min=-bound, max=bound)
import torch
def parameters_to_vector(parameters):
    r"""Convert parameters to one vector

    Arguments:
        parameters (Iterable[Tensor]): an iterator of Tensors that are the
            parameters of a model.

    Returns:
        The parameters represented by a single vector
    """
    device = None
    chunks = []
    for param in parameters:
        # torch.cat requires all pieces on one device; enforce that.
        device = _check_param_device(param, device)
        chunks.append(param.view(-1))
    return torch.cat(chunks)
def vector_to_parameters(vec, parameters):
    r"""Convert one vector to the parameters

    Arguments:
        vec (Tensor): a single vector representing the parameters of a model.
        parameters (Iterable[Tensor]): an iterator of Tensors that are the
            parameters of a model.
    """
    # Ensure vec of type Tensor
    if not isinstance(vec, torch.Tensor):
        raise TypeError('expected torch.Tensor, but got: {}'
                        .format(torch.typename(vec)))
    # Flag for the device where the parameter is located
    param_device = None
    # Read position inside vec
    pointer = 0
    for param in parameters:
        # Ensure the parameters are located in the same device
        param_device = _check_param_device(param, param_device)
        # numel() avoids allocating a LongTensor per parameter (and keeps
        # `pointer` a plain int) unlike torch.prod(torch.LongTensor(...)).
        num_param = param.numel()
        # Slice the vector, reshape it, and replace the parameter's data.
        param.data = vec[pointer:pointer + num_param].view_as(param).data
        pointer += num_param
def _check_param_device(param, old_param_device):
r"""This helper function is to check if the parameters are located
in the same device. Currently, the conversion between model parameters
and single vector form is not supported for multiple allocations,
e.g. parameters in different GPUs, or mixture of CPU/GPU.
Arguments:
param ([Tensor]): a Tensor of a parameter of a model
old_param_device (int): the device where the first parameter of a
model is allocated.
Returns:
old_param_device (int): report device for the first time
"""
# Meet the first parameter
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError('Found two parameters on different devices, '
'this is currently not supported.')
return old_param_device | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/nn/utils/convert_parameters.py | 0.900606 | 0.890008 | convert_parameters.py | pypi |
import ctypes
import torch
from . import cudart, check_error, cudaStatus
class Stream(torch._C._CudaStreamBase):
    """Wrapper around a CUDA stream.
    A CUDA stream is a linear sequence of execution that belongs to a specific
    device, independent from other streams. See :ref:`cuda-semantics` for
    details.
    Arguments:
        device(int, optional): a device on which to allocate the Stream.
        priority(int, optional): priority of the stream. Lower numbers
            represent higher priorities.
    """

    def __new__(cls, device=-1, priority=0, **kwargs):
        # Allocate the underlying C stream while `device` is current, so the
        # stream is bound to that device.
        with torch.cuda.device(device):
            return super(Stream, cls).__new__(cls, priority=priority, **kwargs)

    def wait_event(self, event):
        """Makes all future work submitted to the stream wait for an event.
        Arguments:
            event (Event): an event to wait for.
        .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see `CUDA
           documentation`_ for more info.
           This function returns without waiting for :attr:`event`: only future
           operations are affected.
        .. _CUDA documentation:
           http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
        """
        check_error(cudart().cudaStreamWaitEvent(self, event, ctypes.c_int(0)))

    def wait_stream(self, stream):
        """Synchronizes with another stream.
        All future work submitted to this stream will wait until all kernels
        submitted to a given stream at the time of call complete.
        Arguments:
            stream (Stream): a stream to synchronize.
        .. note:: This function returns without waiting for currently enqueued
           kernels in :attr:`stream`: only future operations are affected.
        """
        # Implemented by recording an event on `stream` and waiting on it.
        self.wait_event(stream.record_event())

    def record_event(self, event=None):
        """Records an event.
        Arguments:
            event (Event, optional): event to record. If not given, a new one
                will be allocated.
        Returns:
            Recorded event.
        """
        if event is None:
            event = Event()
        check_error(cudart().cudaEventRecord(event, self))
        return event

    def query(self):
        """Checks if all the work submitted has been completed.
        Returns:
            A boolean indicating if all kernels in this stream are completed.
        """
        res = cudart().cudaStreamQuery(self)
        # NOT_READY is the expected "still running" answer, not an error.
        if res == cudaStatus.ERROR_NOT_READY:
            return False
        check_error(res)
        return True

    def synchronize(self):
        """Wait for all the kernels in this stream to complete.
        .. note:: This is a wrapper around ``cudaStreamSynchronize()``: see
           `CUDA documentation`_ for more info.
        .. _CUDA documentation:
           http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
        """
        check_error(cudart().cudaStreamSynchronize(self))

    @staticmethod
    def priority_range():
        # Query the device's (least, greatest) stream priority bounds.
        least_priority = ctypes.c_int()
        greatest_priority = ctypes.c_int()
        check_error(cudart().cudaDeviceGetStreamPriorityRange(
            ctypes.byref(least_priority), ctypes.byref(greatest_priority)))
        return (least_priority.value, greatest_priority.value)

    @property
    def priority(self):
        priority = ctypes.c_int()
        check_error(cudart().cudaStreamGetPriority(self, ctypes.byref(priority)))
        return priority.value

    @property
    def _as_parameter_(self):
        # Lets a Stream be passed directly to ctypes CUDA runtime calls.
        return ctypes.c_void_p(self.cuda_stream)

    def __eq__(self, o):
        # Streams are equal when they wrap the same raw handle on one device.
        if isinstance(o, Stream):
            return o.device == self.device and o.cuda_stream == self.cuda_stream
        return False

    def __hash__(self):
        return hash((self.cuda_stream, self.device))

    def __repr__(self):
        return ('<torch.cuda.Stream device={0} cuda_stream={1:#x}>'
                .format(self.device, self.cuda_stream))
class EventHandle(ctypes.Structure):
    # Mirrors cudaIpcEventHandle_t: an opaque 64-byte blob used to share a
    # CUDA event across processes.
    IPC_HANDLE_SIZE = 64
    _fields_ = [('reserved', ctypes.c_char * IPC_HANDLE_SIZE)]
class Event(object):
    """Wrapper around CUDA event.
    Arguments:
        enable_timing (bool): indicates if the event should measure time
            (default: ``False``)
        blocking (bool): if ``True``, :meth:`wait` will be blocking (default: ``False``)
        interprocess (bool): if ``True``, the event can be shared between processes
            (default: ``False``)
    """
    # Flag bits for cudaEventCreateWithFlags.
    DEFAULT = 0x0
    BLOCKING_SYNC = 0x1
    DISABLE_TIMING = 0x2
    INTERPROCESS = 0x4

    def __init__(self, enable_timing=False, blocking=False, interprocess=False,
                 _handle=None):
        flags = Event.DEFAULT if enable_timing else Event.DISABLE_TIMING
        if blocking:
            flags |= Event.BLOCKING_SYNC
        if interprocess:
            flags |= Event.INTERPROCESS
        event_ptr = ctypes.c_void_p()
        self._cudart = cudart()
        if _handle:
            # Re-open an event exported from another process via its IPC handle.
            check_error(self._cudart.cudaIpcOpenEventHandle(ctypes.byref(event_ptr), _handle))
        else:
            check_error(self._cudart.cudaEventCreateWithFlags(ctypes.byref(event_ptr), ctypes.c_uint(flags)))
        self._as_parameter_ = event_ptr

    def __del__(self):
        # __init__ may have failed before _as_parameter_ was assigned.
        if hasattr(self, '_as_parameter_'):
            check_error(self._cudart.cudaEventDestroy(self._as_parameter_))
            del self._as_parameter_

    def record(self, stream=None):
        """Records the event in a given stream."""
        stream = torch.cuda.current_stream() if stream is None else stream
        stream.record_event(self)

    def wait(self, stream=None):
        """Makes a given stream wait for the event."""
        stream = torch.cuda.current_stream() if stream is None else stream
        stream.wait_event(self)

    def query(self):
        """Checks if the event has been recorded.
        Returns:
            A boolean indicating if the event has been recorded.
        """
        status = cudart().cudaEventQuery(self)
        # NOT_READY just means "not recorded yet", not a failure.
        if status == cudaStatus.ERROR_NOT_READY:
            return False
        check_error(status)
        return True

    def elapsed_time(self, end_event):
        """Returns the time elapsed before the event was recorded."""
        time_ms = ctypes.c_float()
        check_error(cudart().cudaEventElapsedTime(
            ctypes.byref(time_ms), self, end_event))
        return time_ms.value

    def synchronize(self):
        """Synchronizes with the event."""
        check_error(cudart().cudaEventSynchronize(self))

    def ipc_handle(self):
        """Returns an IPC handle of this event."""
        handle = EventHandle()
        check_error(cudart().cudaIpcGetEventHandle(ctypes.byref(handle), self))
        return handle

    def __repr__(self):
        return '<torch.cuda.Event {0:#x}>'.format(self._as_parameter_.value)
from torch import _C
from . import _lazy_init, _lazy_call, device_count, device as device_ctx_manager
def get_rng_state(device=-1):
    r"""Returns the random number generator state of the current
    GPU as a ByteTensor.
    Args:
        device (int, optional): The device to return the RNG state of.
            Default: -1 (i.e., use the current device).
    .. warning::
        This function eagerly initializes CUDA.
    """
    _lazy_init()
    # Switch devices so the C call reads the requested GPU's state.
    with device_ctx_manager(device):
        return _C._cuda_getRNGState()
def get_rng_state_all():
    r"""Returns a tuple of ByteTensor representing the random number states of all devices."""
    # Collect the state from every visible device, in index order.
    results = []
    for i in range(device_count()):
        with device_ctx_manager(i):
            results.append(get_rng_state())
    return results
def set_rng_state(new_state, device=-1):
    r"""Sets the random number generator state of the current GPU.
    Args:
        new_state (torch.ByteTensor): The desired state
    """
    # Clone so later caller-side mutation cannot affect the deferred callback.
    new_state_copy = new_state.clone()

    # NB: What if device=-1?  You might be afraid that the "current"
    # device would change by the time we actually get around to invoking
    # the lazy callback.  But actually, this is not possible: changing
    # the current device involves a CUDA call, which would in turn
    # initialize the state.  So then _lazy_call would execute cb
    # immediately.
    def cb():
        with device_ctx_manager(device):
            _C._cuda_setRNGState(new_state_copy)

    # Deferred until CUDA is initialized (or run immediately if it already is).
    _lazy_call(cb)
def set_rng_state_all(new_states):
    r"""Sets the random number generator state of all devices.
    Args:
        new_states (tuple of torch.ByteTensor): The desired state for each device
    """
    # new_states[i] is applied to device i.
    for i, state in enumerate(new_states):
        set_rng_state(state, i)
def manual_seed(seed):
    r"""Sets the seed for generating random numbers for the current GPU.
    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.
    Args:
        seed (int): The desired seed.
    .. warning::
        If you are working with a multi-GPU model, this function is insufficient
        to get determinism. To seed all GPUs, use :func:`manual_seed_all`.
    """
    seed = int(seed)
    # Deferred until CUDA initialization; no-op when CUDA never initializes.
    _lazy_call(lambda: _C._cuda_manualSeed(seed))
def manual_seed_all(seed):
    r"""Sets the seed for generating random numbers on all GPUs.

    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.
    """
    seed = int(seed)

    # Deferred so that importing torch.cuda does not eagerly initialize CUDA.
    def cb():
        _C._cuda_manualSeedAll(seed)

    _lazy_call(cb)
def seed():
    r"""Sets the seed for generating random numbers to a random number for the current GPU.

    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.

    .. warning::
        If you are working with a multi-GPU model, this function will only initialize
        the seed on one GPU. To initialize all GPUs, use :func:`seed_all`.
    """
    # Deferred so that importing torch.cuda does not eagerly initialize CUDA.
    def cb():
        _C._cuda_seed()

    _lazy_call(cb)
def seed_all():
    r"""Sets the seed for generating random numbers to a random number on all GPUs.

    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.
    """
    # Deferred so that importing torch.cuda does not eagerly initialize CUDA.
    def cb():
        _C._cuda_seedAll()

    _lazy_call(cb)
def initial_seed():
    r"""Returns the current random seed of the current GPU.

    .. warning::
        This function eagerly initializes CUDA.
    """
    _lazy_init()
    # NOTE: the final line previously had extraction-artifact text fused
    # onto it, which made the module unparseable; it has been removed.
    return _C._cuda_initialSeed()
import torch
from . import nccl
from torch._utils import _accumulate, _take_tensors, _flatten_dense_tensors, \
_flatten_sparse_tensors, _unflatten_dense_tensors, \
_unflatten_sparse_tensors, _reorder_tensors_as
def broadcast(tensor, devices):
    """Broadcasts a tensor to a number of GPUs.

    Arguments:
        tensor (Tensor): tensor to broadcast.
        devices (Iterable): an iterable of devices among which to broadcast.
            Note that it should be like (src, dst1, dst2, ...), the first element
            of which is the source device to broadcast from.

    Returns:
        A tuple containing copies of the ``tensor``, placed on devices
        corresponding to indices from ``devices``.
    """
    # The heavy lifting (device copies, NCCL when available) happens in C++.
    return torch._C._broadcast(tensor, devices)
def broadcast_coalesced(tensors, devices, buffer_size=10485760):
    """Broadcasts a sequence of tensors to the specified GPUs.

    Small tensors are first coalesced into a buffer to reduce the number
    of synchronizations.

    Arguments:
        tensors (sequence): tensors to broadcast.
        devices (Iterable): an iterable of devices among which to broadcast.
            Note that it should be like (src, dst1, dst2, ...), the first element
            of which is the source device to broadcast from.
        buffer_size (int): maximum size of the buffer used for coalescing
            (default: 10 MB).

    Returns:
        A tuple containing copies of the ``tensor``, placed on devices
        corresponding to indices from ``devices``.
    """
    # Coalescing and broadcasting are both implemented in C++.
    return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
def reduce_add(inputs, destination=None):
    """Sums tensors from multiple GPUs.

    All inputs should have matching shapes.

    Arguments:
        inputs (Iterable[Tensor]): an iterable of tensors to add.
        destination (int, optional): a device on which the output will be
            placed (default: current device).

    Returns:
        A tensor containing an elementwise sum of all inputs, placed on the
        ``destination`` device.
    """
    # TODO: try to find an input on another gpu, copy it,
    # and accumulate into the copy
    if destination is None:
        destination = torch.cuda.current_device()
    input_size = inputs[0].size()
    nccl_root = None
    # Validate shapes and locate the input that already lives on the
    # destination device; it becomes the NCCL reduction root.
    for i, inp in enumerate(inputs):
        assert inp.is_cuda, "reduce_add expects all inputs to be on GPUs"
        if inp.get_device() == destination:
            nccl_root = i
        if inp.size() != input_size:
            got = 'x'.join(str(x) for x in inp.size())
            expected = 'x'.join(str(x) for x in input_size)
            raise ValueError("input {} has invalid size: got {}, but expected "
                             "{}".format(i, got, expected))
    if nccl_root is None:
        raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")
    # NOTE(review): `inp` deliberately leaks out of the loop above (it is the
    # last input); all inputs were just checked to share one size, so using it
    # as the allocation template is safe, if subtle.
    result = inp.new(device=destination).resize_as_(inp).zero_()
    # Fast path: NCCL reduce directly into `result` when the first input is
    # already on the destination device.
    if nccl.is_available(inputs) and inputs[0].get_device() == destination:
        outputs = [result] + [t.new(t.size()) for t in inputs[1:]]
        nccl.reduce(inputs, outputs, root=nccl_root)
        return result
    # Slow path: copy each input to the destination device and accumulate.
    for inp in inputs:
        input_correct_gpu = inp.cuda(result.get_device())
        result.add_(input_correct_gpu)
    return result
def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
    """Sums tensors from multiple GPUs.

    Small tensors are first coalesced into a buffer to reduce the number
    of synchronizations.

    Arguments:
        inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
            contain tensors from a single device.
        destination (int, optional): a device on which the output will be
            placed (default: current device).
        buffer_size (int): maximum size of the buffer used for coalescing

    Returns:
        A tuple of tensors containing an elementwise sum of each group of
        inputs, placed on the ``destination`` device.
    """
    dense_tensors = [[] for _ in inputs]  # shape (num_gpus, num_tensors)
    output = []
    ref_order = []
    # process sparse ones first since they may have different sizes on different gpus
    for tensor_at_gpus in zip(*inputs):
        if all(t.is_sparse for t in tensor_at_gpus):
            # Sparse tensors are reduced one at a time, without coalescing.
            result = reduce_add(tensor_at_gpus, destination)
            output.append(result)
            ref_order.append(tensor_at_gpus[0])
        else:
            # Any group containing a dense tensor is densified and queued
            # for the coalesced reduction below.
            for coll, t in zip(dense_tensors, tensor_at_gpus):
                coll.append(t.to_dense() if t.is_sparse else t)
            ref_order.append(dense_tensors[0][-1])
    itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]
    # now the dense ones, which have consistent sizes
    for chunks in zip(*itrs):
        # Flatten each per-GPU chunk into one contiguous buffer, reduce the
        # buffers, then split the result back into the original shapes.
        flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks]
        flat_result = reduce_add(flat_tensors, destination)
        output.extend(_unflatten_dense_tensors(flat_result, chunks[0]))
    # Restore the caller's original tensor ordering (sparse results were
    # appended before dense ones).
    return tuple(_reorder_tensors_as(output, ref_order))
def scatter(tensor, devices, chunk_sizes=None, dim=0, streams=None):
    """Scatters tensor across multiple GPUs.

    Arguments:
        tensor (Tensor): tensor to scatter.
        devices (Iterable[int]): iterable of ints, specifying among which
            devices the tensor should be scattered.
        chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
            each device. It should match ``devices`` in length and sum to
            ``tensor.size(dim)``. If not specified, the tensor will be divided
            into equal chunks.
        dim (int, optional): A dimension along which to chunk the tensor.
        streams (Iterable, optional): CUDA streams (one per device) on which
            to enqueue the copies; defaults to each device's current stream.

    Returns:
        A tuple containing chunks of the ``tensor``, spread across given
        ``devices``.
    """
    if chunk_sizes is None:
        chunks = tensor.chunk(len(devices), dim)
    else:
        assert sum(chunk_sizes) == tensor.size(dim), "given chunk sizes " \
            "don't sum up to the tensor's size (sum(chunk_sizes) == {}, but " \
            "expected {})".format(sum(chunk_sizes), tensor.size(dim))
        assert min(chunk_sizes) > 0, "got a negative chunk_size"
        # _accumulate yields cumulative END offsets, so `start - size` is the
        # beginning of each chunk.
        chunks = [tensor.narrow(dim, start - size, size)
                  for start, size in zip(_accumulate(chunk_sizes), chunk_sizes)]
    chunks = tuple(chunk.contiguous() for chunk in chunks)
    # TODO: copy to a pinned buffer first (if copying from CPU)
    if streams is None:
        streams = [None] * len(devices)
    outputs = []
    for device, chunk, stream in zip(devices, chunks, streams):
        with torch.cuda.device(device), torch.cuda.stream(stream):
            # Async copy; safe because the source chunks are kept alive by
            # the `chunks` tuple until this function returns.
            outputs.append(chunk.cuda(device, non_blocking=True))
    return tuple(outputs)
def gather(tensors, dim=0, destination=None):
    """Gathers tensors from multiple GPUs.

    Tensor sizes in all dimensions other than ``dim`` have to match.

    Arguments:
        tensors (Iterable[Tensor]): iterable of tensors to gather.
        dim (int): a dimension along which the tensors will be concatenated.
        destination (int, optional): output device (-1 means CPU, default:
            current device)

    Returns:
        A tensor located on ``destination`` device, that is a result of
        concatenating ``tensors`` along ``dim``.
    """
    total_size = 0
    expected_size = list(tensors[0].size())
    # Validate that every tensor matches the first in all dims except `dim`,
    # while summing the sizes along `dim`.
    for tensor in tensors:
        assert tensor.is_cuda, "gather expects all inputs to be on GPUs"
        expected_size[dim] = tensor.size(dim)
        if list(tensor.size()) != expected_size:
            got = 'x'.join(str(x) for x in tensor.size())
            expected = 'x'.join(str(x) for x in expected_size)
            raise ValueError("gather got an input of invalid size: got {}, "
                             "but expected {}".format(got, expected))
        total_size += tensor.size(dim)
    expected_size[dim] = total_size
    expected_size = torch.Size(expected_size)
    if destination is None:
        destination = torch.cuda.current_device()
    if destination == -1:
        # destination == -1 means gather to host (CPU) memory.
        result = tensors[0].new().cpu().resize_(expected_size)
    else:
        result = tensors[0].new(expected_size, device=destination)

    chunk_start = 0
    # TODO: if copying to CPU, allocate a pinned buffer, do async copies to it,
    # and copy it to regular memory
    for tensor in tensors:
        # Second arg enables a non-blocking copy.
        result.narrow(dim, chunk_start, tensor.size(dim)).copy_(tensor, True)
        chunk_start += tensor.size(dim)
    # NOTE: the final line previously had extraction-artifact text fused onto
    # it, which made the module unparseable; it has been removed.
    return result
import functools
import types
import torch._C as _C
TensorProtoDataType = _C._onnx.TensorProtoDataType
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
class ExportTypes:
    """Enumerates the on-disk formats an exported ONNX model can take."""
    PROTOBUF_FILE = 1  # a single serialized protobuf file
    ZIP_ARCHIVE = 2  # uncompressed zip archive
    COMPRESSED_ZIP_ARCHIVE = 3  # compressed zip archive
    DIRECTORY = 4  # unzipped directory layout
def _export(*args, **kwargs):
    """Delegate to :func:`torch.onnx.utils._export` (imported lazily)."""
    # Import deferred to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils
    return utils._export(*args, **kwargs)
def _export_to_pretty_string(*args, **kwargs):
    """Delegate to :func:`torch.onnx.utils._export_to_pretty_string` (imported lazily)."""
    # Import deferred to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils
    return utils._export_to_pretty_string(*args, **kwargs)
def export(*args, **kwargs):
    """Delegate to :func:`torch.onnx.utils.export` (imported lazily)."""
    # Import deferred to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils
    return utils.export(*args, **kwargs)
def _optimize_trace(trace, aten):
    """Optimize the graph held by ``trace`` in place for ONNX export."""
    # Import deferred to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils
    optimized_graph = utils._optimize_graph(trace.graph(), aten)
    trace.set_graph(optimized_graph)
def set_training(*args, **kwargs):
    """Delegate to :func:`torch.onnx.utils.set_training` (imported lazily)."""
    # Import deferred to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils
    return utils.set_training(*args, **kwargs)
def _run_symbolic_function(*args, **kwargs):
    """Delegate to :func:`torch.onnx.utils._run_symbolic_function` (imported lazily)."""
    # Import deferred to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils
    return utils._run_symbolic_function(*args, **kwargs)
def _run_symbolic_method(*args, **kwargs):
    """Delegate to :func:`torch.onnx.utils._run_symbolic_method` (imported lazily)."""
    # Import deferred to avoid a circular import with torch.onnx.utils.
    from torch.onnx import utils
    return utils._run_symbolic_method(*args, **kwargs)
def _symbolic_override_wrapper_maker(symbolic_fn, might_trace, fn):
    """Wrap ``fn`` so that while JIT tracing, ``symbolic_fn`` is recorded in
    the trace graph in place of the operations ``fn`` actually performs.

    Arguments:
        symbolic_fn: callable ``(graph, *args, **kwargs)`` that builds the
            replacement subgraph and returns its output values.
        might_trace: cheap predicate over the positional args used to skip
            the more expensive tracing checks entirely.
        fn: the function whose ONNX export is being overridden.
    """
    def wrapper(*args, **kwargs):
        import torch
        import torch.jit
        from torch.autograd import Function, function

        # fast pass
        if not might_trace(args):
            return fn(*args, **kwargs)

        # Flatten (possibly nested) args and keep only the tensors, which are
        # what carry tracing state.
        flat_args = tuple(function._iter_tensors_permissive(args))
        flat_args_only_tensors = tuple(t for t in flat_args if isinstance(t, torch.Tensor))
        if not any(map(torch._C._jit_is_tracing, flat_args_only_tensors)):
            return fn(*args, **kwargs)

        tstate = torch._C._get_tracing_state(flat_args_only_tensors)

        # Capture the traced Value for every tensor arg BEFORE running fn.
        arg_values = [torch._C._get_value_trace(tstate, x) if isinstance(x, torch.Tensor) else x for x in flat_args]

        # This must come after the calls to get_value_trace, lest we
        # lose information due to in-place operations.
        output_vars = fn(*args, **kwargs)

        # Rebuild the original nesting of args, then record the override
        # subgraph and bind its output values to fn's output tensors.
        symbolic_args = function._unflatten(arg_values, args)
        output_vals = symbolic_fn(tstate.graph(), *symbolic_args, **kwargs)

        for var, val in zip(
                function._iter_tensors(output_vars),
                function._iter_jit_values(output_vals)):
            val.inferTypeFrom(var.data)
            torch._C._set_value_trace(tstate, var, val)

        return output_vars

    # fn might be autograd.Function too, in this case wrapping doesn't work
    if isinstance(fn, types.FunctionType):
        wrapper = functools.wraps(fn)(wrapper)

    return wrapper
def symbolic_override(symbolic_fn):
    r"""
    Decorator overriding the ONNX export of a function with a given subgraph.

    Effectively allows attaching a symbolic() implementation to an arbitrary
    python function or autograd.Function. Requirements for the decorated
    function:

     - being non-member function or autograd.Function
     - positional inputs are Tensors or (nested) lists or tuples of
       them (similar requirement to NestedIOFunction)
     - outputs are similarly Tensors or (nested) lists or tuples of them
     - non-tensor typed values should be keyword arguments both in definition
       and when called

    Example usage:

    ```
    def symb(g, x, y):
        return g.op('Sum', x, y[0], y[1])

    @symbolic_override(symb)
    def foo(x, y):
        return x + y[0] + y[1]
    ```
    """
    # Always attempt the tracing check: there is no cheap way to rule it out.
    always = lambda x: True
    return functools.partial(_symbolic_override_wrapper_maker, symbolic_fn, always)
def symbolic_override_first_arg_based(symbolic_fn):
    r"""
    Decorator overriding the ONNX export of a function with a given subgraph.

    Equivalent to :func:`symbolic_override` but checks only the first argument
    of the function to figure out whether the tracing is on. Thus the first arg
    needs to be a Tensor.
    """
    def might_trace(args):
        import torch
        first_arg = args[0]
        if not isinstance(first_arg, torch.Tensor):
            raise ValueError('First argument of {} is expected to be a tensor, '
                             'but got an object of type {}'
                             .format(symbolic_fn.__name__, type(first_arg)))
        return torch._C._jit_is_tracing(first_arg)

    return functools.partial(
        _symbolic_override_wrapper_maker, symbolic_fn, might_trace)
def symbolic_override_packed_sequence_based(symbolic_fn):
    r"""
    Decorator overriding the ONNX export of a function with a given subgraph.

    Equivalent to :func:`symbolic_override` but checks only the first argument
    of the function to figure out whether the tracing is on. Thus the first arg
    needs to be a :class:`torch.nn.utils.rnn.PackedSequence`.
    """
    def might_trace(args):
        import torch
        first_arg = args[0]
        if not isinstance(first_arg, torch.nn.utils.rnn.PackedSequence):
            raise ValueError('pad_packed_sequence expects sequence to be a '
                             'PackedSequence, but got an object of type {}'
                             .format(type(first_arg)))
        # A PackedSequence's first field is its data tensor, which carries
        # the tracing state.
        return torch._C._jit_is_tracing(first_arg[0])

    # NOTE: the final line previously had extraction-artifact text fused onto
    # it, which made the module unparseable; it has been removed.  The
    # docstring also wrongly said the first arg "needs to be a Tensor".
    return functools.partial(_symbolic_override_wrapper_maker, symbolic_fn, might_trace)
import math
import torch
from .Module import Module
from .utils import clear
class SpatialFullConvolution(Module):
    """Transposed ("full") 2D convolution, a.k.a. deconvolution.

    Maps ``nInputPlane`` feature maps to ``nOutputPlane`` maps with kW x kH
    kernels, stride (dW, dH), padding (padW, padH) and output adjustment
    (adjW, adjH).  The input may also be a list ``[input, target]``, in which
    case the adjustment factors are derived automatically from ``target``'s
    spatial size.
    """

    def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, adjW=0, adjH=0):
        super(SpatialFullConvolution, self).__init__()

        self.nInputPlane = nInputPlane
        self.nOutputPlane = nOutputPlane
        self.kW = kW
        self.kH = kH
        self.dW = dW
        self.dH = dH
        self.padW = padW
        # Default to symmetric padding when padH is not given.
        self.padH = padH if padH is not None else padW
        self.adjW = adjW
        self.adjH = adjH

        if self.adjW > self.dW - 1 or self.adjH > self.dH - 1:
            raise ValueError('adjW and adjH must be smaller than self.dW - 1 and self.dH - 1 respectively')

        # Weight layout: (nIn, nOut, kH, kW) -- transposed w.r.t. a regular conv.
        self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)
        self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)
        self.bias = torch.Tensor(self.nOutputPlane)
        self.gradBias = torch.Tensor(self.nOutputPlane)

        self.ones = torch.Tensor()
        self.finput = None       # backend scratch buffer
        self.fgradInput = None   # backend scratch buffer
        self.zeroScalar = None   # lazily-built zero for list-input gradInput
        self._gradOutput = None  # contiguous copy buffer

        self.reset()

    def noBias(self):
        """Remove the bias term (and its gradient) from this module."""
        self.bias = None
        self.gradBias = None
        return self

    def reset(self, stdv=None):
        """Reinitialize weights (and bias) uniformly in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            nInputPlane = self.nInputPlane
            kH = self.kH
            kW = self.kW
            stdv = 1 / math.sqrt(kW * kH * nInputPlane)

        self.weight.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.uniform_(-stdv, stdv)

    def _makeContiguous(self, input, gradOutput=None):
        """Return contiguous copies of input (and gradOutput) if needed."""
        if not input.is_contiguous():
            # FIX: __init__ never sets self._input, so guard with getattr to
            # avoid an AttributeError on the first non-contiguous input.
            if getattr(self, '_input', None) is None:
                self._input = input.new()
            self._input.resize_as_(input).copy_(input)
            input = self._input

        if gradOutput is not None:
            if not gradOutput.is_contiguous():
                if self._gradOutput is None:
                    self._gradOutput = gradOutput.new()
                self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
                gradOutput = self._gradOutput
            return input, gradOutput

        return input

    def _calculateAdj(self, targetSize, ker, pad, stride):
        """Adjustment needed so the transposed conv output reaches targetSize."""
        return (targetSize + 2 * pad - ker) % stride

    def updateOutput(self, input):
        inputTensor = input
        adjW, adjH = self.adjW, self.adjH

        # The input can be a table where the second element indicates the target
        # output size, in which case the adj factors are computed automatically
        if isinstance(input, list):
            inputTensor = input[0]
            targetTensor = input[1]
            tDims = targetTensor.dim()
            tH = targetTensor.size(tDims - 2)
            tW = targetTensor.size(tDims - 1)
            adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
            adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
            if not hasattr(self, 'finput') or self.finput is None:
                self.finput = input[0].new()
            if not hasattr(self, 'fgradInput') or self.fgradInput is None:
                self.fgradInput = input[0].new()
        else:
            if not hasattr(self, 'finput') or self.finput is None:
                self.finput = input.new()
            if not hasattr(self, 'fgradInput') or self.fgradInput is None:
                self.fgradInput = input.new()

        inputTensor = self._makeContiguous(inputTensor)
        self._backend.SpatialFullConvolution_updateOutput(
            self._backend.library_state,
            inputTensor,
            self.output,
            self.weight,
            self.bias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            adjW, adjH
        )

        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        inputTensor = input
        adjW, adjH = self.adjW, self.adjH

        # The input can be a table where the second element indicates the target
        # output size, in which case the adj factors are computed automatically
        if isinstance(input, list):
            inputTensor = input[0]
            targetTensor = input[1]
            tDims = targetTensor.dim()
            tH = targetTensor.size(tDims - 2)
            tW = targetTensor.size(tDims - 1)
            adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
            adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
            # Momentarily extract the gradInput tensor
            if isinstance(self.gradInput, list):
                self.gradInput = self.gradInput[0]

        inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
        self._backend.SpatialFullConvolution_updateGradInput(
            self._backend.library_state,
            inputTensor,
            gradOutput,
            self.gradInput,
            self.weight,
            self.finput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            adjW, adjH
        )

        if isinstance(input, list):
            # Create a zero tensor to be expanded and used as gradInput[1].
            if self.zeroScalar is None:
                self.zeroScalar = input[1].new(1).zero_()
            self.ones.resize_(input[1].dim()).fill_(1)
            zeroTensor = self.zeroScalar.view_as(self.ones).expand_as(input[1])
            self.gradInput = [self.gradInput, zeroTensor]

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        inputTensor = input
        adjW, adjH = self.adjW, self.adjH

        # The input can be a table where the second element indicates the target
        # output size, in which case the adj factors are computed automatically
        if isinstance(input, list):
            inputTensor = input[0]
            targetTensor = input[1]
            tDims = targetTensor.dim()
            tH = targetTensor.size(tDims - 2)
            tW = targetTensor.size(tDims - 1)
            # FIX: these previously called the undefined global `calculateAdj`,
            # raising NameError whenever a list input reached this method.
            adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
            adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)

        inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
        self._backend.SpatialFullConvolution_accGradParameters(
            self._backend.library_state,
            inputTensor,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            adjW, adjH,
            scale
        )

    def type(self, type=None, tensorCache=None):
        # Drop the scratch buffers: they will be reallocated in the new type.
        if self.finput is not None:
            self.finput = torch.Tensor()
        if self.fgradInput is not None:
            self.fgradInput = torch.Tensor()
        return super(SpatialFullConvolution, self).type(type, tensorCache)

    def __repr__(self):
        s = super(SpatialFullConvolution, self).__repr__()
        s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)
        if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:
            s += ', {}, {}'.format(self.dW, self.dH)

        if (self.padW or self.padH) and (self.padW != 0 or self.padH != 0):
            s += ', {}, {}'.format(self.padW, self.padH)

        if (self.adjW or self.adjH) and (self.adjW != 0 or self.adjH != 0):
            s += ', {}, {}'.format(self.adjW, self.adjH)

        s += ')'
        if self.bias is None:
            s += ' without bias'
        return s

    def clearState(self):
        clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')
        return super(SpatialFullConvolution, self).clearState()
import torch
from .Module import Module
from .utils import clear, addSingletondimension
class Max(Module):
    """Takes the maximum of the input along ``dimension``.

    For inputs with more than one dimension the reduced dimension is squeezed
    out of the output; gradients are routed back to the argmax positions via
    ``scatter_``.
    """

    def __init__(self, dimension=0):
        super(Max, self).__init__()
        self.dimension = dimension
        self._output = None   # buffer for max values (keepdim form)
        self._indices = None  # buffer for argmax indices

    def _getPositiveDimension(self, input):
        """Normalize a possibly-negative dimension index against ``input``."""
        dimension = self.dimension
        if dimension < 0:
            dimension = input.dim() + dimension
        return dimension

    def _lazyInit(self):
        """Allocate the value/index buffers on first use."""
        if self._output is None:
            self._output = self.output.new()
        if self._indices is None:
            self._indices = \
                (torch.cuda.LongTensor() if self.output.is_cuda else torch.LongTensor())

    def updateOutput(self, input):
        self._lazyInit()
        dimension = self._getPositiveDimension(input)
        torch.max(input, dimension, out=(self._output, self._indices), keepdim=True)
        if input.dim() > 1:
            # Squeeze out the reduced (size-1) dimension.
            self.output.set_(self._output.select(dimension, 0))
        else:
            self.output.set_(self._output)
        return self.output

    def updateGradInput(self, input, gradOutput):
        self._lazyInit()
        dimension = self._getPositiveDimension(input)
        if input.dim() > 1:
            gradOutputView = addSingletondimension(gradOutput, dimension)
        else:
            gradOutputView = gradOutput
        # Only the positions that produced the max receive gradient.
        self.gradInput.resize_as_(input).zero_().scatter_(dimension, self._indices, gradOutputView)
        return self.gradInput

    def type(self, type, tensorCache=None):
        # torch.max expects a LongTensor as indices, whereas cutorch.max expects a CudaTensor.
        if type == 'torch.cuda.FloatTensor':
            indices, self._indices = self._indices, None
            super(Max, self).type(type, tensorCache)
            self._indices = indices.type('torch.cuda.LongTensor') if indices is not None else None
        else:
            # self._indices must be a LongTensor. Setting it to nil temporarily avoids
            # unnecessary memory allocations.
            indices, self._indices = self._indices, None
            super(Max, self).type(type, tensorCache)
            self._indices = indices.long() if indices is not None else None
        return self

    def clearState(self):
        clear(self, '_indices', '_output')
        # NOTE: the line below previously had extraction-artifact text fused
        # onto it, which made the module unparseable; it has been removed.
        return super(Max, self).clearState()
import math
import torch
from .Module import Module
from .utils import clear
class SpatialConvolutionLocal(Module):
    """Locally-connected 2D layer: like a convolution, but with untied
    weights, i.e. a distinct kW x kH filter bank per output location.

    The spatial input size (iW, iH) must be fixed at construction time so the
    per-location weight tensor (oH, oW, nOut, nIn, kH, kW) can be allocated.
    """

    def __init__(self, nInputPlane, nOutputPlane, iW, iH, kW, kH, dW=1, dH=1, padW=0, padH=None):
        super(SpatialConvolutionLocal, self).__init__()

        self.nInputPlane = nInputPlane
        self.nOutputPlane = nOutputPlane
        self.kW = kW
        self.kH = kH
        self.iW = iW
        self.iH = iH

        self.dW = dW
        self.dH = dH
        self.padW = padW
        # Default to symmetric padding when padH is not given.
        self.padH = padH if padH is not None else padW
        # Output extent implied by input size, kernel, stride and padding.
        self.oW = int(math.floor((self.padW * 2 + iW - self.kW) / self.dW)) + 1
        self.oH = int(math.floor((self.padH * 2 + iH - self.kH) / self.dH)) + 1
        assert 1 <= self.oW and 1 <= self.oH

        self.weight = torch.Tensor(self.oH, self.oW, nOutputPlane, nInputPlane, kH, kW)
        self.bias = torch.Tensor(nOutputPlane, self.oH, self.oW)
        self.gradWeight = torch.Tensor().resize_as_(self.weight)
        self.gradBias = torch.Tensor().resize_as_(self.bias)

        self.reset()

        self.finput = None       # backend scratch buffer
        self.fgradInput = None   # backend scratch buffer
        self._gradOutput = None  # contiguous copy buffer

    def reset(self, stdv=None):
        """Reinitialize weights and bias uniformly in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.kW * self.kH * self.nInputPlane)

        self.weight.uniform_(-stdv, stdv)
        self.bias.uniform_(-stdv, stdv)

    def _makeContiguous(self, input, gradOutput=None):
        """Return contiguous copies of input (and gradOutput) if needed."""
        if not input.is_contiguous():
            # FIX: __init__ never sets self._input, so guard with getattr to
            # avoid an AttributeError on the first non-contiguous input.
            if getattr(self, '_input', None) is None:
                self._input = input.new()
            self._input.resize_as_(input).copy_(input)
            input = self._input

        if gradOutput is not None:
            if not gradOutput.is_contiguous():
                if self._gradOutput is None:
                    self._gradOutput = gradOutput.new()
                self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
                gradOutput = self._gradOutput
            return input, gradOutput

        return input

    def _viewWeight(self):
        """Collapse weights to the (oH*oW, nOut, nIn*kH*kW) backend layout."""
        self.weight = self.weight.view(self.oH * self.oW, self.nOutputPlane, self.nInputPlane * self.kH * self.kW)
        if self.gradWeight is not None and self.gradWeight.dim() > 0:
            self.gradWeight = self.gradWeight.view(
                self.oH * self.oW, self.nOutputPlane, self.nInputPlane * self.kH * self.kW)

    def _unviewWeight(self):
        """Restore weights to the (oH, oW, nOut, nIn, kH, kW) layout."""
        self.weight = self.weight.view(self.oH, self.oW, self.nOutputPlane, self.nInputPlane, self.kH, self.kW)
        if self.gradWeight is not None and self.gradWeight.dim() > 0:
            self.gradWeight = self.gradWeight.view(
                self.oH, self.oW, self.nOutputPlane, self.nInputPlane, self.kH, self.kW)

    def _checkInputSize(self, input):
        """Validate that ``input`` matches the construction-time geometry."""
        if input.ndimension() == 3:
            # FIX: the width check previously re-tested input.size(1) instead
            # of input.size(2), so wrong-width inputs slipped through.
            if input.size(0) != self.nInputPlane or input.size(1) != self.iH or input.size(2) != self.iW:
                raise RuntimeError(
                    'Given input size: ({}x{}x{}) inconsistent with expected input size: ({}x{}x{}).'.format(
                        input.size(0), input.size(1), input.size(2), self.nInputPlane, self.iH, self.iW))
        elif input.ndimension() == 4:
            if input.size(1) != self.nInputPlane or input.size(2) != self.iH or input.size(3) != self.iW:
                raise RuntimeError(
                    'Given input size: ({}x{}x{}x{}) inconsistent with expected input size: (*x{}x{}x{}).'.format(
                        input.size(0), input.size(1), input.size(2), input.size(3), self.nInputPlane, self.iH, self.iW))
        else:
            raise RuntimeError('3D or 4D (batch mode) tensor expected')

    def _checkOutputSize(self, input, output):
        """Validate that ``output`` matches the expected output geometry."""
        if output.ndimension() != input.ndimension():
            raise RuntimeError('inconsistent dimension between output and input.')

        if output.ndimension() == 3:
            if output.size(0) != self.nOutputPlane or output.size(1) != self.oH or output.size(2) != self.oW:
                raise RuntimeError(
                    'Given output size: ({}x{}x{}) inconsistent with expected output size: ({}x{}x{}).'.format(
                        output.size(0), output.size(1), output.size(2), self.nOutputPlane, self.oH, self.oW))
        elif output.ndimension() == 4:
            if output.size(1) != self.nOutputPlane or output.size(2) != self.oH or output.size(3) != self.oW:
                raise RuntimeError('Given output size: ({}x{}x{}x{}) inconsistent with expected output size: '
                                   '(batchsize x{}x{}x{}).'.format(
                                       output.size(0), output.size(1), output.size(2),
                                       output.size(3), self.nOutputPlane, self.oH, self.oW))
        else:
            raise RuntimeError('3D or 4D(batch mode) tensor expected')

    def updateOutput(self, input):
        if self.finput is None:
            self.finput = input.new()
        if self.fgradInput is None:
            self.fgradInput = input.new()

        self._checkInputSize(input)
        self._viewWeight()
        input = self._makeContiguous(input)
        self._backend.SpatialConvolutionLocal_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.weight,
            self.bias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.iW, self.iH,
            self.oW, self.oH
        )
        self._unviewWeight()
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        self._checkInputSize(input)
        self._checkOutputSize(input, gradOutput)
        self._viewWeight()
        input, gradOutput = self._makeContiguous(input, gradOutput)
        self._backend.SpatialConvolutionLocal_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.weight,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.iW, self.iH,
            self.oW, self.oH
        )
        self._unviewWeight()
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        self._checkInputSize(input)
        self._checkOutputSize(input, gradOutput)
        input, gradOutput = self._makeContiguous(input, gradOutput)
        self._viewWeight()
        self._backend.SpatialConvolutionLocal_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.iW, self.iH,
            self.oW, self.oH,
            scale
        )
        self._unviewWeight()

    def type(self, type=None, tensorCache=None):
        # Drop the scratch buffers: they will be reallocated in the new type.
        if self.finput is not None:
            self.finput = torch.Tensor()
        if self.fgradInput is not None:
            self.fgradInput = torch.Tensor()
        return super(SpatialConvolutionLocal, self).type(type, tensorCache)

    def __repr__(self):
        # FIX: this was previously a dead `__tostring__` method that also
        # referenced the undefined name SpatialConvolution (NameError).
        s = super(SpatialConvolutionLocal, self).__repr__()
        s += '({} -> {}, {}x{}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.iW, self.iH, self.kW, self.kH)
        if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:
            s += ', {}, {}'.format(self.dW, self.dH)

        if self.padW != 0 or self.padH != 0:
            s += ', {}, {}'.format(self.padW, self.padH)

        s += ')'
        return s

    # Backward-compatible alias for the old (broken) method name.
    __tostring__ = __repr__

    def clearState(self):
        clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')
        return super(SpatialConvolutionLocal, self).clearState()
import torch
from .Module import Module
class FlattenTable(Module):
    """Flattens an arbitrarily nested table (list) of tensors into a flat
    list, remembering the nesting so gradients can be un-flattened on the
    backward pass.
    """

    def __init__(self):
        super(FlattenTable, self).__init__()

        self.output = []
        self.input_map = []  # nested structure of indices into self.output
        self.gradInput = []

    def _flatten(self, output, input):
        """Append leaves of ``input`` to ``output``; return the index map."""
        if isinstance(input, list):
            input_map = []
            # forward DFS order
            for i in range(len(input)):
                input_map.append(self._flatten(output, input[i]))
        else:
            input_map = len(output)
            output.append(input)

        return input_map

    def _checkMapping(self, output, input, input_map):
        """Verify that ``input_map`` still maps ``input`` onto ``output``."""
        if isinstance(input, list):
            if len(input) != len(input_map):
                return False

            # forward DFS order
            for i in range(len(input)):
                if not self._checkMapping(output, input[i], input_map[i]):
                    return False

            return True
        else:
            # Identity check: the map is only valid if it points at the
            # exact same tensor object.
            return output[input_map] is input

    # During BPROP we have to build a gradInput with the same shape as the
    # input. This is a recursive function to build up a gradInput
    def _inverseFlatten(self, gradOutput, input_map):
        if isinstance(input_map, list):
            gradInput = []
            for i in range(len(input_map)):
                gradInput.append(self._inverseFlatten(gradOutput, input_map[i]))

            return gradInput
        else:
            return gradOutput[input_map]

    def updateOutput(self, input):
        assert isinstance(input, list)
        # to avoid updating rebuilding the flattened table every updateOutput call
        # we will: a DFS pass over the existing output table and the inputs to
        # see if it needs to be rebuilt.
        if not self._checkMapping(self.output, input, self.input_map):
            self.output = []
            self.input_map = self._flatten(self.output, input)

        return self.output

    def updateGradInput(self, input, gradOutput):
        assert isinstance(input, list)
        assert isinstance(gradOutput, list)
        # If the input changes between the updateOutput and updateGradInput call,
        #: we may have to rebuild the input_map! However, let's assume that
        # the input_map is valid and that forward has already been called.

        # However, we should check that the gradInput is valid:
        if not self._checkMapping(gradOutput, self.gradInput, self.input_map):
            self.gradInput = self._inverseFlatten(gradOutput, self.input_map)

        return self.gradInput

    def type(self, type=None, tensorCache=None):
        if not type:
            return self._type
        # This function just stores references so we don't need to do any type
        # conversions. Just force the tables to be empty.
        self.clearState()

    def clearState(self):
        self.input_map = []
        # NOTE: the line below previously had extraction-artifact text fused
        # onto it, which made the module unparseable; it has been removed.
        return super(FlattenTable, self).clearState()
import torch
from .Module import Module
from .utils import clear
class BatchNormalization(Module):
# expected dimension of input
nDim = 2
    def __init__(self, nOutput, eps=1e-5, momentum=0.1, affine=True):
        """Create a batch-norm layer over ``nOutput`` features.

        Args:
            nOutput: number of features (size of dimension 1 of the input).
            eps: value added to the variance for numerical stability.
            momentum: update rate for the running statistics.
            affine: if True, learn a per-feature scale (weight) and shift (bias).
        """
        super(BatchNormalization, self).__init__()
        assert nOutput != 0
        self.affine = affine
        self.eps = eps
        self.train = True  # training vs. evaluation mode flag
        self.momentum = momentum
        self.running_mean = torch.zeros(nOutput)
        self.running_var = torch.ones(nOutput)
        self.save_mean = None  # per-batch mean, filled by updateOutput
        self.save_std = None   # per-batch std statistic, filled by updateOutput
        self._gradOutput = None  # scratch buffer for contiguous copies
        if self.affine:
            self.weight = torch.Tensor(nOutput)
            self.bias = torch.Tensor(nOutput)
            self.gradWeight = torch.Tensor(nOutput)
            self.gradBias = torch.Tensor(nOutput)
            self.reset()
        else:
            # Without affine, there are no learnable parameters.
            self.weight = None
            self.bias = None
            self.gradWeight = None
            self.gradBias = None
def reset(self):
if self.weight is not None:
self.weight.uniform_()
if self.bias is not None:
self.bias.zero_()
self.running_mean.zero_()
self.running_var.fill_(1)
    def _checkInputDim(self, input):
        """Validate the dimensionality and feature count of ``input``."""
        # nDim is a class attribute (2 here; subclasses override for 3D/4D).
        if input.dim() != self.nDim:
            raise RuntimeError(
                'only mini-batch supported ({}D tensor), got {}D tensor instead'.format(self.nDim, input.dim()))
        if input.size(1) != self.running_mean.nelement():
            raise RuntimeError('got {}-feature tensor, expected {}'.format(input.size(1), self.running_mean.nelement()))
    def _makeContiguous(self, input, gradOutput=None):
        """Return a ``(input, gradOutput)`` pair of contiguous tensors,
        copying into scratch buffers when the originals are not contiguous.

        Note: always returns a 2-tuple (gradOutput may be None) -- callers
        index ``[0]`` or unpack both elements.
        """
        if not input.is_contiguous():
            # NOTE(review): __init__ does not set self._input; presumably the
            # Module base class or clearState provides it -- TODO confirm.
            if self._input is None:
                self._input = input.new()
            self._input.resize_as_(input).copy_(input)
            input = self._input

        if gradOutput is not None:
            if not gradOutput.is_contiguous():
                if self._gradOutput is None:
                    self._gradOutput = gradOutput.new()
                self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
                gradOutput = self._gradOutput

        return input, gradOutput
def updateOutput(self, input):
self._checkInputDim(input)
input = self._makeContiguous(input)[0]
self.output.resize_as_(input)
if self.save_mean is None:
self.save_mean = input.new()
self.save_mean.resize_as_(self.running_mean)
if self.save_std is None:
self.save_std = input.new()
self.save_std.resize_as_(self.running_var)
self._backend.BatchNormalization_updateOutput(
self._backend.library_state,
input,
self.output,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.save_mean,
self.save_std,
self.train,
self.momentum,
self.eps
)
return self.output
def _backward(self, input, gradOutput, scale, gradInput=None, gradWeight=None, gradBias=None):
self._checkInputDim(input)
self._checkInputDim(gradOutput)
if not hasattr(self, 'save_mean') or not hasattr(self, 'save_std'):
raise RuntimeError('you have to call updateOutput() at least once before backward()')
input, gradOutput = self._makeContiguous(input, gradOutput)
scale = scale or 1.
if gradInput is not None:
gradInput.resize_as_(gradOutput)
self._backend.BatchNormalization_backward(
self._backend.library_state,
input,
gradOutput,
gradInput,
gradWeight,
gradBias,
self.weight,
self.running_mean,
self.running_var,
self.save_mean,
self.save_std,
self.train,
scale,
self.eps
)
return self.gradInput
def backward(self, input, gradOutput, scale=1.):
return self._backward(input, gradOutput, scale, self.gradInput, self.gradWeight, self.gradBias)
def updateGradInput(self, input, gradOutput):
return self._backward(input, gradOutput, 1., self.gradInput)
def accGradParameters(self, input, gradOutput, scale=1.):
return self._backward(input, gradOutput, scale, None, self.gradWeight, self.gradBias)
def read(self, file, version):
super(BatchNormalization, self).read(self, file)
if version < 2:
if self.running_std:
self.running_var = self.running_std.pow_(-2).add_(-self.eps)
self.running_std = None
def clearState(self):
# first 5 buffers are not present in the current implementation,
# but we keep them for cleaning old saved models
clear(self, [
'buffer',
'buffer2',
'centered',
'std',
'normalized',
'_input',
'_gradOutput',
'save_mean',
'save_std',
])
return super(BatchNormalization, self).clearState() | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/BatchNormalization.py | 0.857574 | 0.396185 | BatchNormalization.py | pypi |
import torch
from .Module import Module
from .utils import clear, recursiveResizeAs
class MixtureTable(Module):
    """Mixture-of-experts combiner.

    Input is ``[gaterInput, expertInputs]``; output is the sum of the
    expert outputs weighted by the gater activations along dimension
    ``dim``.  ``expertInputs`` is either a list of per-expert tensors
    (table mode) or a single tensor carrying an expert dimension at
    ``dim``.
    """

    def __init__(self, dim=1):
        super(MixtureTable, self).__init__()
        self.dim = dim
        self.size = torch.Size()
        self.size2 = torch.Size()
        self.batchSize = 0
        self.backwardSetup = False
        self.gradInput = []

        # lazily-allocated scratch buffers
        self._gaterView = None
        self._expert = None
        self._expertView = None
        self._sum = None
        self._expertView2 = None
        self._expert2 = None
        # latches to True once a list (table) input has been seen
        self.table = False

    def updateOutput(self, input):
        """Compute the gater-weighted combination of the expert outputs."""
        gaterInput, expertInputs = input

        # buffers
        if self._gaterView is None:
            self._gaterView = input[0].new()
        if self._expert is None:
            self._expert = input[0].new()
        if self._expertView is None:
            self._expertView = input[0].new()

        # gater dimension that indexes experts
        self.dimG = 1
        batchSize = gaterInput.size(0)

        if self.table or isinstance(expertInputs, list):
            self.table = True
            if gaterInput.size(self.dimG) != len(expertInputs):
                raise RuntimeError("Should be one gater output per expert")

            expertInput = expertInputs[0]
            if self.batchSize != batchSize:
                # gater view shape: singleton everywhere except the batch
                # dim and the expert dim, so it broadcasts over an expert
                size = [1] * (expertInput.dim() + 1)
                if self.dimG > 0:
                    size[0] = gaterInput.size(0)
                size[self.dim] = gaterInput.size(self.dimG)
                self.size = torch.Size(size)
                self.output.resize_as_(expertInput)
                self.backwardSetup = False
                self.batchSize = batchSize

            self._gaterView = gaterInput.view(self.size)
            self.output.zero_()
            # multiply accumulate gater outputs by their commensurate expert
            for i, expertInput in enumerate(expertInputs):
                gate = self._gaterView.select(self.dim, i).expand_as(expertInput)
                self.output.addcmul_(expertInput, gate)
        else:
            if self.batchSize != batchSize:
                size = [1] * expertInputs.dim()
                if self.dimG > 0:
                    size[0] = gaterInput.size(0)
                size[self.dim] = gaterInput.size(self.dimG)
                self.size = torch.Size(size)
                self.output.resize_as_(expertInputs.select(self.dim, 0))
                self.batchSize = batchSize
                self.backwardSetup = False

            self._gaterView = gaterInput.view(self.size)
            # weight every expert slice, then reduce over the expert dim
            torch.mul(self._gaterView.expand_as(expertInputs), expertInputs, out=self._expert)
            torch.sum(self._expert, self.dim, True, out=self.output)
            self.output.resize_as_(expertInputs.select(self.dim, 0))

        return self.output

    def updateGradInput(self, input, gradOutput):
        """Compute gradients for both the gater and the expert inputs."""
        gaterInput, expertInputs = input
        recursiveResizeAs(self.gradInput, input)
        gaterGradInput, expertGradInputs = self.gradInput

        # buffers
        if self._sum is None:
            self._sum = input[0].new()
        if self._expertView2 is None:
            self._expertView2 = input[0].new()
        if self._expert2 is None:
            self._expert2 = input[0].new()

        if self.table:
            if not self.backwardSetup:
                for i, expertInput in enumerate(expertInputs):
                    # NOTE(review): `or` relies on tensor truthiness of
                    # expertGradInputs[i]; ambiguous for multi-element
                    # tensors on modern torch — verify against caller
                    expertGradInput = expertGradInputs[i] or expertInput.clone()
                    expertGradInput.resize_as_(expertInput)
                    expertGradInputs[i] = expertGradInput

                gaterGradInput.resize_as_(gaterInput)
                self.backwardSetup = True

            # like CMulTable, but with broadcasting
            for i, expertGradInput in enumerate(expertGradInputs):
                # gater updateGradInput
                torch.mul(gradOutput, expertInputs[i], out=self._expert)
                if self.dimG == 0:
                    self._expertView = self._expert.view(-1)
                else:
                    self._expertView = self._expert.view(gradOutput.size(0), -1)

                torch.sum(self._expertView, self.dimG, True, out=self._sum)
                if self.dimG == 0:
                    gaterGradInput[i] = self._sum.select(self.dimG, 0)
                else:
                    gaterGradInput.select(self.dimG, i).copy_(self._sum.select(self.dimG, 0))

                # expert updateGradInput
                gate = self._gaterView.select(self.dim, i).expand_as(expertGradInput)
                # NOTE(review): two-argument in-place mul_ is legacy
                # behaviour (res = gate * gradOutput) — confirm backend
                expertGradInput.mul_(gate, gradOutput)
        else:
            if not self.backwardSetup:
                size2 = list(expertInputs.size())
                size2[self.dim] = 1
                self.size2 = torch.Size(size2)
                gaterGradInput.resize_as_(gaterInput)
                self.backwardSetup = True

            # gater updateGradInput
            self._expertView = gradOutput.contiguous().view(torch.Size(self.size2))
            gradOutput = self._expertView.expand_as(expertInputs)
            torch.mul(gradOutput, expertInputs, out=self._expert)
            expert = self._expert.transpose(self.dim, self.dimG)
            if not expert.is_contiguous():
                self._expert2.resize_as_(expert)
                self._expert2.copy_(expert)
                expert = self._expert2

            if self.dimG == 0:
                self._expertView2 = expert.view(gaterInput.size(0), -1)
            else:
                self._expertView2 = expert.view(gaterInput.size(0), gaterInput.size(1), -1)

            torch.sum(self._expertView2, self.dimG + 1, True, out=gaterGradInput)
            gaterGradInput.resize_as_(gaterInput)

            # expert updateGradInput
            torch.mul(self._gaterView.expand_as(expertInputs), gradOutput, out=expertGradInputs)

        return self.gradInput

    def type(self, type, tensorCache=None):
        # drop buffers so they get re-allocated with the new tensor type
        self._gaterView = None
        self._expert = None
        self._expertView = None
        self._sum = None
        self._expert2 = None
        self._expertView2 = None
        return super(MixtureTable, self).type(type, tensorCache)

    def clearState(self, ):
        clear(self, [
            '_gaterView',
            '_expert',
            '_expertView',
            '_sum',
            '_expert2',
            '_expertView2',
        ])
        return super(MixtureTable, self).clearState() | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/MixtureTable.py | 0.752286 | 0.192691 | MixtureTable.py | pypi |
import math
import torch
from .Module import Module
from .utils import clear
class Euclidean(Module):
    """Computes the Euclidean distance of the input to `outputSize` learned
    centers: ``y_j = || w_j - x ||`` for each column ``w_j`` of `weight`.
    Expects a 2D (batch x inputSize) input.
    """

    def __init__(self, inputSize, outputSize):
        super(Euclidean, self).__init__()

        # weight holds the centers, one per output, column-wise
        self.weight = torch.Tensor(inputSize, outputSize)
        self.gradWeight = torch.Tensor(inputSize, outputSize)

        # state
        self.gradInput.resize_(inputSize)
        self.output.resize_(outputSize)

        # when True, backward reuses buffers computed in updateOutput
        self.fastBackward = True
        self.reset()

        # lazily-allocated scratch buffers
        self._input = None
        self._weight = None
        self._expand = None
        self._expand2 = None
        self._repeat = None
        self._repeat2 = None
        self._div = None
        self._output = None
        self._gradOutput = None
        self._expand3 = None
        self._sum = None

    def reset(self, stdv=None):
        """Re-initialize the centers uniformly in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.weight.size(0))

        self.weight.uniform_(-stdv, stdv)

    def _view(self, res, src, *args):
        # view `src` into `res`, making a contiguous copy only if required
        if src.is_contiguous():
            res.set_(src.view(*args))
        else:
            res.set_(src.contiguous().view(*args))

    def updateOutput(self, input):
        """Return the (batch x outputSize) matrix of distances to the centers."""
        # lazy initialize buffers
        if self._input is None:
            self._input = input.new()
        if self._weight is None:
            self._weight = self.weight.new()
        if self._expand is None:
            self._expand = self.output.new()
        if self._expand2 is None:
            self._expand2 = self.output.new()
        if self._repeat is None:
            self._repeat = self.output.new()
        if self._repeat2 is None:
            self._repeat2 = self.output.new()

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        # y_j = || w_j - x || = || x - w_j ||
        assert input.dim() == 2

        batchSize = input.size(0)
        self._view(self._input, input, batchSize, inputSize, 1)
        self._expand = self._input.expand(batchSize, inputSize, outputSize)
        # make the expanded tensor contiguous (requires lots of memory)
        self._repeat.resize_as_(self._expand).copy_(self._expand)

        self._weight = self.weight.view(1, inputSize, outputSize)
        self._expand2 = self._weight.expand_as(self._repeat)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            # TODO: after adding new allocators this can be changed
            # requires lots of memory, but minimizes cudaMallocs and loops
            self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
            self._repeat.add_(-1, self._repeat2)
        else:
            self._repeat.add_(-1, self._expand2)

        # reduce (x - w_j) over the input dimension to the 2-norm
        torch.norm(self._repeat, 2, 1, True, out=self.output)
        self.output.resize_(batchSize, outputSize)

        return self.output

    def updateGradInput(self, input, gradOutput):
        """Gradient w.r.t. the input; relies on buffers from updateOutput."""
        if self.gradInput is None:
            return

        if self._div is None:
            self._div = input.new()
        if self._output is None:
            self._output = self.output.new()
        if self._gradOutput is None:
            self._gradOutput = input.new()
        if self._expand3 is None:
            self._expand3 = input.new()

        if not self.fastBackward:
            self.updateOutput(input)

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        """
        dy_j   -2 * (w_j - x)     x - w_j
        ---- = ---------------- = -------
         dx    2 || w_j - x ||      y_j
        """

        # to prevent div by zero (NaN) bugs
        self._output.resize_as_(self.output).copy_(self.output).add_(0.0000001)
        self._view(self._gradOutput, gradOutput, gradOutput.size())
        torch.div(gradOutput, self._output, out=self._div)
        assert input.dim() == 2
        batchSize = input.size(0)

        self._div.resize_(batchSize, 1, outputSize)
        self._expand3 = self._div.expand(batchSize, inputSize, outputSize)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat2.resize_as_(self._expand3).copy_(self._expand3)
            self._repeat2.mul_(self._repeat)
        else:
            torch.mul(self._repeat, self._expand3, out=self._repeat2)

        torch.sum(self._repeat2, 2, True, out=self.gradInput)
        self.gradInput.resize_as_(input)

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        """Accumulate the gradient w.r.t. the centers into gradWeight."""
        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        """
        dy_j    2 * (w_j - x)     w_j - x
        ---- = --------------- = -------
        dw_j   2 || w_j - x ||     y_j
        """
        # assumes a preceding call to updateGradInput
        assert input.dim() == 2
        if self._sum is None:
            self._sum = input.new()
        torch.sum(self._repeat2, 0, True, out=self._sum)
        self._sum.resize_(inputSize, outputSize)
        self.gradWeight.add_(-scale, self._sum)

    def type(self, type=None, tensorCache=None):
        if type:
            # prevent premature memory allocations
            self.clearState()

        return super(Euclidean, self).type(type, tensorCache)

    def clearState(self):
        clear(self, [
            '_input',
            '_output',
            '_gradOutput',
            '_weight',
            '_div',
            '_sum',
            '_expand',
            '_expand2',
            '_expand3',
            '_repeat',
            '_repeat2',
        ])
        return super(Euclidean, self).clearState() | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/Euclidean.py | 0.696887 | 0.277234 | Euclidean.py | pypi |
import torch
from .Module import Module
from .Identity import Identity
from .LookupTable import LookupTable
from .Sequential import Sequential
from .ParallelTable import ParallelTable
from .MM import MM
class PartialLinear(Module):
    """
    PartialLinear is a Linear layer that allows the user to a set a collection of
    column indices. When the column indices are set, the layer will behave like a
    Linear layer that only has those columns. Meanwhile, all parameters are
    preserved, so resetting the PartialLinear layer will result in a module that
    behaves just like a regular Linear layer.

    This module is useful, for instance, when you want to: forward-backward on
    only a subset of a Linear layer during training but use the full Linear layer
    at test time.
    """

    def __init__(self, inputsize, outputsize, bias=True):
        super(PartialLinear, self).__init__()

        # define the layer as a small network:
        pt = ParallelTable()
        pt.add(Identity()).add(LookupTable(outputsize, inputsize))
        self.network = Sequential().add(pt).add(MM(False, True))
        if bias:
            self.bias = torch.zeros(1, outputsize)
            self.gradBias = torch.zeros(1, outputsize)
        else:
            self.bias = self.gradBias = None

        # set partition:
        self.inputsize = inputsize
        self.outputsize = outputsize
        self.allcolumns = torch.arange(0, self.outputsize).long()
        self.resetPartition()
        self.addBuffer = None
        self.buffer = None

    def setPartition(self, indices):
        """Restrict the layer to the given output-column indices."""
        self.partition = indices.type(self.allcolumns.type())
        return self

    def resetPartition(self):
        """Restore the full set of output columns."""
        self.partition = self.allcolumns
        return self

    def parameters(self):
        return [self.network.get(0).get(1).weight, self.bias], \
            [self.network.get(0).get(1).gradWeight, self.gradBias]
        # should return only the relevant partition?

    def updateOutput(self, input):
        self.output.set_(self.network.forward([input, self.partition]))
        if self.bias is not None:
            # only the bias entries of the active partition contribute
            self.output.add_(torch.index_select(self.bias, 1, self.partition).expand_as(self.output))
            if self.addBuffer is None:
                self.addBuffer = input.new()
            if self.addBuffer.nelement() != input.size(0):
                self.addBuffer.resize_(input.size(0)).fill_(1)

        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is not None:
            self.network.updateGradInput([input, self.partition], gradOutput)
            self.gradInput.set_(self.network.gradInput[0])

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        self.network.accGradParameters([input, self.partition], gradOutput, scale)
        if self.bias is not None:
            if self.buffer is None:
                self.buffer = input.new()
            self.buffer.resize_(gradOutput.size(1))
            # column sums of gradOutput, scattered into the active partition
            torch.mv(gradOutput.t(), self.addBuffer, out=self.buffer).mul_(scale)
            self.gradBias.index_add_(
                1, self.partition, self.buffer.view(1, self.buffer.nelement())
            )

    def accUpdateGradParameters(self, input, gradOutput, lr):
        # temporarily alias the gradient buffers to the parameters so the
        # shared accGradParameters applies the update in place
        gradWeight = self.network.get(0).get(1).gradWeight
        gradBias = self.gradBias
        self.network.get(0).get(1).gradWeight = self.network.get(0).get(1).weight
        self.gradBias = self.bias
        self.accGradParameters(input, gradOutput, -lr)
        self.network.get(0).get(1).gradWeight = gradWeight
        self.gradBias = gradBias

    def zeroGradParameters(self):
        self.network.zeroGradParameters()
        # guard: gradBias is None when constructed with bias=False
        if self.gradBias is not None:
            self.gradBias.zero_()

    def updateParameters(self, learningRate):
        self.network.updateParameters(learningRate)
        if self.bias is not None:
            # was `self.bias._add(...)`: Tensor has no `_add` method;
            # the in-place update is spelled `add_`
            self.bias.add_(-learningRate, self.gradBias)

    def __repr__(self):
        # fixes two defects: super() previously named ParallelTable instead
        # of PartialLinear, and the unparenthesized conditional expression
        # swallowed the whole concatenation (repr was '' whenever bias was set)
        return super(PartialLinear, self).__repr__() + \
            '({} -> {})'.format(self.inputsize, self.outputsize) + \
            (' without bias' if self.bias is None else '')
import torch
from .Module import Module
class MM(Module):
    """Matrix-matrix multiplication of a pair of inputs ``[a, b]``.

    Supports 2D inputs (single matrices) and 3D inputs (batched via bmm).
    `transA` / `transB` transpose the respective operand before multiplying.
    """

    def __init__(self, transA=False, transB=False):
        super(MM, self).__init__()
        self.transA = transA
        self.transB = transB
        self.gradInput = [torch.Tensor(), torch.Tensor()]

    def updateOutput(self, input):
        """Return a @ b (with requested transpositions), batched when 3D."""
        assert len(input) == 2
        a, b = input
        assert a.ndimension() == 2 or a.ndimension() == 3
        assert a.dim() == b.dim()

        if a.ndimension() == 2:
            if self.transA:
                a = a.t()
            if self.transB:
                b = b.t()
            self.output.resize_(a.size(0), b.size(1))
            torch.mm(a, b, out=self.output)
        else:
            if self.transA:
                a = a.transpose(1, 2)
            if self.transB:
                b = b.transpose(1, 2)

            self.output.resize_(a.size(0), a.size(1), b.size(2))
            torch.bmm(a, b, out=self.output)

        return self.output

    def updateGradInput(self, input, gradOutput):
        """Gradients of a @ b w.r.t. both operands, honoring transpositions."""
        if self.gradInput[0] is None:
            self.gradInput[0] = input[0].new()
        if self.gradInput[1] is None:
            self.gradInput[1] = input[1].new()

        assert len(input) == 2
        a, b = input
        self.gradInput[0].resize_as_(a)
        self.gradInput[1].resize_as_(b)

        assert gradOutput.ndimension() == 2 or gradOutput.ndimension() == 3
        assert a.dim() == b.dim() == gradOutput.dim()

        # dispatch to mm (2D) or bmm (3D) with the matching dim indices
        if gradOutput.ndimension() == 2:
            h_dim, w_dim = 0, 1
            f = "mm"
        else:
            h_dim, w_dim = 1, 2
            f = "bmm"

        # when both or neither operand is transposed, the gradient formulas
        # need the *other* operand transposed; pre-transpose both here
        if self.transA == self.transB:
            a = a.transpose(h_dim, w_dim)
            b = b.transpose(h_dim, w_dim)

        if self.transA:
            getattr(torch, f)(b, gradOutput.transpose(h_dim, w_dim), out=self.gradInput[0])
        else:
            getattr(torch, f)(gradOutput, b, out=self.gradInput[0])

        if self.transB:
            getattr(torch, f)(gradOutput.transpose(h_dim, w_dim), a, out=self.gradInput[1])
        else:
            getattr(torch, f)(a, gradOutput, out=self.gradInput[1])

        return self.gradInput | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/MM.py | 0.701815 | 0.659433 | MM.py | pypi |
import math
import torch
from .Module import Module
class SpatialFractionalMaxPooling(Module):
    """Fractional max pooling with pseudorandom overlapping pooling regions.

    Usage:
    nn.SpatialFractionalMaxPooling(poolSizeW, poolSizeH, outW, outH)
        the output should be the exact size (outH x outW)
    nn.SpatialFractionalMaxPooling(poolSizeW, poolSizeH, ratioW, ratioH)
        the output should be the size (floor(inH x ratioH) x floor(inW x ratioW))
        ratios are numbers between (0, 1) exclusive
    """

    def __init__(self, poolSizeW, poolSizeH, arg1, arg2):
        super(SpatialFractionalMaxPooling, self).__init__()
        assert poolSizeW >= 2
        assert poolSizeH >= 2

        # Pool size (how wide the pooling for each output unit is)
        self.poolSizeW = poolSizeW
        self.poolSizeH = poolSizeH

        # Random samples are drawn for all
        # batch * plane * (height, width; i.e., 2) points. This determines
        # the 2d "pseudorandom" overlapping pooling regions for each
        # (batch element x input plane). A new set of random samples is
        # drawn every updateOutput call, unless we disable it via
        # .fixPoolingRegions().
        self.randomSamples = None

        # Flag to disable re-generation of random samples for producing
        # a new pooling. For testing purposes
        self.newRandomPool = False
        self.indices = None

        # arg1/arg2 are interpreted either as an explicit output size
        # (both >= 1) or as reduction ratios (both in (0, 1))
        if arg1 >= 1 and arg2 >= 1:
            # Desired output size: the input tensor will determine the reduction
            # ratio
            self.outW = arg1
            self.outH = arg2
            self.ratioW = self.ratioH = None
        else:
            # Reduction ratio specified per each input
            # This is the reduction ratio that we use
            self.ratioW = arg1
            self.ratioH = arg2
            self.outW = self.outH = None

            # The reduction ratio must be between 0 and 1
            assert self.ratioW > 0 and self.ratioW < 1
            assert self.ratioH > 0 and self.ratioH < 1

    def _getBufferSize(self, input):
        # one (h, w) random sample pair per (batch element, plane)
        assert input.ndimension() == 4
        batchSize = input.size(0)
        planeSize = input.size(1)

        return torch.Size([batchSize, planeSize, 2])

    def _initSampleBuffer(self, input):
        # (re)fill the random-sample buffer that positions the pooling regions
        sampleBufferSize = self._getBufferSize(input)

        if self.randomSamples is None:
            self.randomSamples = input.new().resize_(sampleBufferSize).uniform_()
        elif self.randomSamples.size(0) != sampleBufferSize[0] or self.randomSamples.size(1) != sampleBufferSize[1]:
            self.randomSamples.resize_(sampleBufferSize).uniform_()
        elif not self.newRandomPool:
            # Create new pooling windows, since this is a subsequent call
            self.randomSamples.uniform_()

    def _getOutputSizes(self, input):
        """Resolve (outW, outH), deriving them from the ratios if needed."""
        outW = self.outW
        outH = self.outH
        if self.ratioW is not None and self.ratioH is not None:
            assert input.ndimension() == 4
            outW = int(math.floor(input.size(3) * self.ratioW))
            outH = int(math.floor(input.size(2) * self.ratioH))

            # Neither can be smaller than 1
            assert outW > 0
            assert outH > 0
        else:
            assert outW is not None and outH is not None

        return outW, outH

    # Call this to turn off regeneration of random pooling regions each
    # updateOutput call.
    def fixPoolingRegions(self, val=True):
        self.newRandomPool = val
        return self

    def updateOutput(self, input):
        if self.indices is None:
            self.indices = input.new()
        self.indices = self.indices.long()
        self._initSampleBuffer(input)
        outW, outH = self._getOutputSizes(input)

        self._backend.SpatialFractionalMaxPooling_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            outW, outH, self.poolSizeW, self.poolSizeH,
            self.indices, self.randomSamples)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # requires the randomSamples/indices produced by updateOutput
        assert self.randomSamples is not None
        outW, outH = self._getOutputSizes(input)

        self._backend.SpatialFractionalMaxPooling_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            outW, outH, self.poolSizeW, self.poolSizeH,
            self.indices)
        return self.gradInput

    # backward compat
    def empty(self):
        self.clearState()

    def clearState(self):
        self.indices = None
        self.randomSamples = None
        return super(SpatialFractionalMaxPooling, self).clearState()

    def __repr__(self):
        return super(SpatialFractionalMaxPooling, self).__repr__() + \
            '({}x{}, {}, {})'.format(self.outW or self.ratioW,
                                     self.outH or self.ratioH,
                                     self.poolSizeW, self.poolSizeH) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/SpatialFractionalMaxPooling.py | 0.862656 | 0.427397 | SpatialFractionalMaxPooling.py | pypi |
import torch
from .Criterion import Criterion
# TODO: use THNN
class BCECriterion(Criterion):
    """Binary cross-entropy criterion:
    loss = -[ target * log(input) + (1 - target) * log(1 - input) ],
    optionally weighted per output and averaged over elements.
    Inputs are expected to be probabilities in [0, 1].
    """

    # additive fudge term to keep log() away from zero
    eps = 1e-12

    def __init__(self, weights=None, sizeAverage=True):
        if weights is not None and weights.dim() != 1:
            raise ValueError("weights input should be 1D Tensor")

        super(BCECriterion, self).__init__()
        self.sizeAverage = sizeAverage
        self.buffer = None
        self.weights = weights

    def updateOutput(self, input, target):
        # - log(input) * target - log(1 - input) * (1 - target)
        if input.nelement() != target.nelement():
            raise RuntimeError("input and target size mismatch")

        if self.buffer is None:
            self.buffer = input.new()

        buffer = self.buffer
        weights = self.weights

        buffer.resize_as_(input)

        if weights is not None and target.dim() != 1:
            # broadcast the 1D weights across the batch dimension
            weights = self.weights.view(1, target.size(1)).expand_as(target)

        # log(input) * target
        torch.add(input, self.eps, out=buffer).log_()
        if weights is not None:
            buffer.mul_(weights)

        target_1d = target.contiguous().view(-1)
        # don't save a 1-d view of buffer: it should already be contiguous, and it's
        # used as non-1d tensor later.
        output = torch.dot(target_1d, buffer.contiguous().view(-1))

        # log(1 - input) * (1 - target)
        torch.mul(input, -1, out=buffer).add_(1 + self.eps).log_()
        if weights is not None:
            buffer.mul_(weights)

        # sum(log(1-input)) - dot(target, log(1-input)) == dot(1-target, log(1-input))
        output = output + torch.sum(buffer)
        output = output - torch.dot(target_1d, buffer.contiguous().view(-1))

        if self.sizeAverage:
            output = output / input.nelement()

        self.output = - output.item()

        return self.output

    def updateGradInput(self, input, target):
        # - (target - input) / ( input (1 - input) )
        # The gradient is slightly incorrect:
        # It should have be divided by (input + self.eps) (1 - input + self.eps)
        # but it is divided by input (1 - input + self.eps) + self.eps
        # This modification requires less memory to be computed.
        if input.nelement() != target.nelement():
            raise RuntimeError("input and target size mismatch")

        if self.buffer is None:
            self.buffer = input.new()

        buffer = self.buffer
        weights = self.weights
        gradInput = self.gradInput

        if weights is not None and target.dim() != 1:
            weights = self.weights.view(1, target.size(1)).expand_as(target)

        buffer.resize_as_(input)
        # - x ( 1 + self.eps -x ) + self.eps
        torch.add(input, -1, out=buffer).add_(-self.eps).mul_(input).add_(-self.eps)

        gradInput.resize_as_(input)
        # y - x
        torch.add(target, -1, input, out=gradInput)
        # - (y - x) / ( x ( 1 + self.eps -x ) + self.eps )
        gradInput.div_(buffer)

        if weights is not None:
            gradInput.mul_(weights)

        if self.sizeAverage:
            gradInput.div_(target.nelement())

        return gradInput | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/BCECriterion.py | 0.432782 | 0.331823 | BCECriterion.py | pypi |
import torch
from .Module import Module
from .utils import clear
class SpatialCrossMapLRN(Module):
    """Local response normalization across feature maps (channels):
    each value is divided by
    ``(k + alpha/size * sum of squares over `size` neighboring channels) ** beta``.
    Expects a 4D (batch x channels x height x width) input.
    """

    def __init__(self, size, alpha=1e-4, beta=0.75, k=1):
        super(SpatialCrossMapLRN, self).__init__()

        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k

        # scratch buffers (scale caches the denominator for backward)
        self.scale = None
        self.paddedRatio = None
        self.accumRatio = None

    def updateOutput(self, input):
        assert input.dim() == 4

        if self.scale is None:
            self.scale = input.new()

        # CUDA inputs go through the backend kernel; CPU path is computed here
        if input.type() == 'torch.cuda.FloatTensor':
            self._backend.SpatialCrossMapLRN_updateOutput(
                self._backend.library_state,
                input,
                self.output,
                self.scale,
                self.size,
                self.alpha,
                self.beta,
                self.k
            )
        else:
            batchSize = input.size(0)
            channels = input.size(1)
            inputHeight = input.size(2)
            inputWidth = input.size(3)

            self.output.resize_as_(input)
            self.scale.resize_as_(input)

            # use output storage as temporary buffer
            inputSquare = self.output
            torch.pow(input, 2, out=inputSquare)

            prePad = int((self.size - 1) / 2 + 1)
            prePadCrop = channels if prePad > channels else prePad

            scaleFirst = self.scale.select(1, 0)
            scaleFirst.zero_()
            # compute first feature map normalization
            for c in range(prePadCrop):
                scaleFirst.add_(inputSquare.select(1, c))

            # reuse computations for next feature maps normalization
            # by adding the next feature map and removing the previous
            for c in range(1, channels):
                scalePrevious = self.scale.select(1, c - 1)
                scaleCurrent = self.scale.select(1, c)
                scaleCurrent.copy_(scalePrevious)
                if c < channels - prePad + 1:
                    squareNext = inputSquare.select(1, c + prePad - 1)
                    scaleCurrent.add_(1, squareNext)

                if c > prePad:
                    squarePrevious = inputSquare.select(1, c - prePad)
                    scaleCurrent.add_(-1, squarePrevious)

            # scale = k + alpha/size * (running sum of squares)
            self.scale.mul_(self.alpha / self.size).add_(self.k)

            torch.pow(self.scale, -self.beta, out=self.output)
            self.output.mul_(input)

        return self.output

    def updateGradInput(self, input, gradOutput):
        assert input.dim() == 4

        if input.type() == 'torch.cuda.FloatTensor':
            self._backend.SpatialCrossMapLRN_updateGradInput(
                self._backend.library_state,
                input,
                gradOutput,
                self.gradInput,
                self.scale,
                self.output,
                self.size,
                self.alpha,
                self.beta,
                self.k
            )
        else:
            batchSize = input.size(0)
            channels = input.size(1)
            inputHeight = input.size(2)
            inputWidth = input.size(3)

            if self.paddedRatio is None:
                self.paddedRatio = input.new()
            if self.accumRatio is None:
                self.accumRatio = input.new()
            # padded so a sliding window of `size` channels never goes out of range
            self.paddedRatio.resize_(channels + self.size - 1, inputHeight, inputWidth)
            self.accumRatio.resize_(inputHeight, inputWidth)

            cacheRatioValue = 2 * self.alpha * self.beta / self.size
            inversePrePad = int(self.size - (self.size - 1) / 2)

            self.gradInput.resize_as_(input)
            torch.pow(self.scale, -self.beta, out=self.gradInput).mul_(gradOutput)

            self.paddedRatio.zero_()
            paddedRatioCenter = self.paddedRatio.narrow(0, inversePrePad, channels)
            for n in range(batchSize):
                torch.mul(gradOutput[n], self.output[n], out=paddedRatioCenter)
                paddedRatioCenter.div_(self.scale[n])
                # sliding-window sum of the ratio over neighboring channels
                torch.sum(self.paddedRatio.narrow(0, 0, self.size - 1), 0, keepdim=False, out=self.accumRatio)
                for c in range(channels):
                    self.accumRatio.add_(self.paddedRatio[c + self.size - 1])
                    self.gradInput[n][c].addcmul_(-cacheRatioValue, input[n][c], self.accumRatio)
                    self.accumRatio.add_(-1, self.paddedRatio[c])

        return self.gradInput

    def clearState(self):
        clear(self, 'scale', 'paddedRatio', 'accumRatio')
        return super(SpatialCrossMapLRN, self).clearState() | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/SpatialCrossMapLRN.py | 0.870941 | 0.379608 | SpatialCrossMapLRN.py | pypi |
import math
import torch
from .Module import Module
from .utils import clear
class Cosine(Module):
    """Cosine similarity layer: ``y_j = (w_j . x) / (||w_j|| * ||x||)`` for
    each row ``w_j`` of `weight`.  Expects a 2D (batch x inputSize) input.
    """

    def __init__(self, inputSize, outputSize):
        super(Cosine, self).__init__()
        self.weight = torch.Tensor(outputSize, inputSize)
        self.gradWeight = torch.Tensor(outputSize, inputSize)
        self.reset()

        # lazily-allocated scratch buffers
        # (the original assigned self._sum = None twice; once is enough)
        self._weight = None
        self._sum = None
        self._gradOutput = None
        self._weightNorm = None
        self._inputNorm = None

    def reset(self, stdv=None):
        """Re-initialize the weights uniformly in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.weight.size(0))
        self.weight.uniform_(-stdv, stdv)

    def updateOutput(self, input):
        """Return the (batch x outputSize) matrix of cosine similarities."""
        assert input.dim() == 2

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        if self._weightNorm is None:
            self._weightNorm = self.weight.new()
        if self._inputNorm is None:
            self._inputNorm = self.weight.new()

        # y_j = (w_j * x) / ( || w_j || * || x || )
        # (the 1e-12 fudge term guards against division by zero)
        torch.norm(self.weight, 2, 1, out=self._weightNorm, keepdim=True).add_(1e-12)

        batchSize = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(batchSize, outputSize)
        if self.output.nelement() != nelement:
            self.output.zero_()

        self.output.addmm_(0., 1., input, self.weight.t())

        torch.norm(input, 2, 1, out=self._inputNorm, keepdim=True).add_(1e-12)
        self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
        self.output.div_(self._inputNorm.expand_as(self.output))
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Gradient w.r.t. the input; relies on norms cached in updateOutput."""
        assert input.dim() == 2
        if self.gradInput is None:
            return

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        """
        dy_j           w_ji                   x_i
        ---- = -------------------  -  y_j ---------
        dx_i   || w_j || * || x ||         || x ||^2
        """

        nelement = self.gradInput.nelement()
        self.gradInput.resize_as_(input)
        if self.gradInput.nelement() != nelement:
            self.gradInput.zero_()

        inputNorm = self._inputNorm.expand_as(input)
        weightNorm = self._weightNorm.view(1, outputSize).expand_as(gradOutput)

        if self._gradOutput is None:
            self._gradOutput = gradOutput.new()
        if self._sum is None:
            self._sum = input.new()

        self.gradInput.copy_(input).div_(inputNorm)
        self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
        self._gradOutput.mul_(self.output)
        torch.sum(self._gradOutput, 1, out=self._sum, keepdim=True)
        self.gradInput.mul_(self._sum.expand_as(input))

        self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
        self._gradOutput.div_(weightNorm)
        self.gradInput.addmm_(-1, 1, self._gradOutput, self.weight)
        self.gradInput.div_(inputNorm)

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        """Accumulate the gradient w.r.t. the weights into gradWeight."""
        assert input.dim() == 2
        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        """
        dy_j            x_i                     w_ji
        ----- = -------------------  -  y_j -----------
        dw_ji   || w_j || * || x ||         || w_j ||^2
        """

        if self._weight is None:
            self._weight = self.weight.new()
        if self._sum is None:
            self._sum = input.new()

        self._weight.resize_as_(self.weight).copy_(self.weight)

        if self._gradOutput is None:
            self._gradOutput = gradOutput.new()

        self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
        self._gradOutput.mul_(self.output)
        torch.sum(self._gradOutput, 0, out=self._sum, keepdim=True)
        grad = self._sum[0]
        grad.div_(self._weightNorm.select(1, 0))
        self._weight.mul_(grad.view(outputSize, 1).expand_as(self._weight))

        input_ = self._gradOutput
        input_.resize_as_(input).copy_(input)
        input_.div_(self._inputNorm.expand_as(input))
        self._weight.addmm_(-1, 1, gradOutput.t(), input_)

        self._weight.div_(self._weightNorm.expand_as(self._weight))
        self.gradWeight.add_(self._weight)

    def type(self, type=None, tensorCache=None):
        if type is not None:
            # prevent premature memory allocations
            self._input = None
            self._weight = None
            self._inputNorm = None
            self._weightNorm = None
            self._gradOutput = None
            self._sum = None

        return super(Cosine, self).type(type, tensorCache)

    def clearState(self):
        clear(self, [
            '_input',
            '_weight',
            '_gradOutput',
            '_sum',
            '_inputNorm',
            '_weightNorm',
        ])
        return super(Cosine, self).clearState()
import torch
from .Module import Module
class MV(Module):
    """Module to perform matrix vector multiplication on two minibatch inputs,
    producing a minibatch.
    """

    def __init__(self, trans=False):
        super(MV, self).__init__()
        self.trans = trans
        self.gradInput = [torch.Tensor(), torch.Tensor()]

    def updateOutput(self, input):
        """Forward: output = M.v (or M^T.v when `trans`), batched when 3D."""
        M, v = input
        assert M.ndimension() == 2 or M.ndimension() == 3

        if M.ndimension() == 2:
            assert v.ndimension() == 1
            if self.trans:
                M = M.transpose(0, 1)
            self.output.resize_(M.size(0))
            torch.mv(M, v, out=self.output)
        else:
            assert v.ndimension() == 2
            if self.trans:
                M = M.transpose(1, 2)

            self.output.resize_(M.size(0), M.size(1), 1)
            torch.bmm(M, v.view(v.size(0), v.size(1), 1), out=self.output).resize_(M.size(0), M.size(1))

        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward: gradInput[0] = dL/dM (outer product of gradOutput and v),
        gradInput[1] = dL/dv (matrix-vector product with gradOutput).
        """
        M, v = input
        self.gradInput[0].resize_as_(M)
        self.gradInput[1].resize_as_(v)
        gradOutput = gradOutput.contiguous()

        assert gradOutput.ndimension() == 1 or gradOutput.ndimension() == 2

        if gradOutput.ndimension() == 2:
            assert M.ndimension() == 3
            assert v.ndimension() == 2
            bdim = M.size(0)
            odim = M.size(1)
            idim = M.size(2)

            if self.trans:
                torch.bmm(v.view(bdim, odim, 1), gradOutput.view(bdim, 1, idim), out=self.gradInput[0])
                torch.bmm(M, gradOutput.view(bdim, idim, 1), out=self.gradInput[1].view(bdim, odim, 1))
            else:
                torch.bmm(gradOutput.view(bdim, odim, 1), v.view(bdim, 1, idim), out=self.gradInput[0])
                torch.bmm(M.transpose(1, 2), gradOutput.view(bdim, odim, 1), out=self.gradInput[1].view(bdim, idim, 1))
        else:
            assert M.ndimension() == 2
            assert v.ndimension() == 1

            if self.trans:
                torch.ger(v, gradOutput, out=self.gradInput[0])
                # dL/dv for output = M^T.v is M @ gradOutput; the previous
                # element-wise `M * gradOutput` broadcast produced a matrix,
                # not a vector, and rebound gradInput[1] away from its buffer
                torch.mv(M, gradOutput, out=self.gradInput[1])
            else:
                torch.ger(gradOutput, v, out=self.gradInput[0])
                # dL/dv for output = M.v is M^T @ gradOutput
                torch.mv(M.t(), gradOutput, out=self.gradInput[1])

        return self.gradInput
import math
import torch
from .Module import Module
from .Sequential import Sequential
from .SpatialZeroPadding import SpatialZeroPadding
from .SpatialConvolution import SpatialConvolution
from .SpatialConvolutionMap import SpatialConvolutionMap
from .Replicate import Replicate
from .CSubTable import CSubTable
from .CDivTable import CDivTable
from .utils import clear
import warnings
class SpatialSubtractiveNormalization(Module):
    """Subtract from each input pixel a weighted mean of its spatial
    neighbourhood, computed with the given averaging ``kernel`` (default:
    a normalized 9x9 box filter) and corrected for border effects.
    """

    def __init__(self, nInputPlane=1, kernel=None):
        super(SpatialSubtractiveNormalization, self).__init__()

        # get args
        self.nInputPlane = nInputPlane
        if kernel is None:
            kernel = torch.Tensor(9, 9).fill_(1)
        self.kernel = kernel
        kdim = self.kernel.ndimension()

        # check args
        if kdim != 2 and kdim != 1:
            raise ValueError('SpatialSubtractiveNormalization averaging kernel must be 2D or 1D')

        if (self.kernel.size(0) % 2) == 0 or (kdim == 2 and (self.kernel.size(1) % 2) == 0):
            raise ValueError('SpatialSubtractiveNormalization averaging kernel must have ODD dimensions')

        # normalize kernel so the mean over all planes sums to one
        self.kernel.div_(self.kernel.sum() * self.nInputPlane)

        # padding values ("same" padding for the odd-sized kernel)
        padH = int(math.floor(self.kernel.size(0) / 2))
        padW = padH
        if kdim == 2:
            padW = int(math.floor(self.kernel.size(1) / 2))

        # create convolutional mean extractor; a 1D kernel is applied as
        # two separable passes (rows then columns)
        self.meanestimator = Sequential()
        self.meanestimator.add(SpatialZeroPadding(padW, padW, padH, padH))
        if kdim == 2:
            self.meanestimator.add(SpatialConvolution(self.nInputPlane, 1, self.kernel.size(1), self.kernel.size(0)))
        else:
            # TODO: map
            self.meanestimator.add(SpatialConvolutionMap(
                SpatialConvolutionMap.maps.oneToOne(self.nInputPlane), self.kernel.size(0), 1))
            self.meanestimator.add(SpatialConvolution(self.nInputPlane, 1, 1, self.kernel.size(0)))

        self.meanestimator.add(Replicate(self.nInputPlane, 0))

        # set kernel and bias
        if kdim == 2:
            for i in range(self.nInputPlane):
                self.meanestimator.modules[1].weight[0][i] = self.kernel
            self.meanestimator.modules[1].bias.zero_()
        else:
            for i in range(self.nInputPlane):
                self.meanestimator.modules[1].weight[i] = self.kernel.unsqueeze(0)
                self.meanestimator.modules[2].weight[0][i] = self.kernel.unsqueeze(1)

            self.meanestimator.modules[1].bias.zero_()
            self.meanestimator.modules[2].bias.zero_()

        # other operation
        self.subtractor = CSubTable()
        self.divider = CDivTable()

        # coefficient array, to adjust side effects at the image borders
        self.coef = torch.Tensor(1, 1, 1)
        self.ones = None
        self._coef = None

    def updateOutput(self, input):
        # recompute the border-correction coefficients whenever the spatial
        # size of the input changes
        dim = input.dim()
        if (input.dim() + 1 != self.coef.dim() or
                (input.size(dim - 1) != self.coef.size(dim - 1)) or
                (input.size(dim - 2) != self.coef.size(dim - 2))):
            if self.ones is None:
                self.ones = input.new()
            if self._coef is None:
                self._coef = self.coef.new()

            self.ones.resize_as_(input[0:1]).fill_(1)
            coef = self.meanestimator.updateOutput(self.ones).squeeze(0)
            self._coef.resize_as_(coef).copy_(coef)  # make contiguous for view
            size = list(coef.size())
            size = [input.size(0)] + size
            self.coef = self._coef.view(1, *self._coef.size()).expand(*size)

        # compute mean and subtract it from the input
        self.localsums = self.meanestimator.updateOutput(input)
        self.adjustedsums = (self.divider.updateOutput(
            [self.localsums, self.coef.contiguous().view_as(self.localsums)]))
        self.output = self.subtractor.updateOutput([input, self.adjustedsums.contiguous().view_as(input)])

        return self.output

    def updateGradInput(self, input, gradOutput):
        # resize grad
        self.gradInput.resize_as_(input).zero_()

        # backprop through all modules
        gradsub = self.subtractor.updateGradInput([input, self.adjustedsums.contiguous().view_as(input)], gradOutput)
        graddiv = (self.divider.updateGradInput(
            [self.localsums, self.coef.contiguous().view_as(self.localsums)], gradsub[1]))
        # backprop through the mean estimator once and reuse the result
        # (the original ran updateGradInput twice: once only to read its
        # size into an unused variable)
        meangrad = self.meanestimator.updateGradInput(input, graddiv[0])
        self.gradInput.add_(meangrad)
        self.gradInput.add_(gradsub[0])

        return self.gradInput

    def clearState(self):
        """Free cached buffers (also recursively in the mean estimator)."""
        clear(self, 'ones', '_coef')
        self.meanestimator.clearState()
        return super(SpatialSubtractiveNormalization, self).clearState()
import random
import math
import torch
from .Module import Module
# TODO fix THNN...
class SpatialConvolutionMap(Module):
    """Spatial convolution with an explicit input->output plane connection
    table: one kH x kW kernel per row of ``connTable`` (columns are
    [input plane, output plane]). Delegates to the THNN backend.
    """

    class maps(object):
        """Factories for common connection tables."""

        @staticmethod
        def full(nin, nout):
            # every input plane connects to every output plane
            ft = torch.Tensor(nin * nout, 2)
            p = 0
            for j in range(nout):
                for i in range(nin):
                    ft[p][0] = i
                    ft[p][1] = j
                    p += 1
            return ft

        @staticmethod
        def oneToOne(nfeat):
            # plane i connects only to plane i
            ft = torch.Tensor(nfeat, 2)
            for i in range(nfeat):
                ft[i][0] = i
                ft[i][1] = i
            return ft

        @staticmethod
        def random(nin, nout, nto):
            # each output plane connects to nto randomly drawn input planes
            nker = nto * nout
            tbl = torch.Tensor(nker, 2)
            fi = torch.randperm(nin)
            frcntr = 0
            nfi = math.floor(nin / nto)  # number of distinct nto chunks
            totbl = tbl.select(1, 1)
            frtbl = tbl.select(1, 0)
            fitbl = fi.narrow(0, 0, (nfi * nto))  # part of fi that covers distinct chunks
            ufrtbl = frtbl.unfold(0, nto, nto)
            utotbl = totbl.unfold(0, nto, nto)
            ufitbl = fitbl.unfold(0, nto, nto)

            # start fill_ing frtbl
            for i in range(nout):  # for each unit in target map
                ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
                frcntr += 1
                if frcntr - 1 == nfi:  # reset fi
                    fi.copy_(torch.randperm(nin))
                    frcntr = 1

            for tocntr in range(utotbl.size(0)):
                utotbl.select(0, tocntr).fill_(tocntr)

            return tbl

    def __init__(self, conMatrix, kW, kH, dW=1, dH=1):
        super(SpatialConvolutionMap, self).__init__()

        self.kW = kW
        self.kH = kH
        self.dW = dW
        self.dH = dH
        self.connTable = conMatrix
        # plane counts are inferred from the largest index in the table
        self.nInputPlane = int(self.connTable.select(1, 0).max()) + 1
        self.nOutputPlane = int(self.connTable.select(1, 1).max()) + 1
        self.weight = torch.Tensor(self.connTable.size(0), kH, kW)
        self.bias = torch.Tensor(self.nOutputPlane)
        self.gradWeight = torch.Tensor(self.connTable.size(0), kH, kW)
        self.gradBias = torch.Tensor(self.nOutputPlane)
        self.reset()

    def reset(self, stdv=None):
        """(Re)initialize weights; default stdv is 1/sqrt(fan-in) per plane."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
            self.weight.uniform_(-stdv, stdv)
            self.bias.uniform_(-stdv, stdv)
        else:
            # count incoming connections per output plane
            ninp = torch.Tensor(self.nOutputPlane).zero_()
            for i in range(self.connTable.size(0)):
                idx = int(self.connTable[i, 1])
                ninp[idx] += 1

            for k in range(self.connTable.size(0)):
                idx = int(self.connTable[k, 1])
                stdv = 1. / math.sqrt(self.kW * self.kH * ninp[idx])
                self.weight.select(0, k).uniform_(-stdv, stdv)

            for k in range(self.bias.size(0)):
                stdv = 1. / math.sqrt(self.kW * self.kH * ninp[k])
                # TODO: torch.uniform
                self.bias[k] = random.uniform(-stdv, stdv)

    def updateOutput(self, input):
        self._backend.SpatialConvolutionMap_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.weight,
            self.bias,
            self.connTable,
            self.nInputPlane,
            self.nOutputPlane,
            self.dW, self.dH
        )
        return self.output

    def updateGradInput(self, input, gradOutput):
        self._backend.SpatialConvolutionMap_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.weight,
            self.bias,
            self.connTable,
            self.nInputPlane,
            self.nOutputPlane,
            self.dW, self.dH
        )
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        self._backend.SpatialConvolutionMap_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.connTable,
            self.nInputPlane,
            self.nOutputPlane,
            self.dW, self.dH,
            scale
        )
import math
import torch
from .Concat import Concat
class DepthConcat(Concat):
    """Concat variant for feature maps of different spatial sizes: the output
    takes the maximum size along each non-concat dimension and each module's
    output is copied into a zero-padded, centered window of it.
    """

    def windowNarrow(self, output, currentOutput, offset):
        # Narrow `output` to the centered window occupied by
        # `currentOutput`, starting at `offset` along the concat dimension.
        outputWindow = output.narrow(self.dimension, offset, currentOutput.size(self.dimension))
        for dim in range(len(self.outputSize)):
            currentSize = currentOutput.size(dim)
            if dim != self.dimension and self.outputSize[dim] != currentSize:
                # 5x5 vs 3x3 -> start = [(5-3)/2] + 1 = 2 (1 pad each side)
                # 9x9 vs 5x5 -> start = [(9-5)/2] + 1 = 3 (2 pad each side)
                # 9x9 vs 4x4 -> start = [(9-4)/2] + 1 = 3.5 (2 pad, 3 pad)
                start = int(math.floor(((self.outputSize[dim] - currentSize) / 2)))
                outputWindow = outputWindow.narrow(dim, start, currentSize)
        return outputWindow

    def updateOutput(self, input):
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
                for dim in range(len(self.outputSize)):
                    if dim != self.dimension:
                        # take the maximum size (shouldn't change anything for batch dim)
                        size[dim] = max(size[dim], currentOutput.size(dim))

        self.outputSize = torch.Size(size)
        self.output.resize_(self.outputSize).zero_()  # zero for padding

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = outs[i]
            outputWindow = self.windowNarrow(self.output, currentOutput, offset)
            outputWindow.copy_(currentOutput)
            offset = offset + currentOutput.size(self.dimension)

        return self.output

    def updateGradInput(self, input, gradOutput):
        self.gradInput.resize_as_(input)

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            currentGradInput = module.updateGradInput(input, gradOutputWindow)
            if i == 0:
                self.gradInput.copy_(currentGradInput)
            else:
                self.gradInput.add_(currentGradInput)
            offset += currentOutput.size(self.dimension)

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            module.accGradParameters(input, gradOutputWindow, scale)
            offset += currentOutput.size(self.dimension)

    def backward(self, input, gradOutput, scale=1):
        self.gradInput.resize_as_(input)

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            currentGradInput = module.backward(input, gradOutputWindow)
            if i == 0:
                self.gradInput.copy_(currentGradInput)
            else:
                self.gradInput.add_(currentGradInput)
            offset = offset + currentOutput.size(self.dimension)

        return self.gradInput

    def accUpdateGradParameters(self, input, gradOutput, lr):
        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            module.accUpdateGradParameters(input, gradOutputWindow, lr)
            offset = offset + currentOutput.size(self.dimension)
import torch
from .Module import Module
class SpatialZeroPadding(Module):
    """Zero-pad (positive values) or crop (negative values) a 4D input along
    its two trailing spatial dimensions. Missing pads default to ``pad_l``.
    """

    def __init__(self, pad_l, pad_r=None, pad_t=None, pad_b=None):
        super(SpatialZeroPadding, self).__init__()
        self.pad_l = pad_l
        self.pad_r = pad_r if pad_r is not None else pad_l
        self.pad_t = pad_t if pad_t is not None else pad_l
        self.pad_b = pad_b if pad_b is not None else pad_l

    def updateOutput(self, input):
        assert input.dim() == 4

        # sizes after padding/cropping
        h = input.size(2) + self.pad_t + self.pad_b
        w = input.size(3) + self.pad_l + self.pad_r
        if w < 1 or h < 1:
            raise RuntimeError('input is too small (feature map size: {}x{})'.format(h, w))

        self.output.resize_(input.size(0), input.size(1), h, w)
        self.output.zero_()

        # crop input if necessary (negative pads)
        c_input = input
        if self.pad_t < 0:
            c_input = c_input.narrow(2, 0 - self.pad_t, c_input.size(2) + self.pad_t)
        if self.pad_b < 0:
            c_input = c_input.narrow(2, 0, c_input.size(2) + self.pad_b)
        if self.pad_l < 0:
            c_input = c_input.narrow(3, 0 - self.pad_l, c_input.size(3) + self.pad_l)
        if self.pad_r < 0:
            c_input = c_input.narrow(3, 0, c_input.size(3) + self.pad_r)

        # crop output if necessary (positive pads)
        c_output = self.output
        if self.pad_t > 0:
            c_output = c_output.narrow(2, 0 + self.pad_t, c_output.size(2) - self.pad_t)
        if self.pad_b > 0:
            c_output = c_output.narrow(2, 0, c_output.size(2) - self.pad_b)
        if self.pad_l > 0:
            c_output = c_output.narrow(3, 0 + self.pad_l, c_output.size(3) - self.pad_l)
        if self.pad_r > 0:
            c_output = c_output.narrow(3, 0, c_output.size(3) - self.pad_r)

        # copy input to output
        c_output.copy_(c_input)

        return self.output

    def updateGradInput(self, input, gradOutput):
        assert input.dim() == 4

        self.gradInput.resize_as_(input).zero_()

        # crop gradInput if necessary
        cg_input = self.gradInput
        if self.pad_t < 0:
            cg_input = cg_input.narrow(2, 0 - self.pad_t, cg_input.size(2) + self.pad_t)
        if self.pad_b < 0:
            cg_input = cg_input.narrow(2, 0, cg_input.size(2) + self.pad_b)
        if self.pad_l < 0:
            cg_input = cg_input.narrow(3, 0 - self.pad_l, cg_input.size(3) + self.pad_l)
        if self.pad_r < 0:
            cg_input = cg_input.narrow(3, 0, cg_input.size(3) + self.pad_r)

        # crop gradOutput if necessary
        cg_output = gradOutput
        if self.pad_t > 0:
            cg_output = cg_output.narrow(2, 0 + self.pad_t, cg_output.size(2) - self.pad_t)
        if self.pad_b > 0:
            cg_output = cg_output.narrow(2, 0, cg_output.size(2) - self.pad_b)
        if self.pad_l > 0:
            cg_output = cg_output.narrow(3, 0 + self.pad_l, cg_output.size(3) - self.pad_l)
        if self.pad_r > 0:
            cg_output = cg_output.narrow(3, 0, cg_output.size(3) - self.pad_r)

        # copy gradOutput to gradInput
        cg_input.copy_(cg_output)

        return self.gradInput

    def __repr__(self):
        # was `__tostring__` with `.foramat(...)`: a Lua remnant Python never
        # calls, and the typo would raise AttributeError if it ever ran
        s = super(SpatialZeroPadding, self).__repr__()
        s += '({}, {}, {}, {})'.format(self.pad_l, self.pad_r, self.pad_t, self.pad_b)
        return s
import math
import torch
from .Module import Module
from .utils import clear
class SpatialConvolution(Module):
    """2D convolution over a 4D input, implemented via the THNN im2col
    ("MM") kernels; the weight is temporarily viewed as a 2D matrix around
    each backend call.
    """

    def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None):
        super(SpatialConvolution, self).__init__()

        self.nInputPlane = nInputPlane
        self.nOutputPlane = nOutputPlane
        self.kW = kW
        self.kH = kH

        self.dW = dW
        self.dH = dH
        self.padW = padW
        self.padH = padH if padH is not None else padW

        self.weight = torch.Tensor(nOutputPlane, nInputPlane, kH, kW)
        self.bias = torch.Tensor(nOutputPlane)
        self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kH, kW)
        self.gradBias = torch.Tensor(nOutputPlane)

        self.reset()
        self._input = None
        self._gradOutput = None
        self.finput = None
        self.fgradInput = None

    def noBias(self):
        # drop the bias term entirely; returns self for chaining
        self.bias = None
        self.gradBias = None
        return self

    def reset(self, stdv=None):
        """(Re)initialize parameters; default stdv is 1/sqrt(fan-in)."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.kW * self.kH * self.nInputPlane)

        self.weight.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.uniform_(-stdv, stdv)

    def _makeContiguous(self, input, gradOutput=None):
        # the THNN kernels require contiguous tensors; copy into cached
        # buffers only when needed
        if not input.is_contiguous():
            if self._input is None:
                self._input = input.new()
            self._input.resize_as_(input).copy_(input)
            input = self._input

        if gradOutput is not None:
            if not gradOutput.is_contiguous():
                if self._gradOutput is None:
                    self._gradOutput = gradOutput.new()
                self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
                gradOutput = self._gradOutput
            return input, gradOutput

        return input

    def _init(self):
        # lazily create the im2col scratch buffers
        if self.finput is None:
            self.finput = self.weight.new()
        if self.fgradInput is None:
            self.fgradInput = self.weight.new()

    # function to re-view the weight layout in a way that would make the MM ops happy
    def _viewWeight(self):
        self.weight = self.weight.view(self.nOutputPlane, self.nInputPlane * self.kH * self.kW)
        if self.gradWeight is not None and self.gradWeight.dim() > 0:
            self.gradWeight = self.gradWeight.view(self.nOutputPlane, self.nInputPlane * self.kH * self.kW)

    def _unviewWeight(self):
        self.weight = self.weight.view(self.nOutputPlane, self.nInputPlane, self.kH, self.kW)
        if self.gradWeight is not None and self.gradWeight.dim() > 0:
            self.gradWeight = self.gradWeight.view(self.nOutputPlane, self.nInputPlane, self.kH, self.kW)

    def updateOutput(self, input):
        self._init()
        self._viewWeight()
        input = self._makeContiguous(input)
        self._backend.SpatialConvolutionMM_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.weight,
            self.bias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH
        )
        self._unviewWeight()
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        self._init()
        self._viewWeight()
        input, gradOutput = self._makeContiguous(input, gradOutput)
        self._backend.SpatialConvolutionMM_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.weight,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH
        )
        self._unviewWeight()
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        self._init()
        input, gradOutput = self._makeContiguous(input, gradOutput)
        self._viewWeight()
        self._backend.SpatialConvolutionMM_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            scale
        )
        self._unviewWeight()

    def type(self, type=None, tensorCache=None):
        # `tensorCache` default was a mutable `{}`; use None like the other
        # legacy modules (the base class substitutes a fresh dict)
        if self.finput is not None:
            self.finput = torch.Tensor()
        if self.fgradInput is not None:
            self.fgradInput = torch.Tensor()
        return super(SpatialConvolution, self).type(type, tensorCache)

    def __repr__(self):
        s = super(SpatialConvolution, self).__repr__()
        s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)
        if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:
            s += ', {}, {}'.format(self.dW, self.dH)

        if self.padW != 0 or self.padH != 0:
            s += ', {}, {}'.format(self.padW, self.padH)

        s += ')'
        if self.bias is None:
            s += ' without bias'
        return s

    def clearState(self):
        clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')
        return super(SpatialConvolution, self).clearState()
import torch
from .Module import Module
from .utils import clear
class PairwiseDistance(Module):
    """Compute the p-norm distance between corresponding rows of two 2D
    inputs, producing one distance per row.
    """

    def __init__(self, p):
        super(PairwiseDistance, self).__init__()
        assert p % 1 == 0  # p must be integral (int or whole float)
        self.gradInput = []
        self.diff = torch.Tensor()
        self.norm = p

        self.outExpand = None
        self.grad = None
        self.ones = None

    def updateOutput(self, input):
        self.output.resize_(1)
        assert input[0].dim() == 2

        if self.diff is None:
            self.diff = input[0].new()

        torch.add(input[0], -1, input[1], out=self.diff).abs_()

        self.output.resize_(input[0].size(0))
        self.output.zero_()
        self.output.add_(self.diff.pow_(self.norm).sum(1, keepdim=False))
        self.output.pow_(1. / self.norm)

        return self.output

    def updateGradInput(self, input, gradOutput):
        assert input[0].dim() == 2

        if len(self.gradInput) != 2:
            self.gradInput[:] = [None, None]
        if self.gradInput[0] is None:
            self.gradInput[0] = input[0].new()
        self.gradInput[0].resize_(input[0].size())
        if self.gradInput[1] is None:
            self.gradInput[1] = input[1].new()
        self.gradInput[1].resize_(input[1].size())
        self.gradInput[0].copy_(input[0])
        self.gradInput[0].add_(-1, input[1])

        if self.norm == 1:
            self.gradInput[0].sign_()
        else:
            # Note: derivative of p-norm:
            # d/dx_k(||x||_p) = (x_k * abs(x_k)^(p-2)) / (||x||_p)^(p-1)
            if self.norm > 2:
                self.gradInput[0].mul_(self.gradInput[0].abs().pow_(self.norm - 2))

            if self.outExpand is None:
                self.outExpand = self.output.new()
            self.outExpand.resize_(self.output.size(0), 1)
            self.outExpand.copy_(self.output.view(self.output.size(0), 1))
            self.outExpand.add_(1e-6)  # Prevent divide by zero errors
            self.outExpand.pow_(-(self.norm - 1))
            self.gradInput[0].mul_(self.outExpand.expand(self.gradInput[0].size(0),
                                                         self.gradInput[0].size(1)))

        if self.grad is None:
            self.grad = gradOutput.new()
        if self.ones is None:
            self.ones = gradOutput.new()

        # broadcast gradOutput across columns via an outer product with ones
        self.grad.resize_as_(input[0]).zero_()
        self.ones.resize_(input[0].size(1)).fill_(1)

        self.grad.addr_(gradOutput, self.ones)
        self.gradInput[0].mul_(self.grad)

        # gradient w.r.t. the second input is the negation of the first
        self.gradInput[1].zero_().add_(-1, self.gradInput[0])
        return self.gradInput

    def clearState(self):
        clear(self, 'diff', 'outExpand', 'grad', 'ones')
        return super(PairwiseDistance, self).clearState()
import random
import math
import torch
from .Module import Module
class SpatialFullConvolutionMap(Module):
    """Transposed ("full") spatial convolution with an explicit connection
    table (rows are [input plane, output plane]); delegates to THNN.
    """

    def __init__(self, conMatrix, kW, kH, dW=1, dH=1):
        super(SpatialFullConvolutionMap, self).__init__()

        self.kW = kW
        self.kH = kH
        self.dW = dW
        self.dH = dH
        self.connTable = conMatrix
        # plane counts are inferred from the largest index in the table
        self.nInputPlane = int(self.connTable.select(1, 0).max()) + 1
        self.nOutputPlane = int(self.connTable.select(1, 1).max()) + 1

        self.weight = torch.Tensor(self.connTable.size(0), kH, kW)
        self.gradWeight = torch.Tensor(self.connTable.size(0), kH, kW)

        self.bias = torch.Tensor(self.nOutputPlane)
        self.gradBias = torch.Tensor(self.nOutputPlane)

        self.reset()

    def reset(self, stdv=None):
        """(Re)initialize parameters; default stdv is 1/sqrt(fan-in) per plane."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
            self.weight.uniform_(-stdv, stdv)
            self.bias.uniform_(-stdv, stdv)
        else:
            # count incoming connections per output plane
            ninp = torch.Tensor(self.nOutputPlane).zero_()
            for i in range(self.connTable.size(0)):
                idx = int(self.connTable[i][1])
                ninp[idx] += 1

            for k in range(self.connTable.size(0)):
                idx = int(self.connTable[k][1])
                stdv = 1. / math.sqrt(self.kW * self.kH * ninp[idx])
                self.weight[k].uniform_(-stdv, stdv)

            for k in range(self.bias.size(0)):
                stdv = 1. / math.sqrt(self.kW * self.kH * ninp[k])
                # TODO: torch.uniform
                self.bias[k] = random.uniform(-stdv, stdv)

    def updateOutput(self, input):
        self._backend.SpatialFullConvolutionMap_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.weight,
            self.bias,
            self.connTable,
            self.nInputPlane,
            self.nOutputPlane,
            self.dW, self.dH
        )
        return self.output

    def updateGradInput(self, input, gradOutput):
        self._backend.SpatialFullConvolutionMap_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.weight,
            self.bias,
            self.connTable,
            self.nInputPlane,
            self.nOutputPlane,
            self.dW, self.dH
        )
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        self._backend.SpatialFullConvolutionMap_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.connTable,
            self.nInputPlane,
            self.nOutputPlane,
            self.dW, self.dH,
            scale
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.