code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import torch
from .Criterion import Criterion
class CosineEmbeddingCriterion(Criterion):
    """Cosine-embedding loss over a pair of inputs.

    Given input = [x1, x2] and a label tensor y with entries in {1, -1},
    the per-sample loss is
        1 - cos(x1, x2)                 if y == 1
        max(0, cos(x1, x2) - margin)    if y == -1
    """

    def __init__(self, margin=0, sizeAverage=True):
        super(CosineEmbeddingCriterion, self).__init__()
        self.margin = margin
        self.sizeAverage = sizeAverage
        # gradInput is a pair: gradients w.r.t. input[0] and input[1]
        self.gradInput = [torch.Tensor(), torch.Tensor()]
        # work buffers, lazily allocated with the first input's tensor type
        self.buffer = None
        self.w1 = None       # per-sample dot product <x1, x2>
        self.w22 = None      # per-sample 1 / ||x1||^2
        self.w = None        # per-sample 1 / (||x1|| * ||x2||)
        self.w32 = None      # per-sample 1 / ||x2||^2
        self._outputs = None
        self._idx = None     # byte mask selecting samples by label

    def updateOutput(self, input, y):
        input1, input2 = input[0], input[1]

        # keep backward compatibility
        if self.buffer is None:
            self.buffer = input1.new()
            self.w1 = input1.new()
            self.w22 = input1.new()
            self.w = input1.new()
            self.w32 = input1.new()
            self._outputs = input1.new()

            # comparison operators behave differently from cuda/c implementations
            # TODO: verify name
            if input1.type() == 'torch.cuda.FloatTensor':
                self._idx = torch.cuda.ByteTensor()
            else:
                self._idx = torch.ByteTensor()

        # w1 = row-wise dot product <x1, x2>
        torch.mul(input1, input2, out=self.buffer)
        torch.sum(self.buffer, 1, out=self.w1, keepdim=True)

        # epsilon keeps the reciprocals finite for zero-norm rows
        epsilon = 1e-12
        torch.mul(input1, input1, out=self.buffer)
        torch.sum(self.buffer, 1, out=self.w22, keepdim=True).add_(epsilon)
        # self._outputs is also used as a temporary buffer
        self._outputs.resize_as_(self.w22).fill_(1)
        torch.div(self._outputs, self.w22, out=self.w22)   # w22 = 1 / ||x1||^2
        self.w.resize_as_(self.w22).copy_(self.w22)

        torch.mul(input2, input2, out=self.buffer)
        torch.sum(self.buffer, 1, out=self.w32, keepdim=True).add_(epsilon)
        torch.div(self._outputs, self.w32, out=self.w32)   # w32 = 1 / ||x2||^2
        self.w.mul_(self.w32)
        self.w.sqrt_()                                     # w = 1 / (||x1|| * ||x2||)

        # cosine similarity per sample, flattened to a 1D vector
        torch.mul(self.w1, self.w, out=self._outputs)
        self._outputs = self._outputs.select(1, 0)

        # dissimilar pairs (y == -1): hinge at the margin
        torch.eq(y, -1, out=self._idx)
        self._outputs[self._idx] = self._outputs[self._idx].add_(-self.margin).clamp_(min=0)
        # similar pairs (y == 1): loss is 1 - cos
        torch.eq(y, 1, out=self._idx)
        self._outputs[self._idx] = self._outputs[self._idx].mul_(-1).add_(1)

        self.output = self._outputs.sum().item()

        if self.sizeAverage:
            self.output = self.output / y.size(0)

        return self.output

    def updateGradInput(self, input, y):
        v1 = input[0]
        v2 = input[1]
        gw1 = self.gradInput[0]
        gw2 = self.gradInput[1]
        gw1.resize_as_(v1).copy_(v2)
        gw2.resize_as_(v1).copy_(v1)

        # d cos / d v1 = (v2 - cos * v1 / ||v1||^2) / (||v1|| * ||v2||);
        # buffers w1, w22, w32, w were filled in updateOutput.
        torch.mul(self.w1, self.w22, out=self.buffer)
        gw1.addcmul_(-1, self.buffer.expand_as(v1), v1)
        gw1.mul_(self.w.expand_as(v1))

        torch.mul(self.w1, self.w32, out=self.buffer)
        gw2.addcmul_(-1, self.buffer.expand_as(v1), v2)
        gw2.mul_(self.w.expand_as(v1))

        # self._idx = self._outputs <= 0
        # (samples whose hinged loss is zero contribute no gradient)
        torch.le(self._outputs, 0, out=self._idx)
        self._idx = self._idx.view(-1, 1).expand(gw1.size())
        gw1[self._idx] = 0
        gw2[self._idx] = 0

        # similar pairs: gradient of (1 - cos) flips the sign
        torch.eq(y, 1, out=self._idx)
        self._idx = self._idx.view(-1, 1).expand(gw2.size())
        gw1[self._idx] = gw1[self._idx].mul_(-1)
        gw2[self._idx] = gw2[self._idx].mul_(-1)

        if self.sizeAverage:
            gw1.div_(y.size(0))
            gw2.div_(y.size(0))

        return self.gradInput

    def type(self, type=None, tensorCache=None):
        if not type:
            return self._type

        # drop the mask so it is re-created with the matching backend type
        self._idx = None
        super(CosineEmbeddingCriterion, self).type(type, tensorCache)
        # comparison operators behave differently from cuda/c implementations
        if type == 'torch.cuda.FloatTensor':
            self._idx = torch.cuda.ByteTensor()
        else:
            self._idx = torch.ByteTensor()

        return self
import torch
from .Module import Module
from .utils import clear
class MaskedSelect(Module):
    """Selects the elements of the first input tensor where the byte mask
    (second input) is one; the forward output is a flat tensor of kept values.
    """

    def __init__(self):
        super(MaskedSelect, self).__init__()
        # flat indices (into the input) of the masked-in elements
        self._maskIndices = torch.LongTensor()
        self._maskIndexBuffer = torch.LongTensor()
        # CPU staging buffer used when running on CUDA (arange built on CPU first)
        self._maskIndexBufferCPU = torch.FloatTensor()
        self._gradBuffer = torch.Tensor()
        self._gradMask = torch.ByteTensor()

    def updateOutput(self, input):
        # input is a pair: (tensor, byte mask of the same shape)
        input, mask = input
        torch.masked_select(input, mask, out=self.output)
        return self.output

    def updateGradInput(self, input, gradOutput):
        input, mask = input
        if input.type() == 'torch.cuda.FloatTensor':
            # build the index ramp on CPU, then copy it into the CUDA buffer
            torch.arange(0, mask.nelement(), out=self._maskIndexBufferCPU).resize_(mask.size())
            self._maskIndexBuffer.resize_(self._maskIndexBufferCPU.size()).copy_(self._maskIndexBufferCPU)
        else:
            torch.arange(0, mask.nelement(), out=self._maskIndexBuffer).resize_(mask.size())

        # flat indices of the elements that were selected in the forward pass
        torch.masked_select(self._maskIndexBuffer, mask, out=self._maskIndices)
        # scatter gradOutput back to the selected positions; the rest stays zero
        self._gradBuffer.resize_(input.nelement()).zero_()
        self._gradBuffer.scatter_(0, self._maskIndices, gradOutput)
        self._gradBuffer.resize_(input.size())
        # gradient w.r.t. the mask itself is zero everywhere
        self.gradInput = [self._gradBuffer, self._gradMask.resize_(mask.size()).fill_(0)]
        return self.gradInput

    def type(self, type=None, tensorCache=None):
        if type is None:
            return self._type

        self._gradBuffer = self._gradBuffer.type(type)
        # NOTE(review): gradInput becomes a list after updateGradInput, and a
        # list has no .type() method -- presumably type() is only ever called
        # before the first backward pass; verify against callers.
        self.gradInput = self.gradInput.type(type)
        self.output = self.output.type(type)

        # These casts apply when switching between cuda/non-cuda types
        if type != 'torch.cuda.FloatTensor':
            self._maskIndexBuffer = self._maskIndexBuffer.long()
            self._maskIndices = self._maskIndices.long()
            self._gradMask = self._gradMask.byte()
        else:
            self._maskIndexBuffer = self._maskIndexBuffer.cuda()
            self._maskIndices = self._maskIndices.cuda()
            self._gradMask = self._gradMask.cuda()

        self._type = type
        return self

    def clearState(self):
        return clear(self, ['output',
                            'gradInput',
                            '_maskIndexBuffer',
                            '_maskIndexBufferCPU',
                            '_maskIndices',
                            '_gradBuffer',
                            '_gradMask'])
import math
import torch
from .Module import Module
class VolumetricFullConvolution(Module):
    """Volumetric (3D) full (i.e. transposed) convolution.

    Maps nInputPlane feature maps to nOutputPlane maps with a kT x kH x kW
    kernel, stride (dT, dH, dW), padding (padT, padH, padW) and an extra
    output-size adjustment (adjT, adjH, adjW).  The forward/backward input
    may also be a list [tensor, target], in which case the adjustment
    factors are computed from the target tensor's trailing spatial sizes.
    """

    def __init__(self, nInputPlane, nOutputPlane,
                 kT, kW, kH,               # kernel size
                 dT=1, dW=1, dH=1,         # stride
                 padT=0, padW=0, padH=0,   # padding
                 adjT=0, adjW=0, adjH=0):  # extra output adjustment
        super(VolumetricFullConvolution, self).__init__()

        self.nInputPlane = nInputPlane
        self.nOutputPlane = nOutputPlane
        self.kW = kW
        self.kH = kH
        self.kT = kT
        self.dW = dW
        self.dH = dH
        self.dT = dT
        self.padW = padW
        self.padH = padH
        self.padT = padT
        self.adjW = adjW
        self.adjH = adjH
        self.adjT = adjT

        if self.adjW > self.dW - 1 or self.adjH > self.dH - 1 or self.adjT > self.dT - 1:
            raise RuntimeError('adjW, adjH and adjT must be smaller than self.dW - 1, '
                               ' self.dH - 1 and self.dT - 1 respectively')

        # transposed-convolution weight layout: (in, out, kT, kH, kW)
        self.weight = torch.Tensor(nInputPlane, nOutputPlane, kT, kH, kW)
        self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kT, kH, kW)
        self.bias = torch.Tensor(self.nOutputPlane)
        self.gradBias = torch.Tensor(self.nOutputPlane)

        self.ones = torch.Tensor()
        self.finput = torch.Tensor()
        self.fgradInput = torch.Tensor()
        self._gradOutput = None
        # BUGFIX: these attributes were read before ever being assigned
        # (_makeContiguous reads self._input, updateGradInput reads
        # self.zeroScalar), which raised AttributeError on first use.
        self._input = None
        self.zeroScalar = None

        self.reset()

    def reset(self, stdv=None):
        """(Re)initialize weight and bias uniformly in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            # default fan-in based scale
            nInputPlane = self.nInputPlane
            kT = self.kT
            kH = self.kH
            kW = self.kW
            stdv = 1. / math.sqrt(kW * kH * kT * nInputPlane)

        self.weight.uniform_(-stdv, stdv)
        self.bias.uniform_(-stdv, stdv)

    def _makeContiguous(self, input, gradOutput=None):
        # Copy input (and optionally gradOutput) into cached buffers when
        # non-contiguous, so the backend can assume contiguous storage.
        if not input.is_contiguous():
            if self._input is None:
                self._input = input.new()
            self._input.resize_as_(input).copy_(input)
            input = self._input

        if gradOutput is not None:
            if not gradOutput.is_contiguous():
                if self._gradOutput is None:
                    self._gradOutput = gradOutput.new()
                self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
                gradOutput = self._gradOutput
            return input, gradOutput

        return input

    # BUGFIX: this was defined without `self` yet always invoked as
    # self._calculateAdj(...), which raised a TypeError (5 args to a
    # 4-parameter function).
    def _calculateAdj(self, targetSize, ker, pad, stride):
        """Adjustment needed so the transposed conv output hits targetSize."""
        return (targetSize + 2 * pad - ker) % stride

    def updateOutput(self, input):
        inputTensor = input
        adjT, adjW, adjH = self.adjT, self.adjW, self.adjH

        # The input can be a table where the second element indicates the target
        # output size, in which case the adj factors are computed automatically
        if isinstance(input, list):
            inputTensor = input[0]
            targetTensor = input[1]
            tDims = targetTensor.dim()
            tT = targetTensor.size(tDims - 3)
            tH = targetTensor.size(tDims - 2)
            tW = targetTensor.size(tDims - 1)
            adjT = self._calculateAdj(tT, self.kT, self.padT, self.dT)
            adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
            adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)

        inputTensor = self._makeContiguous(inputTensor)
        self._backend.VolumetricFullConvolution_updateOutput(
            self._backend.library_state,
            inputTensor,
            self.output,
            self.weight,
            self.bias,
            self.finput,
            self.fgradInput,
            self.kT, self.kW, self.kH,
            self.dT, self.dW, self.dH,
            self.padT, self.padW, self.padH,
            adjT, adjW, adjH
        )
        return self.output

    def updateGradInput(self, input, gradOutput):
        inputTensor = input
        adjT, adjW, adjH = self.adjT, self.adjW, self.adjH

        # The input can be a table where the second element indicates the target
        # output size, in which case the adj factors are computed automatically
        if isinstance(input, list):
            inputTensor = input[0]
            targetTensor = input[1]
            tDims = targetTensor.dim()
            tT = targetTensor.size(tDims - 3)
            tH = targetTensor.size(tDims - 2)
            tW = targetTensor.size(tDims - 1)
            adjT = self._calculateAdj(tT, self.kT, self.padT, self.dT)
            adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
            adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
            # Momentarily extract the gradInput tensor
            if isinstance(self.gradInput, list):
                self.gradInput = self.gradInput[0]

        inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
        self._backend.VolumetricFullConvolution_updateGradInput(
            self._backend.library_state,
            inputTensor,
            gradOutput,
            self.gradInput,
            self.weight,
            self.finput,
            self.fgradInput,
            self.kT, self.kW, self.kH,
            self.dT, self.dW, self.dH,
            self.padT, self.padW, self.padH,
            adjT, adjW, adjH
        )

        if isinstance(input, list):
            # Create a zero tensor to be expanded and used as gradInput[1]:
            # the target-size tensor receives no gradient.
            if self.zeroScalar is None:
                self.zeroScalar = input[1].new(1).zero_()
            self.ones.resize_(input[1].dim()).fill_(1)
            zeroTensor = self.zeroScalar.view(self.ones.tolist()).expand_as(input[1])
            self.gradInput = [self.gradInput, zeroTensor]

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        inputTensor = input
        adjT, adjW, adjH = self.adjT, self.adjW, self.adjH

        # The input can be a table where the second element indicates the target
        # output size, in which case the adj factors are computed automatically
        if isinstance(input, list):
            inputTensor = input[0]
            targetTensor = input[1]
            tDims = targetTensor.dim()
            tT = targetTensor.size(tDims - 3)
            tH = targetTensor.size(tDims - 2)
            tW = targetTensor.size(tDims - 1)
            adjT = self._calculateAdj(tT, self.kT, self.padT, self.dT)
            adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
            adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)

        inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
        self._backend.VolumetricFullConvolution_accGradParameters(
            self._backend.library_state,
            inputTensor,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.finput,
            self.fgradInput,
            self.kT, self.kW, self.kH,
            self.dT, self.dW, self.dH,
            self.padT, self.padW, self.padH,
            adjT, adjW, adjH,
            scale
        )

    def type(self, type, tensorCache=None):
        # drop backend scratch tensors so they are re-created in the new type
        self.finput = torch.Tensor()
        self.fgradInput = torch.Tensor()
        return super(VolumetricFullConvolution, self).type(type, tensorCache)

    def __repr__(self):
        s = super(VolumetricFullConvolution, self).__repr__()
        s += '({} -> {}, {}x{}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kT, self.kW, self.kH)
        if self.dT != 1 or self.dW != 1 or self.dH != 1 or \
                self.padT != 0 or self.padW != 0 or self.padH != 0 or \
                self.adjT != 0 or self.adjW != 0 or self.adjH != 0:
            s += ', {}, {}, {}'.format(self.dT, self.dW, self.dH)

        if self.padT != 0 or self.padW != 0 or self.padH != 0 or \
                self.adjT != 0 or self.adjW != 0 or self.adjH != 0:
            s += ', {}, {}, {}'.format(self.padT, self.padW, self.padH)

        if self.adjT != 0 or self.adjW != 0 or self.adjH != 0:
            s += ', {}, {}, {}'.format(self.adjT, self.adjW, self.adjH)

        s += ')'
        return s
import math
import torch
from .MSECriterion import MSECriterion
"""
This file implements a criterion for multi-class classification.
It learns an embedding per class, where each class' embedding
is a point on an (N-1)-dimensional simplex, where N is
the number of classes.
For example usage of this class, look at doc/criterion.md
Reference: http://arxiv.org/abs/1506.08230
"""
class ClassSimplexCriterion(MSECriterion):
    """MSE criterion against fixed per-class simplex-vertex embeddings.

    Each class label is mapped to a vertex of a regular (nClasses-1)-simplex
    (padded with zero coordinates up to nClasses dimensions), and the loss
    is the MSE between the input and the embedding of its target class.
    """

    def __init__(self, nClasses):
        super(ClassSimplexCriterion, self).__init__()
        self.nClasses = nClasses

        # embedding the simplex in a space of dimension strictly greater than
        # the minimum possible (nClasses-1) is critical for effective training.
        simp = self._regsplex(nClasses - 1)
        # pad with zero columns so every class embedding has nClasses entries
        self.simplex = torch.cat((simp, torch.zeros(simp.size(0), nClasses - simp.size(1))), 1)
        self._target = torch.Tensor(nClasses)

        # scalar output buffer for the backend call, lazily allocated
        self.output_tensor = None

    def _regsplex(self, n):
        """
        regsplex returns the coordinates of the vertices of a
        regular simplex centered at the origin.
        The Euclidean norms of the vectors specifying the vertices are
        all equal to 1. The input n is the dimension of the vectors;
        the simplex has n+1 vertices.

        input:
        n # dimension of the vectors specifying the vertices of the simplex

        output:
        a # tensor dimensioned (n+1, n) whose rows are
        vectors specifying the vertices

        reference:
        http://en.wikipedia.org/wiki/Simplex#Cartesian_coordinates_for_regular_n-dimensional_simplex_in_Rn
        """
        a = torch.zeros(n + 1, n)

        for k in range(n):
            # determine the last nonzero entry in the vector for the k-th vertex
            if k == 0:
                a[k][k] = 1
            else:
                a[k][k] = math.sqrt(1 - a[k:k + 1, 0:k + 1].norm() ** 2)

            # fill_ the k-th coordinates for the vectors of the remaining vertices
            c = (a[k][k] ** 2 - 1 - 1 / n) / a[k][k]
            a[k + 1:n + 2, k:k + 1].fill_(c)

        return a

    # NOTE(review): an earlier comment claimed 2D targets are handled, but the
    # assert below only accepts 1D class-index targets -- verify intent.
    def _transformTarget(self, target):
        # replace each class index with its simplex-vertex embedding row
        assert target.dim() == 1
        nSamples = target.size(0)
        self._target.resize_(nSamples, self.nClasses)
        for i in range(nSamples):
            self._target[i].copy_(self.simplex[int(target[i])])

    def updateOutput(self, input, target):
        self._transformTarget(target)

        assert input.nelement() == self._target.nelement()
        if self.output_tensor is None:
            self.output_tensor = input.new(1)
        self._backend.MSECriterion_updateOutput(
            self._backend.library_state,
            input,
            self._target,
            self.output_tensor,
            self.sizeAverage,
            True,  # reduce
        )
        self.output = self.output_tensor[0].item()
        return self.output

    def updateGradInput(self, input, target):
        assert input.nelement() == self._target.nelement()
        # reduced criterion: upstream gradient is the scalar 1
        implicit_gradOutput = torch.Tensor([1]).type(input.type())
        self._backend.MSECriterion_updateGradInput(
            self._backend.library_state,
            input,
            self._target,
            implicit_gradOutput,
            self.gradInput,
            self.sizeAverage,
            True,  # reduce
        )
        return self.gradInput

    def getPredictions(self, input):
        # similarity of each input row to every class embedding
        return torch.mm(input, self.simplex.t())

    def getTopPrediction(self, input):
        # argmax over the class-similarity scores, flattened to 1D
        prod = self.getPredictions(input)
        _, maxs = prod.max(prod.ndimension() - 1)
        return maxs.view(-1)
import torch
from .Module import Module
from .utils import clear
class SpatialMaxPooling(Module):
    """Spatial (2D) max pooling over kW x kH windows with stride (dW, dH)
    and zero padding (padW, padH).

    The argmax indices from the forward pass are cached in ``self.indices``
    and reused by the backward pass.
    """

    def __init__(self, kW, kH, dW=None, dH=None, padW=0, padH=0):
        super(SpatialMaxPooling, self).__init__()
        self.kW = kW
        self.kH = kH
        # stride defaults to the kernel size (non-overlapping pooling)
        self.dW = dW or kW
        self.dH = dH or kH
        self.padW = padW
        self.padH = padH
        self.ceil_mode = False
        self.indices = torch.LongTensor()

    def ceil(self):
        """Round output sizes up; returns self for chaining."""
        self.ceil_mode = True
        return self

    def floor(self):
        """Round output sizes down (the default); returns self for chaining."""
        self.ceil_mode = False
        return self

    def updateOutput(self, input):
        if getattr(self, 'indices', None) is None:
            self.indices = input.new()
        self.indices = self.indices.long()

        ndim = input.dim()
        # remember the input's spatial extent for introspection
        self.iheight = input.size(ndim - 2)
        self.iwidth = input.size(ndim - 1)

        self._backend.SpatialMaxPooling_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.indices,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.ceil_mode
        )
        return self.output

    def updateGradInput(self, input, gradOutput):
        # route gradients to the argmax positions recorded in updateOutput
        self._backend.SpatialMaxPooling_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.indices,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.ceil_mode
        )
        return self.gradInput

    def __repr__(self):
        desc = super(SpatialMaxPooling, self).__repr__()
        desc += '({}x{}, {}, {}'.format(self.kW, self.kH, self.dW, self.dH)
        show_padding = (self.padW or self.padH) and (self.padW != 0 or self.padH != 0)
        if show_padding:
            desc += ', {}, {}'.format(self.padW, self.padH)
        return desc + ')'

    def clearState(self):
        clear(self, 'indices')
        return super(SpatialMaxPooling, self).clearState()
import math
import torch
from .Module import Module
from .utils import clear
class Linear(Module):
    """Fully-connected (affine) layer: output = input @ weight^T + bias."""

    def __init__(self, inputSize, outputSize, bias=True):
        super(Linear, self).__init__()
        self.weight = torch.Tensor(outputSize, inputSize)
        self.gradWeight = torch.Tensor(outputSize, inputSize)
        # bias may be disabled here or later via noBias()
        self.bias = torch.Tensor(outputSize) if bias else None
        self.gradBias = torch.Tensor(outputSize) if bias else None
        self.reset()

        # column of ones used to broadcast the bias over the batch
        self.addBuffer = None

    def noBias(self):
        """Drop the bias term; returns self for chaining."""
        self.bias = None
        self.gradBias = None
        return self

    def reset(self, stdv=None):
        """(Re)initialize weight and bias uniformly in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            # default fan-in based scale
            stdv = 1. / math.sqrt(self.weight.size(1))

        self.weight.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.uniform_(-stdv, stdv)
        return self

    def _updateAddBuffer(self, input):
        # keep a ones-vector with one entry per sample
        batch = input.size(0)
        if self.addBuffer is None:
            self.addBuffer = input.new()
        if self.addBuffer.nelement() != batch:
            self.addBuffer.resize_(batch).fill_(1)

    def updateOutput(self, input):
        assert input.dim() == 2
        batch = input.size(0)

        previous = self.output.nelement()
        self.output.resize_(batch, self.weight.size(0))
        if self.output.nelement() != previous:
            self.output.zero_()

        self._updateAddBuffer(input)
        # output = 0 * output + 1 * (input @ weight^T)
        self.output.addmm_(0, 1, input, self.weight.t())
        if self.bias is not None:
            # add the bias to every row via an outer product with the ones
            self.output.addr_(self.addBuffer, self.bias)
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        previous = self.gradInput.nelement()
        self.gradInput.resize_as_(input)
        if self.gradInput.nelement() != previous:
            self.gradInput.zero_()

        assert input.dim() == 2
        # gradInput = gradOutput @ weight
        self.gradInput.addmm_(0, 1, gradOutput, self.weight)
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        assert input.dim() == 2
        self.gradWeight.addmm_(scale, gradOutput.t(), input)
        if self.bias is not None:
            # re-size addBuffer in case the batch size changed since the
            # last forward/backward pass
            self._updateAddBuffer(input)
            self.gradBias.addmv_(scale, gradOutput.t(), self.addBuffer)

    def clearState(self):
        clear(self, 'addBuffer')
        return super(Linear, self).clearState()

    def __repr__(self):
        suffix = ' without bias' if self.bias is None else ''
        return super(Linear, self).__repr__() + \
            '({} -> {})'.format(self.weight.size(1), self.weight.size(0)) + \
            suffix
import torch
from .SpatialConvolution import SpatialConvolution
class SpatialDilatedConvolution(SpatialConvolution):
    """Spatial (2D) convolution with dilated (atrous) kernels.

    Behaves like SpatialConvolution with dilation-1 holes between
    kernel elements.
    """

    def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, dilationH=1, dilationW=None):
        super(SpatialDilatedConvolution, self).__init__(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH)

        # NOTE(review): unlike stride/padding, dilation is declared
        # height-first here and passed to the backend as (H, W) -- this
        # ordering is unusual for this codebase; verify against the backend
        # function signature.
        self.dilationH = dilationH
        # dilationW defaults to dilationH when not given
        self.dilationW = dilationW if dilationW is not None else dilationH

    def updateOutput(self, input):
        # scratch buffers, lazily allocated with the weight's type
        if self.finput is None:
            self.finput = self.weight.new()
        if self.fgradInput is None:
            self.fgradInput = self.weight.new()
        input = self._makeContiguous(input)
        self._backend.SpatialDilatedConvolution_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.weight,
            self.bias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.dilationH, self.dilationW
        )
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        input, gradOutput = self._makeContiguous(input, gradOutput)
        if self.fgradInput is None:
            self.fgradInput = self.weight.new()
        self._backend.SpatialDilatedConvolution_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.weight,
            self.finput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.dilationH, self.dilationW
        )
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        input, gradOutput = self._makeContiguous(input, gradOutput)
        if self.fgradInput is None:
            self.fgradInput = self.weight.new()
        self._backend.SpatialDilatedConvolution_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.finput,
            self.fgradInput,
            self.kW, self.kH,
            self.dW, self.dH,
            self.padW, self.padH,
            self.dilationH, self.dilationW,
            scale
        )

    def __repr__(self):
        # deliberately calls SpatialConvolution's *parent* __repr__ so the
        # full parameter list (including dilation) is rebuilt here
        s = super(SpatialConvolution, self).__repr__()
        s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)
        if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:
            s += ', {}, {}'.format(self.dW, self.dH)

        if self.padW != 0 or self.padH != 0:
            s += ', {}, {}'.format(self.padW, self.padH)

        s += ', {}, {}'.format(self.dilationW, self.dilationH)

        s += ')'
        if self.bias is None:
            s += ' without bias'
        return s
import math
import torch
from .Module import Module
from .utils import clear
class VolumetricConvolution(Module):
    """Volumetric (3D) convolution over nInputPlane feature maps.

    Uses the cuDNN-style backend kernel on CUDA tensors and the matrix-
    multiplication (MM) backend on CPU tensors; the MM path temporarily
    flattens the weight into a 2D view around each backend call.
    """

    def __init__(self, nInputPlane, nOutputPlane, kT, kW, kH, dT=1, dW=1, dH=1, padT=0, padW=None, padH=None):
        super(VolumetricConvolution, self).__init__()

        self.nInputPlane = nInputPlane
        self.nOutputPlane = nOutputPlane
        self.kT = kT
        self.kW = kW
        self.kH = kH
        self.dT = dT
        self.dW = dW
        self.dH = dH
        self.padT = padT
        # padW defaults to padT, padH defaults to padW (cascading defaults)
        self.padW = padW if padW is not None else self.padT
        self.padH = padH if padH is not None else self.padW

        self.weight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
        self.bias = torch.Tensor(nOutputPlane)
        self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
        self.gradBias = torch.Tensor(nOutputPlane)
        self.reset()

        # scratch buffers and contiguity caches, lazily allocated
        self.finput = None
        self.fgradInput = None
        self._gradOutput = None
        # BUGFIX: _makeContiguous reads self._input, which was never
        # initialized -- a non-contiguous input raised AttributeError.
        self._input = None

    def reset(self, stdv=None):
        """(Re)initialize weight and bias uniformly in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            # default fan-in based scale
            stdv = 1. / math.sqrt(self.kT * self.kW * self.kH * self.nInputPlane)

        self.weight.uniform_(-stdv, stdv)
        self.bias.uniform_(-stdv, stdv)

    def _makeContiguous(self, input, gradOutput=None):
        # Copy input (and optionally gradOutput) into cached buffers when
        # non-contiguous, so the backend can assume contiguous storage.
        if not input.is_contiguous():
            if self._input is None:
                self._input = input.new()
            self._input.resize_as_(input).copy_(input)
            input = self._input

        if gradOutput is not None:
            if not gradOutput.is_contiguous():
                if self._gradOutput is None:
                    self._gradOutput = gradOutput.new()
                self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
                gradOutput = self._gradOutput
            return input, gradOutput

        return input

    # function to re-view the weight layout in a way that would make the MM ops happy
    def _viewWeight(self):
        self.weight = self.weight.view(self.nOutputPlane, self.nInputPlane * self.kT * self.kH * self.kW)
        if self.gradWeight is not None and self.gradWeight.dim() > 0:
            self.gradWeight = self.gradWeight.view(self.nOutputPlane, self.nInputPlane * self.kT * self.kH * self.kW)

    def _unviewWeight(self):
        # restore the canonical 5D weight layout after an MM backend call
        self.weight = self.weight.view(self.nOutputPlane, self.nInputPlane, self.kT, self.kH, self.kW)
        if self.gradWeight is not None and self.gradWeight.dim() > 0:
            self.gradWeight = self.gradWeight.view(self.nOutputPlane, self.nInputPlane, self.kT, self.kH, self.kW)

    def updateOutput(self, input):
        if self.finput is None:
            self.finput = input.new()
        if self.fgradInput is None:
            self.fgradInput = input.new()

        if input.type() == 'torch.cuda.FloatTensor':
            self._backend.VolumetricConvolution_updateOutput(
                self._backend.library_state,
                input,
                self.output,
                self.weight,
                self.bias,
                self.finput,
                self.fgradInput,
                self.dT, self.dW, self.dH,
                self.padT, self.padW, self.padH
            )
        else:
            self._viewWeight()
            input = self._makeContiguous(input)
            self._backend.VolumetricConvolutionMM_updateOutput(
                self._backend.library_state,
                input,
                self.output,
                self.weight,
                self.bias,
                self.finput,
                self.fgradInput,
                self.kT, self.kW, self.kH,
                self.dT, self.dW, self.dH,
                self.padT, self.padW, self.padH
            )
            self._unviewWeight()

        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if input.type() == 'torch.cuda.FloatTensor':
            self._backend.VolumetricConvolution_updateGradInput(
                self._backend.library_state,
                input,
                gradOutput,
                self.gradInput,
                self.weight,
                self.finput,
                self.dT, self.dW, self.dH,
                self.padT, self.padW, self.padH
            )
        else:
            self._viewWeight()
            input, gradOutput = self._makeContiguous(input, gradOutput)
            self._backend.VolumetricConvolutionMM_updateGradInput(
                self._backend.library_state,
                input,
                gradOutput,
                self.gradInput,
                self.weight,
                self.finput,
                self.fgradInput,
                self.kT, self.kW, self.kH,
                self.dT, self.dW, self.dH,
                self.padT, self.padW, self.padH
            )
            self._unviewWeight()

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        if input.type() == 'torch.cuda.FloatTensor':
            self._backend.VolumetricConvolution_accGradParameters(
                self._backend.library_state,
                input,
                gradOutput,
                self.gradWeight,
                self.gradBias,
                self.finput,
                self.fgradInput,
                self.dT, self.dW, self.dH,
                self.padT, self.padW, self.padH,
                scale
            )
        else:
            input, gradOutput = self._makeContiguous(input, gradOutput)
            self._viewWeight()
            self._backend.VolumetricConvolutionMM_accGradParameters(
                self._backend.library_state,
                input,
                gradOutput,
                self.gradWeight,
                self.gradBias,
                self.finput,
                self.fgradInput,
                self.kT, self.kW, self.kH,
                self.dT, self.dW, self.dH,
                self.padT, self.padW, self.padH,
                scale
            )
            self._unviewWeight()

    def type(self, type, tensorCache=None):
        # drop backend scratch tensors so they are re-created in the new type
        clear(self, 'finput', 'fgradInput')
        return super(VolumetricConvolution, self).type(type, tensorCache)

    def clearState(self):
        clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')
        return super(VolumetricConvolution, self).clearState()

    def __repr__(self):
        s = super(VolumetricConvolution, self).__repr__()
        s += '({} -> {}, {}x{}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kT, self.kW, self.kH)
        if self.dT != 1 or self.dW != 1 or self.dH != 1 or \
                self.padT != 0 or self.padW != 0 or self.padH != 0:
            s += ', {}, {}, {}'.format(self.dT, self.dW, self.dH)

        if self.padT != 0 or self.padW != 0 or self.padH != 0:
            s += ', {}, {}, {}'.format(self.padT, self.padW, self.padH)

        s += ')'
        return s
import math
import torch
from .Module import Module
from .utils import clear, contiguousView
class CMul(Module):
    """Component-wise multiplication of the input by a learnable weight
    tensor of fixed shape ``size``, broadcast over the batch dimension.
    """

    def __init__(self, *args):
        super(CMul, self).__init__()

        # accept either a single torch.Size or individual dimension arguments
        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.weight = torch.Tensor(self.size)
        self.gradWeight = torch.Tensor(self.size)
        self.output.resize_(self.size)
        self.reset()

        # lazily-allocated flat views and work buffers
        self._output = None
        self._weight = None
        self._expand = None
        self._repeat = None
        self._gradOutput = None
        self._gradInput = None
        self._input = None
        self._gradWeight = None
        self._sum = None

    def reset(self, stdv=None):
        # uniform init; default scale is 1/sqrt(#weights)
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.weight.nelement())
        self.weight.uniform_(-stdv, stdv)

    def updateOutput(self, input):
        # lazy-initialize
        if self._output is None:
            self._output = input.new()
            self._weight = input.new()
            self._expand = input.new()
            self._repeat = input.new()

        self.output.resize_as_(input).copy_(input)
        batchSize = input.size(0)
        # TODO: expand_as_, view_
        # flat views: output as (batch, -1), weight broadcast as (1, -1);
        # _output aliases self.output, so the in-place mul updates it too
        self._output = self.output.view(batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._output)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            # materialize the expanded weight before the in-place mul on CUDA
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._output.mul_(self._repeat)
        else:
            self._output.mul_(self._expand)

        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._gradOutput is None:
            self._gradOutput = input.new()
            self._gradInput = input.new()

        self.gradInput.resize_as_(input).zero_()
        batchSize = input.size(0)
        # contiguous flat (batch, -1) views over gradOutput and gradInput
        contiguousView(self._gradOutput, gradOutput, batchSize, -1)
        contiguousView(self._gradInput, self.gradInput, batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._gradOutput)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
        else:
            self._gradInput.addcmul_(1, self._expand, self._gradOutput)

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        if self._input is None:
            self._input = input.new()
            self._gradWeight = input.new()
            self._sum = input.new()

        batchSize = input.size(0)
        contiguousView(self._input, input, batchSize, -1)
        contiguousView(self._gradOutput, gradOutput, batchSize, -1)
        # flat view aliases self.gradWeight, so add_ accumulates in place
        self._gradWeight = self.gradWeight.view(1, -1)

        # dL/dW = scale * sum over the batch of input * gradOutput
        torch.mul(self._input, self._gradOutput, out=self._repeat)
        torch.sum(self._repeat, 0, True, out=self._sum)
        self._gradWeight.add_(scale, self._sum)

    def type(self, type=None, tensorCache=None):
        if type:
            # buffers alias tensors of the old type; drop them before casting
            self.clearState()
        return super(CMul, self).type(type, tensorCache)

    def clearState(self):
        clear(self, [
            '_input',
            '_output',
            '_weight',
            '_gradWeight',
            '_expand',
            '_repeat',
            '_sum',
        ])
        return super(CMul, self).clearState()
import torch
from .Container import Container
class Parallel(Container):
def __init__(self, inputDimension, outputDimension):
super(Parallel, self).__init__()
self.inputDimension = inputDimension
self.outputDimension = outputDimension
self.totalOutputSize = None
def updateOutput(self, input):
nModule = input.size(self.inputDimension)
outputs = []
for i in range(nModule):
currentInput = input.select(self.inputDimension, i)
currentOutput = self.modules[i].updateOutput(currentInput)
outputs.append(currentOutput)
outputSize = currentOutput.size(self.outputDimension)
if i == 0:
totalOutputSize = list(currentOutput.size())
else:
totalOutputSize[self.outputDimension] += outputSize
self.totalOutputSize = torch.Size(totalOutputSize)
self.output.resize_(self.totalOutputSize)
offset = 0
for i in range(nModule):
currentOutput = outputs[i]
outputSize = currentOutput.size(self.outputDimension)
self.output.narrow(self.outputDimension, offset, outputSize).copy_(currentOutput)
offset = offset + currentOutput.size(self.outputDimension)
return self.output
def updateGradInput(self, input, gradOutput):
nModule = input.size(self.inputDimension)
self.gradInput.resize_as_(input)
offset = 0
for i in range(nModule):
module = self.modules[i]
currentInput = input.select(self.inputDimension, i)
currentOutput = module.output
outputSize = currentOutput.size(self.outputDimension)
currentGradOutput = gradOutput.narrow(self.outputDimension, offset, outputSize)
currentGradInput = module.updateGradInput(currentInput, currentGradOutput)
self.gradInput.select(self.inputDimension, i).copy_(currentGradInput)
offset = offset + outputSize
return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
nModule = input.size(self.inputDimension)
offset = 0
for i in range(nModule):
module = self.modules[i]
currentOutput = module.output
outputSize = currentOutput.size(self.outputDimension)
module.accGradParameters(
input.select(self.inputDimension, i),
gradOutput.narrow(self.outputDimension, offset, outputSize),
scale)
offset += outputSize
def accUpdateGradParameters(self, input, gradOutput, lr):
nModule = input.size(self.inputDimension)
offset = 0
for i in range(nModule):
module = self.modules[i]
currentOutput = module.output
module.accupdateGradParameters(
input.select(self.inputDimension, i),
gradOutput.narrow(self.outputDimension, offset, currentOutput.size(self.outputDimension)),
lr)
offset = offset + currentOutput.size(self.outputDimension)
def __repr__(self):
    """Render an ASCII tree of this container and its children, using the
    same layout as Lua torch's nn.Parallel tostring()."""
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' ... -> '
    res = torch.typename(self)
    res += ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        # indent each child's multi-line repr; the last child gets the
        # "blank rail" (extlast) instead of the continuation bar (ext)
        if i == len(self.modules) - 1:
            res += line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)
    res += line + tab + last + 'output'
    res += line + '}'
    return res
import torch
from .Module import Module
from .utils import clear
class CosineDistance(Module):
    """Takes a pair of inputs [x1, x2] and computes their cosine similarity
    cos(theta) = <x1, x2> / (||x1|| * ||x2||), row-wise for 2D inputs.

    Output has size ``batch`` for 2D (batch x dim) inputs.
    """

    def __init__(self, ):
        super(CosineDistance, self).__init__()
        self.gradInput = [torch.Tensor(), torch.Tensor()]
        # contiguous copies of the inputs, allocated only when needed
        self._input1 = None
        self._input2 = None
        # lazily-created work buffers (see updateOutput):
        #   w1  -- row-wise dot products <x1, x2>
        #   w22 -- 1 / (||x1||^2 + eps)
        #   w32 -- 1 / (||x2||^2 + eps)
        #   w   -- 1 / (||x1|| * ||x2||)
        self.buffer = None
        self.w1 = None
        self.w22 = None
        self.w = None
        self.w32 = None
        self.ones = None

    def _makeContiguous(self, input1, input2):
        """Return contiguous versions of both inputs, copying into private
        buffers only when an input is non-contiguous."""
        if not input1.is_contiguous():
            if self._input1 is None:
                self._input1 = input1.new()
            self._input1.resize_as_(input1).copy_(input1)
            input1 = self._input1
        if not input2.is_contiguous():
            if self._input2 is None:
                self._input2 = input2.new()
            self._input2.resize_as_(input2).copy_(input2)
            input2 = self._input2
        return input1, input2

    def updateOutput(self, input):
        """Forward: output[i] = <x1[i], x2[i]> / (||x1[i]|| * ||x2[i]||)."""
        input1, input2 = input[0], input[1]
        input1, input2 = self._makeContiguous(input1, input2)

        if self.buffer is None:
            self.buffer = input1.new()
            self.w1 = input1.new()
            self.w22 = input1.new()
            self.w = input1.new()
            self.w32 = input1.new()
            self.ones = input1.new()

        torch.mul(input1, input2, out=self.buffer)
        torch.sum(self.buffer, 1, out=self.w1, keepdim=True)

        # epsilon guards the reciprocal against zero-norm inputs
        epsilon = 1e-12
        torch.mul(input1, input1, out=self.buffer)
        torch.sum(self.buffer, 1, out=self.w22, keepdim=True).add_(epsilon)
        self.w22.reciprocal_()
        self.w.resize_as_(self.w22).copy_(self.w22)

        torch.mul(input2, input2, out=self.buffer)
        torch.sum(self.buffer, 1, out=self.w32, keepdim=True).add_(epsilon)
        self.w32.reciprocal_()
        self.w.mul_(self.w32)
        # w = 1 / (||x1|| * ||x2||)
        self.w.sqrt_()

        torch.mul(self.w1, self.w, out=self.output)
        self.output.resize_(input1.size(0))

        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward for both inputs; gradients are scaled by gradOutput
        broadcast across each row."""
        v1 = input[0]
        v2 = input[1]
        v1, v2 = self._makeContiguous(v1, v2)

        # keep backward compatibility with serialized modules whose
        # gradInput list may be missing entries
        if len(self.gradInput) != 2:
            if self.gradInput[0] is None:
                self.gradInput[0] = v1.new()
            if self.gradInput[1] is None:
                self.gradInput[1] = v1.new()
            self.gradInput = self.gradInput[:2]

        gw1 = self.gradInput[0]
        gw2 = self.gradInput[1]
        gw1.resize_as_(v1).copy_(v2)
        gw2.resize_as_(v1).copy_(v1)

        # gw1 = (v2 - <v1,v2>/||v1||^2 * v1) / (||v1|| ||v2||)
        torch.mul(self.w1, self.w22, out=self.buffer)
        gw1.addcmul_(-1, self.buffer.expand_as(v1), v1)
        gw1.mul_(self.w.expand_as(v1))

        # symmetric expression for the second input
        torch.mul(self.w1, self.w32, out=self.buffer)
        gw2.addcmul_(-1, self.buffer.expand_as(v1), v2)
        gw2.mul_(self.w.expand_as(v1))

        go = gradOutput.contiguous().view(-1, 1).expand_as(v1)
        gw1.mul_(go)
        gw2.mul_(go)

        return self.gradInput

    def clearState(self):
        """Free lazily-allocated buffers (re-created on the next forward)."""
        clear(self, [
            'buffer',
            'w1',
            'w22',
            'w',
            'w32',
            'ones',
        ])
        return super(CosineDistance, self).clearState()
import torch
from .Container import Container
class Sequential(Container):
    """Chains modules: each module consumes the previous module's output.

    ``self.output`` aliases the last child's output and ``self.gradInput``
    the first child's gradInput.
    """

    def __len__(self):
        return len(self.modules)

    def add(self, module):
        """Append ``module``; returns self for chaining."""
        if len(self.modules) == 0:
            self.gradInput = module.gradInput

        self.modules.append(module)
        self.output = module.output
        return self

    def insert(self, module, index):
        """Insert ``module`` at position ``index``."""
        # BUGFIX: list.insert takes (index, object); the arguments were
        # passed reversed (self.modules.insert(module, index)), which
        # raises TypeError because a module is not an integer index.
        self.modules.insert(index, module)
        self.output = self.modules[-1].output
        self.gradInput = self.modules[0].gradInput

    def remove(self, index=-1):
        """Remove the module at ``index`` (default: the last one)."""
        del self.modules[index]

        if len(self.modules) > 0:
            self.output = self.modules[-1].output
            self.gradInput = self.modules[0].gradInput
        else:
            self.output = torch.Tensor()
            self.gradInput = torch.Tensor()

    def updateOutput(self, input):
        currentOutput = input
        for module in self.modules:
            currentOutput = module.updateOutput(currentOutput)
        self.output = currentOutput
        return self.output

    def _iter_with_prev(self):
        # Yields (previous_module, module) pairs from the last module back
        # to the second; used to walk gradients backwards through the chain.
        return zip(self.modules[-2::-1], self.modules[-1:0:-1])

    def updateGradInput(self, input, gradOutput):
        currentGradOutput = gradOutput
        for prev, current in self._iter_with_prev():
            currentGradOutput = current.updateGradInput(prev.output, currentGradOutput)
        self.gradInput = self.modules[0].updateGradInput(input, currentGradOutput)
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        currentGradOutput = gradOutput
        for prev, current in self._iter_with_prev():
            current.accGradParameters(prev.output, currentGradOutput, scale)
            currentGradOutput = current.gradInput
        self.modules[0].accGradParameters(input, currentGradOutput, scale)

    def backward(self, input, gradOutput, scale=1):
        """Combined updateGradInput + accGradParameters in one pass."""
        currentGradOutput = gradOutput
        for prev, current in self._iter_with_prev():
            currentGradOutput = current.backward(prev.output, currentGradOutput, scale)
            # currentModule.gradInput = currentGradOutput
        self.gradInput = self.modules[0].backward(input, currentGradOutput, scale)
        return self.gradInput

    def accUpdateGradParameters(self, input, gradOutput, lr):
        currentGradOutput = gradOutput
        for prev, current in self._iter_with_prev():
            current.accUpdateGradParameters(prev.output, currentGradOutput, lr)
            currentGradOutput = current.gradInput
        self.modules[0].accUpdateGradParameters(input, currentGradOutput, lr)

    def __repr__(self):
        """ASCII diagram of the chain, Lua-torch style."""
        tab = ' '
        line = '\n'
        next = ' -> '
        res = 'nn.Sequential'
        res = res + ' {' + line + tab + '[input'
        for i in range(len(self.modules)):
            res = res + next + '(' + str(i) + ')'
        res = res + next + 'output]'
        for i in range(len(self.modules)):
            res = res + line + tab + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab)
        res = res + line + '}'
        return res
import torch
from .Module import Module
from .utils import clear
class LookupTable(Module):
    """Embedding lookup: maps integer indices to rows of an
    ``nIndex x nOutput`` weight matrix.

    Input is a 1D index vector (output: ``len x nOutput``) or a 2D batch
    (output: ``batch x len x nOutput``).
    """

    def __init__(self, nIndex, nOutput, paddingValue=-1, maxNorm=None, normType=None):
        super(LookupTable, self).__init__()
        self.weight = torch.Tensor(nIndex, nOutput)
        self.gradWeight = torch.Tensor(nIndex, nOutput).zero_()
        self.paddingValue = paddingValue
        self.maxNorm = maxNorm
        # p-norm order used by renorm; falls back to 2 when None
        self.normType = normType
        self.shouldScaleGradByFreq = False

        # scratch buffers used by the C backend in accGradParameters
        self._gradOutput = None
        self._sorted = None
        self._indices = None

        self._count = torch.IntTensor()
        # contiguous LongTensor copy of the input indices
        self._input = torch.LongTensor()

        self.reset()

    def accUpdateOnly(self):
        """Drop gradWeight; the layer then only supports in-place
        accUpdateGradParameters-style updates."""
        self.gradWeight = None
        return self

    def setPadding(self, paddingValue):
        self.paddingValue = paddingValue
        return self

    def setMaxNorm(self, maxNorm):
        self.maxNorm = maxNorm
        return self

    def setNormType(self, normType):
        self.normType = normType
        return self

    def scaleGradByFreq(self):
        """Scale accumulated gradients by inverse index frequency."""
        self.shouldScaleGradByFreq = True
        return self

    def reset(self, stdv=1):
        """Re-initialize the embedding weights from N(0, stdv)."""
        self.weight.normal_(0, stdv)

    def _makeInputContiguous(self, input):
        # make sure input is a contiguous torch.LongTensor
        if not input.is_contiguous() or input.type() != self._input.type():
            self.copiedInput = True
            self._input.resize_(input.size()).copy_(input)
            return self._input
        else:
            self.copiedInput = False
            return input

    def updateOutput(self, input):
        """Gather the weight rows addressed by ``input`` (after optional
        max-norm renormalization)."""
        self.renorm(input)
        input = self._makeInputContiguous(input)
        if input.dim() == 1:
            torch.index_select(self.weight, 0, input, out=self.output)
        elif input.dim() == 2:
            torch.index_select(self.weight, 0, input.view(-1), out=self.output)
            self.output = self.output.view(input.size(0), input.size(1), self.weight.size(1))
        else:
            raise RuntimeError("input must be a vector or matrix")
        return self.output

    def updateGradInput(self, input, gradOutput):
        # the input can be of any type (as in the forward it's
        # converted anyway to LongTensor) thus, need to allocate
        # new memory each time the user changes the input type
        if self.gradInput.type() != input.type():
            self.gradInput = input.new()

        # indices carry no gradient; return zeros shaped like the input
        if not self.gradInput.is_same_size(input):
            self.gradInput.resize_as_(input).zero_()

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        """Scatter-add gradOutput rows into gradWeight via the C backend."""
        input = self._input if self.copiedInput else input
        if input.dim() == 2:
            input = input.view(-1)
        elif input.dim() != 1:
            raise RuntimeError("input must be a vector or matrix")

        if not gradOutput.is_contiguous():
            if self._gradOutput is None:
                self._gradOutput = gradOutput.new()
            self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
            gradOutput = self._gradOutput

        self._backend.LookupTable_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self._count,
            self._sorted,
            self._indices,
            self.shouldScaleGradByFreq,
            self.paddingValue or 0,
            scale
        )

    def renorm(self, input):
        """Clip the norm of every referenced embedding row to maxNorm
        (no-op when maxNorm is None)."""
        if self.maxNorm is None:
            return

        # copy input into _input, so _input is continuous.
        # The copied _input will be modified in the C code.
        self._input.resize_(input.size()).copy_(input)
        row_idx = self._input
        if row_idx.dim() == 2:
            row_idx = row_idx.view(-1)
        elif row_idx.dim() != 1:
            raise RuntimeError("input must be a vector or matrix")

        # "row_idx" and "weight" will be modified in the C code
        self._backend.LookupTable_renorm(
            self._backend.library_state,
            row_idx,
            self.weight,
            self.maxNorm,
            self.normType or 2
        )

    def type(self, type=None, tensorCache=None):
        """Convert the module; index/count buffers keep integer types
        instead of being cast to the floating-point ``type``."""
        if type is None:
            return self._type

        super(LookupTable, self).type(type, tensorCache)

        if type == 'torch.cuda.FloatTensor':
            # CUDA uses _sorted and _indices temporary tensors
            self._sorted = torch.cuda.LongTensor()
            self._indices = torch.cuda.LongTensor()
            self._count = torch.cuda.LongTensor()
            self._input = torch.cuda.LongTensor()
        else:
            # self._count and self._input should only be converted if using Cuda
            self._count = torch.IntTensor()
            self._input = torch.LongTensor()

        return self

    def clearState(self):
        clear(self, '_count', '_input', '_sorted', '_indices', '_gradOutput')
        return super(LookupTable, self).clearState()
import math
import torch
from .Module import Module
from .utils import clear
class Bilinear(Module):
    """Bilinear layer: output[k] = x1^T W[k] x2 (+ bias[k]) for each of
    ``outputSize`` weight slices W[k] of shape inputSize1 x inputSize2.

    Input is a pair [x1, x2] of 2D batches with matching row counts.
    """

    def _assertInput(self, input):
        """Validate the forward input pair's types and shapes."""
        if len(input) != 2 or not isinstance(input[0], torch.Tensor) or not isinstance(input[1], torch.Tensor):
            raise RuntimeError('input should be a table containing two data Tensors')
        if input[0].ndimension() != 2 or input[1].ndimension() != 2:
            raise RuntimeError('input Tensors should be two-dimensional')
        if input[0].size(0) != input[1].size(0):
            raise RuntimeError('input Tensors should have the same number of rows')
        if input[0].size(1) != self.weight.size(1):
            raise RuntimeError('dimensionality of first input is erroneous')
        if input[1].size(1) != self.weight.size(2):
            raise RuntimeError('dimensionality of second input is erroneous')

    def _assertInputGradOutput(self, input, gradOutput):
        """Validate that gradOutput matches the input batch and this
        layer's output size."""
        if input[0].size(0) != gradOutput.size(0):
            # BUGFIX: error message was garbled
            # ('number of rows in gradOutput.es not match input')
            raise RuntimeError('number of rows in gradOutput does not match input')
        if gradOutput.size(1) != self.weight.size(0):
            raise RuntimeError('number of columns in gradOutput does not match layer\'s output size')

    def __init__(self, inputSize1, inputSize2, outputSize, bias=True):
        # set up model:
        super(Bilinear, self).__init__()
        self.weight = torch.Tensor(outputSize, inputSize1, inputSize2)
        self.gradWeight = torch.Tensor(outputSize, inputSize1, inputSize2)
        if bias:
            self.bias = torch.Tensor(outputSize)
            self.gradBias = torch.Tensor(outputSize)
        else:
            self.bias = None
            self.gradBias = None
        # lazily-allocated scratch buffers shaped like input[0]/input[1]
        self.buff1 = None
        self.buff2 = None
        self.gradInput = [torch.Tensor(), torch.Tensor()]
        self.reset()

    def reset(self, stdv=None):
        """Uniform re-initialization of weights (and bias) in [-stdv, stdv]."""
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.uniform_(-stdv, stdv)
        return self

    def updateOutput(self, input):
        self._assertInput(input)

        # set up buffer:
        if self.buff2 is None:
            self.buff2 = input[0].new()
        self.buff2.resize_as_(input[1])

        # compute output scores: output[:, k] = sum((x1 @ W[k]) * x2, dim=1)
        self.output.resize_(input[0].size(0), self.weight.size(0))
        for k in range(self.weight.size(0)):
            torch.mm(input[0], self.weight[k], out=self.buff2)
            self.buff2.mul_(input[1])
            torch.sum(self.buff2, 1, True, out=self.output.narrow(1, k, 1))
        if self.bias is not None:
            self.output.add_(self.bias.view(1, self.bias.nelement()).expand_as(self.output))
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        self._assertInputGradOutput(input, gradOutput)
        # compute d output / d input:
        self.gradInput[0].resize_as_(input[0]).fill_(0)
        self.gradInput[1].resize_as_(input[1]).fill_(0)

        #: first slice of weight tensor (k = 1)
        self.gradInput[0].addmm_(input[1], self.weight[0].t())
        self.gradInput[0].mul_(gradOutput.narrow(1, 0, 1).expand(self.gradInput[0].size(0),
                                                                 self.gradInput[0].size(1)))
        self.gradInput[1].addmm_(input[0], self.weight[0])
        self.gradInput[1].mul_(gradOutput.narrow(1, 0, 1).expand(self.gradInput[1].size(0),
                                                                 self.gradInput[1].size(1)))

        #: remaining slices of weight tensor
        if self.weight.size(0) > 1:
            if self.buff1 is None:
                self.buff1 = input[0].new()
            self.buff1.resize_as_(input[0])

            for k in range(1, self.weight.size(0)):
                torch.mm(input[1], self.weight[k].t(), out=self.buff1)
                self.buff1.mul_(gradOutput.narrow(1, k, 1).expand(self.gradInput[0].size(0),
                                                                  self.gradInput[0].size(1)))
                self.gradInput[0].add_(self.buff1)

                torch.mm(input[0], self.weight[k], out=self.buff2)
                self.buff2.mul_(gradOutput.narrow(1, k, 1).expand(self.gradInput[1].size(0),
                                                                  self.gradInput[1].size(1)))
                self.gradInput[1].add_(self.buff2)

        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        self._assertInputGradOutput(input, gradOutput)

        # make sure we have buffer:
        if self.buff1 is None:
            self.buff1 = input[0].new()
        self.buff1.resize_as_(input[0])

        # accumulate parameter gradients:
        # NOTE(review): `scale` is applied to gradBias but not to the
        # gradWeight accumulation below -- looks inconsistent; confirm
        # against the Lua original before changing.
        for k in range(self.weight.size(0)):
            torch.mul(input[0], gradOutput.narrow(1, k, 1).expand_as(input[0]), out=self.buff1)
            self.gradWeight[k].addmm_(self.buff1.t(), input[1])

        if self.bias is not None:
            self.gradBias.add_(scale, gradOutput.sum(0, keepdim=False))

    def __repr__(self):
        return str(type(self)) + \
            '({}x{} -> {}) {}'.format(
                self.weight.size(1), self.weight.size(2), self.weight.size(0),
                (' without bias' if self.bias is None else '')
            )

    def clearState(self):
        """Free scratch buffers (re-created on the next forward/backward)."""
        clear(self, 'buff1', 'buff2')
        return super(Bilinear, self).clearState()
import torch
from .Module import Module
class Padding(Module):
    # pad puts in [pad] amount of [value] over dimension [dim], starting at
    # index [index] in that dimension. If pad<0, index counts from the left.
    # If pad>0 index counts from the right index = 1 pads before index 1.
    # index = 2 pads starting before index 2 and after index 1 in dimension [dim]
    # When nInputDim is provided, inputs larger than that value will be considered batches
    # where the actual dim to be padded will be dimension dim + 1.

    def __init__(self, dim, pad, value=0, index=0, nInputDim=0):
        self.value = value
        self.index = index
        self.dim = dim
        self.pad = pad
        self.nInputDim = nInputDim
        self.outputSize = torch.Size()
        super(Padding, self).__init__()

    def _effective_dim(self, input):
        # Shift the target dimension by one when the input carries an
        # extra (batch) axis beyond nInputDim.
        if getattr(self, "nInputDim", 0) > 0 and input.dim() != self.nInputDim:
            return self.dim + 1
        return self.dim

    def _resolve(self, input, d):
        # Normalize (index, pad) into a non-negative pad amount and an
        # insertion point counted from the left.
        at, amount = self.index, self.pad
        if amount > 0:
            at = input.size(d) - at
        else:
            amount = -amount
        return at, amount

    def updateOutput(self, input):
        d = self._effective_dim(input)

        shape = list(input.size())
        shape[d] += abs(self.pad)
        self.outputSize = torch.Size(shape)

        self.output.resize_(self.outputSize)
        self.output.fill_(self.value)

        at, amount = self._resolve(input, d)
        n = input.size(d)
        if at == 0:
            # all padding before the data
            self.output.narrow(d, amount, n).copy_(input)
        elif at == n:
            # all padding after the data
            self.output.narrow(d, 0, n).copy_(input)
        else:
            # data split around the inserted padding
            self.output.narrow(d, 0, at).copy_(input.narrow(d, 0, at))
            self.output.narrow(d, at + amount, n - at).copy_(input.narrow(d, at, n - at))
        return self.output

    def updateGradInput(self, input, gradOutput):
        self.gradInput.resize_as_(input)
        d = self._effective_dim(input)

        at, amount = self._resolve(input, d)
        n = input.size(d)
        if at == 0:
            self.gradInput.copy_(gradOutput.narrow(d, amount, n))
        elif at == n:
            self.gradInput.copy_(gradOutput.narrow(d, 0, n))
        else:
            self.gradInput.narrow(d, 0, at).copy_(gradOutput.narrow(d, 0, at))
            self.gradInput.narrow(d, at, n - at).copy_(gradOutput.narrow(d, at + amount, n - at))
        return self.gradInput
import math
import torch
from .Module import Module
from .Sequential import Sequential
from .SpatialZeroPadding import SpatialZeroPadding
from .SpatialConvolution import SpatialConvolution
from .SpatialConvolutionMap import SpatialConvolutionMap
from .Replicate import Replicate
from .Square import Square
from .Sqrt import Sqrt
from .CDivTable import CDivTable
from .Threshold import Threshold
from .utils import clear
class SpatialDivisiveNormalization(Module):
    """Divides each input value by the (thresholded) local standard
    deviation estimated with a fixed averaging ``kernel`` over a spatial
    neighbourhood, with edge effects corrected via a coefficient map.
    """

    def __init__(self, nInputPlane=1, kernel=None, threshold=1e-4, thresval=None):
        super(SpatialDivisiveNormalization, self).__init__()

        # get args
        self.nInputPlane = nInputPlane
        if kernel is None:
            kernel = torch.Tensor(9, 9).fill_(1)
        self.kernel = kernel
        self.threshold = threshold
        self.thresval = thresval if thresval is not None else threshold
        kdim = self.kernel.ndimension()

        # check args: 1D kernels are applied separably (rows then columns)
        if kdim != 2 and kdim != 1:
            raise ValueError('SpatialDivisiveNormalization averaging kernel must be 2D or 1D')

        if (self.kernel.size(0) % 2) == 0 or (kdim == 2 and (self.kernel.size(1) % 2) == 0):
            raise ValueError('SpatialDivisiveNormalization averaging kernel must have ODD dimensions')

        # padding values (keep the spatial size unchanged)
        padH = int(math.floor(self.kernel.size(0) / 2))
        padW = padH
        if kdim == 2:
            padW = int(math.floor(self.kernel.size(1) / 2))

        # create convolutional mean estimator
        self.meanestimator = Sequential()
        self.meanestimator.add(SpatialZeroPadding(padW, padW, padH, padH))
        if kdim == 2:
            self.meanestimator.add(SpatialConvolution(self.nInputPlane, 1, self.kernel.size(1), self.kernel.size(0)))
        else:
            # separable: per-plane row filter, then a column filter
            self.meanestimator.add(SpatialConvolutionMap(
                SpatialConvolutionMap.maps.oneToOne(self.nInputPlane), self.kernel.size(0), 1))
            self.meanestimator.add(SpatialConvolution(self.nInputPlane, 1, 1, self.kernel.size(0)))
        self.meanestimator.add(Replicate(self.nInputPlane, 1))

        # create convolutional std estimator (mean of squares, then sqrt)
        self.stdestimator = Sequential()
        self.stdestimator.add(Square())
        self.stdestimator.add(SpatialZeroPadding(padW, padW, padH, padH))
        if kdim == 2:
            self.stdestimator.add(SpatialConvolution(self.nInputPlane, 1, self.kernel.size(1), self.kernel.size(0)))
        else:
            self.stdestimator.add(SpatialConvolutionMap(
                SpatialConvolutionMap.maps.oneToOne(self.nInputPlane), self.kernel.size(0), 1))
            self.stdestimator.add(SpatialConvolution(self.nInputPlane, 1, 1, self.kernel.size(0)))
        self.stdestimator.add(Replicate(self.nInputPlane, 1))
        self.stdestimator.add(Sqrt())

        # set kernel and bias: normalize so the filters average over the
        # neighbourhood (and over planes)
        if kdim == 2:
            self.kernel.div_(self.kernel.sum() * self.nInputPlane)
            for i in range(self.nInputPlane):
                self.meanestimator.modules[1].weight[0][i] = self.kernel
                self.stdestimator.modules[2].weight[0][i] = self.kernel
            self.meanestimator.modules[1].bias.zero_()
            self.stdestimator.modules[2].bias.zero_()
        else:
            # separable case: each 1D pass carries sqrt of the total norm
            self.kernel.div_(self.kernel.sum() * math.sqrt(self.nInputPlane))
            for i in range(self.nInputPlane):
                self.meanestimator.modules[1].weight[i].copy_(self.kernel)
                self.meanestimator.modules[2].weight[0][i].copy_(self.kernel)
                self.stdestimator.modules[2].weight[i].copy_(self.kernel)
                self.stdestimator.modules[3].weight[0][i].copy_(self.kernel)
            self.meanestimator.modules[1].bias.zero_()
            self.meanestimator.modules[2].bias.zero_()
            self.stdestimator.modules[2].bias.zero_()
            self.stdestimator.modules[3].bias.zero_()

        # other operation
        self.normalizer = CDivTable()
        self.divider = CDivTable()
        # floor the std-dev so we never divide by (near) zero
        self.thresholder = Threshold(self.threshold, self.thresval)

        # coefficient array, to adjust side effects
        self.coef = torch.Tensor(1, 1, 1)
        self.ones = None
        self._coef = None

    def updateOutput(self, input):
        """Forward: input / max(threshold, localstd / edge-coefficient)."""
        self.localstds = self.stdestimator.updateOutput(input)

        # compute side coefficients (recomputed only when the spatial
        # size of the input changes)
        dim = input.dim()
        if (self.localstds.dim() != self.coef.dim() or
                (input.size(dim - 1) != self.coef.size(dim - 1)) or
                (input.size(dim - 2) != self.coef.size(dim - 2))):
            if self.ones is None:
                self.ones = input.new()
            self.ones.resize_as_(input[0:1]).fill_(1)
            coef = self.meanestimator.updateOutput(self.ones).squeeze(0)
            if self._coef is None:
                self._coef = input.new()
            self._coef.resize_as_(coef).copy_(coef)  # make contiguous for view
            self.coef = self._coef.view(1, *self._coef.size()).expand_as(self.localstds)

        # normalize std dev
        self.adjustedstds = self.divider.updateOutput([self.localstds, self.coef.contiguous().view_as(self.localstds)])
        self.thresholdedstds = self.thresholder.updateOutput(self.adjustedstds)
        self.output = self.normalizer.updateOutput([input, self.thresholdedstds.contiguous().view_as(input)])

        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward: chain gradients through normalizer, thresholder,
        divider and the std estimator."""
        # resize grad
        self.gradInput.resize_as_(input).zero_()

        # backprop through all modules
        gradnorm = (self.normalizer.updateGradInput(
            [input, self.thresholdedstds.contiguous().view_as(input)], gradOutput))
        gradadj = self.thresholder.updateGradInput(self.adjustedstds, gradnorm[1])
        graddiv = (self.divider.updateGradInput(
            [self.localstds, self.coef.contiguous().view_as(self.localstds)], gradadj))
        self.gradInput.add_(self.stdestimator.updateGradInput(input, graddiv[0]))
        self.gradInput.add_(gradnorm[0])

        return self.gradInput

    def clearState(self):
        clear(self, 'ones', '_coef')
        self.meanestimator.clearState()
        self.stdestimator.clearState()
        return super(SpatialDivisiveNormalization, self).clearState()
import torch
from .Container import Container
class ConcatTable(Container):
    """Applies every child module to the same input and returns the list
    of their outputs.

    The backward pass sums the children's gradInputs; both tensor inputs
    and (nested) table inputs are supported, mirroring Lua torch.
    """

    def __init__(self, ):
        super(ConcatTable, self).__init__()
        self.modules = []
        self.output = []

    def updateOutput(self, input):
        self.output = [module.updateOutput(input) for module in self.modules]
        return self.output

    def _map_list(self, l1, l2, f):
        # Recursively walk the nested list l2, applying f(l1, i, v) to
        # each leaf and growing/trimming l1 so its shape matches l2.
        for i, v in enumerate(l2):
            if isinstance(v, list):
                res = self._map_list(l1[i] if i < len(l1) else [], v, f)
                if i >= len(l1):
                    assert i == len(l1)
                    l1.append(res)
                else:
                    l1[i] = res
            else:
                f(l1, i, v)
        # drop stale tail entries so len(l1) == len(l2)
        for i in range(len(l1) - 1, len(l2) - 1, -1):
            del l1[i]
        return l1

    def _backward(self, method, input, gradOutput, scale=1):
        """Shared implementation of updateGradInput/backward: sum each
        child's gradInput (element-wise for table inputs)."""
        isTable = isinstance(input, list)
        wasTable = isinstance(self.gradInput, list)
        if isTable:
            for i, module in enumerate(self.modules):
                if method == 'updateGradInput':
                    currentGradInput = module.updateGradInput(input, gradOutput[i])
                elif method == 'backward':
                    currentGradInput = module.backward(input, gradOutput[i], scale)
                if not isinstance(currentGradInput, list):
                    raise RuntimeError("currentGradInput is not a table!")
                if len(input) != len(currentGradInput):
                    raise RuntimeError("table size mismatch")

                if i == 0:
                    self.gradInput = self.gradInput if wasTable else []

                    # first child: copy its gradInput into ours
                    def fn(l, i, v):
                        if i >= len(l):
                            assert len(l) == i
                            l.append(v.clone())
                        else:
                            l[i].resize_as_(v)
                            l[i].copy_(v)
                    self._map_list(self.gradInput, currentGradInput, fn)
                else:
                    # remaining children: accumulate
                    def fn(l, i, v):
                        if i < len(l):
                            l[i].add_(v)
                        else:
                            assert len(l) == i
                            l.append(v.clone())
                    self._map_list(self.gradInput, currentGradInput, fn)
        else:
            self.gradInput = self.gradInput if not wasTable else input.clone()
            for i, module in enumerate(self.modules):
                if method == 'updateGradInput':
                    currentGradInput = module.updateGradInput(input, gradOutput[i])
                elif method == 'backward':
                    currentGradInput = module.backward(input, gradOutput[i], scale)
                if i == 0:
                    self.gradInput.resize_as_(currentGradInput).copy_(currentGradInput)
                else:
                    self.gradInput.add_(currentGradInput)

        return self.gradInput

    def updateGradInput(self, input, gradOutput):
        return self._backward('updateGradInput', input, gradOutput)

    def backward(self, input, gradOutput, scale=1):
        return self._backward('backward', input, gradOutput, scale)

    def accGradParameters(self, input, gradOutput, scale=1):
        # BUGFIX: this used Lua's `ipairs`, which does not exist in
        # Python and raised NameError; `enumerate` is the equivalent.
        for i, module in enumerate(self.modules):
            self.rethrowErrors(module, i, 'accGradParameters', input, gradOutput[i], scale)

    def accUpdateGradParameters(self, input, gradOutput, lr):
        # BUGFIX: `ipairs` -> `enumerate` (see accGradParameters).
        for i, module in enumerate(self.modules):
            self.rethrowErrors(module, i, 'accUpdateGradParameters', input, gradOutput[i], lr)

    def __repr__(self):
        tab = ' '
        line = '\n'
        next = ' |`-> '
        ext = ' | '
        extlast = ' '
        last = ' +. -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
import torch
from .Module import Module
from .utils import clear, addSingletondimension
class Min(Module):
    """Outputs the minimum of the input along ``dimension``; for inputs
    with more than one dimension the reduced axis is squeezed away."""

    def __init__(self, dimension=0):
        super(Min, self).__init__()
        self.dimension = dimension
        self._output = None
        self._indices = None

    def _getPositiveDimension(self, input):
        # normalize a negative dimension to its positive equivalent
        d = self.dimension
        return d + input.dim() if d < 0 else d

    def _lazyInit(self):
        if self._output is None:
            self._output = self.output.new()
        if self._indices is None:
            cuda = self.output.type() == 'torch.cuda.FloatTensor'
            self._indices = torch.cuda.LongTensor() if cuda else torch.LongTensor()

    def updateOutput(self, input):
        self._lazyInit()
        d = self._getPositiveDimension(input)
        torch.min(input, d, out=(self._output, self._indices), keepdim=True)
        # drop the kept singleton axis for multi-dim inputs
        if input.dim() > 1:
            self.output.set_(self._output.select(d, 0))
        else:
            self.output.set_(self._output)
        return self.output

    def updateGradInput(self, input, gradOutput):
        self._lazyInit()
        d = self._getPositiveDimension(input)
        if input.dim() > 1:
            grad_view = addSingletondimension(gradOutput, d)
        else:
            grad_view = gradOutput
        # route the gradient back to the argmin positions only
        self.gradInput.resize_as_(input).zero_().scatter_(d, self._indices, grad_view)
        return self.gradInput

    def type(self, type, tensorCache=None):
        # Detach _indices while the base class converts tensors: indices
        # must remain an integer LongTensor (CPU or CUDA) rather than be
        # cast to the floating-point `type`.
        saved, self._indices = self._indices, None
        super(Min, self).type(type, tensorCache)
        if saved is not None:
            if type == 'torch.cuda.FloatTensor':
                self._indices = saved.type('torch.cuda.LongTensor')
            else:
                self._indices = saved.long()
        return self

    def clearState(self):
        clear(self, '_indices', '_output')
        return super(Min, self).clearState()
import math
import torch
from .Module import Module
class WeightedEuclidean(Module):
def __init__(self, inputSize, outputSize):
    """Weighted Euclidean distance layer: holds ``outputSize`` templates
    of length ``inputSize``, each with its own diagonal covariance."""
    super(WeightedEuclidean, self).__init__()

    self.weight = torch.Tensor(inputSize, outputSize)
    self.gradWeight = torch.Tensor(inputSize, outputSize)

    # each template (output dim) has its own diagonal covariance matrix
    self.diagCov = torch.Tensor(inputSize, outputSize)
    self.gradDiagCov = torch.Tensor(inputSize, outputSize)

    self.reset()
    self._diagCov = self.output.new()

    # TODO: confirm
    self.fastBackward = False

    # lazily-created scratch buffers, all allocated on first use
    for buf in ('_input', '_weight', '_expand', '_expand2', '_expand3',
                '_repeat', '_repeat2', '_repeat3', '_div', '_output',
                '_expand4', '_gradOutput', '_sum'):
        setattr(self, buf, None)
def reset(self, stdv=None):
    """Re-initialize templates uniformly in [-stdv, stdv] (default scale
    1/sqrt(outputSize)); covariances are reset to 1."""
    if stdv is not None:
        scale = stdv * math.sqrt(3)
    else:
        scale = 1. / math.sqrt(self.weight.size(1))
    self.weight.uniform_(-scale, scale)
    self.diagCov.fill_(1)
def _view(self, res, src, *args):
    """Point ``res`` at a view of ``src`` with shape ``args``, copying
    only when ``src`` is non-contiguous."""
    base = src if src.is_contiguous() else src.contiguous()
    res.set_(base.view(*args))
def updateOutput(self, input):
    """Forward: output[j] = || diagCov[:, j] * (weight[:, j] - x) ||_2.

    Accepts a 1D input of size inputSize or a 2D batch of size
    batchSize x inputSize.
    """
    # lazy-initialize scratch buffers
    if self._diagCov is None:
        self._diagCov = self.output.new()

    if self._input is None:
        self._input = input.new()
    if self._weight is None:
        self._weight = self.weight.new()
    if self._expand is None:
        self._expand = self.output.new()
    if self._expand2 is None:
        self._expand2 = self.output.new()
    if self._expand3 is None:
        self._expand3 = self.output.new()
    if self._repeat is None:
        self._repeat = self.output.new()
    if self._repeat2 is None:
        self._repeat2 = self.output.new()
    if self._repeat3 is None:
        self._repeat3 = self.output.new()

    inputSize, outputSize = self.weight.size(0), self.weight.size(1)

    # y_j = || c_j * (w_j - x) ||
    if input.dim() == 1:
        self._view(self._input, input, inputSize, 1)
        # BUGFIX: was `self._expand.expand_as(self._input, self.weight)`,
        # which passes two arguments to a one-argument method and
        # discards the result; the expanded view must be assigned, as
        # in the 2D branch below.
        self._expand = self._input.expand_as(self.weight)
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._repeat.add_(-1, self.weight)
        self._repeat.mul_(self.diagCov)
        torch.norm(self._repeat, 2, 0, True, out=self.output)
        self.output.resize_(outputSize)
    elif input.dim() == 2:
        batchSize = input.size(0)

        self._view(self._input, input, batchSize, inputSize, 1)
        self._expand = self._input.expand(batchSize, inputSize, outputSize)
        # make the expanded tensor contiguous (requires lots of memory)
        self._repeat.resize_as_(self._expand).copy_(self._expand)

        self._weight = self.weight.view(1, inputSize, outputSize)
        self._expand2 = self._weight.expand_as(self._repeat)

        self._diagCov = self.diagCov.view(1, inputSize, outputSize)
        self._expand3 = self._diagCov.expand_as(self._repeat)
        if input.type() == 'torch.cuda.FloatTensor':
            # TODO: this can be fixed with a custom allocator
            # requires lots of memory, but minimizes cudaMallocs and loops
            self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
            self._repeat.add_(-1, self._repeat2)
            self._repeat3.resize_as_(self._expand3).copy_(self._expand3)
            self._repeat.mul_(self._repeat3)
        else:
            self._repeat.add_(-1, self._expand2)
            self._repeat.mul_(self._expand3)

        torch.norm(self._repeat, 2, 1, True, out=self.output)
        self.output.resize_(batchSize, outputSize)
    else:
        raise RuntimeError("1D or 2D input expected")

    return self.output
def updateGradInput(self, input, gradOutput):
    """Backward w.r.t. the input; assumes the buffers filled by a
    preceding updateOutput call (re-runs it unless fastBackward)."""
    if self.gradInput is None:
        return

    if self._div is None:
        self._div = input.new()
    if self._output is None:
        self._output = self.output.new()
    if self._expand4 is None:
        self._expand4 = input.new()
    if self._gradOutput is None:
        self._gradOutput = input.new()

    if not self.fastBackward:
        self.updateOutput(input)

    inputSize, outputSize = self.weight.size(0), self.weight.size(1)

    """
    dy_j   -2 * c_j * c_j * (w_j - x)    c_j * c_j * (x - w_j)
    ---- = -------------------------- = ---------------------
     dx     2 || c_j * (w_j - x) ||              y_j
    """

    # to prevent div by zero (NaN) bugs
    self._output.resize_as_(self.output).copy_(self.output).add_(1e-7)
    self._view(self._gradOutput, gradOutput, gradOutput.size())
    torch.div(gradOutput, self._output, out=self._div)
    if input.dim() == 1:
        self._div.resize_(1, outputSize)
        self._expand4 = self._div.expand_as(self.weight)

        # BUGFIX: was `torch.type(input)`, which does not exist; the
        # tensor's type string comes from input.type().
        if input.type() == 'torch.cuda.FloatTensor':
            self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
            self._repeat2.mul_(self._repeat)
        else:
            # BUGFIX: was `self._repeat2.mul_(self._repeat, self._expand4)`;
            # in-place mul_ takes a single operand -- use the out= form
            # as the 2D branch below does.
            torch.mul(self._repeat, self._expand4, out=self._repeat2)

        self._repeat2.mul_(self.diagCov)
        torch.sum(self._repeat2, 1, True, out=self.gradInput)
        self.gradInput.resize_as_(input)
    elif input.dim() == 2:
        batchSize = input.size(0)

        self._div.resize_(batchSize, 1, outputSize)
        self._expand4 = self._div.expand(batchSize, inputSize, outputSize)

        if input.type() == 'torch.cuda.FloatTensor':
            self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
            self._repeat2.mul_(self._repeat)
            self._repeat2.mul_(self._repeat3)
        else:
            torch.mul(self._repeat, self._expand4, out=self._repeat2)
            self._repeat2.mul_(self._expand3)

        torch.sum(self._repeat2, 2, True, out=self.gradInput)
        self.gradInput.resize_as_(input)
    else:
        raise RuntimeError("1D or 2D input expected")

    return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
inputSize, outputSize = self.weight.size(0), self.weight.size(1)
"""
dy_j 2 * c_j * c_j * (w_j - x) c_j * c_j * (w_j - x)
---- = -------------------------- = ---------------------
dw_j 2 || c_j * (w_j - x) || y_j
dy_j 2 * c_j * (w_j - x)^2 c_j * (w_j - x)^2
---- = ----------------------- = -----------------
dc_j 2 || c_j * (w_j - x) || y_j
#"""
# assumes a preceding call to updateGradInput
if input.dim() == 1:
self.gradWeight.add_(-scale, self._repeat2)
self._repeat.div_(self.diagCov)
self._repeat.mul_(self._repeat)
self._repeat.mul_(self.diagCov)
if torch.type(input) == 'torch.cuda.FloatTensor':
self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
self._repeat2.mul_(self._repeat)
else:
torch.mul(self._repeat, self._expand4, out=self._repeat2)
self.gradDiagCov.add_(self._repeat2)
elif input.dim() == 2:
if self._sum is None:
self._sum = input.new()
torch.sum(self._repeat2, 0, True, out=self._sum)
self._sum.resize_(inputSize, outputSize)
self.gradWeight.add_(-scale, self._sum)
if input.type() == 'torch.cuda.FloatTensor':
# requires lots of memory, but minimizes cudaMallocs and loops
self._repeat.div_(self._repeat3)
self._repeat.mul_(self._repeat)
self._repeat.mul_(self._repeat3)
self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
self._repeat.mul_(self._repeat2)
else:
self._repeat.div_(self._expand3)
self._repeat.mul_(self._repeat)
self._repeat.mul_(self._expand3)
self._repeat.mul_(self._expand4)
torch.sum(self._repeat, 0, True, out=self._sum)
self._sum.resize_(inputSize, outputSize)
self.gradDiagCov.add_(scale, self._sum)
else:
raise RuntimeError("1D or 2D input expected")
def type(self, type=None, tensorCache=None):
if type:
# prevent premature memory allocations
self._input = None
self._output = None
self._gradOutput = None
self._weight = None
self._div = None
self._sum = None
self._expand = None
self._expand2 = None
self._expand3 = None
self._expand4 = None
self._repeat = None
self._repeat2 = None
self._repeat3 = None
return super(WeightedEuclidean, self).type(type, tensorCache)
def parameters(self):
return [self.weight, self.diagCov], [self.gradWeight, self.gradDiagCov]
def accUpdateGradParameters(self, input, gradOutput, lr):
gradWeight = self.gradWeight
gradDiagCov = self.gradDiagCov
self.gradWeight = self.weight
self.gradDiagCov = self.diagCov
self.accGradParameters(input, gradOutput, -lr)
self.gradWeight = gradWeight
self.gradDiagCov = gradDiagCov | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/legacy/nn/WeightedEuclidean.py | 0.543833 | 0.262121 | WeightedEuclidean.py | pypi |
import torch
from .Criterion import Criterion
class MarginRankingCriterion(Criterion):
    """Ranking hinge loss between two 1D score tensors.

    Given scores x1, x2 and a label y in {1, -1}:

        loss(x1, x2, y) = max(0, -y * (x1 - x2) + margin)

    y = 1 means x1 should be ranked higher than x2; y = -1 the opposite.
    If ``sizeAverage`` is True, loss and gradients are averaged over the
    batch dimension.
    """

    def __init__(self, margin=0, sizeAverage=True):
        super(MarginRankingCriterion, self).__init__()
        self.margin = margin
        self.sizeAverage = sizeAverage
        self.gradInput = [torch.Tensor(), torch.Tensor()]
        # lazily-allocated buffers, created on the first batched call
        self._output = None
        self.dist = None
        self.mask = None

    def updateOutput(self, input, y):
        """Compute the scalar loss; ``input`` is the pair [x1, x2]."""
        if input[0].size(0) == 1:
            # single-element fast path: work with plain numbers
            self.output = max(0, -y * (input[0][0] - input[1][0]) + self.margin)
        else:
            if self._output is None:
                self._output = input[0].clone()
            self._output.resize_as_(input[0])
            self._output.copy_(input[0])
            self._output.add_(-1, input[1])   # x1 - x2
            self._output.mul_(-1).mul_(y)     # -y * (x1 - x2)
            self._output.add_(self.margin)
            self._output.clamp_(min=0)        # hinge
            self.output = self._output.sum().item()
            if self.sizeAverage:
                self.output = self.output / y.size(0)
        return self.output

    def updateGradInput(self, input, y):
        """Compute [dloss/dx1, dloss/dx2] into ``self.gradInput``."""
        if input[0].size(0) == 1:
            dist = -y * (input[0][0] - input[1][0]) + self.margin
            if dist < 0:
                # margin satisfied: no gradient
                self.gradInput[0][0] = 0
                self.gradInput[1][0] = 0
            else:
                self.gradInput[0][0] = -y
                self.gradInput[1][0] = y
        else:
            if self.dist is None:
                self.dist = input[0].new()
            self.dist = self.dist.resize_as_(input[0]).copy_(input[0])
            dist = self.dist
            dist.add_(-1, input[1])   # x1 - x2
            dist.mul_(-1).mul_(y)     # -y * (x1 - x2)
            dist.add_(self.margin)
            # Bug fix: `self.mask = dist > 0` used to be computed here and
            # was immediately overwritten by torch.ge(dist, 0, out=mask);
            # keep only the effective (>= 0) comparison and drop the dead
            # (> 0) computation.
            self.mask = torch.ge(dist, 0)
            mask = self.mask
            self.gradInput[0].resize_(dist.size())
            self.gradInput[1].resize_(dist.size())
            self.gradInput[0].copy_(mask)
            self.gradInput[0].mul_(-1).mul_(y)
            self.gradInput[1].copy_(mask)
            self.gradInput[1].mul_(y)
            if self.sizeAverage:
                self.gradInput[0].div_(y.size(0))
                self.gradInput[1].div_(y.size(0))
        return self.gradInput
import math
INFINITY = float('inf')


def sqrt_nothrow(x):
    """Square root that yields NaN for negative input instead of raising."""
    if x >= 0:
        return math.sqrt(x)
    return float('nan')
def cg(opfunc, x, config, state=None):
    """
    This cg implementation is a rewrite of minimize.m written by Carl
    E. Rasmussen. It is supposed to produce exactly same results (give
    or take numerical accuracy due to some changed order of
    operations). You can compare the result on rosenbrock with minimize.m.
    http://www.gatsby.ucl.ac.uk/~edward/code/minimize/example.html
    [x fx c] = minimize([0 0]', 'rosenbrock', -25)
    Note that we limit the number of function evaluations only, it seems much
    more important in practical use.
    ARGS:
    - `opfunc` : a function that takes a single input, the point of evaluation.
    - `x` : the initial point
    - `state` : a table of parameters and temporary allocations.
    - `state['maxEval']` : max number of function evaluations
    - `state['maxIter']` : max number of iterations
    - `state['df0']` : if you pass torch.Tensor they will be used for temp storage
    - `state['df1']` : if you pass torch.Tensor they will be used for temp storage
    - `state['df2']` : if you pass torch.Tensor they will be used for temp storage
    - `state['df3']` : if you pass torch.Tensor they will be used for temp storage
    - `state['s']` : if you pass torch.Tensor they will be used for temp storage
    - `state['x0']` : if you pass torch.Tensor they will be used for temp storage
    RETURN:
    - `x*` : the new x vector, at the optimal point
    - `f` : a table of all function values where
    `f[1]` is the value of the function before any optimization and
    `f[#f]` is the final fully optimized value, at x*
    (Koray Kavukcuoglu, 2012)
    """
    # parameters
    if config is None and state is None:
        raise ValueError("cg requires a dictionary to retain state between iterations")
    state = state if state is not None else config
    # Hyperparameters are read from `config`, scratch tensors from `state`;
    # callers typically pass the same dict for both.
    # rho/sig: constants of the Wolfe-Powell line-search conditions
    # (rho: sufficient-decrease tolerance, sig: curvature tolerance).
    rho = config.get('rho', 0.01)
    sig = config.get('sig', 0.5)
    # _int: keeps interpolated points away from the bracket edges;
    # ext: maximum extrapolation factor beyond the current bracket.
    _int = config.get('int', 0.1)
    ext = config.get('ext', 3.0)
    maxIter = config.get('maxIter', 20)
    # ratio: cap on the slope ratio used when rescaling the step size.
    ratio = config.get('ratio', 100)
    maxEval = config.get('maxEval', maxIter * 1.25)
    red = 1
    i = 0           # function-evaluation counter
    ls_failed = 0   # whether the previous line search failed
    fx = []         # history of function values
    # we need three points for the interpolation/extrapolation stuff
    z1, z2, z3 = 0, 0, 0   # step lengths
    d1, d2, d3 = 0, 0, 0   # directional derivatives (slopes along s)
    f1, f2, f3 = 0, 0, 0   # function values
    # NOTE: dict.get evaluates the default eagerly, so x.new() is
    # constructed even when the key is already present in `state`.
    df1 = state.get('df1', x.new())
    df2 = state.get('df2', x.new())
    df3 = state.get('df3', x.new())
    df1.resize_as_(x)
    df2.resize_as_(x)
    df3.resize_as_(x)
    # search direction
    s = state.get('s', x.new())
    s.resize_as_(x)
    # we need a temp storage for X
    x0 = state.get('x0', x.new())
    f0 = 0
    df0 = state.get('df0', x.new())
    x0.resize_as_(x)
    df0.resize_as_(x)
    # evaluate at initial point
    f1, tdf = opfunc(x)
    fx.append(f1)
    df1.copy_(tdf)
    i = i + 1
    # initial search direction
    s.copy_(df1).mul_(-1)
    d1 = -s.dot(s)  # slope
    z1 = red / (1 - d1)  # initial step
    while i < abs(maxEval):
        # save the current (best known) point before trying a step
        x0.copy_(x)
        f0 = f1
        df0.copy_(df1)
        x.add_(z1, s)
        f2, tdf = opfunc(x)
        df2.copy_(tdf)
        i = i + 1
        d2 = df2.dot(s)
        f3, d3, z3 = f1, d1, -z1  # init point 3 equal to point 1
        m = min(maxIter, maxEval - i)  # evaluations left for this line search
        success = 0
        limit = -1
        while True:
            # interpolation phase: shrink the step while the Wolfe-Powell
            # conditions (sufficient decrease / curvature) are violated
            while (f2 > f1 + z1 * rho * d1 or d2 > -sig * d1) and m > 0:
                limit = z1
                if f2 > f1:
                    # quadratic fit when the new value is worse
                    z2 = z3 - (0.5 * d3 * z3 * z3) / (d3 * z3 + f2 - f3)
                else:
                    # cubic fit otherwise
                    A = 6 * (f2 - f3) / z3 + 3 * (d2 + d3)
                    B = 3 * (f3 - f2) - z3 * (d3 + 2 * d2)
                    z2 = (sqrt_nothrow(B * B - A * d2 * z3 * z3) - B) / A
                # numerical failure (NaN/Inf) -> bisect instead
                if z2 != z2 or z2 == INFINITY or z2 == -INFINITY:
                    z2 = z3 / 2
                # keep the new point away from the bracket edges
                z2 = max(min(z2, _int * z3), (1 - _int) * z3)
                z1 = z1 + z2
                x.add_(z2, s)
                f2, tdf = opfunc(x)
                df2.copy_(tdf)
                i = i + 1
                m = m - 1
                d2 = df2.dot(s)
                z3 = z3 - z2
            if f2 > f1 + z1 * rho * d1 or d2 > -sig * d1:
                break  # line search failed
            elif d2 > sig * d1:
                success = 1  # Wolfe-Powell conditions satisfied
                break
            elif m == 0:
                break  # ran out of evaluations
            # extrapolation phase: cubic guess for the next step length
            A = 6 * (f2 - f3) / z3 + 3 * (d2 + d3)
            B = 3 * (f3 - f2) - z3 * (d3 + 2 * d2)
            _denom = (B + sqrt_nothrow(B * B - A * d2 * z3 * z3))
            z2 = -d2 * z3 * z3 / _denom if _denom != 0 else float('nan')
            # guard the extrapolation against NaNs, wrong sign and the
            # bracket/extrapolation limits
            if z2 != z2 or z2 == INFINITY or z2 == -INFINITY or z2 < 0:
                if limit < -0.5:
                    z2 = z1 * (ext - 1)
                else:
                    z2 = (limit - z1) / 2
            elif (limit > -0.5) and (z2 + z1) > limit:
                z2 = (limit - z1) / 2
            elif limit < -0.5 and (z2 + z1) > z1 * ext:
                z2 = z1 * (ext - 1)
            elif z2 < -z3 * _int:
                z2 = -z3 * _int
            elif limit > -0.5 and z2 < (limit - z1) * (1 - _int):
                z2 = (limit - z1) * (1 - _int)
            # shift point 2 into point 3 and take the extrapolated step
            f3 = f2
            d3 = d2
            z3 = -z2
            z1 = z1 + z2
            x.add_(z2, s)
            f2, tdf = opfunc(x)
            df2.copy_(tdf)
            i = i + 1
            m = m - 1
            d2 = df2.dot(s)
        if success == 1:
            # line search succeeded: Polack-Ribiere update of the direction
            f1 = f2
            fx.append(f1)
            ss = (df2.dot(df2) - df2.dot(df1)) / df1.dot(df1)
            s.mul_(ss)
            s.add_(-1, df2)
            # swap df1 <-> df2
            tmp = df1.clone()
            df1.copy_(df2)
            df2.copy_(tmp)
            d2 = df1.dot(s)
            if d2 > 0:
                # new direction is not a descent direction -> steepest descent
                s.copy_(df1)
                s.mul_(-1)
                d2 = -s.dot(s)
            # rescale the step, capped by `ratio` (tiny constant avoids /0)
            z1 = z1 * min(ratio, d1 / (d2 - 1e-320))
            d1 = d2
            ls_failed = 0
        else:
            # line search failed: restore the pre-step point
            x.copy_(x0)
            f1 = f0
            df1.copy_(df0)
            # give up after two consecutive failures or when out of budget
            if ls_failed or i > maxEval:
                break
            tmp = df1.clone()
            df1.copy_(df2)
            df2.copy_(tmp)
            # retry along steepest descent
            s.copy_(df1)
            s.mul_(-1)
            d1 = -s.dot(s)
            z1 = 1 / (1 - d1)
            ls_failed = 1
    # persist scratch tensors for the next invocation
    state['df0'] = df0
    state['df1'] = df1
    state['df2'] = df2
    state['df3'] = df3
    state['x0'] = x0
    state['s'] = s
    return x, fx, i
import torch
from .optimizer import Optimizer
class RMSprop(Optimizer):
    """Implements RMSprop algorithm.
    Proposed by G. Hinton in his
    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
    The centered version first appears in `Generating Sequences
    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        momentum (float, optional): momentum factor (default: 0)
        alpha (float, optional): smoothing constant (default: 0.99)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        centered (bool, optional) : if ``True``, compute the centered RMSProp,
            the gradient is normalized by an estimation of its variance
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    """
    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
        # validate every hyperparameter up front, with a specific message each
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))
        defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay)
        super(RMSprop, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Checkpoints created before `momentum`/`centered` existed lack those
        # keys; backfill them so `step` can read them unconditionally.
        super(RMSprop, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('momentum', 0)
            group.setdefault('centered', False)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]
                # State initialization (first time this parameter is seen)
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p.data)
                    if group['momentum'] > 0:
                        state['momentum_buffer'] = torch.zeros_like(p.data)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p.data)
                square_avg = state['square_avg']
                alpha = group['alpha']
                state['step'] += 1
                if group['weight_decay'] != 0:
                    # L2 penalty folded into the gradient: grad += wd * p
                    # (old alpha-first overload of Tensor.add)
                    grad = grad.add(group['weight_decay'], p.data)
                # square_avg = alpha * square_avg + (1 - alpha) * grad^2
                square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
                if group['centered']:
                    # centered variant: subtract the squared mean gradient,
                    # i.e. normalize by an estimate of the gradient variance
                    grad_avg = state['grad_avg']
                    grad_avg.mul_(alpha).add_(1 - alpha, grad)
                    avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps'])
                else:
                    avg = square_avg.sqrt().add_(group['eps'])
                if group['momentum'] > 0:
                    # buf = momentum * buf + grad / avg;  p -= lr * buf
                    buf = state['momentum_buffer']
                    buf.mul_(group['momentum']).addcdiv_(grad, avg)
                    p.data.add_(-group['lr'], buf)
                else:
                    # p -= lr * grad / avg
                    p.data.addcdiv_(-group['lr'], grad, avg)
        return loss
import torch
from .optimizer import Optimizer
class Adagrad(Optimizer):
    """Implements Adagrad algorithm.
    It has been proposed in `Adaptive Subgradient Methods for Online Learning
    and Stochastic Optimization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization: http://jmlr.org/papers/v12/duchi11a.html
    """
    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0):
        # validate every hyperparameter up front, with a specific message each
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= lr_decay:
            raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= initial_accumulator_value:
            raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
        defaults = dict(lr=lr, lr_decay=lr_decay, weight_decay=weight_decay,
                        initial_accumulator_value=initial_accumulator_value)
        super(Adagrad, self).__init__(params, defaults)
        # eagerly initialize per-parameter state: step counter and the
        # running sum of squared gradients
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['sum'] = torch.full_like(p.data, initial_accumulator_value)
    def share_memory(self):
        # move the accumulators to shared memory (for hogwild-style training)
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['sum'].share_memory_()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                state['step'] += 1
                if group['weight_decay'] != 0:
                    if p.grad.data.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients")
                    # L2 penalty folded into the gradient: grad += wd * p
                    grad = grad.add(group['weight_decay'], p.data)
                # decayed learning rate: lr / (1 + (t - 1) * lr_decay)
                clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
                if grad.is_sparse:
                    grad = grad.coalesce()  # the update is non-linear so indices must be unique
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    size = grad.size()
                    def make_sparse(values):
                        # rebuild a sparse tensor with grad's indices but new values
                        constructor = grad.new
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor().resize_as_(grad)
                        return constructor(grad_indices, values, size)
                    # accumulate squared gradients only at the observed indices
                    state['sum'].add_(make_sparse(grad_values.pow(2)))
                    # take the accumulator entries at grad's indices
                    # (old private sparse-mask API)
                    std = state['sum']._sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(1e-10)
                    p.data.add_(-clr, make_sparse(grad_values / std_values))
                else:
                    # sum += grad^2;  p -= clr * grad / (sqrt(sum) + 1e-10)
                    state['sum'].addcmul_(1, grad, grad)
                    std = state['sum'].sqrt().add_(1e-10)
                    p.data.addcdiv_(-clr, grad, std)
        return loss
import math
from bisect import bisect_right
from functools import partial
from .optimizer import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer, last_epoch=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def __getstate__(self):
return self.state_dict()
def __setstate__(self, state):
self.load_state_dict(state)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
class LambdaLR(_LRScheduler):
    """Scale each group's initial lr by a user-supplied function of the epoch.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        lr_lambda (function or list): callable mapping an integer epoch to a
            multiplicative factor, or one such callable per param group.
        last_epoch (int): The index of last epoch. Default: -1.

    Example:
        >>> # Assuming optimizer has two groups.
        >>> lambda1 = lambda epoch: epoch // 30
        >>> lambda2 = lambda epoch: 0.95 ** epoch
        >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])
        >>> for epoch in range(100):
        >>>     scheduler.step()
        >>>     train(...)
        >>>     validate(...)
    """

    def __init__(self, optimizer, lr_lambda, last_epoch=-1):
        self.optimizer = optimizer
        if isinstance(lr_lambda, (list, tuple)):
            if len(lr_lambda) != len(optimizer.param_groups):
                raise ValueError("Expected {} lr_lambdas, but got {}".format(
                    len(optimizer.param_groups), len(lr_lambda)))
            self.lr_lambdas = list(lr_lambda)
        else:
            # one shared lambda, replicated across every param group
            self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
        self.last_epoch = last_epoch
        super(LambdaLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        return [base_lr * fn(self.last_epoch)
                for base_lr, fn in zip(self.base_lrs, self.lr_lambdas)]
class StepLR(_LRScheduler):
    """Decay every group's lr by `gamma` once per `step_size` epochs.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        step_size (int): Period of learning rate decay.
        gamma (float): Multiplicative factor of learning rate decay.
            Default: 0.1.
        last_epoch (int): The index of last epoch. Default: -1.

    Example:
        >>> # Assuming optimizer uses lr = 0.05 for all groups
        >>> # lr = 0.05   if epoch < 30
        >>> # lr = 0.005  if 30 <= epoch < 60
        >>> # lr = 0.0005 if 60 <= epoch < 90
        >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
        >>> for epoch in range(100):
        >>>     scheduler.step()
        >>>     train(...)
        >>>     validate(...)
    """

    def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1):
        self.step_size = step_size
        self.gamma = gamma
        super(StepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # number of completed decay periods determines the exponent
        decay = self.gamma ** (self.last_epoch // self.step_size)
        return [base_lr * decay for base_lr in self.base_lrs]
class MultiStepLR(_LRScheduler):
    """Decay every group's lr by `gamma` at each epoch listed in `milestones`.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        milestones (list): List of epoch indices. Must be increasing.
        gamma (float): Multiplicative factor of learning rate decay.
            Default: 0.1.
        last_epoch (int): The index of last epoch. Default: -1.

    Example:
        >>> # Assuming optimizer uses lr = 0.05 for all groups
        >>> # lr = 0.05   if epoch < 30
        >>> # lr = 0.005  if 30 <= epoch < 80
        >>> # lr = 0.0005 if epoch >= 80
        >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)
        >>> for epoch in range(100):
        >>>     scheduler.step()
        >>>     train(...)
        >>>     validate(...)
    """

    def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
        if not list(milestones) == sorted(milestones):
            # Bug fix: the message was previously passed as two ValueError
            # args ('... Got {}', milestones), so '{}' was never substituted;
            # format it properly instead.
            raise ValueError('Milestones should be a list of'
                             ' increasing integers. Got {}'.format(milestones))
        self.milestones = milestones
        self.gamma = gamma
        super(MultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # bisect_right counts how many milestones are <= last_epoch,
        # i.e. how many decays have been applied so far
        return [base_lr * self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs]
class ExponentialLR(_LRScheduler):
    """Decay every group's lr by `gamma` each epoch.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        gamma (float): Multiplicative factor of learning rate decay.
        last_epoch (int): The index of last epoch. Default: -1.
    """

    def __init__(self, optimizer, gamma, last_epoch=-1):
        self.gamma = gamma
        super(ExponentialLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        decay = self.gamma ** self.last_epoch
        return [base_lr * decay for base_lr in self.base_lrs]
class CosineAnnealingLR(_LRScheduler):
    r"""Anneal every group's lr along a half cosine curve.

    With :math:`\eta_{max}` the initial lr and :math:`T_{cur}` the number of
    epochs since the last restart:

    .. math::
        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
        \cos(\frac{T_{cur}}{T_{max}}\pi))

    When last_epoch=-1, sets initial lr as lr. Proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_; only the cosine
    annealing part of SGDR is implemented here, not the restarts.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        T_max (int): Maximum number of iterations.
        eta_min (float): Minimum learning rate. Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.

    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """

    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # cosine factor goes 1 -> 0 as last_epoch goes 0 -> T_max
        cos_factor = (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
        return [self.eta_min + (base_lr - self.eta_min) * cos_factor
                for base_lr in self.base_lrs]
class ReduceLROnPlateau(object):
    """Reduce the learning rate when a monitored metric stops improving.

    A metric is fed to :meth:`step` once per epoch. When it has failed to
    improve (by more than `threshold`, interpreted per `threshold_mode`) for
    more than `patience` epochs, every param group's lr is multiplied by
    `factor`, floored at its entry in `min_lrs`. After a reduction,
    `cooldown` epochs pass before bad epochs are counted again.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        mode (str): 'min' (metric should decrease) or 'max'. Default: 'min'.
        factor (float): new_lr = lr * factor; must be < 1.0. Default: 0.1.
        patience (int): non-improving epochs tolerated before reducing.
            Default: 10.
        verbose (bool): print a message on each reduction. Default: ``False``.
        threshold (float): minimum change that counts as an improvement.
            Default: 1e-4.
        threshold_mode (str): 'rel' compares against best * (1 +/- threshold),
            'abs' against best +/- threshold. Default: 'rel'.
        cooldown (int): epochs to wait after a reduction before counting bad
            epochs again. Default: 0.
        min_lr (float or list): scalar or per-group lower bound on lr.
            Default: 0.
        eps (float): reductions smaller than this are skipped. Default: 1e-8.

    Example:
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> scheduler = ReduceLROnPlateau(optimizer, 'min')
        >>> for epoch in range(10):
        >>>     train(...)
        >>>     val_loss = validate(...)
        >>>     # Note that step should be called after validate()
        >>>     scheduler.step(val_loss)
    """

    def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
                 verbose=False, threshold=1e-4, threshold_mode='rel',
                 cooldown=0, min_lr=0, eps=1e-8):
        if factor >= 1.0:
            raise ValueError('Factor should be < 1.0.')
        self.factor = factor
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if isinstance(min_lr, (list, tuple)):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} min_lrs, got {}".format(
                    len(optimizer.param_groups), len(min_lr)))
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode
        self.best = None
        self.num_bad_epochs = None
        self.mode_worse = None  # worst representable value for `mode`
        self.is_better = None
        self.eps = eps
        self.last_epoch = -1
        self._init_is_better(mode=mode, threshold=threshold,
                             threshold_mode=threshold_mode)
        self._reset()

    def _reset(self):
        """Forget the running best and clear both counters."""
        self.best = self.mode_worse
        self.cooldown_counter = 0
        self.num_bad_epochs = 0

    def step(self, metrics, epoch=None):
        """Record `metrics` for this epoch and reduce the lr if needed."""
        current = metrics
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        if self.is_better(current, self.best):
            self.best = current
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # bad epochs during cooldown don't count
        if self.num_bad_epochs > self.patience:
            self._reduce_lr(epoch)
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0

    def _reduce_lr(self, epoch):
        # shrink each group's lr, honouring the per-group floor and skipping
        # updates smaller than eps
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group['lr'])
            new_lr = max(old_lr * self.factor, self.min_lrs[i])
            if old_lr - new_lr > self.eps:
                param_group['lr'] = new_lr
                if self.verbose:
                    print('Epoch {:5d}: reducing learning rate'
                          ' of group {} to {:.4e}.'.format(epoch, i, new_lr))

    @property
    def in_cooldown(self):
        return self.cooldown_counter > 0

    def _cmp(self, mode, threshold_mode, threshold, a, best):
        # one comparison per (mode, threshold_mode) combination
        if threshold_mode == 'rel':
            if mode == 'min':
                return a < best * (1. - threshold)
            return a > best * (threshold + 1.)
        if mode == 'min':
            return a < best - threshold
        return a > best + threshold

    def _init_is_better(self, mode, threshold, threshold_mode):
        if mode not in {'min', 'max'}:
            raise ValueError('mode ' + mode + ' is unknown!')
        if threshold_mode not in {'rel', 'abs'}:
            raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
        self.mode_worse = float('inf') if mode == 'min' else -float('inf')
        # bind the comparison once; rebuilt on load_state_dict since
        # partials are excluded from state_dict
        self.is_better = partial(self._cmp, mode, threshold_mode, threshold)

    def state_dict(self):
        excluded = {'optimizer', 'is_better'}
        return {key: value for key, value in self.__dict__.items()
                if key not in excluded}

    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)
        self._init_is_better(mode=self.mode, threshold=self.threshold,
                             threshold_mode=self.threshold_mode)
import math
import torch
from .optimizer import Optimizer
class Adam(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/optim/adam.py | 0.909365 | 0.591458 | adam.py | pypi |
from collections import defaultdict, Iterable
import torch
from copy import deepcopy
from itertools import chain
# Sentinel used by optimizer subclasses to mark hyperparameters that the
# caller must supply explicitly (no usable default exists).
required = object()
class Optimizer(object):
    r"""Base class for all optimizers.

    .. warning::
        Parameters need to be specified as collections that have a deterministic
        ordering that is consistent between runs. Examples of objects that don't
        satisfy those properties are sets and iterators over values of dictionaries.

    Arguments:
        params (iterable): an iterable of :class:`torch.Tensor` s or
            :class:`dict` s. Specifies what Tensors should be optimized.
        defaults: (dict): a dict containing default values of optimization
            options (used when a parameter group doesn't specify them).
    """
    def __init__(self, params, defaults):
        self.defaults = defaults
        # A bare Tensor is rejected early: it is almost always a mistake for
        # an intended one-element list of parameters.
        if isinstance(params, torch.Tensor):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Tensors or dicts, but got " +
                            torch.typename(params))
        # Per-parameter optimizer state (momentum buffers, step counters, ...),
        # keyed by the parameter Tensor itself and filled in lazily by step().
        self.state = defaultdict(dict)
        self.param_groups = []
        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        # A plain sequence of Tensors becomes a single parameter group.
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]
        for param_group in param_groups:
            self.add_param_group(param_group)
    def __getstate__(self):
        # NOTE(review): ``defaults`` is not included here, so it is not
        # restored by a bare pickle round-trip -- confirm callers rely only
        # on state/param_groups.
        return {
            'state': self.state,
            'param_groups': self.param_groups,
        }
    def __setstate__(self, state):
        self.__dict__.update(state)
    def __repr__(self):
        # Human-readable summary of every group's options ('params' omitted).
        format_string = self.__class__.__name__ + ' ('
        for i, group in enumerate(self.param_groups):
            format_string += '\n'
            format_string += 'Parameter Group {0}\n'.format(i)
            for key in sorted(group.keys()):
                if key != 'params':
                    format_string += ' {0}: {1}\n'.format(key, group[key])
        format_string += ')'
        return format_string
    def state_dict(self):
        r"""Returns the state of the optimizer as a :class:`dict`.

        It contains two entries:

        * state - a dict holding current optimization state. Its content
            differs between optimizer classes.
        * param_groups - a dict containing all parameter groups
        """
        # Save ids instead of Tensors.  The ids are only meaningful within
        # this process; load_state_dict matches them back by position.
        def pack_group(group):
            packed = {k: v for k, v in group.items() if k != 'params'}
            packed['params'] = [id(p) for p in group['params']]
            return packed
        param_groups = [pack_group(g) for g in self.param_groups]
        # Remap state to use ids as keys
        packed_state = {(id(k) if isinstance(k, torch.Tensor) else k): v
                        for k, v in self.state.items()}
        return {
            'state': packed_state,
            'param_groups': param_groups,
        }
    def load_state_dict(self, state_dict):
        r"""Loads the optimizer state.

        Arguments:
            state_dict (dict): optimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        # deepcopy, to be consistent with module API
        state_dict = deepcopy(state_dict)
        # Validate the state_dict: same number of groups, same number of
        # params per group.
        groups = self.param_groups
        saved_groups = state_dict['param_groups']
        if len(groups) != len(saved_groups):
            raise ValueError("loaded state dict has a different number of "
                             "parameter groups")
        param_lens = (len(g['params']) for g in groups)
        saved_lens = (len(g['params']) for g in saved_groups)
        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
            raise ValueError("loaded state dict contains a parameter group "
                             "that doesn't match the size of optimizer's group")
        # Update the state.  Saved ids are matched to the current parameters
        # purely by position (group order, then order within the group).
        id_map = {old_id: p for old_id, p in
                  zip(chain(*(g['params'] for g in saved_groups)),
                      chain(*(g['params'] for g in groups)))}
        def cast(param, value):
            r"""Make a deep copy of value, casting all tensors to device of param."""
            if isinstance(value, torch.Tensor):
                # Floating-point types are a bit special here. They are the only ones
                # that are assumed to always match the type of params.
                if param.is_floating_point():
                    value = value.to(param.dtype)
                value = value.to(param.device)
                return value
            elif isinstance(value, dict):
                return {k: cast(param, v) for k, v in value.items()}
            elif isinstance(value, Iterable):
                # NOTE(review): assumes the container type is constructible
                # from an iterable of its elements (true for list/tuple).
                return type(value)(cast(param, v) for v in value)
            else:
                return value
        # Copy state assigned to params (and cast tensors to appropriate types).
        # State that is not assigned to params is copied as is (needed for
        # backward compatibility).
        state = defaultdict(dict)
        for k, v in state_dict['state'].items():
            if k in id_map:
                param = id_map[k]
                state[param] = cast(param, v)
            else:
                state[k] = v
        # Update parameter groups, setting their 'params' value
        def update_group(group, new_group):
            new_group['params'] = group['params']
            return new_group
        param_groups = [
            update_group(g, ng) for g, ng in zip(groups, saved_groups)]
        self.__setstate__({'state': state, 'param_groups': param_groups})
    def zero_grad(self):
        r"""Clears the gradients of all optimized :class:`torch.Tensor` s."""
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:
                    # Detach first so zeroing does not record or retain any
                    # autograd history.
                    p.grad.detach_()
                    p.grad.zero_()
    def step(self, closure):
        r"""Performs a single optimization step (parameter update).

        Arguments:
            closure (callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.
        """
        raise NotImplementedError
    def add_param_group(self, param_group):
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Arguments:
            param_group (dict): Specifies what Tensors should be optimized along with group
                specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"
        params = param_group['params']
        if isinstance(params, torch.Tensor):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError('optimizer parameters need to be organized in ordered collections, but '
                            'the ordering of tensors in sets will change between runs. Please use a list instead.')
        else:
            param_group['params'] = list(params)
        for param in param_group['params']:
            if not isinstance(param, torch.Tensor):
                raise TypeError("optimizer can only optimize Tensors, "
                                "but one of the params is " + torch.typename(param))
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Tensor")
        # Fill in group options from the defaults; options whose default is
        # the `required` sentinel must be supplied explicitly by the caller.
        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError("parameter group didn't specify a value of required optimization parameter " +
                                 name)
            else:
                param_group.setdefault(name, default)
        # A parameter may belong to at most one group across the optimizer.
        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))
        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError("some parameters appear in more than one parameter group")
        self.param_groups.append(param_group)
import torch
from .optimizer import Optimizer
class Adadelta(Optimizer):
    """Implements the Adadelta optimization algorithm.

    Adadelta keeps two exponential moving averages per parameter -- one of
    squared gradients and one of squared parameter updates -- and scales each
    step by the ratio of their root-mean-square values.

    It has been proposed in `ADADELTA: An Adaptive Learning Rate Method`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): coefficient that scales delta before it is
            applied to the parameters (default: 1.0)
        rho (float, optional): coefficient used for computing a running average
            of squared gradients (default: 0.9)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    __ https://arxiv.org/abs/1212.5701
    """
    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0):
        # Validation is written so that NaN hyper-parameters also fail.
        if not lr >= 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not (0.0 <= rho and rho <= 1.0):
            raise ValueError("Invalid rho value: {}".format(rho))
        if not eps >= 0.0:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not weight_decay >= 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        super(Adadelta, self).__init__(
            params, dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay))
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            rho, eps = group['rho'], group['eps']
            lr, weight_decay = group['lr'], group['weight_decay']
            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adadelta does not support sparse gradients')
                state = self.state[param]
                if not state:
                    # Lazily create the running averages on first use.
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(param.data)
                    state['acc_delta'] = torch.zeros_like(param.data)
                state['step'] += 1
                if weight_decay != 0:
                    grad = grad.add(weight_decay, param.data)
                square_avg = state['square_avg']
                acc_delta = state['acc_delta']
                # E[g^2] <- rho * E[g^2] + (1 - rho) * g^2
                square_avg.mul_(rho).addcmul_(1 - rho, grad, grad)
                # delta = RMS(acc_delta) / RMS(g) * g
                std = square_avg.add(eps).sqrt_()
                delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad)
                param.data.add_(-lr, delta)
                # E[dx^2] <- rho * E[dx^2] + (1 - rho) * delta^2
                acc_delta.mul_(rho).addcmul_(1 - rho, delta, delta)
        return loss
import torch
from .optimizer import Optimizer, required
class SGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)

    Example:
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et. al. and implementations in some other frameworks.
        Considering the specific case of Momentum, the update can be written as

        .. math::
            v = \rho * v + g \\
            p = p - lr * v

        where p, g, v and :math:`\rho` denote the parameters, gradient,
        velocity, and momentum respectively.
        This is in contrast to Sutskever et. al. and
        other frameworks which employ an update of the form

        .. math::
            v = \rho * v + lr * g \\
            p = p - v

        The Nesterov version is analogously modified.
    """
    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        # Checkpoints saved before the `nesterov` option existed lack the
        # key; default it to False for backward compatibility.
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # NOTE: mutates the gradient in place (adds the L2 term).
                    d_p.add_(weight_decay, p.data)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # First step: the buffer is seeded with the raw
                        # gradient -- dampening is deliberately not applied.
                        buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        # buf <- momentum * buf + (1 - dampening) * grad
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        # Look-ahead direction: grad + momentum * buf.
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                p.data.add_(-group['lr'], d_p)
        return loss
import math
import torch
from .optimizer import Optimizer
class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.

    In addition to the plain SGD iterate, ASGD maintains a running average
    ``ax`` of the parameter values; averaging starts after ``t0`` steps.

    It has been proposed in `Acceleration of stochastic approximation by
    averaging`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lambd (float, optional): decay term (default: 1e-4)
        alpha (float, optional): power for eta update (default: 0.75)
        t0 (float, optional): point at which to start averaging (default: 1e6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    .. _Acceleration of stochastic approximation by averaging:
        http://dl.acm.org/citation.cfm?id=131098
    """
    def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):
        # Written so that NaN hyper-parameters also fail validation.
        if not lr >= 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not weight_decay >= 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        super(ASGD, self).__init__(
            params, dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0,
                         weight_decay=weight_decay))
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            lr, lambd = group['lr'], group['lambd']
            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad.data
                if grad.is_sparse:
                    raise RuntimeError('ASGD does not support sparse gradients')
                state = self.state[param]
                if not state:
                    # Lazily initialize per-parameter state on first use.
                    state['step'] = 0
                    state['eta'] = lr                   # current step size
                    state['mu'] = 1                     # averaging weight
                    state['ax'] = torch.zeros_like(param.data)
                state['step'] += 1
                if group['weight_decay'] != 0:
                    grad = grad.add(group['weight_decay'], param.data)
                # Apply the decay term, then the gradient step.
                param.data.mul_(1 - lambd * state['eta'])
                param.data.add_(-state['eta'], grad)
                # Fold the new iterate into the running average.
                if state['mu'] != 1:
                    state['ax'].add_(param.data.sub(state['ax']).mul(state['mu']))
                else:
                    state['ax'].copy_(param.data)
                # Anneal the step size and the averaging weight.
                state['eta'] = (lr /
                                math.pow((1 + lambd * lr * state['step']), group['alpha']))
                state['mu'] = 1 / max(1, state['step'] - group['t0'])
        return loss
import torch
from .optimizer import Optimizer
class Adamax(Optimizer):
    """Implements Adamax algorithm (a variant of Adam based on infinity norm).

    It has been proposed in `Adam: A Method for Stochastic Optimization`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    __ https://arxiv.org/abs/1412.6980
    """
    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(Adamax, self).__init__(params, defaults)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adamax does not support sparse gradients')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # First moment estimate (running mean of gradients).
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponentially weighted infinity norm of gradients.
                    state['exp_inf'] = torch.zeros_like(p.data)
                exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
                beta1, beta2 = group['betas']
                eps = group['eps']
                state['step'] += 1
                if group['weight_decay'] != 0:
                    grad = grad.add(group['weight_decay'], p.data)
                # Update biased first moment estimate.
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                # Update the exponentially weighted infinity norm:
                #   u_t = max(beta2 * u_{t-1}, |g_t| + eps)
                # Both candidates are stacked along a new leading dim and
                # reduced elementwise with torch.max; eps keeps the final
                # denominator away from zero.  Note exp_inf is scaled by
                # beta2 in place before the reduction writes back into it.
                norm_buf = torch.cat([
                    exp_inf.mul_(beta2).unsqueeze(0),
                    grad.abs().add_(eps).unsqueeze_(0)
                ], 0)
                # The second `out` tensor receives the argmax indices and is
                # immediately discarded.
                torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
                # Only the first moment needs bias correction; the infinity
                # norm does not.
                bias_correction = 1 - beta1 ** state['step']
                clr = group['lr'] / bias_correction
                p.data.addcdiv_(-clr, exp_avg, exp_inf)
        return loss
import math
import torch
from .optimizer import Optimizer
class SparseAdam(Optimizer):
    """Implements lazy version of Adam algorithm suitable for sparse tensors.

    In this variant, only moments that show up in the gradient get updated, and
    only those portions of the gradient get applied to the parameters.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
        if not 0.0 < lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 < eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps)
        super(SparseAdam, self).__init__(params, defaults)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if not grad.is_sparse:
                    raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                state['step'] += 1
                grad = grad.coalesce()  # the update is non-linear so indices must be unique
                grad_indices = grad._indices()
                grad_values = grad._values()
                size = grad.size()
                def make_sparse(values):
                    # Build a sparse tensor carrying `values` at the
                    # positions of the (coalesced) gradient's indices.
                    constructor = grad.new
                    if grad_indices.dim() == 0 or values.dim() == 0:
                        return constructor().resize_as_(grad)
                    return constructor(grad_indices, values, size)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Decay the first and second moment running average coefficient
                #      old <- b * old + (1 - b) * new
                # <==> old += (1 - b) * (new - old)
                # `_sparse_mask` reads the dense moment's values at the
                # gradient's indices, so only touched entries are updated.
                old_exp_avg_values = exp_avg._sparse_mask(grad)._values()
                exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
                exp_avg.add_(make_sparse(exp_avg_update_values))
                old_exp_avg_sq_values = exp_avg_sq._sparse_mask(grad)._values()
                exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
                exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
                # Dense addition again is intended, avoiding another _sparse_mask
                # (these in-place adds reconstruct the *new* moment values at
                # the gradient's indices from the update deltas).
                numer = exp_avg_update_values.add_(old_exp_avg_values)
                exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
                denom = exp_avg_sq_update_values.sqrt_().add_(group['eps'])
                del exp_avg_update_values, exp_avg_sq_update_values
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                # Apply the Adam update only at the gradient's indices.
                p.data.add_(make_sparse(-step_size * numer.div_(denom)))
        return loss
import math
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
class Cauchy(Distribution):
    r"""
    Cauchy (Lorentz) distribution.

    The ratio of two independent normally distributed random variables with
    means `0` follows a Cauchy distribution.

    Example::

        >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # sample from a Cauchy distribution with loc=0 and scale=1
         2.3214
        [torch.FloatTensor of size 1]

    Args:
        loc (float or Tensor): mode or median of the distribution.
        scale (float or Tensor): half width at half maximum.
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    has_rsample = True

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        # Scalar inputs give an empty batch shape; otherwise the batch shape
        # follows the broadcast parameters.
        scalar_params = isinstance(loc, Number) and isinstance(scale, Number)
        batch_shape = torch.Size() if scalar_params else self.loc.size()
        super(Cauchy, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        # The Cauchy distribution has no mean; report NaN.
        return self.loc.new_tensor(float('nan')).expand(self._extended_shape())

    @property
    def variance(self):
        # The second moment is undefined; report infinity.
        return self.loc.new_tensor(float('inf')).expand(self._extended_shape())

    def rsample(self, sample_shape=torch.Size()):
        # Draw standard-Cauchy noise, then shift and scale it.
        shape = self._extended_shape(sample_shape)
        noise = self.loc.new(shape).cauchy_()
        return self.loc + noise * self.scale

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        z = (value - self.loc) / self.scale
        return -math.log(math.pi) - self.scale.log() - (1 + z**2).log()

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        z = (value - self.loc) / self.scale
        return torch.atan(z) / math.pi + 0.5

    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc

    def entropy(self):
        # H = log(4 * pi * scale)
        return math.log(4 * math.pi) + self.scale.log()
from numbers import Number
import math
import torch
from torch.distributions import constraints
from torch.distributions.uniform import Uniform
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import _finfo, broadcast_all
euler_constant = 0.57721566490153286060  # Euler-Mascheroni constant; mean of a standard Gumbel
class Gumbel(TransformedDistribution):
    r"""
    Samples from a Gumbel Distribution.

    Examples::

        >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))
        >>> m.sample()  # sample from Gumbel distribution with loc=1, scale=2
         1.0124
        [torch.FloatTensor of size 1]

    Args:
        loc (float or Tensor): Location parameter of the distribution
        scale (float or Tensor): Scale parameter of the distribution
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        # The base uniform is clamped into (tiny, 1 - eps) so that both logs
        # in the transform chain below stay finite.
        finfo = _finfo(self.loc)
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
            base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
        else:
            batch_shape = self.scale.size()
            base_dist = Uniform(self.loc.new(self.loc.size()).fill_(finfo.tiny), 1 - finfo.eps)
        # Applied left-to-right to u ~ Uniform this chain computes
        #   loc - scale * log(-log(u)),
        # i.e. inverse-CDF sampling for the Gumbel distribution.
        # NOTE(review): `batch_shape` above is computed but never passed on;
        # the final shape comes from the base distribution -- confirm intended.
        transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
                      ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
        super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
    @property
    def mean(self):
        # E[X] = loc + scale * gamma (Euler-Mascheroni constant).
        return self.loc + self.scale * euler_constant
    @property
    def stddev(self):
        # Var[X] = (pi^2 / 6) * scale^2, so stddev = pi / sqrt(6) * scale.
        return (math.pi / math.sqrt(6)) * self.scale
    @property
    def variance(self):
        return self.stddev.pow(2)
    def entropy(self):
        return self.scale.log() + (1 + euler_constant)
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, lazy_property, logits_to_probs
class Binomial(Distribution):
    r"""
    Creates a Binomial distribution parameterized by `total_count` and
    either `probs` or `logits` (but not both). `total_count` must be
    broadcastable with `probs`/`logits`.

    Example::

        >>> m = Binomial(100, torch.tensor([0 , .2, .8, 1]))
        >>> x = m.sample()
         0
         22
         71
         100
        [torch.FloatTensor of size 4]

        >>> m = Binomial(torch.Tensor([[5.], [10.]]), torch.Tensor([0.5, 0.8]))
        >>> x = m.sample()
         4  5
         7  6
        [torch.FloatTensor of size (2,2)]

    Args:
        total_count (int or Tensor): number of Bernoulli trials
        probs (Tensor): Event probabilities
        logits (Tensor): Event log-odds
    """
    arg_constraints = {'total_count': constraints.nonnegative_integer,
                       'probs': constraints.unit_interval}
    has_enumerate_support = True

    def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            self.total_count, self.probs = broadcast_all(total_count, probs)
            # Cast the count to the dtype of the *given* parametrization.
            # Using `self.probs` here (not `self.logits`) avoids eagerly
            # materializing the lazy probs->logits conversion.
            self.total_count = self.total_count.type_as(self.probs)
            is_scalar = isinstance(self.probs, Number)
        else:
            self.total_count, self.logits = broadcast_all(total_count, logits)
            self.total_count = self.total_count.type_as(self.logits)
            is_scalar = isinstance(self.logits, Number)
        # Remember which parametrization was supplied; `param_shape` and
        # `_new` are derived from it.
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        super(Binomial, self).__init__(batch_shape, validate_args=validate_args)

    def _new(self, *args, **kwargs):
        # Allocate a tensor with the same type/device as the parameters.
        return self._param.new(*args, **kwargs)

    @constraints.dependent_property
    def support(self):
        return constraints.integer_interval(0, self.total_count)

    @property
    def mean(self):
        return self.total_count * self.probs

    @property
    def variance(self):
        return self.total_count * self.probs * (1 - self.probs)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def sample(self, sample_shape=torch.Size()):
        with torch.no_grad():
            # Draw max_count Bernoulli trials per element and sum them.
            # Trials beyond an element's own total_count are masked to 0 so
            # inhomogeneous counts are supported.
            max_count = max(int(self.total_count.max()), 1)
            shape = self._extended_shape(sample_shape) + (max_count,)
            bernoullis = torch.bernoulli(self.probs.unsqueeze(-1).expand(shape))
            if self.total_count.min() != max_count:
                arange = torch.arange(max_count, out=self.total_count.new_empty(max_count))
                mask = arange >= self.total_count.unsqueeze(-1)
                bernoullis.masked_fill_(mask, 0.)
            return bernoullis.sum(dim=-1)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # log C(n, k) via log-gamma.
        log_factorial_n = torch.lgamma(self.total_count + 1)
        log_factorial_k = torch.lgamma(value + 1)
        log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
        # Numerically stable evaluation of k*log(p) + (n-k)*log(1-p) in
        # terms of logits; max_val shifts the exponent to avoid overflow.
        max_val = (-self.logits).clamp(min=0.0)
        # Note that: torch.log1p(-self.probs)) = max_val - torch.log1p((self.logits + 2 * max_val).exp()))
        return (log_factorial_n - log_factorial_k - log_factorial_nmk +
                value * self.logits + self.total_count * max_val -
                self.total_count * torch.log1p((self.logits + 2 * max_val).exp()))

    def enumerate_support(self):
        # All outcomes 0..total_count, broadcast over the batch shape.
        total_count = int(self.total_count.max())
        if not self.total_count.min() == total_count:
            raise NotImplementedError("Inhomogeneous total count not supported by `enumerate_support`.")
        values = self._new(1 + total_count,)
        torch.arange(1 + total_count, out=values)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        values = values.expand((-1,) + self._batch_shape)
        return values
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.transforms import Transform
from torch.distributions.utils import _sum_rightmost
class TransformedDistribution(Distribution):
r"""
Extension of the Distribution class, which applies a sequence of Transforms
to a base distribution. Let f be the composition of transforms applied::
X ~ BaseDistribution
Y = f(X) ~ TransformedDistribution(BaseDistribution, f)
log p(Y) = log p(X) + log |det (dX/dY)|
Note that the ``.event_shape`` of a :class:`TransformedDistribution` is the
maximum shape of its base distribution and its transforms, since transforms
can introduce correlations among events.
"""
arg_constraints = {}
def __init__(self, base_distribution, transforms, validate_args=None):
self.base_dist = base_distribution
if isinstance(transforms, Transform):
self.transforms = [transforms, ]
elif isinstance(transforms, list):
if not all(isinstance(t, Transform) for t in transforms):
raise ValueError("transforms must be a Transform or a list of Transforms")
self.transforms = transforms
else:
raise ValueError("transforms must be a Transform or list, but was {}".format(transforms))
shape = self.base_dist.batch_shape + self.base_dist.event_shape
event_dim = max([len(self.base_dist.event_shape)] + [t.event_dim for t in self.transforms])
batch_shape = shape[:len(shape) - event_dim]
event_shape = shape[len(shape) - event_dim:]
super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args)
@constraints.dependent_property
def support(self):
return self.transforms[-1].codomain if self.transforms else self.base_dist.support
@property
def has_rsample(self):
return self.base_dist.has_rsample
def sample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped sample or sample_shape shaped batch of
samples if the distribution parameters are batched. Samples first from
base distribution and applies `transform()` for every transform in the
list.
"""
with torch.no_grad():
x = self.base_dist.sample(sample_shape)
for transform in self.transforms:
x = transform(x)
return x
def rsample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped reparameterized sample or sample_shape
shaped batch of reparameterized samples if the distribution parameters
are batched. Samples first from base distribution and applies
`transform()` for every transform in the list.
"""
x = self.base_dist.rsample(sample_shape)
for transform in self.transforms:
x = transform(x)
return x
def log_prob(self, value):
"""
Scores the sample by inverting the transform(s) and computing the score
using the score of the base distribution and the log abs det jacobian.
"""
event_dim = len(self.event_shape)
log_prob = 0.0
y = value
for transform in reversed(self.transforms):
x = transform.inv(y)
log_prob = log_prob - _sum_rightmost(transform.log_abs_det_jacobian(x, y),
event_dim - transform.event_dim)
y = x
log_prob = log_prob + _sum_rightmost(self.base_dist.log_prob(y),
event_dim - len(self.base_dist.event_shape))
return log_prob
def _monotonize_cdf(self, value):
"""
This conditionally flips ``value -> 1-value`` to ensure :meth:`cdf` is
monotone increasing.
"""
sign = 1
for transform in self.transforms:
sign = sign * transform.sign
if sign is 1:
return value
return sign * (value - 0.5) + 0.5
def cdf(self, value):
    """
    Cumulative distribution function of the transformed distribution:
    pull `value` back through the transforms (last to first), evaluate
    the base CDF, then correct the orientation for monotone-decreasing
    transform chains.
    """
    for transform in reversed(self.transforms):
        value = transform.inv(value)
    if self._validate_args:
        self.base_dist._validate_sample(value)
    result = self.base_dist.cdf(value)
    return self._monotonize_cdf(result)
def icdf(self, value):
    """
    Computes the inverse cumulative distribution function using
    transform(s) and computing the score of the base distribution.
    """
    # Undo any orientation flip first, so the base icdf receives a
    # monotone-increasing probability.
    value = self._monotonize_cdf(value)
    # NOTE(review): this validates the cumulative *probability* against
    # the base distribution's sample support, which looks questionable --
    # confirm intent before relying on it.
    if self._validate_args:
        self.base_dist._validate_sample(value)
    value = self.base_dist.icdf(value)
    # Push the base sample forward through each transform, in order.
    for transform in self.transforms:
        value = transform(value)
    return value | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/transformed_distribution.py | 0.938871 | 0.725393 | transformed_distribution.py | pypi
import torch
import warnings
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
class Distribution(object):
    r"""
    Distribution is the abstract base class for probability distributions.
    """
    # Capability flags; subclasses override as appropriate.
    has_rsample = False
    has_enumerate_support = False
    # Class-wide default for argument validation; instances may override it
    # via the ``validate_args`` constructor argument.
    _validate_args = False
    # NOTE(review): these two assignments are shadowed by the `support` and
    # `arg_constraints` properties declared later in this class body, so
    # they are effectively dead -- confirm before removing.
    support = None
    arg_constraints = {}

    @staticmethod
    def set_default_validate_args(value):
        # Sets the process-wide validation default for all distributions.
        # NOTE(review): `value not in [True, False]` also admits 1 and 0
        # (since 1 == True), and the ValueError carries no message.
        if value not in [True, False]:
            raise ValueError
        Distribution._validate_args = value

    def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
        """
        Args:
            batch_shape (torch.Size): shape over which parameters are batched.
            event_shape (torch.Size): shape of a single sample (without batching).
            validate_args (bool, optional): per-instance override of the
                class-level validation default.
        """
        self._batch_shape = batch_shape
        self._event_shape = event_shape
        if validate_args is not None:
            self._validate_args = validate_args
        if self._validate_args:
            # Eagerly check each declared argument constraint.
            for param, constraint in self.arg_constraints.items():
                if constraints.is_dependent(constraint):
                    continue  # skip constraints that cannot be checked
                if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):
                    continue  # skip checking lazily-constructed args
                if not constraint.check(getattr(self, param)).all():
                    raise ValueError("The parameter {} has invalid values".format(param))

    @property
    def batch_shape(self):
        """
        Returns the shape over which parameters are batched.
        """
        return self._batch_shape

    @property
    def event_shape(self):
        """
        Returns the shape of a single sample (without batching).
        """
        return self._event_shape

    @property
    def arg_constraints(self):
        """
        Returns a dictionary from argument names to
        :class:`~torch.distributions.constraints.Constraint` objects that
        should be satisfied by each argument of this distribution. Args that
        are not tensors need not appear in this dict.
        """
        raise NotImplementedError

    @property
    def support(self):
        """
        Returns a :class:`~torch.distributions.constraints.Constraint` object
        representing this distribution's support.
        """
        raise NotImplementedError

    @property
    def mean(self):
        """
        Returns the mean of the distribution.
        """
        raise NotImplementedError

    @property
    def variance(self):
        """
        Returns the variance of the distribution.
        """
        raise NotImplementedError

    @property
    def stddev(self):
        """
        Returns the standard deviation of the distribution.
        """
        # Default implementation: derived from `variance`.
        return self.variance.sqrt()

    def sample(self, sample_shape=torch.Size()):
        """
        Generates a sample_shape shaped sample or sample_shape shaped batch of
        samples if the distribution parameters are batched.
        """
        # Default: a reparameterized draw with gradients disabled; only
        # usable by subclasses that implement rsample.
        with torch.no_grad():
            return self.rsample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        """
        Generates a sample_shape shaped reparameterized sample or sample_shape
        shaped batch of reparameterized samples if the distribution parameters
        are batched.
        """
        raise NotImplementedError

    def sample_n(self, n):
        """
        Generates n samples or n batches of samples if the distribution
        parameters are batched.

        .. deprecated:: use ``.sample((n,))`` instead.
        """
        warnings.warn('sample_n will be deprecated. Use .sample((n,)) instead', UserWarning)
        return self.sample(torch.Size((n,)))

    def log_prob(self, value):
        """
        Returns the log of the probability density/mass function evaluated at
        `value`.

        Args:
            value (Tensor):
        """
        raise NotImplementedError

    def cdf(self, value):
        """
        Returns the cumulative density/mass function evaluated at
        `value`.

        Args:
            value (Tensor):
        """
        raise NotImplementedError

    def icdf(self, value):
        """
        Returns the inverse cumulative density/mass function evaluated at
        `value`.

        Args:
            value (Tensor):
        """
        raise NotImplementedError

    def enumerate_support(self):
        """
        Returns tensor containing all values supported by a discrete
        distribution. The result will enumerate over dimension 0, so the shape
        of the result will be `(cardinality,) + batch_shape + event_shape`
        (where `event_shape = ()` for univariate distributions).

        Note that this enumerates over all batched tensors in lock-step
        `[[0, 0], [1, 1], ...]`. To iterate over the full Cartesian product
        use `itertools.product(m.enumerate_support())`.

        Returns:
            Tensor iterating over dimension 0.
        """
        raise NotImplementedError

    def entropy(self):
        """
        Returns entropy of distribution, batched over batch_shape.

        Returns:
            Tensor of shape batch_shape.
        """
        raise NotImplementedError

    def perplexity(self):
        """
        Returns perplexity of distribution, batched over batch_shape.

        Returns:
            Tensor of shape batch_shape.
        """
        # Perplexity is exp(H) by definition.
        return torch.exp(self.entropy())

    def _extended_shape(self, sample_shape=torch.Size()):
        """
        Returns the size of the sample returned by the distribution, given
        a `sample_shape`. Note, that the batch and event shapes of a distribution
        instance are fixed at the time of construction.

        Args:
            sample_shape (torch.Size): the size of the sample to be drawn.
        """
        # NOTE(review): an earlier docstring claimed an empty shape is
        # upcast to (1,), but the code below performs no such upcast.
        return torch.Size(sample_shape + self._batch_shape + self._event_shape)

    def _validate_sample(self, value):
        """
        Argument validation for distribution methods such as `log_prob`,
        `cdf` and `icdf`. The rightmost dimensions of a value to be
        scored via these methods must agree with the distribution's batch
        and event shapes.

        Args:
            value (Tensor): the tensor whose log probability is to be
                computed by the `log_prob` method.

        Raises
            ValueError: when the rightmost dimensions of `value` do not match the
                distribution's batch and event shapes.
        """
        if not isinstance(value, torch.Tensor):
            raise ValueError('The value argument to log_prob must be a Tensor')
        # 1) The trailing dims of `value` must equal event_shape exactly.
        event_dim_start = len(value.size()) - len(self._event_shape)
        if value.size()[event_dim_start:] != self._event_shape:
            raise ValueError('The right-most size of value must match event_shape: {} vs {}.'.
                             format(value.size(), self._event_shape))
        # 2) The full shape must broadcast against batch_shape + event_shape
        #    (compared right-to-left, 1s broadcast).
        actual_shape = value.size()
        expected_shape = self._batch_shape + self._event_shape
        for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
            if i != 1 and j != 1 and i != j:
                raise ValueError('Value is not broadcastable with batch_shape+event_shape: {} vs {}.'.
                                 format(actual_shape, expected_shape))
        # 3) Every element must lie inside the declared support.
        if not self.support.check(value).all():
            raise ValueError('The value argument must be within the support')

    def __repr__(self):
        # Minimal repr: the class name only.
        return self.__class__.__name__ + '()' | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/distribution.py | 0.936872 | 0.537102 | distribution.py | pypi
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property
from torch.nn.functional import binary_cross_entropy_with_logits
class Bernoulli(ExponentialFamily):
    r"""
    A Bernoulli distribution over {0, 1}, parameterized by either `probs`
    (the probability of drawing 1) or `logits` (the log-odds of drawing 1).
    Exactly one of the two must be given; the other is derived lazily.

    Example::

        >>> m = Bernoulli(torch.tensor([0.3]))
        >>> m.sample()  # 30% chance 1; 70% chance 0

    Args:
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`
    """
    arg_constraints = {'probs': constraints.unit_interval}
    support = constraints.boolean
    has_enumerate_support = True
    _mean_carrier_measure = 0

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            is_scalar = isinstance(probs, Number)
            self.probs, = broadcast_all(probs)
            self._param = self.probs
        else:
            is_scalar = isinstance(logits, Number)
            self.logits, = broadcast_all(logits)
            self._param = self.logits
        batch_shape = torch.Size() if is_scalar else self._param.size()
        super(Bernoulli, self).__init__(batch_shape, validate_args=validate_args)

    def _new(self, *args, **kwargs):
        # Allocate a tensor matching the parameter's type/device.
        return self._param.new(*args, **kwargs)

    @property
    def mean(self):
        return self.probs

    @property
    def variance(self):
        p = self.probs
        return p * (1 - p)

    @lazy_property
    def logits(self):
        # Computed lazily when the distribution was built from `probs`.
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        # Computed lazily when the distribution was built from `logits`.
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def sample(self, sample_shape=torch.Size()):
        target_shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.bernoulli(self.probs.expand(target_shape))

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        # BCE-with-logits is exactly the negative Bernoulli log-likelihood.
        return -binary_cross_entropy_with_logits(logits, value, reduce=False)

    def entropy(self):
        # H(p) = BCE(logits, p): the cross-entropy of p with itself.
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduce=False)

    def enumerate_support(self):
        # The two support points {0, 1}, broadcast against batch_shape.
        values = self._new((2,))
        torch.arange(2, out=values.data)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        return values.expand((-1,) + self._batch_shape)

    @property
    def _natural_params(self):
        # Natural parameter of the Bernoulli family: log(p / (1 - p)).
        return (torch.log(self.probs / (1 - self.probs)), )
def _log_normalizer(self, x):
    # Log-partition function F(eta) = log(1 + exp(eta)) of the Bernoulli
    # exponential family, evaluated at natural parameter eta = x.
    return torch.log(1 + torch.exp(x)) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/bernoulli.py | 0.946163 | 0.55646 | bernoulli.py | pypi
import torch
from torch.distributions.distribution import Distribution
from torch.autograd import Variable
class ExponentialFamily(Distribution):
    r"""
    ExponentialFamily is the abstract base class for probability distributions belonging to an
    exponential family, whose probability mass/density function has the form is defined below

    .. math::

        p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))

    where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
    :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
    measure.

    Note:
        This class is an intermediary between the `Distribution` class and distributions which belong
        to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
        divergence methods. We use this class to compute the entropy and KL divergence using the AD frame-
        work and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
        Cross-entropies of Exponential Families).
    """
    @property
    def _natural_params(self):
        """
        Abstract method for natural parameters. Returns a tuple of Tensors based
        on the distribution
        """
        raise NotImplementedError

    def _log_normalizer(self, *natural_params):
        """
        Abstract method for log normalizer function. Returns a log normalizer based on
        the distribution and input
        """
        raise NotImplementedError

    @property
    def _mean_carrier_measure(self):
        """
        Abstract method for expected carrier measure, which is required for computing
        entropy.
        """
        raise NotImplementedError
def entropy(self):
    """
    Method to compute the entropy using Bregman divergence of the log normalizer.

    Computes H = F(theta) - <theta, grad F(theta)> - E[k(x)], obtaining
    grad F via autograd on the family's log normalizer.
    """
    result = -self._mean_carrier_measure
    # Re-wrap the natural parameters as autograd leaves so F can be
    # differentiated w.r.t. them.
    nparams = [Variable(p.data, requires_grad=True) for p in self._natural_params]
    lg_normal = self._log_normalizer(*nparams)
    gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
    result += lg_normal.clone()
    # Subtract the inner product <theta_i, dF/dtheta_i> term by term.
    for np, g in zip(nparams, gradients):
        result -= np * g
    return result | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/exp_family.py | 0.922822 | 0.882833 | exp_family.py | pypi
import math
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
class Uniform(Distribution):
    r"""
    Generates uniformly distributed random samples from the half-open interval
    ``[low, high)``.

    Example::

        >>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
        >>> m.sample()  # uniformly distributed in the range [0.0, 5.0)

    Args:
        low (float or Tensor): lower range (inclusive).
        high (float or Tensor): upper range (exclusive).
    """
    # TODO allow (loc,scale) parameterization to allow independent constraints.
    arg_constraints = {'low': constraints.dependent, 'high': constraints.dependent}
    has_rsample = True

    @property
    def mean(self):
        return (self.high + self.low) / 2

    @property
    def stddev(self):
        return (self.high - self.low) / 12**0.5

    @property
    def variance(self):
        return (self.high - self.low).pow(2) / 12

    def __init__(self, low, high, validate_args=None):
        self.low, self.high = broadcast_all(low, high)
        if isinstance(low, Number) and isinstance(high, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.low.size()
        super(Uniform, self).__init__(batch_shape, validate_args=validate_args)
        # Fix: error message previously read "low>= high" (missing space).
        if self._validate_args and not torch.lt(self.low, self.high).all():
            raise ValueError("Uniform is not defined when low >= high")

    @constraints.dependent_property
    def support(self):
        return constraints.interval(self.low, self.high)

    def rsample(self, sample_shape=torch.Size()):
        # Reparameterized draw: low + U[0, 1) * (high - low).
        shape = self._extended_shape(sample_shape)
        rand = self.low.new(shape).uniform_()
        return self.low + rand * (self.high - self.low)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Indicator of [low, high); log(0) = -inf outside the support.
        lb = value.ge(self.low).type_as(self.low)
        ub = value.lt(self.high).type_as(self.low)
        return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        result = (value - self.low) / (self.high - self.low)
        return result.clamp(min=0, max=1)

    def icdf(self, value):
        # Bug fix: the original called `self._validate_sample(value)` here,
        # validating the cumulative probability (in [0, 1]) against the
        # *sample* support [low, high]; e.g. Uniform(2, 5).icdf(0.5) raised
        # spuriously. The quantile function needs no sample-space validation.
        return value * (self.high - self.low) + self.low
def entropy(self):
    # Differential entropy of U[low, high): log(high - low).
    return torch.log(self.high - self.low) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/uniform.py | 0.865196 | 0.563438 | uniform.py | pypi
import math
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import lazy_property
def _get_batch_shape(bmat, bvec):
r"""
Given a batch of matrices and a batch of vectors, compute the combined `batch_shape`.
"""
try:
vec_shape = torch._C._infer_size(bvec.shape, bmat.shape[:-1])
except RuntimeError:
raise ValueError("Incompatible batch shapes: vector {}, matrix {}".format(bvec.shape, bmat.shape))
return torch.Size(vec_shape[:-1])
def _batch_mv(bmat, bvec):
    r"""
    Batched matrix-vector product for compatible (broadcastable, not
    necessarily equal) batch shapes: `bmat` holds :math:`n \times n`
    matrices, `bvec` holds length-:math:`n` vectors, each with any number
    of leading batch dimensions.
    """
    n = bvec.size(-1)
    batch_shape = _get_batch_shape(bmat, bvec)
    # torch.bmm requires exactly three dims, so collapse all batch dims.
    flat_mat = bmat.expand(batch_shape + (n, n)).reshape((-1, n, n))
    flat_vec = bvec.unsqueeze(-1).expand(batch_shape + (n, 1)).reshape((-1, n, 1))
    return torch.bmm(flat_mat, flat_vec).view(batch_shape + (n,))
def _batch_potrf_lower(bmat):
    r"""
    Lower-triangular Cholesky factor of every matrix in a batch of
    arbitrary leading shape.
    """
    n = bmat.size(-1)
    factors = [C.potrf(upper=False) for C in bmat.reshape((-1, n, n))]
    return torch.stack(factors).view(bmat.shape)
def _batch_diag(bmat):
r"""
Returns the diagonals of a batch of square matrices.
"""
return bmat.reshape(bmat.shape[:-2] + (-1,))[..., ::bmat.size(-1) + 1]
def _batch_inverse(bmat):
r"""
Returns the inverses of a batch of square matrices.
"""
n = bmat.size(-1)
flat_bmat = bmat.reshape(-1, n, n)
flat_inv_bmat = torch.stack([m.inverse() for m in flat_bmat], 0)
return flat_inv_bmat.view(bmat.shape)
def _batch_mahalanobis(L, x):
r"""
Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`.
Accepts batches for both L and x.
"""
# TODO: use `torch.potrs` or similar once a backwards pass is implemented.
flat_L = L.unsqueeze(0).reshape((-1,) + L.shape[-2:])
L_inv = torch.stack([torch.inverse(Li.t()) for Li in flat_L]).view(L.shape)
return (x.unsqueeze(-1) * L_inv).sum(-2).pow(2.0).sum(-1)
class MultivariateNormal(Distribution):
    r"""
    Creates a multivariate normal (also called Gaussian) distribution
    parameterized by a mean vector and a covariance matrix.

    The multivariate normal distribution can be parameterized either
    in terms of a positive definite covariance matrix :math:`\mathbf{\Sigma}`
    or a positive definite precision matrix :math:`\mathbf{\Sigma}^{-1}`
    or a lower-triangular matrix :math:`\mathbf{L}` with positive-valued
    diagonal entries, such that
    :math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`. This triangular matrix
    can be obtained via e.g. Cholesky decomposition of the covariance.

    Example:

        >>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
        >>> m.sample()  # normally distributed with mean=`[0,0]` and covariance_matrix=`I`

    Args:
        loc (Tensor): mean of the distribution
        covariance_matrix (Tensor): positive-definite covariance matrix
        precision_matrix (Tensor): positive-definite precision matrix
        scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal

    Note:
        Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or
        :attr:`scale_tril` can be specified.

        Using :attr:`scale_tril` will be more efficient: all computations internally
        are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or
        :attr:`precision_matrix` is passed instead, it is only used to compute
        the corresponding lower triangular matrices using a Cholesky decomposition.
    """
    arg_constraints = {'loc': constraints.real_vector,
                       'covariance_matrix': constraints.positive_definite,
                       'precision_matrix': constraints.positive_definite,
                       'scale_tril': constraints.lower_cholesky}
    support = constraints.real
    has_rsample = True

    def __init__(self, loc, covariance_matrix=None, precision_matrix=None, scale_tril=None, validate_args=None):
        # The event shape is the trailing dimension of the mean vector.
        event_shape = torch.Size(loc.shape[-1:])
        if (covariance_matrix is not None) + (scale_tril is not None) + (precision_matrix is not None) != 1:
            raise ValueError("Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified.")
        # Whichever factorization is given is stored eagerly (shadowing the
        # corresponding lazy_property below); the others are derived lazily
        # on first access.
        if scale_tril is not None:
            if scale_tril.dim() < 2:
                raise ValueError("scale_tril matrix must be at least two-dimensional, "
                                 "with optional leading batch dimensions")
            self.scale_tril = scale_tril
            batch_shape = _get_batch_shape(scale_tril, loc)
        elif covariance_matrix is not None:
            if covariance_matrix.dim() < 2:
                raise ValueError("covariance_matrix must be at least two-dimensional, "
                                 "with optional leading batch dimensions")
            self.covariance_matrix = covariance_matrix
            batch_shape = _get_batch_shape(covariance_matrix, loc)
        else:
            if precision_matrix.dim() < 2:
                raise ValueError("precision_matrix must be at least two-dimensional, "
                                 "with optional leading batch dimensions")
            # The covariance is materialized eagerly here (by inverting the
            # precision), unlike the other two branches.
            self.precision_matrix = precision_matrix
            self.covariance_matrix = _batch_inverse(precision_matrix)
            batch_shape = _get_batch_shape(precision_matrix, loc)
        self.loc = loc
        super(MultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    @lazy_property
    def scale_tril(self):
        # Lower Cholesky factor of the covariance, computed on demand.
        return _batch_potrf_lower(self.covariance_matrix)

    @lazy_property
    def covariance_matrix(self):
        # Sigma = L L^T, reconstructed from the Cholesky factor.
        return torch.matmul(self.scale_tril, self.scale_tril.transpose(-1, -2))

    @lazy_property
    def precision_matrix(self):
        # TODO: use `torch.potri` on `scale_tril` once a backwards pass is implemented.
        # Sigma^{-1} = L^{-T} L^{-1}.
        scale_tril_inv = _batch_inverse(self.scale_tril)
        return torch.matmul(scale_tril_inv.transpose(-1, -2), scale_tril_inv)

    @property
    def mean(self):
        return self.loc

    @property
    def variance(self):
        # Marginal variances: the diagonal of each covariance matrix.
        n = self.covariance_matrix.size(-1)
        var = torch.stack([cov.diag() for cov in self.covariance_matrix.view(-1, n, n)])
        return var.view(self.covariance_matrix.size()[:-1])

    def rsample(self, sample_shape=torch.Size()):
        # Reparameterized draw: loc + L @ eps with eps ~ N(0, I).
        shape = self._extended_shape(sample_shape)
        eps = self.loc.new(*shape).normal_()
        return self.loc + _batch_mv(self.scale_tril, eps)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        diff = value - self.loc
        # Squared Mahalanobis distance and log det(Sigma)/2 via the
        # Cholesky diagonal.
        M = _batch_mahalanobis(self.scale_tril, diff)
        log_det = _batch_diag(self.scale_tril).abs().log().sum(-1)
        return -0.5 * (M + self.loc.size(-1) * math.log(2 * math.pi)) - log_det
def entropy(self):
    # H = k/2 * (1 + log(2*pi)) + log det(L), with log det(L) taken from
    # the Cholesky factor's diagonal.
    log_det = _batch_diag(self.scale_tril).abs().log().sum(-1)
    H = 0.5 * (1.0 + math.log(2 * math.pi)) * self._event_shape[0] + log_det
    if len(self._batch_shape) == 0:
        return H
    else:
        return H.expand(self._batch_shape) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/multivariate_normal.py | 0.88996 | 0.795102 | multivariate_normal.py | pypi
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
class Beta(ExponentialFamily):
    r"""
    Beta distribution on the unit interval, parameterized by two positive
    concentrations `concentration1` (alpha) and `concentration0` (beta).
    Internally represented as the first coordinate of a two-dimensional
    Dirichlet distribution.

    Example::

        >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
        >>> m.sample()

    Args:
        concentration1 (float or Tensor): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    arg_constraints = {'concentration1': constraints.positive, 'concentration0': constraints.positive}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0, validate_args=None):
        if isinstance(concentration1, Number) and isinstance(concentration0, Number):
            pair = torch.tensor([float(concentration1), float(concentration0)])
        else:
            concentration1, concentration0 = broadcast_all(concentration1, concentration0)
            pair = torch.stack([concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(pair)
        super(Beta, self).__init__(self._dirichlet._batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        c1 = self.concentration1
        c0 = self.concentration0
        return c1 / (c1 + c0)

    @property
    def variance(self):
        c1 = self.concentration1
        c0 = self.concentration0
        total = c1 + c0
        return c1 * c0 / (total.pow(2) * (total + 1))

    def rsample(self, sample_shape=()):
        # A Beta draw is the first coordinate of a 2-dim Dirichlet draw.
        sample = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(sample, Number):
            sample = self._dirichlet.concentration.new_tensor(sample)
        return sample

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Score the point (x, 1 - x) under the underlying Dirichlet.
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        alpha = self._dirichlet.concentration[..., 0]
        return torch.Tensor([alpha]) if isinstance(alpha, Number) else alpha

    @property
    def concentration0(self):
        beta = self._dirichlet.concentration[..., 1]
        return torch.Tensor([beta]) if isinstance(beta, Number) else beta

    @property
    def _natural_params(self):
        return (self.concentration1, self.concentration0)
def _log_normalizer(self, x, y):
    # Log-partition of the Beta family:
    # log B(x, y) = lgamma(x) + lgamma(y) - lgamma(x + y).
    return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/beta.py | 0.965536 | 0.59749 | beta.py | pypi
from numbers import Number
import torch
import math
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.gamma import Gamma
from torch.distributions.utils import broadcast_all, _finfo
class FisherSnedecor(Distribution):
    r"""
    Fisher-Snedecor (F) distribution with degrees of freedom `df1` and
    `df2`, realized as the ratio of two independent scaled Gamma draws.

    Example::

        >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
        >>> m.sample()

    Args:
        df1 (float or Tensor): degrees of freedom parameter 1
        df2 (float or Tensor): degrees of freedom parameter 2
    """
    arg_constraints = {'df1': constraints.positive, 'df2': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, df1, df2, validate_args=None):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)
        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        # Undefined (NaN) unless df2 > 2.
        df2 = self.df2.clone()
        df2[df2 <= 2] = float('nan')
        return df2 / (df2 - 2)

    @property
    def variance(self):
        # Undefined (NaN) unless df2 > 4.
        df2 = self.df2.clone()
        df2[df2 <= 4] = float('nan')
        numerator = 2 * df2.pow(2) * (self.df1 + df2 - 2)
        denominator = self.df1 * (df2 - 2).pow(2) * (df2 - 4)
        return numerator / denominator

    def rsample(self, sample_shape=torch.Size(())):
        shape = self._extended_shape(sample_shape)
        # X1 ~ Gamma(df1/2, rate=df1), X2 ~ Gamma(df2/2, rate=df2);
        # X1 / X2 is F(df1, df2)-distributed (the rate scales cancel).
        numer = self._gamma1.rsample(sample_shape).view(shape)
        denom = self._gamma2.rsample(sample_shape).view(shape)
        # Clamp away from zero to keep the ratio (and its log) finite.
        denom.clamp_(min=_finfo(denom).tiny)
        ratio = numer / denom
        ratio.clamp_(min=_finfo(denom).tiny)
        return ratio
def log_prob(self, value):
    if self._validate_args:
        self._validate_sample(value)
    # Shorthands: ct1 = df1/2, ct2 = df2/2, ct3 = df1/df2.
    ct1 = self.df1 * 0.5
    ct2 = self.df2 * 0.5
    ct3 = self.df1 / self.df2
    # Normalizer: -log B(df1/2, df2/2), via lgamma.
    t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
    # Kernel: (df1/2) log(df1/df2) + (df1/2 - 1) log x.
    t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
    # Denominator: ((df1 + df2)/2) log(1 + (df1/df2) x), via log1p.
    t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
    return t1 + t2 - t3 | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/fishersnedecor.py | 0.943996 | 0.649718 | fishersnedecor.py | pypi
import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.utils import clamp_probs, broadcast_all, log_sum_exp
from torch.distributions.distribution import Distribution
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import ExpTransform
class ExpRelaxedCategorical(Distribution):
    r"""
    Relaxed categorical distribution in log space: samples are the *log*
    of a point on the simplex. Parameterized by a relaxation
    `temperature` and either `probs` or `logits`, mirroring the interface
    of :func:`torch.distributions.OneHotCategorical`.

    Args:
        temperature (Tensor): relaxation temperature
        probs (Tensor): event probabilities
        logits (Tensor): the log probability of each event.

    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables
    (Maddison et al, 2017)

    [2] Categorical Reparametrization with Gumbel-Softmax
    (Jang et al, 2017)
    """
    arg_constraints = {'probs': constraints.simplex}
    support = constraints.real
    has_rsample = True

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        self._categorical = Categorical(probs, logits)
        self.temperature = temperature
        super(ExpRelaxedCategorical, self).__init__(
            self._categorical.batch_shape,
            self._categorical.param_shape[-1:],
            validate_args=validate_args)

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def probs(self):
        return self._categorical.probs

    def rsample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        # Gumbel-softmax in log space: perturb the logits with Gumbel
        # noise, scale by 1/temperature, then log-normalize.
        uniforms = clamp_probs(self.logits.new(self._extended_shape(sample_shape)).uniform_())
        gumbels = -torch.log(-torch.log(uniforms))
        relaxed = (self.logits + gumbels) / self.temperature
        return relaxed - log_sum_exp(relaxed)

    def log_prob(self, value):
        K = self._categorical._num_events
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        # Normalizer: log Gamma(K) + (K - 1) * log(temperature).
        log_scale = (self.temperature.new(self.temperature.shape).fill_(K).lgamma() +
                     self.temperature.log() * (K - 1))
        score = logits - value * self.temperature
        score = (score - log_sum_exp(score)).sum(-1)
        return score + log_scale
class RelaxedOneHotCategorical(TransformedDistribution):
r"""
Creates a RelaxedOneHotCategorical distribution parametrized by `temperature` and either `probs` or `logits`.
This is a relaxed version of the `OneHotCategorical` distribution, so its
values are on simplex, and has reparametrizable samples.
Example::
>>> m = RelaxedOneHotCategorical(torch.tensor([2.2]),
torch.tensor([0.1, 0.2, 0.3, 0.4]))
>>> m.sample() # equal probability of 1, 1, 2, 3
0.1294
0.2324
0.3859
0.2523
[torch.FloatTensor of size 4]
Args:
temperature (Tensor): relaxation temperature
probs (Tensor): event probabilities
logits (Tensor): the log probability of each event.
"""
arg_constraints = {'probs': constraints.simplex}
support = constraints.simplex
has_rsample = True
def __init__(self, temperature, probs=None, logits=None, validate_args=None):
super(RelaxedOneHotCategorical, self).__init__(ExpRelaxedCategorical(temperature, probs, logits),
ExpTransform(), validate_args=validate_args)
@property
def temperature(self):
return self.base_dist.temperature
@property
def logits(self):
return self.base_dist.logits
@property
def probs(self):
    # Event probabilities, delegated to the underlying ExpRelaxedCategorical.
    return self.base_dist.probs | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/relaxed_categorical.py | 0.956887 | 0.74934 | relaxed_categorical.py | pypi
import math
import numbers
import weakref
import torch
from torch.distributions import constraints
from torch.distributions.utils import (_sum_rightmost, broadcast_all,
lazy_property)
from torch.nn.functional import pad, sigmoid
# Public names exported by `from torch.distributions.transforms import *`.
__all__ = [
    'AbsTransform',
    'AffineTransform',
    'ComposeTransform',
    'ExpTransform',
    'LowerCholeskyTransform',
    'PowerTransform',
    'SigmoidTransform',
    'SoftmaxTransform',
    'StickBreakingTransform',
    'Transform',
    'identity_transform',
]
class Transform(object):
    """
    Abstract base class for invertible transformations with computable
    log-det-Jacobians, used primarily by
    :class:`torch.distributions.TransformedDistribution`.

    Subclasses implement one or both of :meth:`_call` and :meth:`_inverse`;
    those that set ``bijective=True`` also implement
    :meth:`log_abs_det_jacobian`.

    Caching (``cache_size=1``) memoizes the most recent ``(x, y)`` pair,
    which helps when an inverse is expensive or numerically unstable.
    Beware that caching can reverse autograd dependencies: with caching,
    ``z = t.inv(t(x))`` returns the cached ``x`` itself, so asking for
    gradients of ``z`` with respect to the intermediate ``y`` will fail,
    even though the uncached computation supports it.

    Args:
        cache_size (int): 0 for no caching (the default) or 1 to cache
            the latest single value. Other sizes are rejected.

    Attributes:
        domain (:class:`~torch.distributions.constraints.Constraint`):
            constraint on valid inputs of this transform.
        codomain (:class:`~torch.distributions.constraints.Constraint`):
            constraint on valid outputs (which are inputs to the inverse).
        bijective (bool): whether ``t.inv(t(x)) == x`` and
            ``t(t.inv(y)) == y`` hold for all ``x`` in the domain and
            ``y`` in the codomain. Non-bijective transforms should at
            least satisfy the weaker pseudoinverse properties
            ``t(t.inv(t(x)) == t(x)`` and ``t.inv(t(t.inv(y))) == t.inv(y)``.
        sign (int or Tensor): +1 or -1 for monotone increasing/decreasing
            bijective univariate transforms.
        event_dim (int): number of trailing dimensions coupled together by
            the transform (0 = pointwise, 1 = vector-wise, 2 = matrix-wise,
            and so on).
    """
    bijective = False
    event_dim = 0

    def __init__(self, cache_size=0):
        self._cache_size = cache_size
        self._inv = None
        if cache_size == 1:
            self._cached_x_y = None, None
        elif cache_size != 0:
            raise ValueError('cache_size must be 0 or 1')

    @property
    def inv(self):
        """
        The inverse :class:`Transform` of this transform, satisfying
        ``t.inv.inv is t``. Held through a weak reference so the pair does
        not keep itself alive.
        """
        inv = self._inv() if self._inv is not None else None
        if inv is None:
            inv = _InverseTransform(self)
            self._inv = weakref.ref(inv)
        return inv

    @property
    def sign(self):
        """
        Sign of the Jacobian determinant, where that makes sense
        (generally only for bijective transforms).
        """
        raise NotImplementedError

    def __eq__(self, other):
        # Transforms compare equal only by identity unless overridden.
        return self is other

    def __ne__(self, other):
        # Necessary for Python2
        return not self.__eq__(other)

    def __call__(self, x):
        """
        Apply the transform, `x => y`, consulting the one-element cache
        when enabled.
        """
        if self._cache_size == 0:
            return self._call(x)
        cached_x, cached_y = self._cached_x_y
        if x is cached_x:
            return cached_y
        y = self._call(x)
        self._cached_x_y = x, y
        return y

    def _inv_call(self, y):
        """
        Apply the inverse, `y => x`, consulting the one-element cache
        when enabled.
        """
        if self._cache_size == 0:
            return self._inverse(y)
        cached_x, cached_y = self._cached_x_y
        if y is cached_y:
            return cached_x
        x = self._inverse(y)
        self._cached_x_y = x, y
        return x

    def _call(self, x):
        """
        Abstract hook computing the forward transformation.
        """
        raise NotImplementedError

    def _inverse(self, y):
        """
        Abstract hook computing the inverse transformation.
        """
        raise NotImplementedError

    def log_abs_det_jacobian(self, x, y):
        """
        Compute `log |dy/dx|` for the given input/output pair.
        """
        raise NotImplementedError
class _InverseTransform(Transform):
"""
Inverts a single :class:`Transform`.
This class is private; please instead use the ``Transform.inv`` property.
"""
def __init__(self, transform):
super(_InverseTransform, self).__init__()
self._inv = transform
@constraints.dependent_property
def domain(self):
return self._inv.codomain
@constraints.dependent_property
def codomain(self):
return self._inv.domain
@property
def bijective(self):
return self._inv.bijective
@property
def sign(self):
return self._inv.sign
@property
def event_dim(self):
return self._inv.event_dim
@property
def inv(self):
return self._inv
def __eq__(self, other):
if not isinstance(other, _InverseTransform):
return False
return self._inv == other._inv
def __call__(self, x):
return self._inv._inv_call(x)
def log_abs_det_jacobian(self, x, y):
return -self._inv.log_abs_det_jacobian(y, x)
class ComposeTransform(Transform):
"""
Composes multiple transforms in a chain.
The transforms being composed are responsible for caching.
Args:
parts (list of :class:`Transform`): A list of transforms to compose.
"""
def __init__(self, parts):
super(ComposeTransform, self).__init__()
self.parts = parts
def __eq__(self, other):
if not isinstance(other, ComposeTransform):
return False
return self.parts == other.parts
@constraints.dependent_property
def domain(self):
if not self.parts:
return constraints.real
return self.parts[0].domain
@constraints.dependent_property
def codomain(self):
if not self.parts:
return constraints.real
return self.parts[-1].codomain
@lazy_property
def bijective(self):
return all(p.bijective for p in self.parts)
@lazy_property
def sign(self):
sign = 1
for p in self.parts:
sign = sign * p.sign
return sign
@lazy_property
def event_dim(self):
return max(p.event_dim for p in self.parts) if self.parts else 0
@property
def inv(self):
inv = None
if self._inv is not None:
inv = self._inv()
if inv is None:
inv = ComposeTransform([p.inv for p in reversed(self.parts)])
self._inv = weakref.ref(inv)
inv._inv = weakref.ref(self)
return inv
def __call__(self, x):
for part in self.parts:
x = part(x)
return x
def log_abs_det_jacobian(self, x, y):
if not self.parts:
return torch.zeros_like(x)
result = 0
for part in self.parts:
y = part(x)
result = result + _sum_rightmost(part.log_abs_det_jacobian(x, y),
self.event_dim - part.event_dim)
x = y
return result
identity_transform = ComposeTransform([])
class ExpTransform(Transform):
r"""
Transform via the mapping :math:`y = \exp(x)`.
"""
domain = constraints.real
codomain = constraints.positive
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, ExpTransform)
def _call(self, x):
return x.exp()
def _inverse(self, y):
return y.log()
def log_abs_det_jacobian(self, x, y):
return x
class PowerTransform(Transform):
r"""
Transform via the mapping :math:`y = x^{\text{exponent}}`.
"""
domain = constraints.positive
codomain = constraints.positive
bijective = True
sign = +1
def __init__(self, exponent, cache_size=0):
super(PowerTransform, self).__init__(cache_size=cache_size)
self.exponent, = broadcast_all(exponent)
def __eq__(self, other):
if not isinstance(other, PowerTransform):
return False
return self.exponent.eq(other.exponent).all().item()
def _call(self, x):
return x.pow(self.exponent)
def _inverse(self, y):
return y.pow(1 / self.exponent)
def log_abs_det_jacobian(self, x, y):
return (self.exponent * y / x).abs().log()
class SigmoidTransform(Transform):
r"""
Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`.
"""
domain = constraints.real
codomain = constraints.unit_interval
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, SigmoidTransform)
def _call(self, x):
return sigmoid(x)
def _inverse(self, y):
return y.log() - (-y).log1p()
def log_abs_det_jacobian(self, x, y):
return -(y.reciprocal() + (1 - y).reciprocal()).log()
class AbsTransform(Transform):
r"""
Transform via the mapping :math:`y = |x|`.
"""
domain = constraints.real
codomain = constraints.positive
def __eq__(self, other):
return isinstance(other, AbsTransform)
def _call(self, x):
return x.abs()
def _inverse(self, y):
return y
class AffineTransform(Transform):
r"""
Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`.
Args:
loc (Tensor or float): Location parameter.
scale (Tensor or float): Scale parameter.
event_dim (int): Optional size of `event_shape`. This should be zero
for univariate random variables, 1 for distributions over vectors,
2 for distributions over matrices, etc.
"""
domain = constraints.real
codomain = constraints.real
bijective = True
def __init__(self, loc, scale, event_dim=0, cache_size=0):
super(AffineTransform, self).__init__(cache_size=cache_size)
self.loc = loc
self.scale = scale
self.event_dim = event_dim
def __eq__(self, other):
if not isinstance(other, AffineTransform):
return False
if isinstance(self.loc, numbers.Number) and isinstance(other.loc, numbers.Number):
if self.loc != other.loc:
return False
else:
if not (self.loc == other.loc).all().item():
return False
if isinstance(self.scale, numbers.Number) and isinstance(other.scale, numbers.Number):
if self.scale != other.scale:
return False
else:
if not (self.scale == other.scale).all().item():
return False
return True
@property
def sign(self):
if isinstance(self.scale, numbers.Number):
return 1 if self.scale > 0 else -1 if self.scale < 0 else 0
return self.scale.sign()
def _call(self, x):
return self.loc + self.scale * x
def _inverse(self, y):
return (y - self.loc) / self.scale
def log_abs_det_jacobian(self, x, y):
shape = x.shape
scale = self.scale
if isinstance(scale, numbers.Number):
result = x.new_empty(shape).fill_(math.log(abs(scale)))
else:
result = torch.abs(scale).log()
if self.event_dim:
result_size = result.size()[:-self.event_dim] + (-1,)
result = result.view(result_size).sum(-1)
shape = shape[:-self.event_dim]
return result.expand(shape)
class SoftmaxTransform(Transform):
r"""
Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then
normalizing.
This is not bijective and cannot be used for HMC. However this acts mostly
coordinate-wise (except for the final normalization), and thus is
appropriate for coordinate-wise optimization algorithms.
"""
domain = constraints.real
codomain = constraints.simplex
event_dim = 1
def __eq__(self, other):
return isinstance(other, SoftmaxTransform)
def _call(self, x):
logprobs = x
probs = (logprobs - logprobs.max(-1, True)[0]).exp()
return probs / probs.sum(-1, True)
def _inverse(self, y):
probs = y
return probs.log()
class StickBreakingTransform(Transform):
"""
Transform from unconstrained space to the simplex of one additional
dimension via a stick-breaking process.
This transform arises as an iterated sigmoid transform in a stick-breaking
construction of the `Dirichlet` distribution: the first logit is
transformed via sigmoid to the first probability and the probability of
everything else, and then the process recurses.
This is bijective and appropriate for use in HMC; however it mixes
coordinates together and is less appropriate for optimization.
"""
domain = constraints.real
codomain = constraints.simplex
bijective = True
event_dim = 1
def __eq__(self, other):
return isinstance(other, StickBreakingTransform)
def _call(self, x):
offset = (x.shape[-1] + 1) - x.new([1]).expand(x.shape).cumsum(-1)
z = sigmoid(x - offset.log())
z_cumprod = (1 - z).cumprod(-1)
y = pad(z, (0, 1), value=1) * pad(z_cumprod, (1, 0), value=1)
return y
def _inverse(self, y):
shape = y.shape[:-1] + (y.shape[-1] - 1,)
offset = (shape[-1] + 1) - y.new([1]).expand(shape).cumsum(-1)
sf = (1 - y.cumsum(-1))[..., :-1]
x = y[..., :-1].log() - sf.log() + offset.log()
return x
def log_abs_det_jacobian(self, x, y):
offset = (x.shape[-1] + 1) - x.new([1]).expand(x.shape).cumsum(-1)
z = sigmoid(x - offset.log())
detJ = ((1 - z).log() + y[..., :-1].log()).sum(-1)
return detJ
class LowerCholeskyTransform(Transform):
"""
Transform from unconstrained matrices to lower-triangular matrices with
nonnegative diagonal entries.
This is useful for parameterizing positive definite matrices in terms of
their Cholesky factorization.
"""
domain = constraints.real
codomain = constraints.lower_cholesky
event_dim = 2
def __eq__(self, other):
return isinstance(other, LowerCholeskyTransform)
def _call_on_event(self, x):
return x.tril(-1) + x.diag().exp().diag()
def _inverse_on_event(self, y):
return y.tril(-1) + y.diag().log().diag()
def _call(self, x):
flat_x = x.contiguous().view((-1,) + x.shape[-2:])
return torch.stack([self._call_on_event(z) for z in flat_x]).view(x.shape)
def _inverse(self, y):
flat_y = y.contiguous().view((-1,) + y.shape[-2:])
return torch.stack([self._inverse_on_event(z) for z in flat_y]).view(y.shape) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/transforms.py | 0.928051 | 0.637369 | transforms.py | pypi |
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _finfo, broadcast_all
class Laplace(Distribution):
r"""
Creates a Laplace distribution parameterized by `loc` and 'scale'.
Example::
>>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
>>> m.sample() # Laplace distributed with loc=0, scale=1
0.1046
[torch.FloatTensor of size 1]
Args:
loc (float or Tensor): mean of the distribution
scale (float or Tensor): scale of the distribution
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
support = constraints.real
has_rsample = True
@property
def mean(self):
return self.loc
@property
def variance(self):
return 2 * self.scale.pow(2)
@property
def stddev(self):
return (2 ** 0.5) * self.scale
def __init__(self, loc, scale, validate_args=None):
self.loc, self.scale = broadcast_all(loc, scale)
if isinstance(loc, Number) and isinstance(scale, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(Laplace, self).__init__(batch_shape, validate_args=validate_args)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
u = self.loc.new(shape).uniform_(_finfo(self.loc).eps - 1, 1)
# TODO: If we ever implement tensor.nextafter, below is what we want ideally.
# u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(-(value - self.loc).abs() / self.scale)
def icdf(self, value):
if self._validate_args:
self._validate_sample(value)
term = value - 0.5
return self.loc - self.scale * (term).sign() * torch.log1p(-2 * term.abs())
def entropy(self):
return 1 + torch.log(2 * self.scale) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/laplace.py | 0.897891 | 0.494019 | laplace.py | pypi |
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _sum_rightmost
class Independent(Distribution):
r"""
Reinterprets some of the batch dims of a distribution as event dims.
This is mainly useful for changing the shape of the result of
:meth:`log_prob`. For example to create a diagonal Normal distribution with
the same shape as a Multivariate Normal distribution (so they are
interchangeable), you can::
>>> loc = torch.zeros(3)
>>> scale = torch.ones(3)
>>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale))
>>> [mvn.batch_shape, mvn.event_shape]
[torch.Size(()), torch.Size((3,))]
>>> normal = Normal(loc, scale)
>>> [normal.batch_shape, normal.event_shape]
[torch.Size((3,)), torch.Size(())]
>>> diagn = Independent(normal, 1)
>>> [diagn.batch_shape, diagn.event_shape]
[torch.Size(()), torch.Size((3,))]
Args:
base_distribution (torch.distributions.distribution.Distribution): a
base distribution
reinterpreted_batch_ndims (int): the number of batch dims to
reinterpret as event dims
"""
arg_constraints = {}
def __init__(self, base_distribution, reinterpreted_batch_ndims, validate_args=None):
if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
raise ValueError("Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
"actual {} vs {}".format(reinterpreted_batch_ndims,
len(base_distribution.batch_shape)))
shape = base_distribution.batch_shape + base_distribution.event_shape
event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape)
batch_shape = shape[:len(shape) - event_dim]
event_shape = shape[len(shape) - event_dim:]
self.base_dist = base_distribution
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
super(Independent, self).__init__(batch_shape, event_shape, validate_args=validate_args)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
if self.reinterpreted_batch_ndims > 0:
return False
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
log_prob = self.base_dist.log_prob(value)
return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims)
def entropy(self):
entropy = self.base_dist.entropy()
return _sum_rightmost(entropy, self.reinterpreted_batch_ndims)
def enumerate_support(self):
if self.reinterpreted_batch_ndims > 0:
raise NotImplementedError("Enumeration over cartesian product is not implemented")
return self.base_dist.enumerate_support() | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/independent.py | 0.945588 | 0.67818 | independent.py | pypi |
import torch
from torch.distributions.distribution import Distribution
from torch.distributions import Categorical
from numbers import Number
from torch.distributions import constraints
from torch.distributions.utils import broadcast_all
class Multinomial(Distribution):
r"""
Creates a Multinomial distribution parameterized by `total_count` and
either `probs` or `logits` (but not both). The innermost dimension of
`probs` indexes over categories. All other dimensions index over batches.
Note that `total_count` need not be specified if only :meth:`log_prob` is
called (see example below)
.. note:: :attr:`probs` will be normalized to be summing to 1.
- :meth:`sample` requires a single shared `total_count` for all
parameters and samples.
- :meth:`log_prob` allows different `total_count` for each parameter and
sample.
Example::
>>> m = Multinomial(100, torch.tensor([ 1, 1, 1, 1]))
>>> x = m.sample() # equal probability of 0, 1, 2, 3
21
24
30
25
[torch.FloatTensor of size 4]]
>>> Multinomial(probs=torch.tensor([1, 1, 1, 1])).log_prob(x)
-4.1338
[torch.FloatTensor of size 1]
Args:
total_count (int): number of trials
probs (Tensor): event probabilities
logits (Tensor): event log probabilities
"""
arg_constraints = {'logits': constraints.real} # Let logits be the canonical parameterization.
@property
def mean(self):
return self.probs * self.total_count
@property
def variance(self):
return self.total_count * self.probs * (1 - self.probs)
def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
if not isinstance(total_count, Number):
raise NotImplementedError('inhomogeneous total_count is not supported')
self.total_count = total_count
self._categorical = Categorical(probs=probs, logits=logits)
batch_shape = self._categorical.batch_shape
event_shape = self._categorical.param_shape[-1:]
super(Multinomial, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def _new(self, *args, **kwargs):
return self._categorical._new(*args, **kwargs)
@constraints.dependent_property
def support(self):
return constraints.integer_interval(0, self.total_count)
@property
def logits(self):
return self._categorical.logits
@property
def probs(self):
return self._categorical.probs
@property
def param_shape(self):
return self._categorical.param_shape
def sample(self, sample_shape=torch.Size()):
sample_shape = torch.Size(sample_shape)
samples = self._categorical.sample(torch.Size((self.total_count,)) + sample_shape)
# samples.shape is (total_count, sample_shape, batch_shape), need to change it to
# (sample_shape, batch_shape, total_count)
shifted_idx = list(range(samples.dim()))
shifted_idx.append(shifted_idx.pop(0))
samples = samples.permute(*shifted_idx)
counts = samples.new(self._extended_shape(sample_shape)).zero_()
counts.scatter_add_(-1, samples, torch.ones_like(samples))
return counts.type_as(self.probs)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits.clone(), value)
log_factorial_n = torch.lgamma(value.sum(-1) + 1)
log_factorial_xs = torch.lgamma(value + 1).sum(-1)
logits[(value == 0) & (logits == -float('inf'))] = 0
log_powers = (logits * value).sum(-1)
return log_factorial_n - log_factorial_xs + log_powers | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/multinomial.py | 0.946584 | 0.752649 | multinomial.py | pypi |
from numbers import Number
import torch
import math
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions import Chi2
from torch.distributions.utils import broadcast_all
class StudentT(Distribution):
r"""
Creates a Student's t-distribution parameterized by `df`.
Example::
>>> m = StudentT(torch.tensor([2.0]))
>>> m.sample() # Student's t-distributed with degrees of freedom=2
0.1046
[torch.FloatTensor of size 1]
Args:
df (float or Tensor): degrees of freedom
"""
arg_constraints = {'df': constraints.positive, 'loc': constraints.real, 'scale': constraints.positive}
support = constraints.real
has_rsample = True
@property
def mean(self):
m = self.loc.clone()
m[self.df <= 1] = float('nan')
return m
@property
def variance(self):
m = self.df.clone()
m[self.df > 2] = self.scale[self.df > 2].pow(2) * self.df[self.df > 2] / (self.df[self.df > 2] - 2)
m[(self.df <= 2) & (self.df > 1)] = float('inf')
m[self.df <= 1] = float('nan')
return m
def __init__(self, df, loc=0., scale=1., validate_args=None):
self.df, self.loc, self.scale = broadcast_all(df, loc, scale)
self._chi2 = Chi2(df)
batch_shape = torch.Size() if isinstance(df, Number) else self.df.size()
super(StudentT, self).__init__(batch_shape, validate_args=validate_args)
def rsample(self, sample_shape=torch.Size()):
# NOTE: This does not agree with scipy implementation as much as other distributions.
# (see https://github.com/fritzo/notebooks/blob/master/debug-student-t.ipynb). Using DoubleTensor
# parameters seems to help.
# X ~ Normal(0, 1)
# Z ~ Chi2(df)
# Y = X / sqrt(Z / df) ~ StudentT(df)
shape = self._extended_shape(sample_shape)
X = self.df.new(shape).normal_()
Z = self._chi2.rsample(sample_shape)
Y = X * torch.rsqrt(Z / self.df)
return self.loc + self.scale * Y
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
y = (value - self.loc) / self.scale
Z = (self.scale.log() +
0.5 * self.df.log() +
0.5 * math.log(math.pi) +
torch.lgamma(0.5 * self.df) -
torch.lgamma(0.5 * (self.df + 1.)))
return -0.5 * (self.df + 1.) * torch.log1p(y**2. / self.df) - Z
def entropy(self):
lbeta = torch.lgamma(0.5 * self.df) + math.lgamma(0.5) - torch.lgamma(0.5 * (self.df + 1))
return (self.scale.log() +
0.5 * (self.df + 1) *
(torch.digamma(0.5 * (self.df + 1)) - torch.digamma(0.5 * self.df)) +
0.5 * self.df.log() + lbeta) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/studentT.py | 0.953232 | 0.473049 | studentT.py | pypi |
from collections import namedtuple
from functools import update_wrapper
from numbers import Number
import math
import torch
import torch.nn.functional as F
# This follows semantics of numpy.finfo.
_Finfo = namedtuple('_Finfo', ['eps', 'tiny'])
_FINFO = {
torch.HalfStorage: _Finfo(eps=0.00097656, tiny=6.1035e-05),
torch.FloatStorage: _Finfo(eps=1.19209e-07, tiny=1.17549e-38),
torch.DoubleStorage: _Finfo(eps=2.22044604925e-16, tiny=2.22507385851e-308),
torch.cuda.HalfStorage: _Finfo(eps=0.00097656, tiny=6.1035e-05),
torch.cuda.FloatStorage: _Finfo(eps=1.19209e-07, tiny=1.17549e-38),
torch.cuda.DoubleStorage: _Finfo(eps=2.22044604925e-16, tiny=2.22507385851e-308),
}
def _finfo(tensor):
r"""
Return floating point info about a `Tensor`:
- `.eps` is the smallest number that can be added to 1 without being lost.
- `.tiny` is the smallest positive number greater than zero
(much smaller than `.eps`).
Args:
tensor (Tensor): tensor of floating point data.
Returns:
_Finfo: a `namedtuple` with fields `.eps` and `.tiny`.
"""
return _FINFO[tensor.storage_type()]
def expand_n(v, n):
r"""
Cleanly expand float or Tensor parameters.
"""
if isinstance(v, Number):
return torch.Tensor([v]).expand(n, 1)
else:
return v.expand(n, *v.size())
def _broadcast_shape(shapes):
r"""
Given a list of tensor sizes, returns the size of the resulting broadcasted
tensor.
Args:
shapes (list of torch.Size): list of tensor sizes
"""
shape = torch.Size()
for s in shapes:
shape = torch._C._infer_size(s, shape)
return shape
def broadcast_all(*values):
r"""
Given a list of values (possibly containing numbers), returns a list where each
value is broadcasted based on the following rules:
- `torch.Tensor` instances are broadcasted as per the `broadcasting rules
<http://pytorch.org/docs/master/notes/broadcasting.html>`_
- numbers.Number instances (scalars) are upcast to Tensors having
the same size and type as the first tensor passed to `values`. If all the
values are scalars, then they are upcasted to Tensors having size
`(1,)`.
Args:
values (list of `numbers.Number` or `torch.Tensor`)
Raises:
ValueError: if any of the values is not a `numbers.Number` or
`torch.Tensor` instance
"""
values = list(values)
scalar_idxs = [i for i in range(len(values)) if isinstance(values[i], Number)]
tensor_idxs = [i for i in range(len(values)) if isinstance(values[i], torch.Tensor)]
if len(scalar_idxs) + len(tensor_idxs) != len(values):
raise ValueError('Input arguments must all be instances of numbers.Number or torch.Tensor.')
if tensor_idxs:
broadcast_shape = _broadcast_shape([values[i].size() for i in tensor_idxs])
for idx in tensor_idxs:
values[idx] = values[idx].expand(broadcast_shape)
template = values[tensor_idxs[0]]
for idx in scalar_idxs:
values[idx] = template.new(template.size()).fill_(values[idx])
else:
for idx in scalar_idxs:
values[idx] = torch.tensor(float(values[idx]))
return values
def _sum_rightmost(value, dim):
r"""
Sum out ``dim`` many rightmost dimensions of a given tensor.
Args:
value (Tensor): A tensor of ``.dim()`` at least ``dim``.
dim (int): The number of rightmost dims to sum out.
"""
if dim == 0:
return value
return value.contiguous().view(value.shape[:-dim] + (-1,)).sum(-1)
def softmax(tensor):
r"""
Returns the result with softmax applied to :attr:`tensor` along the last
dimension.
"""
return F.softmax(tensor, -1)
def log_sum_exp(tensor, keepdim=True):
r"""
Numerically stable implementation for the `LogSumExp` operation. The
summing is done along the last dimension.
Args:
tensor (torch.Tensor)
keepdim (Boolean): Whether to retain the last dimension on summing.
"""
max_val = tensor.max(dim=-1, keepdim=True)[0]
return max_val + (tensor - max_val).exp().sum(dim=-1, keepdim=keepdim).log()
def logits_to_probs(logits, is_binary=False):
r"""
Converts a tensor of logits into probabilities. Note that for the
binary case, each value denotes log odds, whereas for the
multi-dimensional case, the values along the last dimension denote
the log probabilities (possibly unnormalized) of the events.
"""
if is_binary:
return F.sigmoid(logits)
return softmax(logits)
def clamp_probs(probs):
eps = _finfo(probs).eps
return probs.clamp(min=eps, max=1 - eps)
def probs_to_logits(probs, is_binary=False):
r"""
Converts a tensor of probabilities into logits. For the binary case,
this denotes the probability of occurrence of the event indexed by `1`.
For the multi-dimensional case, the values along the last dimension
denote the probabilities of occurrence of each of the events.
"""
ps_clamped = clamp_probs(probs)
if is_binary:
return torch.log(ps_clamped) - torch.log1p(-ps_clamped)
return torch.log(ps_clamped)
def batch_tril(bmat, diagonal=0):
"""
Given a batch of matrices, returns the lower triangular part of each matrix, with
the other entries set to 0. The argument `diagonal` has the same meaning as in
`torch.tril`.
"""
if bmat.dim() == 2:
return bmat.tril(diagonal=diagonal)
else:
return bmat * torch.tril(bmat.new(*bmat.shape[-2:]).fill_(1.0), diagonal=diagonal)
class lazy_property(object):
r"""
Used as a decorator for lazy loading of class attributes. This uses a
non-data descriptor that calls the wrapped method to compute the property on
first call; thereafter replacing the wrapped method into an instance
attribute.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
update_wrapper(self, wrapped)
def __get__(self, instance, obj_type=None):
if instance is None:
return self
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/utils.py | 0.940993 | 0.583856 | utils.py | pypi |
from numbers import Number
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import _finfo, broadcast_all
def _dirichlet_sample_nograd(concentration):
probs = torch._standard_gamma(concentration)
probs /= probs.sum(-1, True)
eps = _finfo(probs).eps
return probs.clamp_(min=eps, max=1 - eps)
# This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
total = concentration.sum(-1, True).expand_as(concentration)
grad = torch._dirichlet_grad(x, concentration, total)
return grad * (grad_output - (x * grad_output).sum(-1, True))
class _Dirichlet(Function):
@staticmethod
def forward(ctx, concentration):
x = _dirichlet_sample_nograd(concentration)
ctx.save_for_backward(x, concentration)
return x
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
x, concentration = ctx.saved_tensors
return _Dirichlet_backward(x, concentration, grad_output)
class Dirichlet(ExponentialFamily):
r"""
Creates a Dirichlet distribution parameterized by concentration `concentration`.
Example::
>>> m = Dirichlet(torch.tensor([0.5, 0.5]))
>>> m.sample() # Dirichlet distributed with concentrarion concentration
0.1046
0.8954
[torch.FloatTensor of size 2]
Args:
concentration (Tensor): concentration parameter of the distribution
(often referred to as alpha)
"""
arg_constraints = {'concentration': constraints.positive}
support = constraints.simplex
has_rsample = True
def __init__(self, concentration, validate_args=None):
self.concentration, = broadcast_all(concentration)
batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
super(Dirichlet, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def rsample(self, sample_shape=()):
shape = self._extended_shape(sample_shape)
concentration = self.concentration.expand(shape)
if isinstance(concentration, torch.Tensor):
return _Dirichlet.apply(concentration)
return _dirichlet_sample_nograd(concentration)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
return self.concentration / self.concentration.sum(-1, True)
@property
def variance(self):
con0 = self.concentration.sum(-1, True)
return self.concentration * (con0 - self.concentration) / (con0.pow(2) * (con0 + 1))
def entropy(self):
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
return (self.concentration, )
def _log_normalizer(self, x):
return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1)) | /rpi3.torch-0.1.0-cp35-cp35m-linux_armv7l.whl/torch/distributions/dirichlet.py | 0.963109 | 0.506652 | dirichlet.py | pypi |
import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.distribution import Distribution
class OneHotCategorical(Distribution):
    r"""
    One-hot categorical distribution parameterized by :attr:`probs` or
    :attr:`logits`.
    Samples are one-hot coded vectors of size ``probs.size(-1)``.
    .. note:: :attr:`probs` will be normalized to be summing to 1.
    See also: :func:`torch.distributions.Categorical` for specifications of
    :attr:`probs` and :attr:`logits`.
    Example::
        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
         0
         0
         1
         0
        [torch.FloatTensor of size 4]
    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        # All parameter handling is delegated to an internal Categorical.
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(OneHotCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def mean(self):
        # E[one_hot] is simply the event probabilities.
        return self._categorical.probs

    @property
    def variance(self):
        # Each coordinate is Bernoulli(p_i), hence variance p_i * (1 - p_i).
        p = self._categorical.probs
        return p * (1 - p)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        out = probs.new(self._extended_shape(sample_shape)).zero_()
        idx = self._categorical.sample(sample_shape)
        if idx.dim() < out.dim():
            idx = idx.unsqueeze(-1)
        # Place a 1 at each sampled class position.
        return out.scatter_(-1, idx, 1)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Recover class indices from the one-hot encoding.
        _, indices = value.max(-1)
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self):
        n = self.event_shape[0]
        eye = self._new((n, n))
        torch.eye(n, out=eye.data)
        eye = eye.view((n,) + (1,) * len(self.batch_shape) + (n,))
        return eye.expand((n,) + self.batch_shape + (n,))
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
class Exponential(ExponentialFamily):
    r"""
    Exponential distribution parameterized by `rate`.
    Example::
        >>> m = Exponential(torch.tensor([1.0]))
        >>> m.sample()  # Exponential distributed with rate=1
         0.1046
        [torch.FloatTensor of size 1]
    Args:
        rate (float or Tensor): rate = 1 / scale of the distribution
    """
    arg_constraints = {'rate': constraints.positive}
    support = constraints.positive
    has_rsample = True
    _mean_carrier_measure = 0

    def __init__(self, rate, validate_args=None):
        self.rate, = broadcast_all(rate)
        if isinstance(rate, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.rate.size()
        super(Exponential, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        # E[X] = 1 / rate
        return self.rate.reciprocal()

    @property
    def stddev(self):
        # For the exponential distribution the std equals the mean.
        return self.rate.reciprocal()

    @property
    def variance(self):
        return self.rate.pow(-2)

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        # u ~ Exp(1); u / rate ~ Exp(rate), and the division keeps the
        # sample differentiable with respect to `rate`.
        u = self.rate.new(shape).exponential_()
        return u / self.rate

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return self.rate.log() - self.rate * value

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return 1 - torch.exp(-self.rate * value)

    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return -torch.log(1 - value) / self.rate

    def entropy(self):
        return 1.0 - torch.log(self.rate)

    @property
    def _natural_params(self):
        return (-self.rate, )

    def _log_normalizer(self, x):
        return -torch.log(-x)
r"""
The following constraints are implemented:
- ``constraints.boolean``
- ``constraints.dependent``
- ``constraints.greater_than(lower_bound)``
- ``constraints.integer_interval(lower_bound, upper_bound)``
- ``constraints.interval(lower_bound, upper_bound)``
- ``constraints.less_than(upper_bound)``
- ``constraints.lower_cholesky``
- ``constraints.lower_triangular``
- ``constraints.nonnegative_integer``
- ``constraints.positive``
- ``constraints.positive_definite``
- ``constraints.positive_integer``
- ``constraints.real``
- ``constraints.real_vector``
- ``constraints.simplex``
- ``constraints.unit_interval``
"""
import torch
from torch.distributions.utils import batch_tril
# Public names exported by this module; the corresponding objects are the
# aliases/singletons defined at the bottom of the file.
__all__ = [
    'Constraint',
    'boolean',
    'dependent',
    'dependent_property',
    'greater_than',
    'integer_interval',
    'interval',
    'is_dependent',
    'less_than',
    'lower_cholesky',
    'lower_triangular',
    'nonnegative_integer',
    'positive',
    'positive_definite',
    'positive_integer',
    'real',
    'real_vector',
    'simplex',
    'unit_interval',
]
class Constraint(object):
    """
    Abstract base class for constraints.

    A constraint object describes a region over which a variable is valid,
    e.g. the region within which the variable may be optimized.
    """
    def check(self, value):
        """
        Return a byte tensor of shape `sample_shape + batch_shape` that
        indicates, per event, whether `value` satisfies this constraint.
        """
        raise NotImplementedError
class _Dependent(Constraint):
    """
    Placeholder for a variable whose support depends on other variables;
    no simple coordinate-wise validity check exists for it.
    """
    def check(self, x):
        raise ValueError('Cannot determine validity of dependent constraint')
def is_dependent(constraint):
    # True if `constraint` is a placeholder whose support depends on other
    # variables (see `_Dependent`).
    return isinstance(constraint, _Dependent)
class _DependentProperty(property, _Dependent):
    """
    Decorator that extends @property to act like a `Dependent` constraint when
    called on a class and act like a property when called on an object.
    Example::
        class Uniform(Distribution):
            def __init__(self, low, high):
                self.low = low
                self.high = high
            @constraints.dependent_property
            def support(self):
                return constraints.interval(self.low, self.high)
    """
    # No behavior of its own: `property` supplies instance access and
    # `_Dependent` supplies the class-level constraint behavior.
    pass
class _Boolean(Constraint):
    """
    Constrain to the two values `{0, 1}`.
    """
    def check(self, value):
        is_zero = (value == 0)
        is_one = (value == 1)
        return is_zero | is_one
class _IntegerInterval(Constraint):
    """
    Constrain to the integers within `[lower_bound, upper_bound]`.
    """
    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def check(self, value):
        is_integral = (value % 1 == 0)
        in_range = (self.lower_bound <= value) & (value <= self.upper_bound)
        return is_integral & in_range
class _IntegerLessThan(Constraint):
    """
    Constrain to the integers within `(-inf, upper_bound]`.
    """
    def __init__(self, upper_bound):
        self.upper_bound = upper_bound

    def check(self, value):
        is_integral = (value % 1 == 0)
        return is_integral & (value <= self.upper_bound)
class _IntegerGreaterThan(Constraint):
    """
    Constrain to the integers within `[lower_bound, inf)`.
    """
    def __init__(self, lower_bound):
        self.lower_bound = lower_bound

    def check(self, value):
        is_integral = (value % 1 == 0)
        return is_integral & (value >= self.lower_bound)
class _Real(Constraint):
    """
    Trivially constrain to the extended real line `[-inf, inf]`.
    """
    def check(self, value):
        # Only NaN fails an equality comparison with itself.
        return value == value
class _GreaterThan(Constraint):
    """
    Constrain to the open half line `(lower_bound, inf]`.
    """
    def __init__(self, lower_bound):
        self.lower_bound = lower_bound

    def check(self, value):
        return value > self.lower_bound
class _LessThan(Constraint):
    """
    Constrain to the open half line `[-inf, upper_bound)`.
    """
    def __init__(self, upper_bound):
        self.upper_bound = upper_bound

    def check(self, value):
        return self.upper_bound > value
class _Interval(Constraint):
    """
    Constrain to the closed real interval `[lower_bound, upper_bound]`.
    """
    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def check(self, value):
        above = (self.lower_bound <= value)
        below = (value <= self.upper_bound)
        return above & below
class _Simplex(Constraint):
    """
    Constrain to the unit simplex in the innermost (rightmost) dimension,
    i.e. `x >= 0` and `x.sum(-1) == 1` (up to a small tolerance).
    """
    def check(self, value):
        nonnegative = (value >= 0).all()
        sums_to_one = ((value.sum(-1, True) - 1).abs() < 1e-6).all()
        return nonnegative & sums_to_one
class _LowerTriangular(Constraint):
    """
    Constrain to lower-triangular square matrices.
    """
    def check(self, value):
        # A matrix is lower-triangular iff taking its lower triangle
        # leaves it unchanged.
        tril_part = batch_tril(value)
        flat = (tril_part == value).view(value.shape[:-2] + (-1,))
        return flat.min(-1)[0]
class _LowerCholesky(Constraint):
    """
    Constrain to lower-triangular square matrices with positive diagonals.
    """
    def check(self, value):
        tril_part = batch_tril(value)
        is_tril = (tril_part == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
        n = value.size(-1)
        # Mask selecting the diagonal; off-diagonal entries trivially pass
        # the positivity test below because 0 > -1.
        diag_mask = torch.eye(n, n, out=value.new(n, n))
        has_positive_diag = (value * diag_mask > (diag_mask - 1)).min(-1)[0].min(-1)[0]
        return is_tril & has_positive_diag
class _PositiveDefinite(Constraint):
    """
    Constrain to positive-definite matrices.
    """
    def check(self, value):
        # Flatten all batch dimensions so each matrix is checked individually.
        matrix_shape = value.shape[-2:]
        batch_shape = value.unsqueeze(0).shape[:-2]
        # TODO: replace with batched linear algebra routine when one becomes available
        # note that `symeig()` returns eigenvalues in ascending order
        # NOTE(review): `Tensor.symeig` was removed in recent PyTorch releases;
        # this targets the vendored (older) torch API -- confirm before upgrading.
        flattened_value = value.contiguous().view((-1,) + matrix_shape)
        # Positive-definite iff the smallest eigenvalue is strictly positive.
        return torch.stack([v.symeig(eigenvectors=False)[0][:1] > 0.0
                            for v in flattened_value]).view(batch_shape)
class _RealVector(Constraint):
    """
    Constrain to real-valued vectors. Same as `constraints.real`, but the
    check is additionally reduced across the `event_shape` dimension.
    """
    def check(self, value):
        # `all()` rejects the whole vector if any entry is NaN.
        return (value == value).all()
# Public interface.
# Placeholders for constraints that depend on other variables.
dependent = _Dependent()
dependent_property = _DependentProperty
# Discrete-valued constraints.
boolean = _Boolean()
nonnegative_integer = _IntegerGreaterThan(0)
positive_integer = _IntegerGreaterThan(1)
integer_interval = _IntegerInterval
# Real-valued constraints: singleton instances where the bounds are fixed,
# bare classes where the caller supplies the bounds.
real = _Real()
real_vector = _RealVector()
positive = _GreaterThan(0.)
greater_than = _GreaterThan
less_than = _LessThan
unit_interval = _Interval(0., 1.)
interval = _Interval
simplex = _Simplex()
# Matrix-valued constraints.
lower_triangular = _LowerTriangular()
lower_cholesky = _LowerCholesky()
positive_definite = _PositiveDefinite()
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, _finfo
from torch.nn.functional import binary_cross_entropy_with_logits
class Geometric(Distribution):
    r"""
    Geometric distribution parameterized by `probs`, the success probability of
    the underlying Bernoulli trials. A sample is the number of failures seen
    before the first success, so values are non-negative integers [0, inf).
    Example::
        >>> m = Geometric(torch.tensor([0.3]))
        >>> m.sample()  # underlying Bernoulli has 30% chance 1; 70% chance 0
         2
        [torch.FloatTensor of size 1]
    Args:
        probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
        logits (Number, Tensor): the log-odds of sampling `1`.
    """
    arg_constraints = {'probs': constraints.unit_interval}
    support = constraints.nonnegative_integer

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            self.probs, = broadcast_all(probs)
            if not self.probs.gt(0).all():
                raise ValueError('All elements of probs must be greater than 0')
        else:
            self.logits, = broadcast_all(logits)
        param = logits if probs is None else probs
        batch_shape = torch.Size() if isinstance(param, Number) else param.size()
        super(Geometric, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        # E[X] = (1 - p) / p
        return 1. / self.probs - 1.

    @property
    def variance(self):
        # Var[X] = (1 - p) / p**2
        return (1. / self.probs - 1.) / self.probs

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            # Inverse-CDF sampling; u is bounded away from 0 to avoid log(0).
            u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
            return (u.log() / (-self.probs).log1p()).floor()

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        value, p = broadcast_all(value, self.probs.clone())
        # Zero out p where p == 1 and value == 0 to avoid 0 * (-inf) -> nan;
        # the correct result there is log(1) = 0.
        p[(p == 1) & (value == 0)] = 0
        return value * (-p).log1p() + self.probs.log()

    def entropy(self):
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduce=False) / self.probs
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import probs_to_logits, logits_to_probs, log_sum_exp, lazy_property, broadcast_all
class Categorical(Distribution):
    r"""
    Categorical distribution parameterized by either :attr:`probs` or
    :attr:`logits` (but not both).
    .. note::
        It is equivalent to the distribution that :func:`torch.multinomial`
        samples from.
    Samples are integers from `0 ... K-1` where `K` is probs.size(-1).
    A 1D :attr:`probs` of length `K` gives the relative probability of each
    class; a 2D :attr:`probs` is treated as a batch of such vectors.
    .. note:: :attr:`probs` will be normalized to be summing to 1.
    See also: :func:`torch.multinomial`
    Example::
        >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
         3
        [torch.LongTensor of size 1]
    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex}
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            # Normalize so the last dimension sums to one.
            self.probs = probs / probs.sum(-1, keepdim=True)
        else:
            # Shift into normalized log-space.
            self.logits = logits - log_sum_exp(logits)
        self._param = self.probs if probs is not None else self.logits
        self._num_events = self._param.size()[-1]
        if self._param.ndimension() > 1:
            batch_shape = self._param.size()[:-1]
        else:
            batch_shape = torch.Size()
        super(Categorical, self).__init__(batch_shape, validate_args=validate_args)

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @constraints.dependent_property
    def support(self):
        return constraints.integer_interval(0, self._num_events - 1)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits)

    @property
    def param_shape(self):
        return self._param.size()

    @property
    def mean(self):
        # The mean of a categorical variable is undefined.
        return self.probs.new_tensor(float('nan')).expand(self._extended_shape())

    @property
    def variance(self):
        # The variance of a categorical variable is undefined.
        return self.probs.new_tensor(float('nan')).expand(self._extended_shape())

    def sample(self, sample_shape=torch.Size()):
        sample_shape = self._extended_shape(sample_shape)
        param_shape = sample_shape + torch.Size((self._num_events,))
        probs = self.probs.expand(param_shape)
        # torch.multinomial expects 2D input.
        if self.probs.dim() == 1 or self.probs.size(0) == 1:
            flat_probs = probs.view(-1, self._num_events)
        else:
            flat_probs = probs.contiguous().view(-1, self._num_events)
        flat_sample = torch.multinomial(flat_probs, 1, True)
        return flat_sample.contiguous().view(sample_shape)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        if self.batch_shape:
            value_shape = torch._C._infer_size(value.size(), self.batch_shape)
        else:
            value_shape = value.size()
        param_shape = value_shape + (self._num_events,)
        value = value.expand(value_shape)
        log_pmf = self.logits.expand(param_shape)
        # Select the log-probability of each sampled class index.
        return log_pmf.gather(-1, value.unsqueeze(-1).long()).squeeze(-1)

    def entropy(self):
        p_log_p = self.logits * self.probs
        return -p_log_p.sum(-1)

    def enumerate_support(self):
        num_events = self._num_events
        values = torch.arange(num_events).long()
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        values = values.expand((-1,) + self._batch_shape)
        if self._param.is_cuda:
            values = values.cuda(self._param.get_device())
        return values
import math
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
class Normal(ExponentialFamily):
    r"""
    Normal (Gaussian) distribution parameterized by `loc` and `scale`.
    Example::
        >>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # normally distributed with loc=0 and scale=1
         0.1046
        [torch.FloatTensor of size 1]
    Args:
        loc (float or Tensor): mean of the distribution (often referred to as mu)
        scale (float or Tensor): standard deviation of the distribution
            (often referred to as sigma)
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    has_rsample = True
    _mean_carrier_measure = 0

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super(Normal, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        return self.loc

    @property
    def stddev(self):
        return self.scale

    @property
    def variance(self):
        return self.stddev.pow(2)

    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.normal(self.loc.expand(shape), self.scale.expand(shape))

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        # Reparameterization: loc + scale * eps with eps ~ N(0, 1).
        noise = self.loc.new(shape).normal_()
        return self.loc + noise * self.scale

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        var = (self.scale ** 2)
        if isinstance(self.scale, Number):
            log_scale = math.log(self.scale)
        else:
            log_scale = self.scale.log()
        return -((value - self.loc) ** 2) / (2 * var) - log_scale - math.log(math.sqrt(2 * math.pi))

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        z = (value - self.loc) * self.scale.reciprocal() / math.sqrt(2)
        return 0.5 * (1 + torch.erf(z))

    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return self.loc + self.scale * torch.erfinv(2 * value - 1) * math.sqrt(2)

    def entropy(self):
        return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.scale)

    @property
    def _natural_params(self):
        return (self.loc / self.scale.pow(2), -0.5 * self.scale.pow(2).reciprocal())

    def _log_normalizer(self, x, y):
        return -0.25 * x.pow(2) / y + 0.5 * torch.log(-math.pi / y)
r"""
The ``distributions`` package contains parameterizable probability distributions
and sampling functions. This allows the construction of stochastic computation
graphs and stochastic gradient estimators for optimization. This package
generally follows the design of the `TensorFlow Distributions`_ package.
.. _`TensorFlow Distributions`:
https://arxiv.org/abs/1711.10604
It is not possible to directly backpropagate through random samples. However,
there are two main methods for creating surrogate functions that can be
backpropagated through. These are the score function estimator/likelihood ratio
estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly
seen as the basis for policy gradient methods in reinforcement learning, and the
pathwise derivative estimator is commonly seen in the reparameterization trick
in variational autoencoders. Whilst the score function only requires the value
of samples :math:`f(x)`, the pathwise derivative requires the derivative
:math:`f'(x)`. The next sections discuss these two in a reinforcement learning
example. For more details see
`Gradient Estimation Using Stochastic Computation Graphs`_ .
.. _`Gradient Estimation Using Stochastic Computation Graphs`:
https://arxiv.org/abs/1506.05254
Score function
^^^^^^^^^^^^^^
When the probability density function is differentiable with respect to its
parameters, we only need :meth:`~torch.distributions.Distribution.sample` and
:meth:`~torch.distributions.Distribution.log_prob` to implement REINFORCE:
.. math::
\Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta}
where :math:`\theta` are the parameters, :math:`\alpha` is the learning rate,
:math:`r` is the reward and :math:`p(a|\pi^\theta(s))` is the probability of
taking action :math:`a` in state :math:`s` given policy :math:`\pi^\theta`.
In practice we would sample an action from the output of a network, apply this
action in an environment, and then use ``log_prob`` to construct an equivalent
loss function. Note that we use a negative because optimizers use gradient
descent, whilst the rule above assumes gradient ascent. With a categorical
policy, the code for implementing REINFORCE would be as follows::
probs = policy_network(state)
# Note that this is equivalent to what used to be called multinomial
m = Categorical(probs)
action = m.sample()
next_state, reward = env.step(action)
loss = -m.log_prob(action) * reward
loss.backward()
Pathwise derivative
^^^^^^^^^^^^^^^^^^^
The other way to implement these stochastic/policy gradients would be to use the
reparameterization trick from the
:meth:`~torch.distributions.Distribution.rsample` method, where the
parameterized random variable can be constructed via a parameterized
deterministic function of a parameter-free random variable. The reparameterized
sample therefore becomes differentiable. The code for implementing the pathwise
derivative would be as follows::
params = policy_network(state)
m = Normal(*params)
# Any distribution with .has_rsample == True could work based on the application
action = m.rsample()
next_state, reward = env.step(action) # Assuming that reward is differentiable
loss = -reward
loss.backward()
"""
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
from .independent import Independent
from .kl import kl_divergence, register_kl
from .laplace import Laplace
from .log_normal import LogNormal
from .logistic_normal import LogisticNormal
from .multinomial import Multinomial
from .multivariate_normal import MultivariateNormal
from .normal import Normal
from .one_hot_categorical import OneHotCategorical
from .pareto import Pareto
from .poisson import Poisson
from .relaxed_bernoulli import RelaxedBernoulli
from .relaxed_categorical import RelaxedOneHotCategorical
from .studentT import StudentT
from .transformed_distribution import TransformedDistribution
from .transforms import *
from .uniform import Uniform
# Publicly re-exported distribution classes and helper functions.
__all__ = [
    'Bernoulli',
    'Beta',
    'Binomial',
    'Categorical',
    'Cauchy',
    'Chi2',
    'Dirichlet',
    'Distribution',
    'Exponential',
    'ExponentialFamily',
    'FisherSnedecor',
    'Gamma',
    'Geometric',
    'Gumbel',
    'Independent',
    'Laplace',
    'LogNormal',
    'LogisticNormal',
    'Multinomial',
    'MultivariateNormal',
    'Normal',
    'OneHotCategorical',
    'Pareto',
    'RelaxedBernoulli',
    'RelaxedOneHotCategorical',
    'StudentT',
    'Poisson',
    'Uniform',
    'TransformedDistribution',
    'biject_to',
    'kl_divergence',
    'register_kl',
    'transform_to',
]
# `transforms` is available here as a side effect of `from .transforms import *`
# above: importing a submodule binds it as an attribute of the package, and
# this module *is* the package namespace.
__all__.extend(transforms.__all__)
from numbers import Number
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import _finfo, broadcast_all, lazy_property
def _standard_gamma(concentration):
    # Thin wrapper over the reparameterized Gamma(concentration, 1) sampler.
    # NOTE(review): `Tensor._standard_gamma` is an internal API of the vendored
    # torch build; newer releases expose `torch._standard_gamma` instead -- confirm
    # before upgrading.
    return concentration._standard_gamma()
class Gamma(ExponentialFamily):
    r"""
    Gamma distribution parameterized by shape `concentration` and `rate`.
    Example::
        >>> m = Gamma(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # Gamma distributed with concentration=1 and rate=1
         0.1046
        [torch.FloatTensor of size 1]
    Args:
        concentration (float or Tensor): shape parameter of the distribution
            (often referred to as alpha)
        rate (float or Tensor): rate = 1 / scale of the distribution
            (often referred to as beta)
    """
    arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
    support = constraints.positive
    has_rsample = True
    _mean_carrier_measure = 0

    def __init__(self, concentration, rate, validate_args=None):
        self.concentration, self.rate = broadcast_all(concentration, rate)
        if isinstance(concentration, Number) and isinstance(rate, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.concentration.size()
        super(Gamma, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        # E[X] = alpha / beta
        return self.concentration / self.rate

    @property
    def variance(self):
        # Var[X] = alpha / beta**2
        return self.concentration / self.rate.pow(2)

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
        # Clamp away from zero without recording the op in the autograd graph.
        value.data.clamp_(min=_finfo(value).tiny)
        return value

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return (self.concentration * torch.log(self.rate) +
                (self.concentration - 1) * torch.log(value) -
                self.rate * value - torch.lgamma(self.concentration))

    def entropy(self):
        return (self.concentration - torch.log(self.rate) + torch.lgamma(self.concentration) +
                (1.0 - self.concentration) * torch.digamma(self.concentration))

    @property
    def _natural_params(self):
        return (self.concentration - 1, -self.rate)

    def _log_normalizer(self, x, y):
        return torch.lgamma(x + 1) + (x + 1) * torch.log(-y.reciprocal())
import torch
from numbers import Number
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import SigmoidTransform
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs
class LogitRelaxedBernoulli(Distribution):
    r"""
    The logit of a RelaxedBernoulli distribution, parameterized by `probs` or
    `logits` together with a `temperature`.
    Samples are logits of values in (0, 1). See [1] for more details.
    Args:
        temperature (Tensor):
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`
    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables
    (Maddison et al, 2017)
    [2] Categorical Reparametrization with Gumbel-Softmax
    (Jang et al, 2017)
    """
    arg_constraints = {'probs': constraints.unit_interval}
    support = constraints.real

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        self.temperature = temperature
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            scalar_input = isinstance(probs, Number)
            self.probs, = broadcast_all(probs)
        else:
            scalar_input = isinstance(logits, Number)
            self.logits, = broadcast_all(logits)
        self._param = self.probs if probs is not None else self.logits
        batch_shape = torch.Size() if scalar_input else self._param.size()
        super(LogitRelaxedBernoulli, self).__init__(batch_shape, validate_args=validate_args)

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        probs = clamp_probs(self.probs.expand(shape))
        uniforms = clamp_probs(self.probs.new(shape).uniform_())
        # Gumbel-style reparameterization: (logit(u) + logit(p)) / temperature.
        return (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        diff = logits - value.mul(self.temperature)
        return self.temperature.log() + diff - 2 * diff.exp().log1p()
class RelaxedBernoulli(TransformedDistribution):
    r"""
    Continuous relaxation of the Bernoulli distribution, parametrized by
    `temperature` and either `probs` or `logits`. Values lie in (0, 1) and
    samples are reparametrizable.
    Example::
        >>> m = RelaxedBernoulli(torch.tensor([2.2]),
                                 torch.tensor([0.1, 0.2, 0.3, 0.99]))
        >>> m.sample()
         0.2951
         0.3442
         0.8918
         0.9021
        [torch.FloatTensor of size 4]
    Args:
        temperature (Tensor):
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`
    """
    arg_constraints = {'probs': constraints.unit_interval}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        # A RelaxedBernoulli sample is the sigmoid of a LogitRelaxedBernoulli sample.
        base = LogitRelaxedBernoulli(temperature, probs, logits)
        super(RelaxedBernoulli, self).__init__(base, SigmoidTransform(), validate_args=validate_args)

    @property
    def temperature(self):
        return self.base_dist.temperature

    @property
    def logits(self):
        return self.base_dist.logits

    @property
    def probs(self):
        return self.base_dist.probs
import torch
import sys
import ast
import inspect
import string
from textwrap import dedent
from functools import partial
from collections import namedtuple
from torch._C._jit_tree_views import *
PY2 = sys.version_info[0] == 2

# Names the compiler reserves for itself: anything carrying the internal
# prefix, plus a small set of builtin statement names.
_reserved_prefix = '__jit'
_reserved_names = {'print'}
_identifier_chars = set(string.ascii_lowercase + string.ascii_uppercase + string.digits)


def is_reserved_name(name):
    """Return True if `name` may not be used as an identifier in compiled code."""
    if name in _reserved_names:
        return True
    return name.startswith(_reserved_prefix)
# Human-readable names for AST node types, used when reporting unsupported
# constructs to the user.
pretty_node_names = {
    ast.FunctionDef: "function definitions",
    ast.For: "for loops",
    ast.Delete: "del statements",
    ast.ClassDef: "class definitions",
    ast.With: "with statements",
    ast.Raise: "raise statements",
    ast.Assert: "assertions",
    ast.Import: "import statements",
    ast.ImportFrom: "import statements",
    ast.Global: "global variables",
    ast.Break: "break statements",
    ast.Continue: "continue statements",
}
# The source token that starts each construct; its length determines the
# highlighted range in error messages.
node_start_tokens = {
    ast.FunctionDef: "def",
    ast.For: "for",
    ast.Delete: "del",
    ast.ClassDef: "class",
    ast.With: "with",
    ast.Raise: "raise",
    ast.Assert: "assert",
    ast.Import: "import",
    ast.ImportFrom: "from",
    ast.Global: "global",
    ast.Break: "break",
    ast.Continue: "continue",
}
if PY2:
    # Node types that only exist in the Python 2 grammar.
    pretty_node_names.update({
        ast.Print: "print statements",
        ast.TryExcept: "try blocks",
        ast.TryFinally: "try blocks",
        ast.Exec: "exec statements",
    })
    node_start_tokens.update({
        ast.Print: "print",
        ast.TryExcept: "try",
        ast.TryFinally: "try",
        ast.Exec: "exec",
    })
else:
    # Node types that only exist in the Python 3 grammar.
    pretty_node_names.update({
        ast.AsyncFunctionDef: "async function definitions",
        ast.AsyncFor: "async for loops",
        ast.AsyncWith: "async with statements",
        ast.Try: "try blocks",
        ast.Nonlocal: "nonlocal variables",
    })
    node_start_tokens.update({
        ast.AsyncFunctionDef: "async def",
        ast.AsyncFor: "async for",
        ast.AsyncWith: "async with",
        ast.Try: "try",
        ast.Nonlocal: "nonlocal",
    })
if sys.version_info >= (3, 6):
    # Annotated assignment was added in Python 3.6.
    pretty_node_names.update({
        ast.AnnAssign: "annotated assignments",
    })
    # NB: no specific token for AnnAssign
class FrontendError(Exception):
    """Base error raised by the JIT frontend, optionally carrying a source range."""

    def __init__(self, source_range, msg):
        self.source_range = source_range
        self.msg = msg

    def __str__(self):
        # Append a highlighted source excerpt when a range is available.
        if self.source_range is None:
            return self.msg
        return self.msg + '\n' + self.source_range.highlight()
class NotSupportedError(FrontendError):
    """Raised when the Python source uses a construct the JIT frontend cannot compile."""
    pass
class UnsupportedNodeError(NotSupportedError):
    """NotSupportedError specialized for an unsupported AST node kind."""

    def __init__(self, ctx, offending_node):
        node_type = type(offending_node)
        # Fall back to a single-character span when we have no token
        # registered for this node kind.
        range_len = len(node_start_tokens.get(node_type, ' '))
        source_range = ctx.make_range(offending_node.lineno,
                                      offending_node.col_offset,
                                      offending_node.col_offset + range_len)
        feature_name = pretty_node_names.get(node_type, node_type.__name__)
        msg = "{} aren't supported".format(feature_name)
        super(NotSupportedError, self).__init__(source_range, msg)
class FrontendTypeError(FrontendError):
    """Raised for type errors detected while compiling Python source."""
    pass
def get_jit_ast(fn):
    """Parse *fn*'s source and lower it to the JIT's ``Def`` AST."""
    source = dedent(inspect.getsource(fn))
    parsed = ast.parse(source)
    body = parsed.body
    is_single_def = len(body) == 1 and isinstance(body[0], ast.FunctionDef)
    if not is_single_def:
        raise RuntimeError("expected a single top-level function")
    return build_def(SourceRangeFactory(source), body[0])
class Builder(object):
    """Dispatches an AST node to the ``build_<NodeName>`` method of a subclass."""
    def __call__(self, ctx, node):
        handler_name = 'build_' + node.__class__.__name__
        handler = getattr(self, handler_name, None)
        if handler is None:
            # No build_* method is registered for this node kind.
            raise UnsupportedNodeError(ctx, node)
        return handler(ctx, node)
def build_def(ctx, py_def):
    """Lower a Python ``ast.FunctionDef`` into the JIT's ``Def`` node.

    The range covers just the ``def`` keyword so errors can point at the
    definition site.
    """
    r = ctx.make_range(py_def.lineno, py_def.col_offset,
                       py_def.col_offset + len("def"))
    # Fix: dropped the dead ``returns``/``ret_body`` locals that were
    # initialized but never read.
    return Def(Ident(r, py_def.name),
               build_param_list(ctx, py_def.args),
               [build_stmt(ctx, stmt) for stmt in py_def.body])
# Shared message for every unsupported argument-list feature.
_vararg_kwarg_err = ("Compiled functions can't take variable number of arguments, "
                     "have default values for arguments, nor keyword-only arguments")
def build_param_list(ctx, py_args):
    """Convert an ``ast.arguments`` node into a list of ``Param`` nodes."""
    has_varargs = py_args.vararg is not None or py_args.kwarg is not None
    if has_varargs or py_args.defaults:
        raise ValueError(_vararg_kwarg_err)
    # Keyword-only arguments only exist on Python 3.
    if not PY2 and (py_args.kw_defaults or py_args.kwonlyargs):
        raise ValueError(_vararg_kwarg_err)
    return [build_param(ctx, py_arg) for py_arg in py_args.args]
def build_param(ctx, py_arg):
    """Convert a single Python argument node into a ``Param``.

    Every parameter is typed as a Tensor, since that is the only parameter
    type this frontend accepts.
    """
    # NB: In Python3 py_arg is a pair of (str arg, expr? annotation)
    # In Python2 py_arg is a Name (Expr subclass)
    if getattr(py_arg, 'annotation', None) is not None:
        raise ValueError("Compiled functions don't support annotations")
    name = py_arg.id if PY2 else py_arg.arg
    # The range spans exactly the parameter name.
    r = ctx.make_range(py_arg.lineno, py_arg.col_offset, py_arg.col_offset + len(name))
    return Param(TensorType(r), Ident(r, name))
class StmtBuilder(Builder):
    """Builds JIT statement nodes (``Assign``, ``Return``, ``While``, ...)
    from Python statement AST nodes via the ``Builder`` dispatch."""
    # Augmented-assignment operators we can translate (e.g. ``x += y``).
    augassign_map = {
        ast.Add: '+',
        ast.Sub: '-',
        ast.Mult: '*',
        ast.Div: '/',
    }
    @staticmethod
    def build_Expr(ctx, stmt):
        # A bare expression used as a statement.
        return ExprStmt([build_expr(ctx, stmt.value)])
    @staticmethod
    def get_assign_lhs_expr(ctx, expr):
        # Only plain names and starred names may appear on an assignment LHS.
        var = build_expr(ctx, expr)
        if not isinstance(var, Var) and not isinstance(var, Starred):
            raise NotSupportedError(var.range(),
                                    "the only expressions allowed on the left hand side of "
                                    "assignments are variable names and starred expressions")
        return var
    @staticmethod
    def build_Assign(ctx, stmt):
        rhs = build_expr(ctx, stmt.value)
        # ``a = b = rhs`` style chains are rejected.
        if len(stmt.targets) > 1:
            start_point = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + 1)
            raise NotSupportedError(ctx.make_raw_range(start_point.start, rhs.range().end),
                                    "Performing multiple assignments in a single line isn't supported")
        py_lhs = stmt.targets[0]
        # A tuple target unpacks into multiple LHS expressions.
        py_lhs_exprs = py_lhs.elts if isinstance(py_lhs, ast.Tuple) else [py_lhs]
        return Assign([StmtBuilder.get_assign_lhs_expr(ctx, e) for e in py_lhs_exprs], '=', rhs)
    @staticmethod
    def build_Return(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("return"))
        # ``return a, b`` returns multiple values; a bare ``return`` yields none.
        values = (stmt.value,) if not isinstance(stmt.value, ast.Tuple) else stmt.value.elts
        return Return(r, [build_expr(ctx, val) for val in values if val is not None])
    @staticmethod
    def build_AugAssign(ctx, stmt):
        lhs = [StmtBuilder.get_assign_lhs_expr(ctx, stmt.target)]
        rhs = build_expr(ctx, stmt.value)
        op = type(stmt.op)
        if op in StmtBuilder.augassign_map:
            op_token = StmtBuilder.augassign_map[op]
        else:
            # Point the error at the ``<op>=`` token just before the RHS.
            raise NotSupportedError(
                find_before(ctx, rhs.range().start, '=', offsets=(-1, 0)),
                "unsupported kind of augumented assignment: " + op.__name__)
        return Assign(lhs, op_token, rhs)
    @staticmethod
    def build_While(ctx, stmt):
        if stmt.orelse:
            # TODO: try to recover the location of else:? Python doesn't give us useful
            # annotations in this case
            raise NotSupportedError(None, "else branches of while loops aren't supported")
        r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("while"))
        return While(r, build_expr(ctx, stmt.test), [build_stmt(ctx, s) for s in stmt.body])
    @staticmethod
    def build_For(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("for"))
        return For(
            r, [StmtBuilder.get_assign_lhs_expr(ctx, stmt.target)],
            [build_expr(ctx, stmt.iter)], [build_stmt(ctx, s) for s in stmt.body])
    @staticmethod
    def build_If(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("if"))
        return If(r, build_expr(ctx, stmt.test),
                  [build_stmt(ctx, s) for s in stmt.body],
                  [build_stmt(ctx, s) for s in stmt.orelse])
    @staticmethod
    def build_Print(ctx, stmt):
        # Python 2 ``print`` statement; lowered to a call of a ``print`` variable.
        r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("print"))
        if stmt.dest:
            raise NotSupportedError(r, "print statements with non-default destinations aren't supported")
        args = [build_expr(ctx, val) for val in stmt.values]
        return ExprStmt([Apply(Var(Ident(r, "print")), args, [])])
class ExprBuilder(Builder):
    """Builds JIT expression nodes from Python expression AST nodes via the
    ``Builder`` dispatch.  Unsupported operators raise ``NotSupportedError``
    with a range pointing at the offending source."""
    binop_map = {
        ast.Add: '+',
        ast.Sub: '-',
        ast.Mult: '*',
        ast.Div: '/',
        ast.Pow: '**',
    }
    if not PY2:
        # The matrix-multiply operator only exists on Python 3.5+.
        binop_map[ast.MatMult] = '@'
    unop_map = {
        ast.Not: 'not',
        ast.USub: '-',
    }
    boolop_map = {
        ast.And: 'and',
        ast.Or: 'or',
    }
    cmpop_map = {
        ast.Eq: '==',
        ast.NotEq: '!=',
        ast.LtE: '<=',
        ast.Lt: '<',
        ast.GtE: '>=',
        ast.Gt: '>',
    }
    @staticmethod
    def build_Attribute(ctx, expr):
        # NB: the only attributes we support are for getting methods
        value = build_expr(ctx, expr.value)
        # <sigh> name is just a string, so it's not annotated in any way.
        # Recover its source range by scanning past the dot in the raw source.
        source = ctx.source
        pos = find_after(ctx, value.range().end, '.').end  # Start with the dot
        while source[pos] in string.whitespace:  # Skip whitespace
            pos += 1
        start_pos = pos
        while source[pos] in _identifier_chars:  # Find the identifier itself
            pos += 1
        name_range = ctx.make_raw_range(start_pos, pos)
        return Select(value, Ident(name_range, expr.attr))
    @staticmethod
    def build_Call(ctx, expr):
        func = build_expr(ctx, expr.func)
        args = [build_expr(ctx, py_arg) for py_arg in expr.args]
        # Python 2 keeps ``*args`` in a separate ``starargs`` field.
        if hasattr(expr, 'starargs') and expr.starargs:
            stararg_expr = build_expr(ctx, expr.starargs)
            args += [Starred(stararg_expr.range(), stararg_expr)]
        kwargs = []
        for kw in expr.keywords:
            kw_expr = build_expr(ctx, kw.value)
            # XXX: we could do a better job at figuring out the range for the name here
            kwargs.append(Attribute(Ident(kw_expr.range(), kw.arg), kw_expr))
        return Apply(func, args, kwargs)
    @staticmethod
    def build_Name(ctx, expr):
        r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(expr.id))
        if expr.id.startswith(_reserved_prefix):
            raise NotSupportedError(r, "names of variables used in JIT-ed functions "
                                       "can't start with " + _reserved_prefix)
        # On Python 2, True/False parse as plain names rather than constants.
        if expr.id == "True":
            return TrueLiteral(r)
        elif expr.id == "False":
            return FalseLiteral(r)
        return Var(Ident(r, expr.id))
    @staticmethod
    def build_NameConstant(ctx, expr):
        text = "True" if expr.value else "False"
        r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(text))
        if expr.value:
            return TrueLiteral(r)
        else:
            return FalseLiteral(r)
    @staticmethod
    def build_BinOp(ctx, expr):
        lhs = build_expr(ctx, expr.left)
        rhs = build_expr(ctx, expr.right)
        op = type(expr.op)
        op_token = ExprBuilder.binop_map.get(op)
        if op_token is None:
            # Highlight the region between the operands (i.e. the operator).
            err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
            raise NotSupportedError(err_range, "unsupported binary operator: " + op.__name__)
        return BinOp(op_token, lhs, rhs)
    @staticmethod
    def build_UnaryOp(ctx, expr):
        sub_expr = build_expr(ctx, expr.operand)
        op = type(expr.op)
        op_token = ExprBuilder.unop_map.get(op)
        # Fix: check for an unsupported operator *before* using op_token --
        # the old code computed len(op_token) first, so an unknown unary
        # operator crashed with TypeError instead of raising the intended
        # NotSupportedError.
        if op_token is None:
            start = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
            err_range = ctx.make_raw_range(start.start, sub_expr.range().end)
            raise NotSupportedError(err_range, "unsupported unary operator: " + op.__name__)
        r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(op_token))
        return UnaryOp(r, op_token, sub_expr)
    @staticmethod
    def build_BoolOp(ctx, expr):
        if len(expr.values) < 2:
            raise AssertionError("expected at least 2 values in BoolOp, but got " + str(len(expr.values)))
        sub_exprs = [build_expr(ctx, sub_expr) for sub_expr in expr.values]
        op = type(expr.op)
        op_token = ExprBuilder.boolop_map.get(op)
        if op_token is None:
            err_range = ctx.make_raw_range(sub_exprs[0].range().end, sub_exprs[1].range().start)
            raise NotSupportedError(err_range, "unsupported boolean operator: " + op.__name__)
        # Fold ``a and b and c`` into a left-leaning chain of binary ops.
        lhs = sub_exprs[0]
        for rhs in sub_exprs[1:]:
            lhs = BinOp(op_token, lhs, rhs)
        return lhs
    @staticmethod
    def build_IfExp(ctx, expr):
        return TernaryIf(build_expr(ctx, expr.test),
                         build_expr(ctx, expr.body),
                         build_expr(ctx, expr.orelse))
    @staticmethod
    def build_Compare(ctx, expr):
        # ``a < b < c`` desugars into ``(a < b) and (b < c)``.
        operands = [build_expr(ctx, e) for e in [expr.left] + list(expr.comparators)]
        result = None
        for lhs, op_, rhs in zip(operands, expr.ops, operands[1:]):
            op = type(op_)
            op_token = ExprBuilder.cmpop_map.get(op)
            if op_token is None:
                err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
                raise NotSupportedError(err_range, "unsupported comparison operator: " + op.__name__)
            cmp_expr = BinOp(op_token, lhs, rhs)
            if result is None:
                result = cmp_expr
            else:
                result = BinOp('and', result, cmp_expr)
        return result
    @staticmethod
    def build_Subscript(ctx, expr):
        base = build_expr(ctx, expr.value)
        sub_type = type(expr.slice)
        if sub_type is ast.Index:
            index = build_expr(ctx, expr.slice.value)
            return Gather(base, index)
        elif sub_type is ast.Slice:
            lower = build_expr(ctx, expr.slice.lower) if expr.slice.lower is not None else None
            upper = build_expr(ctx, expr.slice.upper) if expr.slice.upper is not None else None
            if expr.slice.step is not None:
                step = build_expr(ctx, expr.slice.step)
                raise NotSupportedError(step.range(), "slices with ranges are not supported yet")
            return Slice(base, lower, upper)
        elif sub_type is ast.ExtSlice:
            raise NotSupportedError(base.range(), "slicing multiple dimensions at the same time isn't supported yet")
        else:  # Ellipsis (can only happen in Python 2)
            raise NotSupportedError(base.range(), "ellipsis is not supported")
    @staticmethod
    def build_List(ctx, expr):
        return ListLiteral(ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
                           [build_expr(ctx, e) for e in expr.elts])
    @staticmethod
    def build_Tuple(ctx, expr):
        # NB: tuples are lowered the same way as lists.
        return ListLiteral(ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
                           [build_expr(ctx, e) for e in expr.elts])
    @staticmethod
    def build_Num(ctx, expr):
        value = str(expr.n)
        r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(value))
        return Const(r, value)
    @staticmethod
    def build_Starred(ctx, expr):
        r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
        return Starred(r, build_expr(ctx, expr.value))
# Singleton dispatcher instances used as the expression / statement entry points.
build_expr = ExprBuilder()
build_stmt = StmtBuilder()
def find_after(ctx, pos, substr, offsets=(0, 0)):
    """Locate the first occurrence of *substr* at or after *pos* and return
    its raw source range, shifted by the optional *offsets* pair."""
    match_start = ctx.source.index(substr, pos)
    match_end = match_start + len(substr)
    return ctx.make_raw_range(match_start + offsets[0], match_end + offsets[1])
def find_before(ctx, pos, substr, offsets=(0, 0)):
    """Locate the last occurrence of *substr* strictly before *pos* and return
    its raw source range, shifted by the optional *offsets* pair."""
    match_start = ctx.source.rindex(substr, 0, pos)
    match_end = match_start + len(substr)
    return ctx.make_raw_range(match_start + offsets[0], match_end + offsets[1])
import re
import sys
import ast
import inspect
import torch
from torch._C import DynamicType, TupleType
from textwrap import dedent
PY35 = sys.version_info >= (3, 5)
# ``is_tuple`` recognizes ``Tuple[...]`` annotation objects.  The real typing
# module is used when available; otherwise a tiny polyfill is installed.
try:
    import typing
    from typing import Tuple
    def is_tuple(ann):
        # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
        return ann.__module__ == 'typing' and \
            (getattr(ann, '__origin__', None) is typing.Tuple or
             getattr(ann, '__origin__', None) is tuple)
    except ImportError
class Module(object):
    """A tiny namespace object: attribute access looks names up in a dict."""
    def __init__(self, name, members):
        # ``name`` is only used when formatting the lookup-failure message.
        self.name = name
        self.members = members
    def __getattr__(self, attr):
        # Only invoked for attributes not found the normal way.
        try:
            return self.members[attr]
        except KeyError:
            raise RuntimeError("Module {} has no member called {}".format(self.name, attr))
# Evaluation environment for `# type:` comment annotations: only the names
# that may legally appear inside such an annotation are exposed.
_eval_env = {
    'torch': Module('torch', {'Tensor': torch.Tensor}),
    'Tensor': torch.Tensor,
    'typing': Module('typing', {'Tuple': Tuple}),
    'Tuple': Tuple,
}
def get_signature(fn, _n_arguments=None, _n_binders=None):
    """Resolve (argument types, return type) for *fn*.

    Tries, in order: real Python 3.5+ annotations, a ``# type:`` comment in
    the source, and finally a default all-Tensor signature derived from the
    source or from the counts supplied by the compiler.
    """
    # Python 3.5 adds support for the nice annotation syntax, so try that first.
    if PY35:
        sig = try_real_annotations(fn)
        if sig is not None:
            return sig
    type_line, source = None, None
    try:
        source = dedent(inspect.getsource(fn))
        type_line = get_type_line(source)
    except TypeError:
        # inspect.getsource raises TypeError for builtins and other
        # objects without retrievable source.
        pass
    # This might happen both because we failed to get the source of fn, or
    # because it didn't have any annotations.
    if type_line is None:
        return default_signature(fn, source, _n_arguments, _n_binders)
    return parse_type_line(type_line)
def parse_type_line(type_line):
    """Parses a type annotation specified as a comment.
    Example inputs:
        # type: (Tensor, torch.Tensor) -> Tuple[Tensor]
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor
    """
    arg_ann_str, ret_ann_str = split_type_line(type_line)
    try:
        # NOTE(review): eval with _eval_env as globals still exposes builtins
        # (Python injects __builtins__ into a globals dict) -- the annotation
        # text comes from the function's own source, not external input.
        arg_ann = eval(arg_ann_str, _eval_env)
    except SyntaxError:
        raise RuntimeError("Failed to parse the argument list of a type annotation")
    # A single un-parenthesized annotation evaluates to a bare object.
    if not isinstance(arg_ann, tuple):
        arg_ann = (arg_ann,)
    try:
        ret_ann = eval(ret_ann_str, _eval_env)
    except SyntaxError:
        raise RuntimeError("Failed to parse the return type of a type annotation")
    return [ann_to_type(ann) for ann in arg_ann], ann_to_type(ret_ann)
def default_signature(fn, source, _n_arguments, _n_binders):
    """Returns the default signature for fn.
    The current formula is to use the source (if available) to determine the
    number of inputs and outputs, and set all their types as tensors.
    If the source is missing, we fall back to the numbers provided by the compiler,
    to make sure we don't cause an error there (although type mismatches can still happen).
    This method also accounts for the self argument if fn is a method.
    """
    if _n_binders is None:
        raise RuntimeError("default_signature needs to know the number of binders")
    if source is None and _n_arguments is None:
        raise RuntimeError("default_signature needs either the source or the number of arguments")
    # Every binder (output) is typed as a dynamically-shaped tensor.
    ret_type = TupleType([DynamicType() for _ in range(_n_binders)])
    if source is not None:
        py_ast = ast.parse(source)
        if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
            raise RuntimeError("expected a single top-level function")
        py_def = py_ast.body[0]
        # TODO: ideally we'd ignore the type of varargs entirely, but we currently don't
        # allow passing in anything else than tensors anyway.
        if py_def.args.vararg is not None:
            # With *args the argument count must come from the compiler.
            arg_types = [DynamicType()] * _n_arguments
        else:
            arg_types = [DynamicType() for _ in py_def.args.args]
        # Drop the implicit ``self`` argument for bound methods.  Note this
        # applies to both branches above.
        if inspect.ismethod(fn):
            arg_types = arg_types[1:]
    else:
        arg_types = [DynamicType()] * _n_arguments
    return arg_types, ret_type
# Matches the line that closes a ``def`` header, e.g. ``def f(x):``.
_def_end_regex = re.compile(r'.*\)\s*:.*')
def get_type_line(source):
    """Return the ``# type:`` comment line following the def header, or None."""
    def without_comment(line):
        hash_pos = line.find('#')
        return line if hash_pos < 0 else line[:hash_pos]
    lines = source.split('\n')
    header_end = 0
    # Scan for the line that terminates the signature (ignoring comments).
    while not _def_end_regex.match(without_comment(lines[header_end])):
        header_end += 1
    candidate = lines[header_end + 1].strip()
    return candidate if candidate.startswith('# type:') else None
def split_type_line(type_line):
    """Splits the comment with the type annotation into parts for argument and return types.
    For example, for an input of:
        # type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]
    This function will return:
        ("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")

    Raises:
        RuntimeError: if the line contains no ``->`` separator.
    """
    start_offset = len('# type:')
    try:
        arrow_pos = type_line.index('->')
    except ValueError:
        # Fix: corrected the typo "cound't" in the error message.
        raise RuntimeError("Syntax error in type annotation (couldn't find `->`)")
    return type_line[start_offset:arrow_pos].strip(), type_line[arrow_pos + 2:].strip()
def try_real_annotations(fn):
    """Tries to use the Py3.5+ annotation syntax to get the type."""
    try:
        sig = inspect.signature(fn)
    except ValueError:
        # Builtins and some callables have no retrievable signature.
        return None
    all_annots = [sig.return_annotation] + [p.annotation for p in sig.parameters.values()]
    if all(ann is sig.empty for ann in all_annots):
        # Nothing is annotated at all -- let the caller fall back.
        return None
    def as_ann(ann):
        # sig.empty is really annoying so convert it to None
        return ann if ann is not sig.empty else None
    param_types = [ann_to_type(as_ann(p.annotation))
                   for p in sig.parameters.values()]
    return_type = ann_to_type(as_ann(sig.return_annotation))
    return param_types, return_type
def ann_to_type(ann):
    """Map a Python annotation object onto the JIT's type representation."""
    # Both a missing annotation and an explicit Tensor map to the dynamic type.
    if ann is None or ann is torch.Tensor:
        return DynamicType()
    if is_tuple(ann):
        return TupleType([ann_to_type(elem) for elem in ann.__args__])
    raise ValueError("The only supported annotations kinds are Tensor and Tuple[...]")
import torch
import hashlib
import os
import re
import shutil
import sys
import tempfile
# Prefer requests (streaming download + simple header access); fall back to
# the version-appropriate urllib flavor when it is not installed.
try:
    from requests.utils import urlparse
    from requests import get as urlopen
    requests_available = True
except ImportError:
    requests_available = False
    if sys.version_info[0] == 2:
        from urlparse import urlparse # noqa f811
        from urllib2 import urlopen # noqa f811
    else:
        from urllib.request import urlopen
        from urllib.parse import urlparse
# tqdm is optional; a minimal fallback class is defined at the bottom of the file.
try:
    from tqdm import tqdm
except ImportError:
    tqdm = None # defined below
# matches bfd8deac from resnet18-bfd8deac.pth
HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
def load_url(url, model_dir=None, map_location=None, progress=True):
    r"""Loads the Torch serialized object at the given URL.
    If the object is already present in `model_dir`, it's deserialized and
    returned. The filename part of the URL should follow the naming convention
    ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
    digits of the SHA256 hash of the contents of the file. The hash is used to
    ensure unique names and to verify the contents of the file.
    The default value of `model_dir` is ``$TORCH_HOME/models`` where
    ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
    overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)
        progress (bool, optional): whether or not to display a progress bar to stderr
    Example:
        >>> state_dict = torch.utils.model_zoo.load_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    """
    if model_dir is None:
        # Resolution order: $TORCH_MODEL_ZOO > $TORCH_HOME/models > ~/.torch/models
        torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
        model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        # NOTE(review): assumes the filename embeds a hash matching HASH_REGEX;
        # a URL without one makes .search() return None and raises
        # AttributeError here -- confirm all callers follow the convention.
        hash_prefix = HASH_REGEX.search(filename).group(1)
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    return torch.load(cached_file, map_location=map_location)
def _download_url_to_file(url, dst, hash_prefix, progress):
    """Stream *url* into a temp file, verify its SHA256 prefix, then move it to *dst*."""
    if requests_available:
        u = urlopen(url, stream=True)
        file_size = int(u.headers["Content-Length"])
        # ``raw`` exposes the response's underlying file-like stream.
        u = u.raw
    else:
        u = urlopen(url)
        meta = u.info()
        # Python 2 and 3 expose response headers through different APIs.
        if hasattr(meta, 'getheaders'):
            file_size = int(meta.getheaders("Content-Length")[0])
        else:
            file_size = int(meta.get_all("Content-Length")[0])
    # Download into a named temp file so a partial download never clobbers dst.
    f = tempfile.NamedTemporaryFile(delete=False)
    try:
        sha256 = hashlib.sha256()
        with tqdm(total=file_size, disable=not progress) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        digest = sha256.hexdigest()
        if digest[:len(hash_prefix)] != hash_prefix:
            raise RuntimeError('invalid hash value (expected "{}", got "{}")'
                               .format(hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        # Closing twice is harmless; remove the temp file unless it was moved.
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
if tqdm is None:
    # Minimal stand-in for tqdm when the real package is not installed:
    # writes a percentage to stderr and supports the context-manager protocol.
    class tqdm(object):
        def __init__(self, total, disable=False):
            self.total = total
            self.disable = disable
            self.n = 0
        def update(self, n):
            if self.disable:
                return
            self.n += n
            percent = 100 * self.n / float(self.total)
            sys.stderr.write("\r{0:.1f}%".format(percent))
            sys.stderr.flush()
        def __enter__(self):
            return self
        def __exit__(self, exc_type, exc_val, exc_tb):
            if not self.disable:
                sys.stderr.write('\n')
import torch
import warnings
def detach_variable(inputs):
    """Return a tuple of detached copies of *inputs*, preserving each tensor's
    ``requires_grad`` flag.  Only tuples are accepted."""
    if not isinstance(inputs, tuple):
        raise RuntimeError(
            "Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
    def _detach(t):
        detached = t.detach()
        detached.requires_grad = t.requires_grad
        return detached
    return tuple(_detach(t) for t in inputs)
def check_backward_validity(inputs):
    """Warn when no input requires grad -- backward would produce no gradients."""
    needs_grad = any(inp.requires_grad for inp in inputs)
    if not needs_grad:
        warnings.warn("None of the inputs have requires_grad=True. Gradients will be None")
class CheckpointFunction(torch.autograd.Function):
    """autograd Function implementing checkpointing: forward runs without
    tracking gradients; backward re-runs the forward with grad enabled and
    backpropagates through the recomputed graph."""
    @staticmethod
    def forward(ctx, run_function, *args):
        check_backward_validity(args)
        # Save the callable and the raw inputs for recomputation in backward.
        ctx.run_function = run_function
        ctx.save_for_backward(*args)
        with torch.no_grad():
            # Intermediate activations are deliberately not stored.
            outputs = run_function(*args)
        return outputs
    @staticmethod
    def backward(ctx, *args):
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible")
        inputs = ctx.saved_tensors
        # Detach so the recomputed graph starts exactly at the saved inputs.
        detached_inputs = detach_variable(inputs)
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)
        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)
        # Backprop the incoming gradients through the recomputed subgraph.
        torch.autograd.backward(outputs, args)
        # The leading None matches the ``run_function`` argument of forward().
        return (None,) + tuple(inp.grad for inp in detached_inputs)
def checkpoint(function, *args):
    r"""Checkpoint a model or part of the model
    Checkpointing works by trading compute for memory. Rather than storing all
    intermediate activations of the entire computation graph for computing
    backward, the checkpointed part does **not** save intermediate activations,
    and instead recomputes them in backward pass. It can be applied on any part
    of a model.
    Specifically, in the forward pass, :attr:`function` will run in
    :func:`torch.no_grad` manner, i.e., not storing the intermediate
    activations. Instead, the forward pass saves the inputs tuple and the
    :attr:`function` parameter. In the backwards pass, the saved inputs and
    :attr:`function` is retrieved, and the forward pass is computed on
    :attr:`function` again, now tracking the intermediate activations, and then
    the gradients are calculated using these activation values.
    .. warning::
        Checkpointing doesn't work with :func:`torch.autograd.grad`, but only
        with :func:`torch.autograd.backward`.
    .. warning::
        If :attr:`function` invocation during backward does anything different
        than the one during forward, e.g., due to some global variable, the
        checkpointed version won't be equivalent, and unfortunately it can't be
        detected.
    .. warning:
        At least one of the inputs needs to have :code:`requires_grad=True` if
        grads are needed for model inputs, otherwise the checkpointed part of the
        model won't have gradients.
    Args:
        function: describes what to run in the forward pass of the model or
            part of the model. It should also know how to handle the inputs
            passed as the tuple. For example, in LSTM, if user passes
            ``(activation, hidden)``, :attr:`function` should correctly use the
            first input as ``activation`` and the second input as ``hidden``
        args: tuple containing inputs to the :attr:`function`
    Returns:
        Output of running :attr:`function` on *:attr:`args`
    """
    # All of the mechanics live in CheckpointFunction above.
    return CheckpointFunction.apply(function, *args)
def checkpoint_sequential(functions, segments, *inputs):
    r"""A helper function for checkpointing sequential models.
    Sequential models execute a list of modules/functions in order
    (sequentially). Therefore, we can divide such a model in various segments
    and checkpoint each segment. All segments except the last will run in
    :func:`torch.no_grad` manner, i.e., not storing the intermediate
    activations. The inputs of each checkpointed segment will be saved for
    re-running the segment in the backward pass.
    See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
    .. warning::
        Checkpointing doesn't work with :func:`torch.autograd.grad`, but only
        with :func:`torch.autograd.backward`.
    .. warning:
        At least one of the inputs needs to have :code:`requires_grad=True` if
        grads are needed for model inputs, otherwise the checkpointed part of the
        model won't have gradients.
    Args:
        functions: A :class:`torch.nn.Sequential` or the list of modules or
            functions (comprising the model) to run sequentially.
        segments: Number of chunks to create in the model
        inputs: tuple of Tensors that are inputs to :attr:`functions`
    Returns:
        Output of running :attr:`functions` sequentially on *:attr:`inputs`
    Example:
        >>> model = nn.Sequential(...)
        >>> input_var = checkpoint_sequential(model, chunks, input_var)
    """
    def run_function(start, end, functions):
        # Returns a closure that runs functions[start..end] sequentially.
        def forward(*inputs):
            input = inputs[0]
            for j in range(start, end + 1):
                input = functions[j](input)
            return input
        return forward
    if isinstance(functions, torch.nn.Sequential):
        functions = list(functions.children())
    segment_size = len(functions) // segments
    # the last chunk has to be non-volatile
    end = -1
    for start in range(0, segment_size * (segments - 1), segment_size):
        end = start + segment_size - 1
        inputs = checkpoint(run_function(start, end, functions), *inputs)
        if not isinstance(inputs, tuple):
            inputs = (inputs,)
    # The final segment runs outside checkpoint() so its activations are
    # stored normally and the output carries grad history.
    return run_function(end + 1, len(functions) - 1, functions)(*inputs)
import heapq
class Trainer(object):
    """Drives a training loop over ``dataset`` and fires registered plugins.

    Plugins are scheduled on four queues ('iteration', 'epoch', 'batch',
    'update'); each queue is a heap ordered by the next time the plugin
    should fire.
    """
    def __init__(self, model=None, criterion=None, optimizer=None, dataset=None):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.dataset = dataset
        self.iterations = 0
        # Scratch space plugins may read and write.
        self.stats = {}
        # Each queue entry: (next_fire_time, insertion_order, plugin); the
        # order field breaks heap ties so comparisons never reach the plugin.
        self.plugin_queues = {
            'iteration': [],
            'epoch': [],
            'batch': [],
            'update': [],
        }
    def register_plugin(self, plugin):
        # NOTE(review): plugins are assumed to expose register(),
        # trigger_interval and a method named after each queue they use --
        # confirm against the Plugin base class.
        plugin.register(self)
        intervals = plugin.trigger_interval
        if not isinstance(intervals, list):
            intervals = [intervals]
        for duration, unit in intervals:
            queue = self.plugin_queues[unit]
            queue.append((duration, len(queue), plugin))
    def call_plugins(self, queue_name, time, *args):
        args = (time,) + args
        queue = self.plugin_queues[queue_name]
        if len(queue) == 0:
            return
        # Fire every plugin whose scheduled time has arrived, then reschedule
        # it at time + interval (heappushpop keeps the queue size constant).
        while queue[0][0] <= time:
            plugin = queue[0][2]
            getattr(plugin, queue_name)(*args)
            for trigger in plugin.trigger_interval:
                if trigger[1] == queue_name:
                    interval = trigger[0]
                    new_item = (time + interval, queue[0][1], plugin)
                    heapq.heappushpop(queue, new_item)
    def run(self, epochs=1):
        # Queues were built with append(); heapify once before running.
        for q in self.plugin_queues.values():
            heapq.heapify(q)
        for i in range(1, epochs + 1):
            self.train()
            self.call_plugins('epoch', i)
    def train(self):
        # Iteration numbering continues from previous train() calls.
        for i, data in enumerate(self.dataset, self.iterations + 1):
            batch_input, batch_target = data
            self.call_plugins('batch', i, batch_input, batch_target)
            input_var = batch_input
            target_var = batch_target
            plugin_data = [None, None]
            def closure():
                batch_output = self.model(input_var)
                loss = self.criterion(batch_output, target_var)
                loss.backward()
                # Capture the first forward's output/loss for plugins
                # (optimizers may invoke the closure multiple times).
                if plugin_data[0] is None:
                    plugin_data[0] = batch_output.data
                    plugin_data[1] = loss.data
                return loss
            self.optimizer.zero_grad()
            self.optimizer.step(closure)
            self.call_plugins('iteration', i, batch_input, batch_target,
                              *plugin_data)
            self.call_plugins('update', i, self.model)
        # NOTE(review): ``i`` is unbound when the dataset is empty, which
        # raises NameError here -- confirm datasets are always non-empty.
        self.iterations += i
from .plugin import Plugin
class Monitor(Plugin):
    """Plugin that records a scalar statistic every iteration, optionally with
    an exponentially-smoothed running average and a per-epoch mean.

    NOTE(review): subclasses are expected to provide ``stat_name`` and
    ``_get_value`` -- neither is defined here; confirm against concrete
    monitors.
    """
    def __init__(self, running_average=True, epoch_average=True, smoothing=0.7,
                 precision=None, number_format=None, unit=''):
        if precision is None:
            precision = 4
        if number_format is None:
            number_format = '.{}f'.format(precision)
        number_format = ':' + number_format
        # Fire on every iteration and on every epoch.
        super(Monitor, self).__init__([(1, 'iteration'), (1, 'epoch')])
        self.smoothing = smoothing
        self.with_running_average = running_average
        self.with_epoch_average = epoch_average
        self.log_format = number_format
        self.log_unit = unit
        self.log_epoch_fields = None
        # str.format templates consumed by logging plugins.
        self.log_iter_fields = ['{last' + number_format + '}' + unit]
        if self.with_running_average:
            self.log_iter_fields += [' ({running_avg' + number_format + '}' + unit + ')']
        if self.with_epoch_average:
            self.log_epoch_fields = ['{epoch_mean' + number_format + '}' + unit]
    def register(self, trainer):
        self.trainer = trainer
        # Publish the formatting metadata under this monitor's stat name.
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        stats['log_format'] = self.log_format
        stats['log_unit'] = self.log_unit
        stats['log_iter_fields'] = self.log_iter_fields
        if self.with_epoch_average:
            stats['log_epoch_fields'] = self.log_epoch_fields
        if self.with_epoch_average:
            # (running sum, count) accumulated over the current epoch.
            stats['epoch_stats'] = (0, 0)
    def iteration(self, *args):
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        stats['last'] = self._get_value(*args)
        if self.with_epoch_average:
            # Element-wise add (value, 1) onto (sum, count).
            stats['epoch_stats'] = tuple(sum(t) for t in
                                         zip(stats['epoch_stats'], (stats['last'], 1)))
        if self.with_running_average:
            # Exponential moving average of the most recent values.
            previous_avg = stats.get('running_avg', 0)
            stats['running_avg'] = previous_avg * self.smoothing + \
                stats['last'] * (1 - self.smoothing)
    def epoch(self, idx):
        stats = self.trainer.stats.setdefault(self.stat_name, {})
        if self.with_epoch_average:
            # NOTE(review): divides by the iteration count -- an epoch with
            # zero iterations raises ZeroDivisionError; confirm upstream.
            epoch_stats = stats['epoch_stats']
            stats['epoch_mean'] = epoch_stats[0] / epoch_stats[1]
            stats['epoch_stats'] = (0, 0)
import bisect
import warnings
from torch._utils import _accumulate
from torch import randperm
class Dataset(object):
    """An abstract class representing a Dataset.
    All other datasets should subclass it. All subclasses should override
    ``__len__``, that provides the size of the dataset, and ``__getitem__``,
    supporting integer indexing in range from 0 to len(self) exclusive.
    """
    def __getitem__(self, index):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
    def __add__(self, other):
        # ``ds1 + ds2`` concatenates two datasets lazily.
        return ConcatDataset([self, other])
class TensorDataset(Dataset):
    """Dataset wrapping tensors.

    Each sample is retrieved by indexing every tensor along its first
    dimension, so all tensors must agree on that dimension's size.
    """
    def __init__(self, *tensors):
        first_dim = tensors[0].size(0)
        assert all(t.size(0) == first_dim for t in tensors)
        self.tensors = tensors
    def __getitem__(self, index):
        return tuple(t[index] for t in self.tensors)
    def __len__(self):
        return self.tensors[0].size(0)
class ConcatDataset(Dataset):
    """Dataset that concatenates multiple datasets.

    Useful to assemble different existing datasets, possibly large-scale
    ones, since the concatenation is performed on the fly.

    Arguments:
        datasets (iterable): List of datasets to be concatenated
    """
    @staticmethod
    def cumsum(sequence):
        # Running total of dataset lengths, e.g. [3, 5] for lengths (3, 2).
        totals, running = [], 0
        for dataset in sequence:
            running += len(dataset)
            totals.append(running)
        return totals
    def __init__(self, datasets):
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        self.cumulative_sizes = self.cumsum(self.datasets)
    def __len__(self):
        return self.cumulative_sizes[-1]
    def __getitem__(self, idx):
        # Binary-search which constituent dataset owns this global index.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        sample_idx = idx if dataset_idx == 0 else idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]
    @property
    def cummulative_sizes(self):
        # Deprecated misspelled alias kept for backwards compatibility.
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
class Subset(Dataset):
    """A view of *dataset* restricted to the given *indices*."""
    def __init__(self, dataset, indices):
        self.dataset = dataset
        self.indices = indices
    def __getitem__(self, idx):
        # Translate the subset index into an index of the wrapped dataset.
        return self.dataset[self.indices[idx]]
    def __len__(self):
        return len(self.indices)
def random_split(dataset, lengths):
    """
    Randomly split a dataset into non-overlapping new datasets of given lengths.

    Arguments:
        dataset (Dataset): Dataset to be split
        lengths (iterable): lengths of splits to be produced

    Raises:
        ValueError: if ``sum(lengths)`` does not equal ``len(dataset)``.
    """
    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    # One global permutation; consecutive slices of it become each split's indices.
    indices = randperm(sum(lengths))
    return [Subset(dataset, indices[offset - length:offset])
            for offset, length in zip(_accumulate(lengths), lengths)]
import torch
from torch._six import int_classes as _int_classes
class Sampler(object):
    r"""Base class for all Samplers.
    Every Sampler subclass has to provide an __iter__ method, providing a way
    to iterate over indices of dataset elements, and a __len__ method that
    returns the length of the returned iterators.
    """
    def __init__(self, data_source):
        # The base class keeps no state; subclasses decide what to store.
        pass
    def __iter__(self):
        # Abstract: must yield dataset indices.
        raise NotImplementedError
    def __len__(self):
        # Abstract: must return the number of indices that __iter__ yields.
        raise NotImplementedError
class SequentialSampler(Sampler):
    r"""Samples elements sequentially, always in the same order.
    Arguments:
        data_source (Dataset): dataset to sample from
    """
    def __init__(self, data_source):
        self.data_source = data_source
    def __iter__(self):
        # Indices 0 .. len(data_source) - 1 in order.
        return iter(range(len(self.data_source)))
    def __len__(self):
        return len(self.data_source)
class RandomSampler(Sampler):
    r"""Samples elements randomly, without replacement.
    Arguments:
        data_source (Dataset): dataset to sample from
    """
    def __init__(self, data_source):
        self.data_source = data_source
    def __iter__(self):
        # A fresh random permutation of all indices on every epoch.
        return iter(torch.randperm(len(self.data_source)).tolist())
    def __len__(self):
        return len(self.data_source)
class SubsetRandomSampler(Sampler):
    r"""Samples elements randomly from a given list of indices, without replacement.
    Arguments:
        indices (list): a list of indices
    """
    def __init__(self, indices):
        self.indices = indices
    def __iter__(self):
        # Yield the user-provided indices in a freshly permuted order.
        return (self.indices[i] for i in torch.randperm(len(self.indices)))
    def __len__(self):
        return len(self.indices)
class WeightedRandomSampler(Sampler):
    r"""Samples elements from [0,..,len(weights)-1] with given probabilities (weights).

    Arguments:
        weights (list) : a list of weights, not necessary summing up to one
        num_samples (int): number of samples to draw
        replacement (bool): if ``True``, samples are drawn with replacement.
            If not, they are drawn without replacement, which means that when a
            sample index is drawn for a row, it cannot be drawn again for that row.
    """

    def __init__(self, weights, num_samples, replacement=True):
        # bool is a subclass of int, so it must be rejected explicitly.
        if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \
                num_samples <= 0:
            # (typo "integeral" fixed to "integral")
            raise ValueError("num_samples should be a positive integral "
                             "value, but got num_samples={}".format(num_samples))
        if not isinstance(replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(replacement))
        self.weights = torch.tensor(weights, dtype=torch.double)
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))

    def __len__(self):
        return self.num_samples
class BatchSampler(object):
    r"""Wraps another sampler to yield a mini-batch of indices.

    Args:
        sampler (Sampler): Base sampler.
        batch_size (int): Size of mini-batch.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``

    Example:
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, sampler, batch_size, drop_last):
        # NOTE(review): the docstring example passes a plain range(), which
        # this isinstance check rejects — confirm intended contract upstream.
        if not isinstance(sampler, Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}"
                             .format(sampler))
        # bool is a subclass of int, so it must be rejected explicitly.
        if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            # (typo "integeral" fixed to "integral")
            raise ValueError("batch_size should be a positive integral value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        batch = []
        for idx in self.sampler:
            batch.append(int(idx))
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Emit the trailing partial batch unless the caller asked to drop it.
        if len(batch) > 0 and not self.drop_last:
            yield batch

    def __len__(self):
        if self.drop_last:
            return len(self.sampler) // self.batch_size
        else:
            # Ceiling division to count the final partial batch.
            return (len(self.sampler) + self.batch_size - 1) // self.batch_size
import torch
import atexit
import warnings
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class dist_backend:
    # Enum-like namespace of supported distributed backends; the integer
    # values mirror what the torch._C._dist_* bindings expect.
    UNDEFINED = -1
    TCP = 0
    MPI = 1
    GLOO = 2
    NCCL = 3
# Module initialization states: process-group mode vs. master-worker mode.
_INITIALIZED_PG = 1
_INITIALIZED_MW = 2
_initialized = 0
_backend = dist_backend.UNDEFINED
# Captured module namespace so _extend_scope can inject names at runtime.
_scope = locals()
def _extend_scope(module):
    """Copy every public attribute of *module* into this module's namespace."""
    public = {name: getattr(module, name)
              for name in dir(module) if not name.startswith('_')}
    _scope.update(public)
def is_available():
    # True when this torch build was compiled with distributed support.
    return torch._C._has_distributed()
def destroy_process_group():
    """
    Destroy the initialized distributed package
    """
    # Reset module-level state so a subsequent init_process_group can succeed.
    global _backend
    global _initialized
    torch._C._dist_destroy_process_group()
    _backend = dist_backend.UNDEFINED
    _initialized = 0
def is_initialized():
    """Checking if the process group has been initialized
    """
    # True only for process-group mode; master-worker mode sets _INITIALIZED_MW.
    return _initialized == _INITIALIZED_PG
def init_process_group(backend, init_method='env://', **kwargs):
    """Initializes the distributed package.
    Arguments:
        backend (str): Name of the backend to use. Depending on build-time configuration
            valid values include: ``tcp``, ``mpi`` and ``gloo``.
        init_method (str, optional): URL specifying how to initialize the package.
        world_size (int, optional): Number of processes participating in the job.
        rank (int, optional): Rank of the current process.
        group_name (str, optional): Group name. See description of init methods.
    To enable ``backend == mpi``, PyTorch needs to built from source on a system that
    supports MPI.
    """
    # Optional settings arrive via **kwargs; -1 / '' mean "let the backend decide".
    world_size = kwargs.pop('world_size', -1)
    group_name = kwargs.pop('group_name', '')
    rank = kwargs.pop('rank', -1)
    assert len(kwargs) == 0, "got unexpected keyword arguments: %s" % ",".join(kwargs.keys())
    if not is_available():
        raise RuntimeError("PyTorch built without distributed support")
    global _initialized
    if _initialized:
        raise RuntimeError("trying to initialize torch.distributed twice!")
    # Checking and assigning the distributed backend
    global _backend
    if backend == "tcp":
        _backend = dist_backend.TCP
    elif backend == "mpi":
        _backend = dist_backend.MPI
    elif backend == "gloo":
        _backend = dist_backend.GLOO
    elif backend == "nccl":
        _backend = dist_backend.NCCL
    else:
        raise RuntimeError("Invalid distributed backend name: " + backend)
    torch._C._dist_init_process_group(backend, init_method, world_size,
                                      group_name, rank)
    _initialized = _INITIALIZED_PG
    # NCCL communicators need explicit teardown at interpreter exit.
    if _backend == dist_backend.NCCL:
        atexit.register(destroy_process_group)
    # reduce_op and group are defined later in this module; module import has
    # completed by the time this runs, so the names resolve.
    if not torch._C._dist_init_extension(False, reduce_op, group):
        raise RuntimeError("distributed module initialization failed")
def init_master_worker(backend, init_method='env://', **kwargs):
    """Initialize the (experimental) master-worker mode of the distributed package.

    Accepts the same keyword arguments as :func:`init_process_group`
    (``world_size``, ``rank``, ``group_name``).
    """
    warnings.warn("""
================================================================================
                                    WARNING
================================================================================
    Master-worker mode is still experimental. The API will change without
    notice and we're can't guarantee full correctness and expected performance yet.
    We'll announce it once it's ready.
""")
    world_size = kwargs.pop('world_size', -1)
    group_name = kwargs.pop('group_name', '')
    rank = kwargs.pop('rank', -1)
    assert len(kwargs) == 0, "got unexpected keyword arguments: %s" % ",".join(kwargs.keys())
    if not is_available():
        raise RuntimeError("PyTorch built without distributed support")
    global _initialized
    if _initialized:
        raise RuntimeError("trying to initialize torch.distributed twice!")
    torch._C._dist_init_master_worker(backend, init_method, world_size,
                                      group_name, rank)
    _initialized = _INITIALIZED_MW
    # Master-worker mode pulls extra collective and remote-type names into this
    # module's namespace at runtime.
    import torch.distributed.collectives as collectives
    import torch.distributed.remote_types as remote_types
    _extend_scope(collectives)
    _extend_scope(remote_types)
    if not torch._C._dist_init_extension(True, reduce_op, group):
        raise RuntimeError("distributed module initialization failed")
class reduce_op(object):
    # Sentinel objects identifying element-wise reduction operations; the C
    # layer distinguishes them by identity.
    SUM = object()
    PRODUCT = object()
    MAX = object()
    MIN = object()
class group(object):
    # Sentinel identifying the default group containing all processes.
    WORLD = object()
class _DistributedRequest(object):
def __init__(self, request):
self.request = request
def is_completed(self):
return torch._C._dist_request_is_completed(self.request)
def wait(self):
torch._C._dist_request_wait(self.request)
def get_rank():
    """Returns the rank of current process.
    Rank is a unique identifier assigned to each process within a distributed
    group. They are always consecutive integers ranging from 0 to ``world_size``.
    """
    # Requires prior init_process_group / init_master_worker.
    assert torch.distributed._initialized
    return torch._C._dist_get_rank()
def get_world_size():
    """Returns the number of processes in the distributed group."""
    # Requires prior init_process_group / init_master_worker.
    assert torch.distributed._initialized
    return torch._C._dist_get_num_processes()
def isend(tensor, dst):
    """Sends a tensor asynchronously.
    Arguments:
        tensor (Tensor): Tensor to send.
        dst (int): Destination rank.
    Returns:
        A distributed request object.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    # Wrap the C request so callers can poll/wait on completion.
    return _DistributedRequest(torch._C._dist_isend(tensor, dst))
def irecv(tensor, src):
    """Receives a tensor asynchronously.
    Arguments:
        tensor (Tensor): Tensor to fill with received data.
        src (int): Source rank.
    Returns:
        A distributed request object.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    # Wrap the C request so callers can poll/wait on completion.
    return _DistributedRequest(torch._C._dist_irecv(tensor, src))
def send(tensor, dst):
    """Sends a tensor synchronously.
    Arguments:
        tensor (Tensor): Tensor to send.
        dst (int): Destination rank.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_send(tensor, dst)
def recv(tensor, src=None):
    """Receives a tensor synchronously.
    Arguments:
        tensor (Tensor): Tensor to fill with received data.
        src (int, optional): Source rank. Will receive from any
            process if unspecified.
    Returns:
        Sender rank.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    # With no source given, accept a message from whichever rank sends first.
    if src is None:
        return torch._C._dist_recv_any_source(tensor)
    return torch._C._dist_recv(tensor, src)
def broadcast_multigpu(tensor_list, src, group=group.WORLD):
    """Broadcasts the tensor to the whole group with multiple GPU tensors
    per node.
    ``tensor`` must have the same number of elements in all the GPUs from
    all processes participating in the collective. each tensor in the list must
    be on a different GPU
    Only nccl backend is currently supported
    tensors should only be GPU tensors
    Arguments:
        tensor_list (List[Tensor]): Tensors that participate in the collective
            operation. if ``src`` is the rank, then the first element of
            ``tensor_list`` (``tensor_list[0]``) will be broadcasted to all
            other tensors (on different GPUs) in the src process and all tensors
            in ``tensor_list`` of other non-src processes. You also need to make
            sure that ``len(tensor_list)`` is the same for all the distributed
            processes calling this function.
        src (int): Source rank.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_broadcast_multigpu(tensor_list, src, group)
def broadcast(tensor, src, group=group.WORLD):
    """Broadcasts the tensor to the whole group.
    ``tensor`` must have the same number of elements in all processes
    participating in the collective.
    Arguments:
        tensor (Tensor): Data to be sent if ``src`` is the rank of current
            process, and tensor to be used to save received data otherwise.
        src (int): Source rank.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_broadcast(tensor, src, group)
def all_reduce_multigpu(tensor_list, op=reduce_op.SUM, group=group.WORLD):
    """Reduces the tensor data across all machines in such a way that all get
    the final result. This function reduces a number of tensors on every node,
    while each tensor resides on different GPUs.
    Therefore, the input tensor in the tensor list needs to be GPU tensors.
    Also, each tensor in the tensor list needs to reside on a different GPU.
    After the call, all ``tensor`` in ``tensor_list`` is going to be bitwise
    identical in all processes.
    Only nccl backend is currently supported
    tensors should only be GPU tensors
    Arguments:
        tensor list (List[Tensor]): List of input and output tensors of
            the collective. The function operates in-place and requires that
            each tensor to be a GPU tensor on different GPUs.
            You also need to make sure that ``len(tensor_list)`` is the same for
            all the distributed processes calling this function.
        op (optional): One of the values from ``torch.distributed.reduce_op``
            enum. Specifies an operation used for element-wise reductions.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_all_reduce_multigpu(tensor_list, op, group)
def all_reduce(tensor, op=reduce_op.SUM, group=group.WORLD):
    """Reduces the tensor data across all machines in such a way that all get
    the final result.
    After the call ``tensor`` is going to be bitwise identical in all processes.
    Arguments:
        tensor (Tensor): Input and output of the collective. The function
            operates in-place.
        op (optional): One of the values from ``torch.distributed.reduce_op``
            enum. Specifies an operation used for element-wise reductions.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_all_reduce(tensor, op, group)
def reduce_multigpu(tensor_list, dst, op=reduce_op.SUM, group=group.WORLD):
    """Reduces the tensor data on multiple GPUs across all machines. Each tensor
    in ``tensor_list`` should reside on a separate GPU
    Only the GPU of ``tensor_list[0]`` on the process with rank ``dst`` is
    going to receive the final result.
    Only nccl backend is currently supported
    tensors should only be GPU tensors
    Arguments:
        tensor_list (List[Tensor]): Input and output GPU tensors of the
            collective. The function operates in-place.
            You also need to make sure that ``len(tensor_list)`` is the same for
            all the distributed processes calling this function.
        dst (int): Destination rank
        op (optional): One of the values from ``torch.distributed.reduce_op``
            enum. Specifies an operation used for element-wise reductions.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_reduce_multigpu(tensor_list, dst, op, group)
def reduce(tensor, dst, op=reduce_op.SUM, group=group.WORLD):
    """Reduces the tensor data across all machines.
    Only the process with rank ``dst`` is going to receive the final result.
    Arguments:
        tensor (Tensor): Input and output of the collective. The function
            operates in-place.
        dst (int): Destination rank
        op (optional): One of the values from ``torch.distributed.reduce_op``
            enum. Specifies an operation used for element-wise reductions.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_reduce(tensor, dst, op, group)
def all_gather_multigpu(output_tensor_lists,
                        input_tensor_list,
                        group=group.WORLD):
    """Gathers tensors from the whole group in a list.
    Each tensor in ``tensor_list`` should reside on a separate GPU
    Only nccl backend is currently supported
    tensors should only be GPU tensors
    Arguments:
        output_tensor_lists (List[List[Tensor]]): Output lists. It should
            contain correctly-sized tensors on each GPU to be used for output of
            the collective.
            e.g. ``output_tensor_lists[i]`` contains the all_gather
            result that resides on the GPU of ``input_tensor_list[i]``.
            Note that each element of ``output_tensor_lists[i]`` has the size of
            ``world_size * len(input_tensor_list)``, since the function all
            gathers the result from every single GPU in the group. To interpret
            each element of ``output_tensor_list[i]``, note that
            ``input_tensor_list[j]`` of rank k will be appear in
            ``output_tensor_list[i][rank * world_size + j]``
            Also note that ``len(output_tensor_lists)``, and the size of each
            element in ``output_tensor_lists`` (each element is a list,
            therefore ``len(output_tensor_lists[i])``) need to be the same
            for all the distributed processes calling this function.
        input_tensor_list (List[Tensor]): List of tensors(on different GPUs) to
            be broadcast from current process.
            Note that ``len(input_tensor_list)`` needs to be the same for
            all the distributed processes calling this function.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    # The C collective works on one flat buffer per GPU, so flatten each
    # per-GPU output list into a single dense tensor first.
    flatten_tensor_list = []
    for output_tensor_list in output_tensor_lists:
        flatten_tensor_list.append(_flatten_dense_tensors(output_tensor_list))
    ret = torch._C._dist_all_gather_multigpu(flatten_tensor_list,
                                             input_tensor_list,
                                             group)
    # Copy the gathered flat results back into the caller-provided tensors.
    for output_tensor_list, flatten_tensor in zip(output_tensor_lists,
                                                  flatten_tensor_list):
        for tensor, value in zip(output_tensor_list,
                                 _unflatten_dense_tensors(flatten_tensor,
                                                          output_tensor_list)):
            tensor.copy_(value)
    return ret
def all_gather(tensor_list, tensor, group=group.WORLD):
    """Gathers tensors from the whole group in a list.
    Arguments:
        tensor_list (list[Tensor]): Output list. It should contain
            correctly-sized tensors to be used for output of the collective.
        tensor (Tensor): Tensor to be broadcast from current process.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    # NCCL has no single-tensor all_gather; route through the multi-GPU path.
    if _backend != dist_backend.NCCL:
        return torch._C._dist_all_gather(tensor_list, tensor, group)
    else:
        return all_gather_multigpu([tensor_list], [tensor], group)
def gather(tensor, **kwargs):
    """Gathers a list of tensors in a single process.
    Arguments:
        tensor (Tensor): Input tensor.
        dst (int): Destination rank. Required in all processes except the one that
            is receiving the data.
        gather_list (list[Tensor]): List of appropriately-sized tensors to
            use for received data. Required only in the receiving process.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    my_rank = get_rank()
    # Defaulting dst to my_rank means a bare gather(tensor) acts as receiver.
    dst = kwargs.pop('dst', my_rank)
    gather_list = kwargs.pop('gather_list', None)
    _group = kwargs.pop('group', group.WORLD)
    if kwargs:
        raise RuntimeError("got unexpected kwargs")
    if dst == my_rank:
        if gather_list is None:
            raise RuntimeError("gather_list is a required argument in gather destination")
        return torch._C._dist_gather_recv(gather_list, tensor, _group)
    else:
        if gather_list:
            raise RuntimeError("non-empty gather_list can be given only to gather destination")
        return torch._C._dist_gather_send(tensor, dst, _group)
def scatter(tensor, **kwargs):
    """Scatters a list of tensors to all processes in a group.

    Each process will receive exactly one tensor and store its data in the
    ``tensor`` argument.

    Arguments:
        tensor (Tensor): Output tensor.
        src (int): Source rank. Required in all processes except the one that
            is sending the data.
        scatter_list (list[Tensor]): List of tensors to scatter. Required only
            in the process that is sending the data.
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    my_rank = get_rank()
    # Defaulting src to my_rank means a bare scatter(tensor) acts as sender.
    src = kwargs.pop('src', my_rank)
    scatter_list = kwargs.pop('scatter_list', None)
    _group = kwargs.pop('group', group.WORLD)
    if kwargs:
        raise RuntimeError("got unexpected kwargs")
    if src == my_rank:
        if scatter_list is None:
            raise RuntimeError("scatter_list is a required argument in scatter source")
        return torch._C._dist_scatter_send(scatter_list, tensor, _group)
    else:
        if scatter_list:
            # Fixed error message: original omitted "scatter_list"
            # (cf. the parallel message in gather()).
            raise RuntimeError("non-empty scatter_list can be given only to scatter source")
        return torch._C._dist_scatter_recv(tensor, src, _group)
def barrier(group=group.WORLD):
    """Synchronizes all processes.
    This collective blocks processes until the whole group enters this function.
    Arguments:
        group (optional): Group of the collective.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    return torch._C._dist_barrier(group)
def new_group(ranks=None):
    """Creates a new distributed group.
    This function requires that all processes in the main group (i.e. all
    processes that are part of the distributed job) enter this function, even
    if they are not going to be members of the group. Additionally, groups
    should be created in the same order in all processes.
    Arguments:
        ranks (list[int]): List of ranks of group members.
    Returns:
        A handle of distributed group that can be given to collective calls.
    """
    assert torch.distributed._initialized == _INITIALIZED_PG, \
        "collective only supported in process-group mode"
    # No ranks given means the new group spans the whole world.
    if ranks is None:
        ranks = list(range(get_world_size()))
    return torch._C._dist_new_group(ranks)
def _clear_group_cache(group=group.WORLD):
    """Clear the created distributed group's cached resource
    Only nccl backend is currently supported
    Cached resource includes NCCL communicators and CUDA events
    Arguments:
        group (optional): Group of the collective.
    """
    return torch._C._dist_clear_group_cache(group)
def _register_stream(stream):
    """Register a CUDA stream with the distributed backend.

    Raises:
        RuntimeError: if torch.distributed has not been initialized yet.
    """
    if not _initialized:
        raise RuntimeError("torch.distributed needs to be initialized first")
    return torch._C._dist_register_stream(stream)
import torch
class no_grad(object):
    r"""Context-manager that disables gradient calculation.

    Disabling gradient calculation is useful for inference, when you are sure
    that you will not call :meth:`Tensor.backward()`. It will reduce memory
    consumption for computations that would otherwise have `requires_grad=True`.
    In this mode, the result of every computation will have
    `requires_grad=False`, even when the inputs have `requires_grad=True`.

    Example::

        >>> x = torch.tensor([1], requires_grad=True)
        >>> with torch.no_grad():
        ...   y = x * 2
        >>> y.requires_grad
        False
    """

    def __enter__(self):
        # Capture the previous mode at entry (not at construction) so that an
        # instance created ahead of time still restores the correct state.
        self.prev = torch.is_grad_enabled()
        torch.set_grad_enabled(False)

    def __exit__(self, *args):
        torch.set_grad_enabled(self.prev)
        # Do not suppress exceptions raised inside the block.
        return False
class enable_grad(object):
    r"""Context-manager that enables gradient calculation.

    Enables gradient calculation inside a :class:`~no_grad` context. This has
    no effect outside of :class:`~no_grad`.

    Example::

        >>> x = torch.tensor([1], requires_grad=True)
        >>> with torch.no_grad():
        ...   with torch.enable_grad():
        ...     y = x * 2
        >>> y.requires_grad
        True
        >>> y.backward()
        >>> x.grad
    """

    def __enter__(self):
        # Capture the previous mode at entry (not at construction) so that an
        # instance created ahead of time still restores the correct state.
        self.prev = torch.is_grad_enabled()
        torch.set_grad_enabled(True)

    def __exit__(self, *args):
        torch.set_grad_enabled(self.prev)
        # Do not suppress exceptions raised inside the block.
        return False
class set_grad_enabled(object):
    r"""Context-manager that sets gradient calculation to on or off.

    ``set_grad_enabled`` will enable or disable grads based on its argument
    :attr:`mode`. It can be used either as a context-manager or as a plain
    function call.

    Arguments:
        mode (bool): Flag whether to enable grad (``True``), or disable
            (``False``). This can be used to conditionally enable gradients.

    Example::

        >>> x = torch.tensor([1], requires_grad=True)
        >>> is_train = False
        >>> with torch.set_grad_enabled(is_train):
        ...   y = x * 2
        >>> y.requires_grad
        False
        >>> set_grad_enabled(True)
        >>> y = x * 2
        >>> y.requires_grad
        True
        >>> set_grad_enabled(False)
        >>> y = x * 2
        >>> y.requires_grad
        False
    """

    def __init__(self, mode):
        # Applying the mode here (rather than in __enter__) is what lets the
        # class double as a plain function call.
        self.prev = torch.is_grad_enabled()
        torch._C.set_grad_enabled(mode)

    def __enter__(self):
        pass

    def __exit__(self, *args):
        # Restore whatever mode was active before construction.
        torch.set_grad_enabled(self.prev)
        return False
import torch
from collections import Iterable
import torch.testing
import sys
def zero_gradients(x):
    """Recursively reset accumulated gradients of *x* (a tensor or a nested
    iterable of tensors) to zero."""
    if isinstance(x, torch.Tensor):
        grad = x.grad
        if grad is not None:
            # Detach first so the zeroing is not recorded by autograd.
            grad.detach_()
            grad.data.zero_()
        return
    if isinstance(x, Iterable):
        for item in x:
            zero_gradients(item)
def make_jacobian(input, num_out):
    """Allocate a zero Jacobian buffer of shape ``(input.nelement(), num_out)``.

    Returns ``None`` for tensors that are not floating-point or do not require
    grad. For an iterable input, returns the same container type holding the
    per-tensor buffers, or ``None`` when no element produced one.
    """
    if isinstance(input, torch.Tensor):
        if input.is_floating_point() and input.requires_grad:
            return torch.zeros(input.nelement(), num_out)
        return None
    if isinstance(input, Iterable):
        per_elem = (make_jacobian(elem, num_out) for elem in input)
        buffers = [j for j in per_elem if j is not None]
        return type(input)(buffers) if buffers else None
    return None
def iter_tensors(x, only_requiring_grad=False):
    """Yield the ``.data`` of every tensor found in (possibly nested) *x*.

    When *only_requiring_grad* is True, tensors with ``requires_grad=False``
    are skipped.
    """
    if isinstance(x, torch.Tensor):
        if not only_requiring_grad or x.requires_grad:
            yield x.data
    elif isinstance(x, Iterable):
        for elem in x:
            # Re-yield the nested generator (Python 2-compatible form).
            for tensor in iter_tensors(elem, only_requiring_grad):
                yield tensor
def iter_tensors_with_grad(x):
    """Yield ``(grad.data, tensor.data)`` for every tensor in *x* that requires grad.

    Tensors whose ``.grad`` has not been populated yet yield ``(None, None)``.
    """
    if isinstance(x, torch.Tensor):
        if x.requires_grad:
            if x.grad is None:
                yield (None, None)
            else:
                yield (x.grad.data, x.data)
    elif isinstance(x, Iterable):
        for elem in x:
            for pair in iter_tensors_with_grad(elem):
                yield pair
def contiguous(input):
    """Return *input* with every contained tensor made contiguous in memory."""
    if isinstance(input, torch.Tensor):
        return input.contiguous()
    if isinstance(input, Iterable):
        converted = (contiguous(elem) for elem in input)
        return type(input)(converted)
    # Non-tensor, non-iterable values pass through untouched.
    return input
def get_numerical_jacobian(fn, input, target, eps=1e-3):
    """Estimate d fn(input) / d target by central finite differences.

    Each element of every grad-requiring tensor in ``target`` is perturbed by
    +/- ``eps`` in turn, and the resulting output difference fills one column
    of the corresponding Jacobian buffer.
    """
    # To be able to use .view(-1) input must be contiguous
    input = contiguous(input)
    target = contiguous(target)
    output_size = fn(input).numel()
    jacobian = make_jacobian(target, output_size)
    # It's much easier to iterate over flattened lists of tensors.
    # These are reference to the same objects in jacobian, so any changes
    # will be reflected in it as well.
    x_tensors = [t for t in iter_tensors(target, True)]
    j_tensors = [t for t in iter_tensors(jacobian)]
    # TODO: compare structure
    for x_tensor, d_tensor in zip(x_tensors, j_tensors):
        flat_tensor = x_tensor.view(-1).detach()
        for i in range(flat_tensor.nelement()):
            # Central difference: f(x+eps) - f(x-eps) over 2*eps; the original
            # element value is restored afterwards.
            orig = flat_tensor[i].item()
            flat_tensor[i] = orig - eps
            outa = fn(input).clone()
            flat_tensor[i] = orig + eps
            outb = fn(input).clone()
            flat_tensor[i] = orig
            r = (outb - outa) / (2 * eps)
            d_tensor[i] = r.detach().contiguous().view(-1)
    return jacobian
def get_analytical_jacobian(input, output):
    """Build the autograd Jacobian of ``output`` w.r.t. ``input``.

    Backpropagates one-hot grad_outputs, one per output element, filling one
    Jacobian column per backward pass. The whole procedure runs twice to check
    that backward is reentrant (same grads on repeated runs).

    Returns:
        (jacobian, reentrant, correct_grad_sizes) where ``reentrant`` is False
        if two identical backward passes disagreed, and ``correct_grad_sizes``
        is False if any grad's size differed from its tensor's size.
    """
    input = contiguous(input)
    jacobian = make_jacobian(input, output.numel())
    jacobian_reentrant = make_jacobian(input, output.numel())
    grad_output = torch.zeros_like(output)
    flat_grad_output = grad_output.view(-1)
    reentrant = True
    correct_grad_sizes = True
    for i in range(flat_grad_output.numel()):
        # One-hot grad_output selects the i-th output element.
        flat_grad_output.zero_()
        flat_grad_output[i] = 1
        for jacobian_c in (jacobian, jacobian_reentrant):
            zero_gradients(input)
            output.backward(grad_output, create_graph=True)
            for jacobian_x, (d_x, x) in zip(jacobian_c, iter_tensors_with_grad(input)):
                if d_x is not None and d_x.size() != x.size():
                    correct_grad_sizes = False
                elif jacobian_x.numel() != 0:
                    if d_x is None:
                        # No grad reached this input for this output element.
                        jacobian_x[:, i].zero_()
                    else:
                        d_x_dense = d_x.to_dense() if d_x.is_sparse else d_x
                        assert jacobian_x[:, i].numel() == d_x_dense.numel()
                        jacobian_x[:, i] = d_x_dense.contiguous().view(-1)
    for jacobian_x, jacobian_reentrant_x in zip(jacobian, jacobian_reentrant):
        if jacobian_x.numel() != 0 and (jacobian_x - jacobian_reentrant_x).abs().max() != 0:
            reentrant = False
    return jacobian, reentrant, correct_grad_sizes
def _as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def _differentiable_outputs(x):
    """Return a tuple of only those outputs that participate in autograd."""
    outputs = _as_tuple(x)
    return tuple(filter(lambda o: o.requires_grad, outputs))
def gradcheck(func, inputs, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True):
    """Check gradients computed via small finite differences
    against analytical gradients

    The check between numerical and analytical has the same behaviour as
    numpy.allclose https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
    meaning it check that

        absolute(a - n) <= (atol + rtol * absolute(n))

    is true for all elements of analytical jacobian a and numerical jacobian n.

    Args:
        func: Python function that takes Tensor inputs and returns
            a Tensor or a tuple of Tensors
        inputs: tuple of Tensors (a single Tensor is also accepted and is
            wrapped into a 1-tuple)
        eps: perturbation for finite differences
        atol: absolute tolerance
        rtol: relative tolerance
        raise_exception: bool indicating whether to raise an exception if
            gradcheck fails. The exception gives more information about the
            exact nature of the failure. This is helpful when debugging gradchecks.

    Returns:
        True if all differences satisfy allclose condition
    """
    # Fix: use tupled_inputs consistently below. The original called
    # func(*inputs), which for a single (non-tuple) Tensor input unpacked the
    # tensor itself instead of passing it as one argument.
    tupled_inputs = _as_tuple(inputs)
    # Make sure that gradients are saved for all inputs
    for inp in tupled_inputs:
        if isinstance(inp, torch.Tensor):
            inp.retain_grad()
    output = _differentiable_outputs(func(*tupled_inputs))

    def fail_test(msg):
        if raise_exception:
            raise RuntimeError(msg)
        return False

    for i, o in enumerate(output):
        if not o.requires_grad:
            continue

        def fn(input):
            return _as_tuple(func(*input))[i].data

        analytical, reentrant, correct_grad_sizes = get_analytical_jacobian(tupled_inputs, o)
        numerical = get_numerical_jacobian(fn, tupled_inputs, tupled_inputs, eps)
        if not correct_grad_sizes:
            return fail_test('Analytical gradient has incorrect size')
        for j, (a, n) in enumerate(zip(analytical, numerical)):
            if a.numel() != 0 or n.numel() != 0:
                # Same tolerance semantics as numpy.allclose.
                if not ((a - n).abs() <= (atol + rtol * n.abs())).all():
                    return fail_test('Jacobian mismatch for output %d with respect to input %d,\n'
                                     'numerical:%s\nanalytical:%s\n' % (i, j, n, a))
        if not reentrant:
            return fail_test('Backward is not reentrant, i.e., running backward with same '
                             'input and grad_output multiple times gives different values, '
                             'although analytical gradient matches numerical gradient')

    # check if the backward multiplies by grad_output
    zero_gradients(tupled_inputs)
    output = _differentiable_outputs(func(*tupled_inputs))
    if any(o.requires_grad for o in output):
        torch.autograd.backward(output, [torch.zeros_like(o) for o in output], create_graph=True)
        var_inputs = [inp for inp in tupled_inputs if isinstance(inp, torch.Tensor)]
        if not var_inputs:
            raise RuntimeError("no Tensors found in input")
        for inp in var_inputs:
            if inp.grad is None:
                continue
            # A zero grad_output must produce an exactly-zero input gradient.
            if not inp.grad.data.eq(0).all():
                return fail_test('backward not multiplied by grad_output')
            if inp.grad.type() != inp.type():
                return fail_test("grad is incorrect type")
            if inp.grad.size() != inp.size():
                return fail_test('grad is incorrect size')
    return True
def gradgradcheck(func, inputs, grad_outputs=None, eps=1e-6, atol=1e-5, rtol=1e-3,
                  gen_non_contig_grad_outputs=False, raise_exception=True):
    """Check gradients of gradients computed via small finite differences
    against analytical gradients

    This function checks that backpropagating through the gradients computed
    to the given grad_outputs are correct.

    The check between numerical and analytical has the same behaviour as
    numpy.allclose https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
    meaning it check that

        absolute(a - n) <= (atol + rtol * absolute(n))

    is true for all elements of analytical gradient a and numerical gradient n.

    Args:
        func (function): Python function that takes Tensor inputs and returns
            a Tensor or a tuple of Tensors
        inputs (tuple of Tensor): inputs to the function
        grad_outputs (tuple of Tensor, optional): The gradients with respect to
            the function's outputs.
        eps (float, optional): perturbation for finite differences
        atol (float, optional): absolute tolerance
        rtol (float, optional): relative tolerance
        gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is
            ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the
            randomly generated gradient outputs are made to be noncontiguous
        raise_exception: bool indicating whether to raise an exception if
            gradcheck fails. The exception gives more information about the
            exact nature of the failure. This is helpful when debugging gradchecks.

    Returns:
        True if all differences satisfy allclose condition. Raises an exception
        otherwise.
    """
    if grad_outputs is None:
        # If grad_outputs is not specified, create random Tensors of the same
        # shape, type, and device as the outputs
        def randn_like(x):
            var = torch.testing.randn_like(x if x.is_floating_point() else x.double())
            if gen_non_contig_grad_outputs:
                var = torch.testing.make_non_contiguous(var)
            var.requires_grad = True
            return var
        outputs = _as_tuple(func(*inputs))
        # Match the container type of `inputs` so `inputs + grad_outputs`
        # below concatenates without a type error.
        grad_outputs_gen = (randn_like(x) for x in outputs)
        grad_outputs = list(grad_outputs_gen) if not isinstance(inputs, tuple) else tuple(grad_outputs_gen)

    def new_func(*input_args):
        # The trailing arguments are the grad_outputs appended below; strip
        # them off before calling the user function.
        input_args = input_args[:-len(grad_outputs)]
        outputs = _differentiable_outputs(func(*input_args))
        input_args = tuple(x for x in input_args if isinstance(x, torch.Tensor) and x.requires_grad)
        grad_inputs = torch.autograd.grad(outputs, input_args, grad_outputs, create_graph=True)
        return grad_inputs

    # First-order gradcheck of the gradient function = second-order check.
    return gradcheck(new_func, inputs + grad_outputs, eps, atol, rtol, raise_exception)
import torch
from functools import reduce
def maybe_view(tensor, size, check_same_size=True):
    """Return *tensor* viewed as *size*.

    When *check_same_size* is set and the shapes already match, the tensor is
    returned untouched; otherwise a contiguous view with the target size is made.
    """
    already_right_shape = check_same_size and tensor.size() == size
    if already_right_shape:
        return tensor
    return tensor.contiguous().view(size)
def maybe_unexpand(tensor, old_size, check_same_size=True):
    """Undo broadcasting: reduce *tensor* back to *old_size* by summing over
    every dimension that broadcasting expanded.

    Leading dimensions that were prepended are summed away entirely; interior
    dimensions that grew from size 1 are summed with ``keepdim=True``.
    """
    if check_same_size and tensor.size() == old_size:
        return tensor
    leading = tensor.dim() - len(old_size)
    # Record (before any reduction) which trailing axes differ from old_size;
    # those were broadcast from size 1.
    mismatched = [axis
                  for axis, (new_extent, old_extent)
                  in enumerate(zip(tensor.size()[leading:], old_size))
                  if new_extent != old_extent]
    for _ in range(leading):
        tensor = tensor.sum(0, keepdim=False)
    for axis in mismatched:
        tensor = tensor.sum(axis, keepdim=True)
    return tensor
# Generate paddings in ONNX order based on pad in pytorch.
# Arguments:
# dim: the dimension of the tensor.
# pad: the paddings in pytorch.
# The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ...
def prepare_onnx_paddings(dim, pad):
    """Convert PyTorch pad order into ONNX pad order.

    PyTorch lists (begin, end) pairs starting from the LAST dimension:
    dim_n_begin, dim_n_end, dim_n-1_begin, ...  ONNX wants all the begins
    first (dim 0 outward), then all the ends:
    dim_0_begin, dim_1_begin, ..., dim_0_end, ..., dim_n_end.
    """
    assert isinstance(dim, int)
    assert len(pad) <= dim * 2
    # Extend to one (begin, end) pair per dimension; untouched leading dims get 0.
    padded = list(pad) + [0] * (dim * 2 - len(pad))
    begins = padded[0::2]
    ends = padded[1::2]
    # The PyTorch pairs run last-dim-first, so reverse each half.
    onnx_order = begins[::-1] + ends[::-1]
    assert len(onnx_order) == dim * 2
    return onnx_order
# Check whether the op enable broadcasting, and whether it is supported by ONNX.
# If dims1 and dims2 are different, then broadcast is True.
# We always assume the combination of dims1 and dims2 is broadcastable.
# The following types of broadcasting are supported in ONNX:
# 1) Only one element in dims2, such as dims2 = [1, 1]
# 2) dims2 is suffix of dims1, such as dims1 = [2, 3, 4], and dims2 = [3, 4]
# Details can be found here: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm
def check_onnx_broadcast(dims1, dims2):
    """Check whether broadcasting dims2 against dims1 is expressible in ONNX.

    Returns True when broadcasting is needed (the dims differ), False when the
    shapes already match.  Raises ValueError when the required broadcast is not
    supported by ONNX.  ONNX supports exactly two patterns:
    1) dims2 has a single element overall, e.g. dims2 = [1, 1]
    2) dims2 is a suffix of dims1, e.g. dims1 = [2, 3, 4], dims2 = [3, 4]
    Details: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm
    """
    broadcast = False
    supported = True
    len1 = len(dims1)
    len2 = len(dims2)
    # Initializer 1 makes an empty shape behave as a scalar (numel == 1)
    # instead of reduce() raising TypeError on an empty sequence.  (The
    # original also computed numel1, which was never used.)
    numel2 = reduce(lambda x, y: x * y, dims2, 1)
    if len1 < len2:
        broadcast = True
        if numel2 != 1:
            supported = False
    elif len1 > len2:
        broadcast = True
        if numel2 != 1 and dims1[len1 - len2:] != dims2:
            supported = False
    else:
        if dims1 != dims2:
            broadcast = True
            if numel2 != 1:
                supported = False
    if not supported:
        raise ValueError("Numpy style broadcasting is not supported in ONNX. "
                         "Input dims are: {}, {}".format(dims1, dims2))
    return broadcast
class WeightModel:
    """Plain data container for a single weight-receiving record.

    All attributes are public; the ``set*`` methods are kept for callers that
    use setter-style access.  Fix: ``setMessageID`` was missing even though
    every other attribute has a setter — added for consistency.
    """

    def __init__(self, ReceiveID='', MessageID='', ReceiveType=20, BatchID='', FactoryID=0, FactoryName='',
                 FarmID=0, FarmName='', QRCode='', SpecificationID=0, CartonWeight=0, RealWeight=0, GrossWeight=0, StandardWeight=0,
                 ReceiveCount=1, ReceiveTime=''):
        self.ReceiveID = ReceiveID
        self.MessageID = MessageID
        self.ReceiveType = ReceiveType      # NOTE(review): 20 appears to be a message-type code — confirm against sender
        self.BatchID = BatchID
        self.FactoryID = FactoryID
        self.FactoryName = FactoryName
        self.FarmID = FarmID
        self.FarmName = FarmName
        self.QRCode = QRCode
        self.SpecificationID = SpecificationID
        self.CartonWeight = CartonWeight
        self.RealWeight = RealWeight
        self.GrossWeight = GrossWeight
        self.StandardWeight = StandardWeight
        self.ReceiveCount = ReceiveCount
        self.ReceiveTime = ReceiveTime

    def setReceiveID(self, ReceiveID=''):
        self.ReceiveID = ReceiveID

    def setMessageID(self, MessageID=''):
        # Added: previously the only field without a setter.
        self.MessageID = MessageID

    def setReceiveType(self, ReceiveType=20):
        self.ReceiveType = ReceiveType

    def setBatchID(self, BatchID=''):
        self.BatchID = BatchID

    def setFactoryID(self, FactoryID=0):
        self.FactoryID = FactoryID

    def setFactoryName(self, FactoryName=''):
        self.FactoryName = FactoryName

    def setFarmID(self, FarmID=0):
        self.FarmID = FarmID

    def setFarmName(self, FarmName=''):
        self.FarmName = FarmName

    def setSpecificationID(self, SpecificationID=0):
        self.SpecificationID = SpecificationID

    def setCartonWeight(self, CartonWeight=0):
        self.CartonWeight = CartonWeight

    def setRealWeight(self, RealWeight=0):
        self.RealWeight = RealWeight

    def setGrossWeight(self, GrossWeight=0):
        self.GrossWeight = GrossWeight

    def setStandardWeight(self, StandardWeight=0):
        self.StandardWeight = StandardWeight

    def setReceiveCount(self, ReceiveCount=1):
        self.ReceiveCount = ReceiveCount

    def setReceiveTime(self, ReceiveTime=''):
        self.ReceiveTime = ReceiveTime

    def setQRCode(self, QRCode=''):
        self.QRCode = QRCode
import RPi.GPIO as GPIO
import time
import sys
class HX711:
    """Bit-banged driver for the HX711 24-bit load-cell ADC.

    Fixes vs. the original:
    * ``set_gain`` compared integers with ``is`` (unreliable identity check,
      a SyntaxWarning on modern CPython) inside a try/except that could never
      fire, so an unknown gain silently left ``GAIN`` at 0; unknown values now
      fall back to the documented default of 128.
    * ``read_average``/``tare`` no longer shadow the builtin ``sum``.
    """

    # Extra clock pulses issued after each 24-bit read; they select the
    # channel/gain of the *next* conversion (HX711 datasheet, page 4/8).
    _GAIN_PULSES = {128: 3, 64: 2, 32: 1}

    def __init__(self, dout, pd_sck, gain=128):
        """
        Set GPIO Mode, and pin for communication with HX711
        :param dout: Serial Data Output pin
        :param pd_sck: Power Down and Serial Clock Input pin
        :param gain: set gain 128, 64, 32 (unknown values fall back to 128)
        """
        self.GAIN = 0
        self.OFFSET = 0
        self.SCALE = 1
        # Setup the gpio pin numbering system
        GPIO.setmode(GPIO.BCM)
        # Set the pin numbers
        self.PD_SCK = pd_sck
        self.DOUT = dout
        # Clock is driven by us (output); data is read from the chip (input).
        GPIO.setup(self.PD_SCK, GPIO.OUT)
        GPIO.setup(self.DOUT, GPIO.IN)
        # Power up the chip
        self.power_up()
        self.set_gain(gain)

    def set_gain(self, gain=128):
        """Select gain 128 or 64 (channel A) or 32 (channel B).

        An unrecognised *gain* falls back to the default of 128.
        """
        self.GAIN = self._GAIN_PULSES.get(gain, self._GAIN_PULSES[128])
        GPIO.output(self.PD_SCK, False)
        # Dummy conversion so the newly selected gain applies to the next read.
        self.read()

    def set_scale(self, scale):
        """
        Set scale
        :param scale, scale
        """
        self.SCALE = scale

    def set_offset(self, offset):
        """
        Set the offset
        :param offset: offset
        """
        self.OFFSET = offset

    def get_scale(self):
        """
        Returns value of scale
        """
        return self.SCALE

    def get_offset(self):
        """
        Returns value of offset
        """
        return self.OFFSET

    def read(self):
        """
        Read data from the HX711 chip
        :return reading from the HX711
        """
        # DOUT going low signals that a conversion is ready (datasheet p.4).
        while not (GPIO.input(self.DOUT) == 0):
            # Busy-wait; printing here produces very noisy output.
            pass
        # Clock out the 24 data bits, MSB first, as described in the datasheet:
        # https://cdn.sparkfun.com/datasheets/Sensors/ForceFlex/hx711_english.pdf
        count = 0
        for _ in range(24):
            GPIO.output(self.PD_SCK, True)
            count = count << 1
            GPIO.output(self.PD_SCK, False)
            if GPIO.input(self.DOUT):
                count += 1
        # 25th pulse finishes the frame; XOR converts the two's-complement
        # 24-bit value into an unsigned offset-binary number.
        GPIO.output(self.PD_SCK, True)
        count = count ^ 0x800000
        GPIO.output(self.PD_SCK, False)
        # Remaining pulses select channel and gain factor for the next reading.
        for _ in range(self.GAIN):
            GPIO.output(self.PD_SCK, True)
            GPIO.output(self.PD_SCK, False)
        return count

    def read_average(self, times=16):
        """
        Calculate average value from *times* consecutive readings.
        :param times: measure x amount of time to get average
        """
        total = 0
        for _ in range(times):
            total += self.read()
        return total / times

    def get_grams(self, times=16):
        """
        :param times: Set value to calculate average,
        be aware that high number of times will have a
        slower runtime speed.
        :return float weight in grams
        """
        value = self.read_average(times) - self.OFFSET
        grams = value / self.SCALE
        return grams

    def tare(self, times=16):
        """
        Tare functionality for calibration: store the current average
        reading as the zero offset.
        :param times: set value to calculate average
        """
        self.set_offset(self.read_average(times))

    def power_down(self):
        """
        Power the chip down (PD_SCK held high > 60 us powers it down).
        """
        GPIO.output(self.PD_SCK, False)
        GPIO.output(self.PD_SCK, True)

    def power_up(self):
        """
        Power the chip up (PD_SCK low).
        """
        GPIO.output(self.PD_SCK, False)
if __name__ == "__main__":
    # Manual smoke test: DOUT on GPIO 5, PD_SCK on GPIO 6 (BCM numbering).
    hx = HX711(5,6)
    #print('Weight reading: {}'.format(hx.get_grams()))
"""A python 3 library for various
motors and servos to connect to a raspberry pi"""
# ========================= HEADER ===================================
# title :rpiMotorlib.py
# description :A python 3 library for various motors
# and servos to connect to a raspberry pi
# This file is for stepper motor tested on
# 28BYJ-48 unipolar stepper motor with ULN2003 = BYJMotor class
# Bipolar Nema stepper motor with L298N = BYJMotor class.
# Bipolar Nema Stepper motor TB6612FNG = BYJMotor class
# Bipolar Nema Stepper motor A4988 Driver = A4988Nema class
# Bipolar Nema Stepper motor DRV8825 Driver = A4988Nema class
# Bipolar Nema Stepper motor LV8729 Driver = A4988Nema class
# Bipolar Nema Stepper motor A3967 Easy Driver = A3967EasyNema class
# Main author :Gavin Lyons
# Version :See changelog at url
# url :https://github.com/gavinlyonsrepo/RpiMotorLib
# mail :glyons66@hotmail.com
# python_version :3.5.3
# ========================== IMPORTS ======================
# Import the system modules needed to run rpiMotorlib.py
import sys
import time
import RPi.GPIO as GPIO
# ==================== CLASS SECTION ===============================
class StopMotorInterrupt(Exception):
    """Raised inside a motor run loop to abort it after motor_stop() is called."""
class BYJMotor(object):
    """class to control a 28BYJ-48 stepper motor with ULN2003 controller
    by a raspberry pi"""
    def __init__(self, name="BYJMotorX", motor_type="28BYJ"):
        # name: label used only in reports; motor_type: "28BYJ" or "Nema",
        # picks the degrees-per-step figure used by the verbose report.
        self.name = name
        self.motor_type = motor_type
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        # Polled inside motor_run; motor_stop() sets it True to abort the run.
        self.stop_motor = False
    def motor_stop(self):
        """ Stop the motor """
        self.stop_motor = True
    def motor_run(self, gpiopins, wait=.001, steps=512, ccwise=False,
                  verbose=False, steptype="half", initdelay=.001):
        """motor_run, moves stepper motor based on 7 inputs
        (1) GPIOPins, type=list of ints 4 long, help="list of
        4 GPIO pins to connect to motor controller
        These are the four GPIO pins we will
        use to drive the stepper motor, in the order
        they are plugged into the controller board. So,
        GPIO 18 is plugged into Pin 1 on the stepper motor.
        (2) wait, type=float, default=0.001, help=Time to wait
        (in seconds) between steps.
        (3) steps, type=int, default=512, help=Number of steps sequence's
        to execute. Default is one revolution , 512 (for a 28BYJ-48)
        (4) counterclockwise, type=bool default=False
        help="Turn stepper counterclockwise"
        (5) verbose, type=bool type=bool default=False
        help="Write pin actions",
        (6) steptype, type=string , default=half help= type of drive to
        step motor 3 options full step half step or wave drive
        where full = fullstep , half = half step , wave = wave drive.
        (7) initdelay, type=float, default=1mS, help= Intial delay after
        GPIO pins initialized but before motor is moved.
        """
        # NOTE(review): the message says "greater than 0" but steps == 0 is
        # accepted (the run loop simply does nothing) — confirm intent.
        if steps < 0:
            print("Error BYJMotor 101: Step number must be greater than 0")
            quit()
        try:
            self.stop_motor = False
            for pin in gpiopins:
                GPIO.setup(pin, GPIO.OUT)
                GPIO.output(pin, False)
            time.sleep(initdelay)
            # select step based on user input
            # Each step_sequence is a list containing GPIO pins that should be set to High
            if steptype == "half":  # half stepping.
                step_sequence = list(range(0, 8))
                step_sequence[0] = [gpiopins[0]]
                step_sequence[1] = [gpiopins[0], gpiopins[1]]
                step_sequence[2] = [gpiopins[1]]
                step_sequence[3] = [gpiopins[1], gpiopins[2]]
                step_sequence[4] = [gpiopins[2]]
                step_sequence[5] = [gpiopins[2], gpiopins[3]]
                step_sequence[6] = [gpiopins[3]]
                step_sequence[7] = [gpiopins[3], gpiopins[0]]
            elif steptype == "full":  # full stepping: two coils energised at a time.
                step_sequence = list(range(0, 4))
                step_sequence[0] = [gpiopins[0], gpiopins[1]]
                step_sequence[1] = [gpiopins[1], gpiopins[2]]
                step_sequence[2] = [gpiopins[2], gpiopins[3]]
                step_sequence[3] = [gpiopins[0], gpiopins[3]]
            elif steptype == "wave":  # wave driving: one coil at a time.
                step_sequence = list(range(0, 4))
                step_sequence[0] = [gpiopins[0]]
                step_sequence[1] = [gpiopins[1]]
                step_sequence[2] = [gpiopins[2]]
                step_sequence[3] = [gpiopins[3]]
            else:
                print("Error: BYJMotor 102 : unknown step type : half, full or wave")
                print(steptype)
                quit()
            # To run motor in reverse we flip the sequence order.
            if ccwise:
                step_sequence.reverse()
            def display_degree():
                """ display the degree value at end of run if verbose"""
                if self.motor_type == "28BYJ":
                    degree = 1.422222
                    print("Size of turn in degrees = {}".format(round(steps/degree, 2)))
                elif self.motor_type == "Nema":
                    degree = 7.2
                    print("Size of turn in degrees = {}".format(round(steps*degree, 2)))
                else:
                    # Unknown Motor type
                    print("Warning 201 : Unknown Motor Type : {}".format(self.motor_type))
                    print("Size of turn in degrees = N/A")
            def print_status(enabled_pins):
                """ Print status of pins."""
                if verbose:
                    print("Next Step: Step sequence remaining : {} ".format(steps_remaining))
                    for pin_print in gpiopins:
                        if pin_print in enabled_pins:
                            print("GPIO pin on {}".format(pin_print))
                        else:
                            print("GPIO pin off {}".format(pin_print))
            # Iterate through the pins turning them on and off.
            steps_remaining = steps
            while steps_remaining > 0:
                for pin_list in step_sequence:
                    for pin in gpiopins:
                        if self.stop_motor:
                            raise StopMotorInterrupt
                        else:
                            if pin in pin_list:
                                GPIO.output(pin, True)
                            else:
                                GPIO.output(pin, False)
                    print_status(pin_list)
                    time.sleep(wait)
                steps_remaining -= 1
        except KeyboardInterrupt:
            print("User Keyboard Interrupt : RpiMotorLib: ")
        except StopMotorInterrupt:
            print("Stop Motor Interrupt : RpiMotorLib: ")
        except Exception as motor_error:
            print(sys.exc_info()[0])
            print(motor_error)
            print("Error : BYJMotor 103 : RpiMotorLib : Unexpected error:")
        else:
            # print report status if everything went well
            if verbose:
                print("\nRpiMotorLib, Motor Run finished, Details:.\n")
                print("Motor type = {}".format(self.motor_type))
                print("Initial delay = {}".format(initdelay))
                print("GPIO pins = {}".format(gpiopins))
                print("Wait time = {}".format(wait))
                print("Number of step sequences = {}".format(steps))
                print("Size of step sequence = {}".format(len(step_sequence)))
                print("Number of steps = {}".format(steps*len(step_sequence)))
                display_degree()
                print("Counter clockwise = {}".format(ccwise))
                print("Verbose = {}".format(verbose))
                print("Steptype = {}".format(steptype))
        finally:
            # De-energise all coils at the end, whatever happened above.
            for pin in gpiopins:
                GPIO.output(pin, False)
class A4988Nema(object):
    """ Class to control a Nema bi-polar stepper motor with a A4988 also tested with DRV8825"""
    def __init__(self, direction_pin, step_pin, mode_pins, motor_type="A4988"):
        """ class init method 3 inputs
        (1) direction type=int , help=GPIO pin connected to DIR pin of IC
        (2) step_pin type=int , help=GPIO pin connected to STEP of IC
        (3) mode_pins type=tuple of 3 ints, help=GPIO pins connected to
        Microstep Resolution pins MS1-MS3 of IC, can be set to (-1,-1,-1) to turn off
        GPIO resolution.
        (4) motor_type type=string, help=Type of motor two options: A4988 or DRV8825
        """
        self.motor_type = motor_type
        self.direction_pin = direction_pin
        self.step_pin = step_pin
        # A leading -1 means the MS pins are not wired; store False to skip them.
        if mode_pins[0] != -1:
            self.mode_pins = mode_pins
        else:
            self.mode_pins = False
        self.stop_motor = False
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
    def motor_stop(self):
        """ Stop the motor """
        self.stop_motor = True
    def resolution_set(self, steptype):
        """ method to calculate step resolution
        based on motor type and steptype.
        Writes the MS1-MS3 truth-table values (per driver datasheet) to the
        mode pins, if they are wired. Exits the process on invalid input."""
        if self.motor_type == "A4988":
            resolution = {'Full': (0, 0, 0),
                          'Half': (1, 0, 0),
                          '1/4': (0, 1, 0),
                          '1/8': (1, 1, 0),
                          '1/16': (1, 1, 1)}
        elif self.motor_type == "DRV8825":
            resolution = {'Full': (0, 0, 0),
                          'Half': (1, 0, 0),
                          '1/4': (0, 1, 0),
                          '1/8': (1, 1, 0),
                          '1/16': (0, 0, 1),
                          '1/32': (1, 0, 1)}
        elif self.motor_type == "LV8729":
            resolution = {'Full': (0, 0, 0),
                          'Half': (1, 0, 0),
                          '1/4': (0, 1, 0),
                          '1/8': (1, 1, 0),
                          '1/16': (0, 0, 1),
                          '1/32': (1, 0, 1),
                          '1/64': (0, 1, 1),
                          '1/128': (1, 1, 1)}
        else:
            print("Error invalid motor_type: {}".format(self.motor_type))
            quit()
        # error check stepmode
        if steptype in resolution:
            pass
        else:
            print("Error invalid steptype: {}".format(steptype))
            quit()
        if self.mode_pins != False:
            GPIO.output(self.mode_pins, resolution[steptype])
    def motor_go(self, clockwise=False, steptype="Full",
                 steps=200, stepdelay=.005, verbose=False, initdelay=.05):
        """ motor_go, moves stepper motor based on 6 inputs
        (1) clockwise, type=bool default=False
        help="Turn stepper counterclockwise"
        (2) steptype, type=string , default=Full help= type of drive to
        step motor 5 options
        (Full, Half, 1/4, 1/8, 1/16) 1/32 for DRV8825 only
        (3) steps, type=int, default=200, help=Number of steps sequence's
        to execute. Default is one revolution , 200 in Full mode.
        (4) stepdelay, type=float, default=0.05, help=Time to wait
        (in seconds) between steps.
        (5) verbose, type=bool type=bool default=False
        help="Write pin actions",
        (6) initdelay, type=float, default=1mS, help= Intial delay after
        GPIO pins initialized but before motor is moved.
        """
        self.stop_motor = False
        # setup GPIO
        GPIO.setup(self.direction_pin, GPIO.OUT)
        GPIO.setup(self.step_pin, GPIO.OUT)
        GPIO.output(self.direction_pin, clockwise)
        if self.mode_pins != False:
            GPIO.setup(self.mode_pins, GPIO.OUT)
        try:
            # dict resolution
            self.resolution_set(steptype)
            time.sleep(initdelay)
            # One rising edge on STEP per step; stepdelay applies to both
            # halves of the pulse, so one step takes 2 * stepdelay seconds.
            for i in range(steps):
                if self.stop_motor:
                    raise StopMotorInterrupt
                else:
                    GPIO.output(self.step_pin, True)
                    time.sleep(stepdelay)
                    GPIO.output(self.step_pin, False)
                    time.sleep(stepdelay)
                    if verbose:
                        print("Steps count {}".format(i+1), end="\r", flush=True)
        except KeyboardInterrupt:
            print("User Keyboard Interrupt : RpiMotorLib:")
        except StopMotorInterrupt:
            print("Stop Motor Interrupt : RpiMotorLib: ")
        except Exception as motor_error:
            print(sys.exc_info()[0])
            print(motor_error)
            print("RpiMotorLib : Unexpected error:")
        else:
            # print report status
            if verbose:
                print("\nRpiMotorLib, Motor Run finished, Details:.\n")
                print("Motor type = {}".format(self.motor_type))
                print("Clockwise = {}".format(clockwise))
                print("Step Type = {}".format(steptype))
                print("Number of steps = {}".format(steps))
                print("Step Delay = {}".format(stepdelay))
                print("Intial delay = {}".format(initdelay))
                print("Size of turn in degrees = {}"
                      .format(degree_calc(steps, steptype)))
        finally:
            # De-energise control pins regardless of how the run ended.
            GPIO.output(self.step_pin, False)
            GPIO.output(self.direction_pin, False)
            if self.mode_pins != False:
                for pin in self.mode_pins:
                    GPIO.output(pin, False)
class A3967EasyNema(object):
    """ Class to control a Nema bi-polar stepper motor with A3967 Easy driver
    motor controller """
    def __init__(self, direction_pin, step_pin, mode_pins):
        """ class init method 3 inputs
        (1) direction type=int , help=GPIO pin connected to DIR pin of IC
        (2) step_pin type=int , help=GPIO pin connected to STEP of IC
        (3) mode_pins type=tuple of 2 ints, help=GPIO pins connected to
        Microstep Resolution pins MS1-MS2 of IC, can be set to (-1,-1) to turn off
        GPIO resolution.
        """
        self.direction_pin = direction_pin
        self.step_pin = step_pin
        # A leading -1 means the MS pins are not wired; store False to skip them.
        if mode_pins[0] != -1:
            self.mode_pins = mode_pins
        else:
            self.mode_pins = False
        self.stop_motor = False
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
    def motor_stop(self):
        """ Stop the motor """
        self.stop_motor = True
    def motor_move(self, stepdelay=.05, steps=200, clockwise=False,
                   verbose=False, steptype="Full", initdelay=.1):
        """ motor_move, moves stepper motor based on 6 inputs
        (1) stepdelay type=float, default=0.05, help=Time to wait
        (in seconds) between steps.
        (2) steps, type=int, default=200, help=Number of steps sequence's
        to execute. Default is 200 ,
        (3) clockwise, type=bool default=False
        help="Turn stepper counterclockwise"
        (4) verbose, type=bool type=bool default=False
        help="Write pin actions",
        (5) steptype, type=string , default=Full help= type of drive to
        step motor 4 options
        (Full, Half, 1/4, 1/8)
        (6) initdelay, type=float, default=1mS, help= Intial delay after
        GPIO pins initialized but before motor is moved.
        """
        def ms_steps_pins():
            """ Method to handle MS pins setup """
            # MS1/MS2 truth table from the A3967 datasheet.
            resolution = {'Full': (0, 0),
                          'Half': (1, 0),
                          '1/4': (0, 1),
                          '1/8': (1, 1)}
            # error check stepmode input
            if steptype in resolution:
                pass
            else:
                print("Error invalid steptype: {}".format(steptype))
                quit()
            if self.mode_pins != False:
                GPIO.output(self.mode_pins, resolution[steptype])
        # setup GPIO
        self.stop_motor = False
        GPIO.setup(self.direction_pin, GPIO.OUT)
        GPIO.setup(self.step_pin, GPIO.OUT)
        GPIO.output(self.direction_pin, clockwise)
        if self.mode_pins != False:
            GPIO.setup(self.mode_pins, GPIO.OUT)
        ms_steps_pins()
        time.sleep(initdelay)
        try:
            # One low->high transition on STEP per step; each step takes
            # 2 * stepdelay seconds.
            for i in range(steps):
                if self.stop_motor:
                    raise StopMotorInterrupt
                else:
                    GPIO.output(self.step_pin, False)
                    time.sleep(stepdelay)
                    GPIO.output(self.step_pin, True)
                    time.sleep(stepdelay)
                    if verbose:
                        print("Steps count {}".format(i+1), end="\r", flush=True)
        except KeyboardInterrupt:
            print("User Keyboard Interrupt : RpiMotorLib:")
        except StopMotorInterrupt:
            print("Stop Motor Interrupt : RpiMotorLib: ")
        except Exception as motor_error:
            print(sys.exc_info()[0])
            print(motor_error)
            print("RpiMotorLib : Unexpected error:")
        else:
            # print report status
            if verbose:
                print("\nRpiMotorLib, Motor Run finished, Details:.\n")
                print("Clockwise = {}".format(clockwise))
                print("Step Type = {}".format(steptype))
                print("Number of steps = {}".format(steps))
                print("Step Delay = {}".format(stepdelay))
                print("Intial delay = {}".format(initdelay))
                print("Size of turn in degrees = {}"
                      .format(degree_calc(steps, steptype)))
        finally:
            # De-energise control pins regardless of how the run ended.
            GPIO.output(self.step_pin, False)
            GPIO.output(self.direction_pin, False)
            if self.mode_pins != False:
                for pin in self.mode_pins:
                    GPIO.output(pin, False)
def degree_calc(steps, steptype):
    """Return the size of the turn in degrees for *steps* steps of *steptype*.

    Raises KeyError for an unknown steptype (same as the original).
    """
    # Degrees moved by a single step at each microstep resolution
    # (1.8 degrees full-step NEMA motor, halved at each level).
    step_angle = {
        'Full': 1.8,
        'Half': 0.9,
        '1/4': .45,
        '1/8': .225,
        '1/16': 0.1125,
        '1/32': 0.05625,
        '1/64': 0.028125,
        '1/128': 0.0140625,
    }[steptype]
    return steps * step_angle
def importtest(text):
    """Import-time debug hook; currently a no-op (the print is disabled)."""
    # The argument is intentionally unused while debugging output is off.
    _ = text
# ===================== MAIN ===============================
if __name__ == '__main__':
    # Run as a script: just exercises the (no-op) import hook.
    importtest("main")
else:
    # Imported as a library module.
    importtest("Imported {}".format(__name__))
# ===================== END ===============================
# ========================== IMPORTS ======================
# Import the system modules needed to run rpiMotorlib.py
import time
import RPi.GPIO as GPIO
# ==================== CLASS SECTION ===============================
class L298NMDc():
    """ Class to control DC motor via L298n motor controller
    6 methods 1. __init__ 2. forward
    3.backward 4.stop 5 .brake 6.cleanup"""
    def __init__(self, pin_one, pin_two,
                 pwm_pin, freq=50, verbose=False, name="DCMotorX"):
        """ init method
        (1) pin_one, type=int, GPIO pin connected to IN1 or IN3
        (2) Pin two type=int, GPIO pin connected to IN2 or IN4
        (3) pwm_pin type=int, GPIO pin connected to EnA or ENB
        (4) freq in Hz default 50
        (5) verbose, type=bool type=bool default=False
        help="Write pin actions"
        (6) name, type=string, name attribute
        """
        self.name = name
        self.pin_one = pin_one
        self.pin_two = pin_two
        self.pwm_pin = pwm_pin
        self.freq = freq
        self.verbose = verbose
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.pin_one, GPIO.OUT)
        GPIO.setup(self.pin_two, GPIO.OUT)
        GPIO.setup(self.pwm_pin, GPIO.OUT)
        self.my_pwm = GPIO.PWM(self.pwm_pin, self.freq)
        # Cache the last duty cycle so repeated calls with the same value
        # skip the redundant ChangeDutyCycle write.
        self.last_pwm = 0
        self.my_pwm.start(self.last_pwm)
        if self.verbose:
            print(" Motor initialized named: {} ".format(self.name))
            print(" Pin one In1 or In3: {}".format(self.pin_one))
            print(" Pin two In2 or in4: {}".format(self.pin_two))
            print(" Pin pwm enA or enB: {}".format(self.pwm_pin))
            print(" Frequency: {} ".format(self.freq))
    def forward(self, duty_cycle=50):
        """ Move motor forwards passed duty cycle for speed control """
        # IN1 high / IN2 low drives the H-bridge forward.
        GPIO.output(self.pin_one, True)
        GPIO.output(self.pin_two, False)
        if self.verbose:
            print("Moving Motor Forward : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def backward(self, duty_cycle=50):
        """ Move motor backwards passed duty cycle for speed control"""
        # IN1 low / IN2 high reverses the H-bridge.
        GPIO.output(self.pin_one, False)
        GPIO.output(self.pin_two, True)
        if self.verbose:
            print("Moving Motor Backward : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def stop(self, duty_cycle=0):
        """ Stop motor (both inputs low = coast)"""
        GPIO.output(self.pin_one, False)
        GPIO.output(self.pin_two, False)
        if self.verbose:
            print("Stoping Motor : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def brake(self, duty_cycle=100):
        """ brake motor (both inputs high = short brake)"""
        GPIO.output(self.pin_one, True)
        GPIO.output(self.pin_two, True)
        if self.verbose:
            print("Braking Motor : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def cleanup(self, clean_up=False):
        """ cleanup all GPIO connections used in event of error by lib user.
        Pass clean_up=True to also release all GPIO channels globally."""
        if self.verbose:
            print("rpi_dc_lib.py : Cleaning up")
        GPIO.output(self.pin_one, False)
        GPIO.output(self.pin_two, False)
        self.my_pwm.ChangeDutyCycle(0)
        self.my_pwm.stop()
        if clean_up:
            GPIO.cleanup()
class DRV8833NmDc():
    """ Class to control DC motor via L9110S and DRV8833 motor controller
    6 methods 1. __init__ 2. forward
    3.backward 4.stop 5.brake 6.cleanup

    Fix vs. original: stop() issued ChangeDutyCycle unconditionally and then
    possibly a second time in the last_pwm check; it now writes the duty cycle
    exactly once and keeps last_pwm in sync (consistent with L298NMDc.stop).
    """
    def __init__(self, pin_one, pin_two,
                 freq=50, verbose=False, name="DCMotorY"):
        """ init method
        (1) pin_one, type=int, GPIO pin direction pin connected to IN1 or IN3
        (2) Pin two type=int, GPIO pin PWM speed pin connected to IN2 or IN4
        (3) freq in Hz default 50
        (4) verbose, type=bool type=bool default=False
        help="Write pin actions"
        (5) name, type=string, name attribute
        """
        self.name = name
        self.pin_one = pin_one
        self.pin_two = pin_two
        self.freq = freq
        self.verbose = verbose
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.pin_one, GPIO.OUT)
        GPIO.setup(self.pin_two, GPIO.OUT)
        # pin_two doubles as the PWM speed pin on this controller.
        self.my_pwm = GPIO.PWM(self.pin_two, self.freq)
        # Cache the last duty cycle so unchanged values skip the PWM write.
        self.last_pwm = 0
        self.my_pwm.start(self.last_pwm)
        if self.verbose:
            print(" Motor initialized named: {} ".format(self.name))
            print(" Direction pin In1 or In3: {}".format(self.pin_one))
            print(" PWM speed pin In2 or in4: {}".format(self.pin_two))
            print(" Frequency: {} ".format(self.freq))
    def forward(self, duty_cycle=50):
        """ Move motor forwards passed duty cycle for speed control """
        GPIO.output(self.pin_one, True)
        if self.verbose:
            print("Moving Motor Forward : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def backward(self, duty_cycle=50):
        """ Move motor backwards passed duty cycle for speed control"""
        GPIO.output(self.pin_one, False)
        if self.verbose:
            print("Moving Motor Backward : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def stop(self, duty_cycle=0):
        """ Stop motor (direction pin low, PWM set to duty_cycle)"""
        GPIO.output(self.pin_one, False)
        if self.verbose:
            print("Stoping Motor : Duty Cycle = {}".format(duty_cycle))
        # Single write (original issued a redundant duplicate call).
        self.my_pwm.ChangeDutyCycle(duty_cycle)
        self.last_pwm = duty_cycle
    def brake(self, duty_cycle=100):
        """ brake motor"""
        GPIO.output(self.pin_one, True)
        if self.verbose:
            print("Braking Motor : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def cleanup(self, clean_up=False):
        """ cleanup all GPIO connections used in event of error by lib user.
        Pass clean_up=True to also release all GPIO channels globally."""
        if self.verbose:
            print("rpi_dc_lib.py : Cleaning up")
        GPIO.output(self.pin_one, False)
        GPIO.output(self.pin_two, False)
        self.my_pwm.ChangeDutyCycle(0)
        self.my_pwm.stop()
        if clean_up:
            GPIO.cleanup()
class TranDc():
    """ Class to control DC motor via a transistor """
    def __init__(self, pin, freq=50, verbose=False):
        """ init method
        (1) pin, type=int, GPIO pin connected to base of transistor
        (2) PWM freq in Hz default 50
        (3) verbose, type=bool default=False
        help="Write pin actions"
        """
        self.pin = pin
        self.freq = freq
        self.verbose = verbose
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.pin, GPIO.OUT)
        # PWM on the base pin controls motor speed; start at 0% (off).
        self.motor_pwm = GPIO.PWM(self.pin, self.freq)
        self.motor_pwm.start(0)
    def dc_motor_run(self, speed=10, step_delay=1):
        """ controls speed of motor passed speed and step_delay
        speed is PWm duty cycle in percentage, delay is in seconds """
        self.motor_pwm.ChangeDutyCycle(speed)
        # Hold the new speed for step_delay seconds before returning.
        time.sleep(step_delay)
        if self.verbose:
            print("Speed PWM duty cycle percentage {}".format(speed))
    def dc_clean_up(self):
        """ Stop PWM and drive the transistor base low to release the motor. """
        self.motor_pwm.ChangeDutyCycle(0)
        self.motor_pwm.stop()
        GPIO.output(self.pin, False)
class TB6612FNGDc():
    """ Class to control DC motor via TB6612FNGDC motor controller
    6 methods 1. __init__ 2. forward
    3.backward 4.stop 5 .brake 6.cleanup 7.standby

    Fixes vs. original: standby() was declared without ``self`` (calling it on
    an instance passed the instance as the pin), it is now a @staticmethod;
    and the standby pin is configured as an output before being driven low as
    well as high (previously only the enable branch did GPIO.setup).
    """
    def __init__(self, pin_one, pin_two,
                 pwm_pin, freq=50, verbose=False, name="DCMotorX"):
        """ init method
        (1) pin_one, type=int, GPIO pin connected to AI1 or BI1
        (2) Pin two type=int, GPIO pin connected to AI2 or BI2
        (3) pwm_pin type=int, GPIO pin connected to PWA or PWB
        (4) freq in Hz default 50
        (5) verbose, type=bool type=bool default=False
        help="Write pin actions"
        (6) name, type=string, name attribute
        """
        self.name = name
        self.pin_one = pin_one
        self.pin_two = pin_two
        self.pwm_pin = pwm_pin
        self.freq = freq
        self.verbose = verbose
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.pin_one, GPIO.OUT)
        GPIO.setup(self.pin_two, GPIO.OUT)
        GPIO.setup(self.pwm_pin, GPIO.OUT)
        self.my_pwm = GPIO.PWM(self.pwm_pin, self.freq)
        # Cache the last duty cycle so unchanged values skip the PWM write.
        self.last_pwm = 0
        self.my_pwm.start(self.last_pwm)
        if self.verbose:
            print(" Motor initialized named: {} ".format(self.name))
            print(" Pin one AI1 or BI1: {}".format(self.pin_one))
            print(" Pin two AI2 or BI2: {}".format(self.pin_two))
            print(" Pin pwm PWA or PWB: {}".format(self.pwm_pin))
            print(" Frequency: {} ".format(self.freq))
    @staticmethod
    def standby(standby_pin, standby_on=True):
        """Enables/disables the standby mode of TB661FNG controller.

        standby_on=True drives the STBY pin high (controller active);
        False drives it low (controller in standby).
        """
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        # Configure before driving in BOTH cases (original only set up the
        # pin in the enable branch).
        GPIO.setup(standby_pin, GPIO.OUT)
        GPIO.output(standby_pin, standby_on)
    def forward(self, duty_cycle=50):
        """ Move motor forwards passed duty cycle for speed control """
        GPIO.output(self.pin_one, True)
        GPIO.output(self.pin_two, False)
        if self.verbose:
            print("Moving Motor Forward : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def backward(self, duty_cycle=50):
        """ Move motor backwards passed duty cycle for speed control"""
        GPIO.output(self.pin_one, False)
        GPIO.output(self.pin_two, True)
        if self.verbose:
            print("Moving Motor Backward : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def stop(self, duty_cycle=0):
        """ Stop motor (both inputs low = coast)"""
        GPIO.output(self.pin_one, False)
        GPIO.output(self.pin_two, False)
        if self.verbose:
            print("Stoping Motor : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def brake(self, duty_cycle=100):
        """ brake motor (both inputs high = short brake)"""
        GPIO.output(self.pin_one, True)
        GPIO.output(self.pin_two, True)
        if self.verbose:
            print("Braking Motor : Duty Cycle = {}".format(duty_cycle))
        if duty_cycle != self.last_pwm:
            self.my_pwm.ChangeDutyCycle(duty_cycle)
            self.last_pwm = duty_cycle
    def cleanup(self, clean_up=False):
        """ cleanup all GPIO connections used in event of error by lib user.
        Pass clean_up=True to also release all GPIO channels globally."""
        if self.verbose:
            print("rpi_dc_lib.py : Cleaning up : {}".format(self.name))
        GPIO.output(self.pin_one, False)
        GPIO.output(self.pin_two, False)
        self.my_pwm.ChangeDutyCycle(0)
        self.my_pwm.stop()
        if clean_up:
            GPIO.cleanup()
def importtest(text):
    """Debug hook for import-time tracing; currently a no-op.

    Re-enable the print below to see when and how this module is imported.
    """
    # print(text)
    return None
# ===================== MAIN ===============================
# Trace whether the module was executed directly or imported
# (importtest is a no-op unless its debug print is re-enabled).
if __name__ == '__main__':
    importtest("main")
else:
    importtest("Imported {}".format(__name__))
# ===================== END =============================== | /rpimotorlib-3.2.tar.gz/rpimotorlib-3.2/RpiMotorLib/rpi_dc_lib.py | 0.549882 | 0.206834 | rpi_dc_lib.py | pypi |
import fnmatch
import os
class GitIgnore(object):
    """Manage the contents of a ``.gitignore`` file."""

    def __init__(self, path):
        """Load the file at *path* if it exists.

        :param str path: The full path to the .gitignore file. If it does not
        exist, the file will be created when running :py:meth:GitIgnore.write for
        the first time.
        """
        self.path = path
        # Current lines of the file; consulted to avoid duplicate entries.
        self.__lines = []
        if os.path.exists(self.path):
            with open(self.path, 'r') as fileobj:
                self.__lines = [self.__ensure_newline(raw) for raw in fileobj]
        # Tracks whether anything changed, so write() can skip disk I/O.
        self.modified = False

    def __ensure_newline(self, line):
        # Normalize every stored entry to be newline-terminated.
        return line if line.endswith('\n') else '%s\n' % line

    def add(self, line):
        """Append *line* unless an existing entry already matches it.

        :param str line: The entry to add to the file.
        """
        if not self.match(line):
            self.__lines.append(self.__ensure_newline(line))
            self.modified = True

    def match(self, line):
        """Return True when *line* matches an existing entry.

        Matching is wildcard-aware via :mod:`fnmatch`; leading slashes and
        trailing newlines are ignored on both sides.

        :param str line: The new line to match against existing ones.
        :rtype: bool
        """
        candidate = line.lstrip('/').rstrip('\n')
        return any(
            fnmatch.fnmatch(candidate, entry.lstrip('/').rstrip('\n'))
            for entry in self.__lines
        )

    def write(self):
        """Persist the entries to disk, but only if something changed."""
        if not self.modified:
            return
        with open(self.path, 'w') as fileobj:
            fileobj.writelines(self.__lines)
        self.modified = False
"""rpkincant conjure plugins for RPKI ASPA objects."""
from __future__ import annotations
import logging
import typing
from rpkimancer.cli import Args
from rpkimancer.cli.conjure import (ConjurePlugin,
DEFAULT_CA_AS_RESOURCES,
META_AS,
PluginReturn)
if typing.TYPE_CHECKING:
from rpkimancer.cert import CertificateAuthority
log = logging.getLogger(__name__)
META_AFI = "<afi>"
META_PROVIDER_AS = "<asn[:(4|6)]>"
def provider_as(spec: str) -> typing.Tuple[int, typing.Optional[int]]:
    """Argument type checker for an ``ASID[:AFI]`` pair.

    ``"65001:4"`` -> ``(65001, 4)``; ``"65001"`` -> ``(65001, None)``.
    Anything that is not an integer (pair) raises ValueError, which
    argparse reports as a usage error.
    """
    head, sep, tail = spec.partition(":")
    if sep:
        try:
            return int(head), int(tail)
        except ValueError:
            # Fall through and let int(spec) raise, matching argparse's
            # expectation of a ValueError for malformed input.
            pass
    return int(spec), None
class ConjureAspa(ConjurePlugin):
    """rpkincant conjure plugin for RPKI ASPA Objects."""

    def init_parser(self) -> None:
        """Set up command line argument parser."""
        # Customer ASN defaults to the first AS resource of the issuing CA.
        self.parser.add_argument("--aspa-customer-as",
                                 type=int,
                                 default=DEFAULT_CA_AS_RESOURCES[0],
                                 metavar=META_AS,
                                 help="ASPA customer AS "
                                      "(default: %(default)s)")
        # provider_as() parses each "<asn[:afi]>" token into (asn, afi|None).
        self.parser.add_argument("--aspa-provider-asns",
                                 nargs="+", type=provider_as,
                                 default=[(65001, None), (65002, 4)],
                                 metavar=META_PROVIDER_AS,
                                 help="ASPA provider ASNs "
                                      "(default: %(default)s)")

    def run(self,
            parsed_args: Args,
            ca: CertificateAuthority,
            *args: typing.Any,
            **kwargs: typing.Any) -> PluginReturn:
        """Run with the given arguments."""
        # create ASPA object
        # Imported lazily so the plugin can be discovered without pulling in
        # the signed-object machinery.
        from .sigobj import Aspa
        log.info("creating ASPA object")
        # NOTE(review): the Aspa instance is discarded here -- presumably
        # construction side-effects attach it to the issuing CA; confirm
        # against rpkimancer's SignedObject implementation.
        Aspa(issuer=ca,
             customer_as=parsed_args.aspa_customer_as,
             provider_as_set=parsed_args.aspa_provider_asns)
        return None
"""RPKI DOA implementation - draft-spaghetti-sidrops-rpki-doa."""
from __future__ import annotations
import logging
from typing import Any, Dict, Iterable, Optional, Tuple
from rpkimancer.asn1 import Content
from rpkimancer.asn1.mod import RpkiDiscardOriginAuthorization_2021
from rpkimancer.resources import (AFI, IPNetwork,
IpResourcesInfo, net_to_bitstring)
from rpkimancer.sigobj.base import EncapsulatedContent, SignedObject
from .communities import BgpCommunity
log = logging.getLogger(__name__)
DoaNetworkInfo = Tuple[IPNetwork, Optional[Tuple[int, int]]]
IPListRangeInfo = Iterable[DoaNetworkInfo]
class IPListRange(Content):
    """ASN.1 IPListRange type."""

    content_syntax = RpkiDiscardOriginAuthorization_2021.IPListRange

    def __init__(self, ip_addr_blocks: IPListRangeInfo):
        """Initialise IPListRange instance.

        :param ip_addr_blocks: iterable of ``(network, length_range)``
            pairs, where the optional length_range is a ``(min, max)``
            prefix-length tuple.
        """
        data = list()
        for network, len_range in ip_addr_blocks:
            # Every entry is encoded as an addressPrefix under its AFI ...
            item: Dict[str, Any] = {"addressFamily": AFI[network.version],
                                    "addressOrRange": ("addressPrefix",
                                                       net_to_bitstring(network))}  # noqa: E501
            # ... with an optional min/max prefix-length constraint.
            if len_range is not None:
                item["prefixLengthRange"] = {"minLength": len_range[0],
                                             "maxLength": len_range[1]}
            data.append(item)
        super().__init__(data)
class DiscardOriginAuthorizationEContent(EncapsulatedContent):
    """encapContentInfo for RPKI Discard Origin Authorizations."""

    content_type = RpkiDiscardOriginAuthorization_2021.id_ct_discardOriginAuthorization  # noqa: E501
    content_syntax = RpkiDiscardOriginAuthorization_2021.DiscardOriginAuthorization  # noqa: E501
    file_ext = "doa"
    # DOAs carry no AS number resources of their own.
    as_resources = None

    def __init__(self, *,
                 version: int = 0,
                 ip_addr_blocks: IPListRangeInfo,
                 origin_as: int,
                 peer_as_set: Optional[Iterable[int]] = None,
                 communities: Iterable[BgpCommunity]) -> None:
        """Initialise the encapContentInfo.

        :param version: eContent version number.
        :param ip_addr_blocks: prefixes (with optional length ranges) the
            discard communities apply to.
        :param origin_as: the authorising origin AS number.
        :param peer_as_set: optional set of peer AS numbers the request
            is limited to.
        :param communities: BGP communities requesting the discard action.
        """
        ip_addr_blocks_data = IPListRange(ip_addr_blocks).content_data
        data: Dict[str, Any] = {"version": version,
                                "ipAddrBlocks": ip_addr_blocks_data,
                                "originAsID": origin_as,
                                "communities": [c.choice_value()
                                                for c in communities]}
        # peerAsIDs is OPTIONAL in the ASN.1 module, so only set when given.
        if peer_as_set is not None:
            data["peerAsIDs"] = list(peer_as_set)
        super().__init__(data)
        # Keep the bare networks so ip_resources can report them.
        self._ip_resources = [network for network, _ in ip_addr_blocks]

    @property
    def ip_resources(self) -> Optional[IpResourcesInfo]:
        """Get the IP Address Resources covered by this DOA."""
        return self._ip_resources
class DiscardOriginAuthorization(SignedObject, econtent_type=RpkiDiscardOriginAuthorization_2021.ct_discardOriginAuthorization):  # noqa: E501
    """CMS ASN.1 ContentInfo for RPKI Discard Origin Authorizations."""

    # The eContent payload type wrapped by this CMS object.
    econtent_cls = DiscardOriginAuthorizationEContent
"""RPKI Signed Checklist implementation - draft-ietf-sidrops-rpki-rsc."""
from __future__ import annotations
import copy
import ipaddress
import json
import logging
import os
import typing
from rpkimancer.algorithms import DIGEST_ALGORITHMS, SHA256
from rpkimancer.asn1 import Interface
from rpkimancer.asn1.mod import RpkiSignedChecklist_2022
from rpkimancer.resources import (AFI, ASIdOrRange, ASIdOrRangeInfo,
AsResourcesInfo, IPAddressRange,
IPNetwork, IPNetworkBits,
IPRange, IPRangeBits, IpResourcesInfo,
bitstring_to_net, net_to_bitstring)
from rpkimancer.sigobj.base import EncapsulatedContentType, SignedObject
from .eecert import UnpublishedEECertificate
log = logging.getLogger(__name__)
ConstrainedAsResourcesInfo = typing.Iterable[ASIdOrRangeInfo]
ConstrainedIPAddressFamilyInfo = typing.Union[IPNetwork, IPRange]
ConstrainedIpResourcesInfo = typing.Iterable[ConstrainedIPAddressFamilyInfo]
class ConstrainedASIdentifiers(Interface):
    """ASN.1 ConstrainedASIdentifiers type."""

    content_syntax = RpkiSignedChecklist_2022.ConstrainedASIdentifiers

    def __init__(self, as_resources: ConstrainedAsResourcesInfo) -> None:
        """Initialise instance from python data.

        :param as_resources: iterable of AS numbers / AS ranges to encode.
        """
        # Each entry is normalised through ASIdOrRange before encoding.
        data = {"asnum": [ASIdOrRange(a).content_data for a in as_resources]}
        super().__init__(data)
class ConstrainedIPAddrBlocks(Interface):
    """ASN.1 ConstrainedIPAddrBlocks type."""

    content_syntax = RpkiSignedChecklist_2022.ConstrainedIPAddrBlocks

    def __init__(self, ip_resources: ConstrainedIpResourcesInfo) -> None:
        """Initialise instance from python data.

        :param ip_resources: iterable of IP networks and ``(low, high)``
            address-range tuples, mixed freely across address families.
        """
        log.info(f"preparing data for {self}")
        net_data_type = typing.Tuple[str, IPNetworkBits]
        entry_type = typing.Tuple[int, net_data_type]

        def _net_entry(data: ConstrainedIPAddressFamilyInfo) -> entry_type:
            # Networks encode as addressPrefix; (low, high) address pairs
            # encode as addressRange; anything else is rejected.
            if isinstance(data, (ipaddress.IPv4Network,
                                 ipaddress.IPv6Network)):
                return data.version, ("addressPrefix", net_to_bitstring(data))
            elif isinstance(data[0], (ipaddress.IPv4Address,
                                      ipaddress.IPv6Address)):
                return data[0].version, ("addressRange",
                                         IPAddressRange(data).content_data)
            else:
                raise ValueError
        # Group the encoded entries by address family, then drop families
        # that ended up with no entries.
        by_afi = {afi_data: [net_data
                             for net_version, net_data
                             in map(_net_entry, ip_resources)
                             if net_version == afi_version]
                  for (afi_version, afi_data) in AFI.items()}
        data = [{"addressFamily": afi, "addressesOrRanges": entries}
                for afi, entries in by_afi.items() if entries]
        super().__init__(data)
class SignedChecklistContentType(EncapsulatedContentType):
    """encapContentInfo for RPKI Signed Checklists."""

    asn1_definition = RpkiSignedChecklist_2022.ct_rpkiSignedChecklist
    file_ext = "sig"

    def __init__(self, *,
                 paths: typing.Iterable[str],
                 anon_data: typing.Optional[typing.Iterable[bytes]] = None,
                 version: int = 0,
                 as_resources: typing.Optional[ConstrainedAsResourcesInfo] = None,  # noqa: E501
                 ip_resources: typing.Optional[ConstrainedIpResourcesInfo] = None,  # noqa: E501
                 digest_algorithm: typing.Tuple[int, ...] = SHA256) -> None:
        """Initialise the encapContentInfo.

        :param paths: files to hash into named checklist entries.
        :param anon_data: optional raw blobs hashed into nameless entries.
        :param version: eContent version number.
        :param as_resources: optional AS resources covered by the checklist.
        :param ip_resources: optional IP resources covered by the checklist.
        :param digest_algorithm: OID tuple selecting the hash algorithm.
        """
        checklist = list()
        alg = DIGEST_ALGORITHMS[digest_algorithm]
        for path in paths:
            with open(path, "rb") as f:
                content = f.read()
            digest = alg(content).digest()
            checklist.append({"fileName": os.path.basename(path),
                              "hash": digest})
        if anon_data is not None:
            # Anonymous entries carry only the digest, no fileName.
            for item in anon_data:
                digest = alg(item).digest()
                checklist.append({"hash": digest})
        data: typing.Dict[str, typing.Any] = {"version": version,
                                              "digestAlgorithm": {"algorithm": digest_algorithm},  # noqa: E501
                                              "checkList": checklist,
                                              "resources": {}}
        # Both resource categories are optional; absent ones are simply
        # omitted from the resources block.
        if ip_resources is not None:
            data["resources"]["ipAddrBlocks"] = ConstrainedIPAddrBlocks(ip_resources).content_data  # noqa: E501
        if as_resources is not None:
            data["resources"]["asID"] = ConstrainedASIdentifiers(as_resources).content_data  # noqa: E501
        super().__init__(data)
        self._as_resources = as_resources
        self._ip_resources = ip_resources

    @property
    def as_resources(self) -> typing.Optional[AsResourcesInfo]:
        """Get the AS Number Resources covered by this Checklist."""
        return self._as_resources

    @property
    def ip_resources(self) -> typing.Optional[IpResourcesInfo]:
        """Get the IP Address Resources covered by this Checklist."""
        return self._ip_resources

    def to_json(self) -> str:
        """Serialize as JSON.

        Produces a human-readable rendering: digests as hex, the digest
        algorithm OID dotted, and address data as printable
        prefixes/ranges.
        """
        data = copy.deepcopy(self.content_data)
        afi_bytes_version_map = {v: k for k, v in AFI.items()}
        data["digestAlgorithm"]["algorithm"] = ".".join(str(i)
                                                        for i in self.content_data["digestAlgorithm"]["algorithm"])  # noqa: E501
        for i, entry in enumerate(self.content_data["checkList"]):
            data_entry = data["checkList"][i]
            data_entry["hash"] = entry["hash"].hex()
        # BUG FIX: asID and ipAddrBlocks are optional in __init__, so guard
        # the lookups -- previously a checklist built without AS or IP
        # resources raised KeyError here.
        resources = self.content_data["resources"]
        for i, asnum in enumerate(resources.get("asID", {}).get("asnum", [])):
            data["resources"]["asID"]["asnum"][i] = {asnum[0]: asnum[1]}
        for i, addr_block in enumerate(resources.get("ipAddrBlocks", [])):
            data_addr_block = data["resources"]["ipAddrBlocks"][i]
            version = afi_bytes_version_map[addr_block["addressFamily"]]
            data_addr_block["addressFamily"] = f"ipv{version}"
            for j, addr in enumerate(addr_block["addressesOrRanges"]):
                addr_info: typing.Union[str, typing.Dict[str, str]]
                if addr[0] == "addressPrefix":
                    network = bitstring_to_net(addr[1], version)
                    addr_info = str(network)
                elif addr[0] == "addressRange":
                    min_addr, max_addr = bitstring_to_addr_range((addr[1]["min"],  # noqa: E501
                                                                  addr[1]["max"]),  # noqa: E501
                                                                 version)
                    addr_info = {"min": str(min_addr), "max": str(max_addr)}
                else:
                    raise ValueError
                data_addr_block["addressesOrRanges"][j] = {addr[0]: addr_info}
        log.info(data)
        return json.dumps(data, indent=2)
class SignedChecklist(SignedObject[SignedChecklistContentType]):
    """CMS ASN.1 ContentInfo for RPKI Signed Checklists."""

    # RSCs are signed with a one-off EE certificate that is not published
    # in the RPKI repository.
    ee_cert_cls = UnpublishedEECertificate

    def publish(self, *,
                rsc_output_dir: typing.Optional[str] = None,
                **kwargs: typing.Any) -> None:
        """Optionally out-of-tree publication.

        When *rsc_output_dir* is given, the DER object is written there
        (creating the directory if needed) instead of the usual
        repository publication point.
        """
        if rsc_output_dir is not None:
            os.makedirs(rsc_output_dir, exist_ok=True)
            with open(os.path.join(rsc_output_dir, self.file_name),
                      "wb") as f:
                f.write(self.to_der())
        else:
            super().publish(**kwargs)
# TODO: move into rpkimancer.resources
# TODO: move into rpkimancer.resources
def bitstring_to_addr_range(bits: IPRangeBits, version: int) -> IPRange:
    """Convert a pair of ASN.1 BIT STRING representations to an IPRange.

    Each element of *bits* is a ``(value, bit_length)`` tuple holding the
    most-significant bits of an address.  The lower bound is padded with
    zero bits, the upper bound with one bits, yielding the full inclusive
    range covered by the two prefixes.

    :param bits: ``(min, max)`` pair of BIT STRING values.
    :param version: IP version, 4 or 6 (anything else raises KeyError).
    :return: ``(low, high)`` pair of address objects.
    """
    len_map = {4: ipaddress.IPV4LENGTH, 6: ipaddress.IPV6LENGTH}
    cls_map = {4: ipaddress.IPv4Address, 6: ipaddress.IPv6Address}
    addr_len = len_map[version]
    addr_cls = cls_map[version]
    (low_bits, low_len), (high_bits, high_len) = bits
    # Explicit parentheses document the shift/arithmetic precedence the
    # original relied on implicitly.
    low = addr_cls(low_bits << (addr_len - low_len))
    high = addr_cls(((high_bits + 1) << (addr_len - high_len)) - 1)
    # typing.cast() was a runtime no-op that forced a runtime dependency on
    # the typing-only IPRange alias; the annotation alone suffices.
    return low, high
import time
import json
from typing import Any, Callable
import numpy as np
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy arrays.

    Arrays are serialized via ``ndarray.tolist()``; every other type is
    delegated to the stock ``json.JSONEncoder``.
    """

    def default(self, obj: Any) -> Any:
        """Return a JSON-serializable form of *obj*."""
        if not isinstance(obj, np.ndarray):
            return super().default(obj)
        return obj.tolist()
def make_array(x: Any, dtype=None) -> np.ndarray:
    """Coerce *x* into a numpy array.

    Sequence-like inputs (list/tuple/set/ndarray) are converted with
    ``np.asarray`` (honouring *dtype*); any other value becomes a
    one-element array.  Used to homogenize inputs across the api.
    """
    if isinstance(x, (list, tuple, set, np.ndarray)):
        return np.asarray(x, dtype)
    return np.array([x])
def encode_arrays(data_keys: list, data: list, dtype=None) -> str:
    """Serialize parallel key/value lists as a JSON object of arrays.

    Each value is first coerced with :func:`make_array` (honouring
    *dtype*), then the mapping is dumped using :class:`NumpyEncoder`.
    """
    payload = {key: make_array(value, dtype)
               for key, value in zip(data_keys, data)}
    return json.dumps(payload, cls=NumpyEncoder)
def decode_arrays(data: Any, dtype=None) -> dict:
    """Decode a JSON object (or pre-parsed mapping) into numpy arrays.

    Parameters
    ----------
    data : str | bytes | bytearray | dict
        A JSON document, or an already-parsed mapping whose values should
        be converted to arrays (converted in place).
    dtype : optional
        numpy dtype forwarded to :func:`make_array`.

    Returns
    -------
    dict
        The mapping with every value replaced by a numpy array.
    """
    if isinstance(data, (str, bytes, bytearray)):
        data = json.loads(data)
    for key, val in data.items():
        # BUG FIX: dtype was accepted but never forwarded, so the
        # caller-requested element type was silently ignored.
        data[key] = make_array(val, dtype)
    return data
def timer(func: Callable) -> Callable:
    """Decorator that reports how long each call to *func* takes."""
    def wrapped_func(*args: Any, **kwargs: Any) -> Any:
        started = time.time()
        result = func(*args, **kwargs)
        print(f'Time to evaluate {func.__name__}(): {time.time() - started} s')
        return result
    return wrapped_func
def scfstb_m3m3(value: float) -> float:
    """Convert a gas-oil ratio from standard cubic feet per stock tank
    barrel (scf/stb) to cubic meters per cubic meter (m3/m3)."""
    return value * 0.17811
def m3m3_scfstb(value: float) -> float:
    """Convert a gas-oil ratio from cubic meters per cubic meter (m3/m3)
    to standard cubic feet per stock tank barrel (scf/stb)."""
    return value / 0.17811
import logging
import sys
import time
import codecs
import serial
import struct
from collections import namedtuple
# Request framing bytes: every request to the sensor starts with SYNC_BYTE;
# response descriptors start with SYNC_BYTE + SYNC_BYTE2.
SYNC_BYTE = b'\xA5'
SYNC_BYTE2 = b'\x5A'
# Single-request command bytes.
GET_INFO_BYTE = b'\x50'
GET_HEALTH_BYTE = b'\x52'
STOP_BYTE = b'\x25'
RESET_BYTE = b'\x40'
# Scan mode -> command byte, expected descriptor data type, response size.
_SCAN_TYPE = {
    'normal': {'byte': b'\x20', 'response': 129, 'size': 5},
    'force': {'byte': b'\x21', 'response': 129, 'size': 5},
    'express': {'byte': b'\x82', 'response': 130, 'size': 84},
}
# Fixed packet lengths and descriptor type codes.
DESCRIPTOR_LEN = 7
INFO_LEN = 20
HEALTH_LEN = 3
INFO_TYPE = 4
HEALTH_TYPE = 6
# Constants & Command to start A2 motor
MAX_MOTOR_PWM = 1023
DEFAULT_MOTOR_PWM = 660
SET_PWM_BYTE = b'\xF0'
# Health status codes reported by the GET_HEALTH response.
_HEALTH_STATUSES = {
    0: 'Good',
    1: 'Warning',
    2: 'Error',
}
class RPLidarException(Exception):
'''Basic exception class for RPLidar'''
def _b2i(byte):
'''Converts byte to integer (for Python 2 compatability)'''
return byte if int(sys.version[0]) == 3 else ord(byte)
def _showhex(signal):
'''Converts string bytes to hex representation (useful for debugging)'''
return [format(_b2i(b), '#02x') for b in signal]
def _process_scan(raw):
'''Processes input raw data and returns measurement data'''
new_scan = bool(_b2i(raw[0]) & 0b1)
inversed_new_scan = bool((_b2i(raw[0]) >> 1) & 0b1)
quality = _b2i(raw[0]) >> 2
if new_scan == inversed_new_scan:
raise RPLidarException('New scan flags mismatch')
check_bit = _b2i(raw[1]) & 0b1
if check_bit != 1:
raise RPLidarException('Check bit not equal to 1')
angle = ((_b2i(raw[1]) >> 1) + (_b2i(raw[2]) << 7)) / 64.
distance = (_b2i(raw[3]) + (_b2i(raw[4]) << 8)) / 4.
return new_scan, quality, angle, distance
def _process_express_scan(data, new_angle, trame):
new_scan = (new_angle < data.start_angle) & (trame == 1)
angle = (data.start_angle + (
(new_angle - data.start_angle) % 360
)/32*trame - data.angle[trame-1]) % 360
distance = data.distance[trame-1]
return new_scan, None, angle, distance
class RPLidar(object):
    '''Class for communicating with RPLidar rangefinder scanners'''

    def __init__(self, port, baudrate=115200, timeout=1, logger=None):
        '''Initilize RPLidar object for communicating with the sensor.

        Parameters
        ----------
        port : str
            Serial port name to which sensor is connected
        baudrate : int, optional
            Baudrate for serial connection (the default is 115200)
        timeout : float, optional
            Serial port connection timeout in seconds (the default is 1)
        logger : logging.Logger instance, optional
            Logger instance, if none is provided new instance is created
        '''
        self._serial = None
        self.port = port
        self.baudrate = baudrate
        self.timeout = timeout
        self._motor_speed = DEFAULT_MOTOR_PWM
        # Scanning state triple: [active?, response size in bytes, scan type].
        self.scanning = [False, 0, 'normal']
        # Express-mode decode state: cabin index (32 forces a packet refill)
        # and the most recently read ExpressPacket (False until first read).
        self.express_trame = 32
        self.express_data = False
        self.motor_running = None
        if logger is None:
            logger = logging.getLogger('rplidar')
        self.logger = logger
        self.connect()

    def connect(self):
        '''Connects to the serial port with the name `self.port`. If it was
        connected to another serial port disconnects from it first.'''
        if self._serial is not None:
            self.disconnect()
        try:
            self._serial = serial.Serial(
                self.port, self.baudrate,
                parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
                timeout=self.timeout)
        except serial.SerialException as err:
            raise RPLidarException('Failed to connect to the sensor '
                                   'due to: %s' % err)

    def disconnect(self):
        '''Disconnects from the serial port'''
        if self._serial is None:
            return
        self._serial.close()

    def _set_pwm(self, pwm):
        # Motor speed command (A2 models): little-endian uint16 PWM payload.
        payload = struct.pack("<H", pwm)
        self._send_payload_cmd(SET_PWM_BYTE, payload)

    @property
    def motor_speed(self):
        # Current motor PWM duty value (0..MAX_MOTOR_PWM).
        return self._motor_speed

    @motor_speed.setter
    def motor_speed(self, pwm):
        assert(0 <= pwm <= MAX_MOTOR_PWM)
        self._motor_speed = pwm
        # Apply immediately only if the motor is already spinning.
        if self.motor_running:
            self._set_pwm(self._motor_speed)

    def start_motor(self):
        '''Starts sensor motor'''
        self.logger.info('Starting motor')
        # For A1: DTR low enables the motor.
        self._serial.setDTR(False)
        # For A2: PWM command sets the speed.
        self._set_pwm(self._motor_speed)
        self.motor_running = True

    def stop_motor(self):
        '''Stops sensor motor'''
        self.logger.info('Stoping motor')
        # For A2
        self._set_pwm(0)
        time.sleep(.001)
        # For A1
        self._serial.setDTR(True)
        self.motor_running = False

    def _send_payload_cmd(self, cmd, payload):
        '''Sends `cmd` command with `payload` to the sensor'''
        try:
            size = struct.pack('B', len(payload))
            req = SYNC_BYTE + cmd + size + payload
            # Trailing byte is the XOR checksum over the whole request.
            checksum = 0
            for v in struct.unpack('B'*len(req), req):
                checksum ^= v
            req += struct.pack('B', checksum)
            self._serial.write(req)
            self.logger.debug('Command sent: %s' % _showhex(req))
        except Exception as e:
            raise RPLidarException('Did device disconnect?' 'Due to %s' % e)

    def _send_cmd(self, cmd):
        '''Sends `cmd` command to the sensor'''
        req = SYNC_BYTE + cmd
        self._serial.write(req)
        self.logger.debug('Command sent: %s' % _showhex(req))

    def _read_descriptor(self):
        '''Reads descriptor packet'''
        descriptor = self._serial.read(DESCRIPTOR_LEN)
        self.logger.debug('Received descriptor: %s', _showhex(descriptor))
        if len(descriptor) != DESCRIPTOR_LEN:
            raise RPLidarException('Descriptor length mismatch')
        elif not descriptor.startswith(SYNC_BYTE + SYNC_BYTE2):
            raise RPLidarException('Incorrect descriptor starting bytes')
        # Returns (data size, single-response?, data type code).
        is_single = _b2i(descriptor[-2]) == 0
        return _b2i(descriptor[2]), is_single, _b2i(descriptor[-1])

    def _read_response(self, dsize):
        '''Reads response packet with length of `dsize` bytes'''
        self.logger.debug('Trying to read response: %d bytes', dsize)
        # Busy-wait until the full packet is buffered before reading.
        while self._serial.inWaiting() < dsize:
            time.sleep(0.001)
        data = self._serial.read(dsize)
        self.logger.debug('Received data: %s', _showhex(data))
        return data

    def get_info(self):
        '''Get device information

        Returns
        -------
        dict
            Dictionary with the sensor information
        '''
        # NOTE(review): returns an error *string* instead of raising when
        # the buffer is not empty -- callers must check the return type.
        if self._serial.inWaiting() > 0:
            return ('Data in buffer, you can\'t have info ! '
                    'Run clean_input() to emptied the buffer.')
        self._send_cmd(GET_INFO_BYTE)
        dsize, is_single, dtype = self._read_descriptor()
        if dsize != INFO_LEN:
            raise RPLidarException('Wrong get_info reply length')
        if not is_single:
            raise RPLidarException('Not a single response mode')
        if dtype != INFO_TYPE:
            raise RPLidarException('Wrong response data type')
        raw = self._read_response(dsize)
        serialnumber = codecs.encode(raw[4:], 'hex').upper()
        serialnumber = codecs.decode(serialnumber, 'ascii')
        data = {
            'model': _b2i(raw[0]),
            'firmware': (_b2i(raw[2]), _b2i(raw[1])),
            'hardware': _b2i(raw[3]),
            'serialnumber': serialnumber,
        }
        return data

    def get_health(self):
        '''Get device health state. When the core system detects some
        potential risk that may cause hardware failure in the future,
        the returned status value will be 'Warning'. But sensor can still work
        as normal. When sensor is in the Protection Stop state, the returned
        status value will be 'Error'. In case of warning or error statuses
        non-zero error code will be returned.

        Returns
        -------
        status : str
            'Good', 'Warning' or 'Error' statuses
        error_code : int
            The related error code that caused a warning/error.
        '''
        if self._serial.inWaiting() > 0:
            self.clean_input()
        self.logger.info('Asking for health')
        self._send_cmd(GET_HEALTH_BYTE)
        dsize, is_single, dtype = self._read_descriptor()
        if dsize != HEALTH_LEN:
            raise RPLidarException('Wrong get_info reply length')
        if not is_single:
            raise RPLidarException('Not a single response mode')
        if dtype != HEALTH_TYPE:
            raise RPLidarException('Wrong response data type')
        raw = self._read_response(dsize)
        status = _HEALTH_STATUSES[_b2i(raw[0])]
        error_code = (_b2i(raw[1]) << 8) + _b2i(raw[2])
        return status, error_code

    def clean_input(self):
        '''Clean input buffer by reading all available data'''
        if self.scanning[0]:
            return 'Cleanning not allowed during scanning process active !'
        self._serial.flushInput()
        # Reset express-mode decode state along with the buffer.
        self.express_trame = 32
        self.express_data = False

    def stop(self):
        '''Stops scanning process, disables laser diode and the measurement
        system, moves sensor to the idle state.'''
        self.logger.info('Stopping scanning')
        self._send_cmd(STOP_BYTE)
        time.sleep(.1)
        self.scanning[0] = False
        self.clean_input()

    def start(self, scan_type='normal'):
        '''Start the scanning process

        Parameters
        ----------
        scan : normal, force or express.
        '''
        if self.scanning[0]:
            return 'Scanning already running !'
        # NOTE(review): the string literal below is a stray no-op left from
        # an older docstring.
        '''Start the scanning process, enable laser diode and the
        measurement system'''
        status, error_code = self.get_health()
        self.logger.debug('Health status: %s [%d]', status, error_code)
        # An 'Error' status may clear after a reset; retry once before
        # giving up.
        if status == _HEALTH_STATUSES[2]:
            self.logger.warning('Trying to reset sensor due to the error. '
                                'Error code: %d', error_code)
            self.reset()
            status, error_code = self.get_health()
            if status == _HEALTH_STATUSES[2]:
                raise RPLidarException('RPLidar hardware failure. '
                                       'Error code: %d' % error_code)
        elif status == _HEALTH_STATUSES[1]:
            self.logger.warning('Warning sensor status detected! '
                                'Error code: %d', error_code)
        cmd = _SCAN_TYPE[scan_type]['byte']
        self.logger.info('starting scan process in %s mode' % scan_type)
        # Express mode takes a 5-byte (reserved, all-zero) payload.
        if scan_type == 'express':
            self._send_payload_cmd(cmd, b'\x00\x00\x00\x00\x00')
        else:
            self._send_cmd(cmd)
        dsize, is_single, dtype = self._read_descriptor()
        if dsize != _SCAN_TYPE[scan_type]['size']:
            raise RPLidarException('Wrong get_info reply length')
        if is_single:
            raise RPLidarException('Not a multiple response mode')
        if dtype != _SCAN_TYPE[scan_type]['response']:
            raise RPLidarException('Wrong response data type')
        self.scanning = [True, dsize, scan_type]

    def reset(self):
        '''Resets sensor core, reverting it to a similar state as it has
        just been powered up.'''
        self.logger.info('Reseting the sensor')
        self._send_cmd(RESET_BYTE)
        time.sleep(2)
        self.clean_input()

    def iter_measures(self, scan_type='normal', max_buf_meas=3000):
        '''Iterate over measures. Note that consumer must be fast enough,
        otherwise data will be accumulated inside buffer and consumer will get
        data with increasing lag.

        Parameters
        ----------
        max_buf_meas : int or False if you want unlimited buffer
            Maximum number of bytes to be stored inside the buffer. Once
            numbe exceeds this limit buffer will be emptied out.

        Yields
        ------
        new_scan : bool
            True if measures belongs to a new scan
        quality : int
            Reflected laser pulse strength
        angle : float
            The measure heading angle in degree unit [0, 360)
        distance : float
            Measured object distance related to the sensor's rotation center.
            In millimeter unit. Set to 0 when measure is invalid.
        '''
        self.start_motor()
        if not self.scanning[0]:
            self.start(scan_type)
        while True:
            dsize = self.scanning[1]
            # Drop a lagging buffer by restarting the scan when it grows
            # beyond max_buf_meas bytes.
            if max_buf_meas:
                data_in_buf = self._serial.inWaiting()
                if data_in_buf > max_buf_meas:
                    self.logger.warning(
                        'Too many bytes in the input buffer: %d/%d. '
                        'Cleaning buffer...',
                        data_in_buf, max_buf_meas)
                    self.stop()
                    self.start(self.scanning[2])
            if self.scanning[2] == 'normal':
                raw = self._read_response(dsize)
                yield _process_scan(raw)
            if self.scanning[2] == 'express':
                # Every 32 cabins, read the next packet; decoding always
                # uses the previous packet plus the new start angle.
                if self.express_trame == 32:
                    self.express_trame = 0
                    if not self.express_data:
                        self.logger.debug('reading first time bytes')
                        self.express_data = ExpressPacket.from_string(
                            self._read_response(dsize))
                    self.express_old_data = self.express_data
                    self.logger.debug('set old_data with start_angle %f',
                                      self.express_old_data.start_angle)
                    self.express_data = ExpressPacket.from_string(
                        self._read_response(dsize))
                    self.logger.debug('set new_data with start_angle %f',
                                      self.express_data.start_angle)
                self.express_trame += 1
                self.logger.debug('process scan of frame %d with angle : '
                                  '%f and angle new : %f', self.express_trame,
                                  self.express_old_data.start_angle,
                                  self.express_data.start_angle)
                yield _process_express_scan(self.express_old_data,
                                            self.express_data.start_angle,
                                            self.express_trame)

    def iter_scans(self, scan_type='normal', max_buf_meas=3000, min_len=5):
        '''Iterate over scans. Note that consumer must be fast enough,
        otherwise data will be accumulated inside buffer and consumer will get
        data with increasing lag.

        Parameters
        ----------
        max_buf_meas : int
            Maximum number of measures to be stored inside the buffer. Once
            numbe exceeds this limit buffer will be emptied out.
        min_len : int
            Minimum number of measures in the scan for it to be yelded.

        Yields
        ------
        scan : list
            List of the measures. Each measurment is tuple with following
            format: (quality, angle, distance). For values description please
            refer to `iter_measures` method's documentation.
        '''
        scan_list = []
        iterator = self.iter_measures(scan_type, max_buf_meas)
        for new_scan, quality, angle, distance in iterator:
            if new_scan:
                # NOTE(review): strictly greater-than, so scans of exactly
                # min_len measures are discarded -- confirm intent.
                if len(scan_list) > min_len:
                    yield scan_list
                scan_list = []
            # Zero distance marks an invalid measurement; skip it.
            if distance > 0:
                scan_list.append((quality, angle, distance))
class ExpressPacket(namedtuple('express_packet',
                               'distance angle new_scan start_angle')):
    """Decoded 84-byte express-scan packet: 32 (distance, angle-delta)
    cabins plus the packet start angle and new-scan flag."""

    sync1 = 0xa           # expected high nibble of byte 0
    sync2 = 0x5           # expected high nibble of byte 1
    sign = {0: 1, 1: -1}  # angle-delta sign bit -> multiplier

    @classmethod
    def from_string(cls, data):
        """Parse a raw express scan response into an ExpressPacket.

        Raises ValueError on bad sync nibbles or checksum mismatch.
        """
        packet = bytearray(data)
        if (packet[0] >> 4) != cls.sync1 or (packet[1] >> 4) != cls.sync2:
            raise ValueError('try to parse corrupted data ({})'.format(packet))
        # XOR checksum over the payload must equal the nibble pair stored
        # in the two header bytes (low nibble in byte 0, high in byte 1).
        checksum = 0
        for byte in packet[2:]:
            checksum ^= byte
        expected = (packet[0] & 0b00001111) + ((packet[1] & 0b00001111) << 4)
        if checksum != expected:
            raise ValueError('Invalid checksum ({})'.format(packet))
        new_scan = packet[3] >> 7
        start_angle = (packet[2] + ((packet[3] & 0b01111111) << 8)) / 64
        distances = []
        angles = []

        def _cabin(dist_lsb, dist_msb, delta_bits):
            # One sample: 14-bit distance + 5-bit signed angle delta.
            distances.append((packet[dist_lsb] >> 2) + (packet[dist_msb] << 6))
            magnitude = delta_bits + ((packet[dist_lsb] & 0b00000001) << 4)
            direction = cls.sign[(packet[dist_lsb] & 0b00000010) >> 1]
            angles.append(magnitude / 8 * direction)

        # 16 five-byte cabins of two samples each; the shared fifth byte
        # carries the two angle-delta low nibbles.
        for base in range(4, 84, 5):
            _cabin(base, base + 1, packet[base + 4] & 0b00001111)
            _cabin(base + 2, base + 3, packet[base + 4] >> 4)
        return cls(tuple(distances), tuple(angles), new_scan, start_angle)
import logging
import sys
import time
import codecs
import serial
import struct
from collections import namedtuple
# Request framing bytes: every request to the sensor starts with SYNC_BYTE;
# response descriptors start with SYNC_BYTE + SYNC_BYTE2.
SYNC_BYTE = b'\xA5'
SYNC_BYTE2 = b'\x5A'
# Single-request command bytes.
GET_INFO_BYTE = b'\x50'
GET_HEALTH_BYTE = b'\x52'
STOP_BYTE = b'\x25'
RESET_BYTE = b'\x40'
# Scan mode -> command byte, expected descriptor data type, response size.
_SCAN_TYPE = {
    'normal': {'byte': b'\x20', 'response': 129, 'size': 5},
    'force': {'byte': b'\x21', 'response': 129, 'size': 5},
    'express': {'byte': b'\x82', 'response': 130, 'size': 84},
}
# Fixed packet lengths and descriptor type codes.
DESCRIPTOR_LEN = 7
INFO_LEN = 20
HEALTH_LEN = 3
INFO_TYPE = 4
HEALTH_TYPE = 6
# Constants & Command to start A2 motor
MAX_MOTOR_PWM = 1023
DEFAULT_MOTOR_PWM = 660
SET_PWM_BYTE = b'\xF0'
# Health status codes reported by the GET_HEALTH response.
_HEALTH_STATUSES = {
    0: 'Good',
    1: 'Warning',
    2: 'Error',
}
class RPLidarException(Exception):
'''Basic exception class for RPLidar'''
def _b2i(byte):
'''Converts byte to integer (for Python 2 compatability)'''
return byte if int(sys.version[0]) == 3 else ord(byte)
def _showhex(signal):
'''Converts string bytes to hex representation (useful for debugging)'''
return [format(_b2i(b), '#02x') for b in signal]
def _process_scan(raw):
'''Processes input raw data and returns measurement data'''
new_scan = bool(_b2i(raw[0]) & 0b1)
inversed_new_scan = bool((_b2i(raw[0]) >> 1) & 0b1)
quality = _b2i(raw[0]) >> 2
if new_scan == inversed_new_scan:
raise RPLidarException('New scan flags mismatch')
check_bit = _b2i(raw[1]) & 0b1
if check_bit != 1:
raise RPLidarException('Check bit not equal to 1')
angle = ((_b2i(raw[1]) >> 1) + (_b2i(raw[2]) << 7)) / 64.
distance = (_b2i(raw[3]) + (_b2i(raw[4]) << 8)) / 4.
return new_scan, quality, angle, distance
def _process_express_scan(data, new_angle, trame):
new_scan = (new_angle < data.start_angle) & (trame == 1)
angle = (data.start_angle + (
(new_angle - data.start_angle) % 360
)/32*trame - data.angle[trame-1]) % 360
distance = data.distance[trame-1]
return new_scan, None, angle, distance
class RPLidar(object):
'''Class for communicating with RPLidar rangefinder scanners'''
def __init__(self, port, baudrate=115200, timeout=1, logger=None):
'''Initilize RPLidar object for communicating with the sensor.
Parameters
----------
port : str
Serial port name to which sensor is connected
baudrate : int, optional
Baudrate for serial connection (the default is 115200)
timeout : float, optional
Serial port connection timeout in seconds (the default is 1)
logger : logging.Logger instance, optional
Logger instance, if none is provided new instance is created
'''
self._serial = None
self.port = port
self.baudrate = baudrate
self.timeout = timeout
self._motor_speed = DEFAULT_MOTOR_PWM
self.scanning = [False, 0, 'normal']
self.express_trame = 32
self.express_data = False
self.motor_running = None
if logger is None:
logger = logging.getLogger('rplidar')
self.logger = logger
self.connect()
def connect(self):
'''Connects to the serial port with the name `self.port`. If it was
connected to another serial port disconnects from it first.'''
if self._serial is not None:
self.disconnect()
try:
self._serial = serial.Serial(
self.port, self.baudrate,
parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
timeout=self.timeout)
except serial.SerialException as err:
raise RPLidarException('Failed to connect to the sensor '
'due to: %s' % err)
def disconnect(self):
'''Disconnects from the serial port'''
if self._serial is None:
return
self._serial.close()
def _set_pwm(self, pwm):
payload = struct.pack("<H", pwm)
self._send_payload_cmd(SET_PWM_BYTE, payload)
@property
def motor_speed(self):
return self._motor_speed
@motor_speed.setter
def motor_speed(self, pwm):
assert(0 <= pwm <= MAX_MOTOR_PWM)
self._motor_speed = pwm
if self.motor_running:
self._set_pwm(self._motor_speed)
def start_motor(self):
'''Starts sensor motor'''
self.logger.info('Starting motor')
# For A1
self._serial.setDTR(False)
# For A2
self._set_pwm(self._motor_speed)
self.motor_running = True
def stop_motor(self):
'''Stops sensor motor'''
self.logger.info('Stoping motor')
# For A2
self._set_pwm(0)
time.sleep(.001)
# For A1
self._serial.setDTR(True)
self.motor_running = False
def _send_payload_cmd(self, cmd, payload):
'''Sends `cmd` command with `payload` to the sensor'''
size = struct.pack('B', len(payload))
req = SYNC_BYTE + cmd + size + payload
checksum = 0
for v in struct.unpack('B'*len(req), req):
checksum ^= v
req += struct.pack('B', checksum)
self._serial.write(req)
self.logger.debug('Command sent: %s' % _showhex(req))
def _send_cmd(self, cmd):
'''Sends `cmd` command to the sensor'''
req = SYNC_BYTE + cmd
self._serial.write(req)
self.logger.debug('Command sent: %s' % _showhex(req))
def _read_descriptor(self):
'''Reads descriptor packet'''
descriptor = self._serial.read(DESCRIPTOR_LEN)
self.logger.debug('Received descriptor: %s', _showhex(descriptor))
if len(descriptor) != DESCRIPTOR_LEN:
raise RPLidarException('Descriptor length mismatch')
elif not descriptor.startswith(SYNC_BYTE + SYNC_BYTE2):
raise RPLidarException('Incorrect descriptor starting bytes')
is_single = _b2i(descriptor[-2]) == 0
return _b2i(descriptor[2]), is_single, _b2i(descriptor[-1])
def _read_response(self, dsize):
'''Reads response packet with length of `dsize` bytes'''
self.logger.debug('Trying to read response: %d bytes', dsize)
while self._serial.inWaiting() < dsize:
time.sleep(0.001)
data = self._serial.read(dsize)
self.logger.debug('Received data: %s', _showhex(data))
return data
def get_info(self):
'''Get device information
Returns
-------
dict
Dictionary with the sensor information
'''
if self._serial.inWaiting() > 0:
return ('Data in buffer, you can\'t have info ! '
'Run clean_input() to emptied the buffer.')
self._send_cmd(GET_INFO_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != INFO_LEN:
raise RPLidarException('Wrong get_info reply length')
if not is_single:
raise RPLidarException('Not a single response mode')
if dtype != INFO_TYPE:
raise RPLidarException('Wrong response data type')
raw = self._read_response(dsize)
serialnumber = codecs.encode(raw[4:], 'hex').upper()
serialnumber = codecs.decode(serialnumber, 'ascii')
data = {
'model': _b2i(raw[0]),
'firmware': (_b2i(raw[2]), _b2i(raw[1])),
'hardware': _b2i(raw[3]),
'serialnumber': serialnumber,
}
return data
def get_health(self):
'''Get device health state. When the core system detects some
potential risk that may cause hardware failure in the future,
the returned status value will be 'Warning'. But sensor can still work
as normal. When sensor is in the Protection Stop state, the returned
status value will be 'Error'. In case of warning or error statuses
non-zero error code will be returned.
Returns
-------
status : str
'Good', 'Warning' or 'Error' statuses
error_code : int
The related error code that caused a warning/error.
'''
if self._serial.inWaiting() > 0:
return ('Data in buffer, you can\'t have info ! '
'Run clean_input() to emptied the buffer.')
self.logger.info('Asking for health')
self._send_cmd(GET_HEALTH_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != HEALTH_LEN:
raise RPLidarException('Wrong get_info reply length')
if not is_single:
raise RPLidarException('Not a single response mode')
if dtype != HEALTH_TYPE:
raise RPLidarException('Wrong response data type')
raw = self._read_response(dsize)
status = _HEALTH_STATUSES[_b2i(raw[0])]
error_code = (_b2i(raw[1]) << 8) + _b2i(raw[2])
return status, error_code
def clean_input(self):
'''Clean input buffer by reading all available data'''
if self.scanning[0]:
return 'Cleanning not allowed during scanning process active !'
self._serial.flushInput()
self.express_trame = 32
self.express_data = False
def stop(self):
'''Stops scanning process, disables laser diode and the measurement
system, moves sensor to the idle state.'''
self.logger.info('Stopping scanning')
self._send_cmd(STOP_BYTE)
time.sleep(.1)
self.scanning[0] = False
self.clean_input()
def start(self, scan_type='normal'):
'''Start the scanning process
Parameters
----------
scan : normal, force or express.
'''
if self.scanning[0]:
return 'Scanning already running !'
'''Start the scanning process, enable laser diode and the
measurement system'''
status, error_code = self.get_health()
self.logger.debug('Health status: %s [%d]', status, error_code)
if status == _HEALTH_STATUSES[2]:
self.logger.warning('Trying to reset sensor due to the error. '
'Error code: %d', error_code)
self.reset()
status, error_code = self.get_health()
if status == _HEALTH_STATUSES[2]:
raise RPLidarException('RPLidar hardware failure. '
'Error code: %d' % error_code)
elif status == _HEALTH_STATUSES[1]:
self.logger.warning('Warning sensor status detected! '
'Error code: %d', error_code)
cmd = _SCAN_TYPE[scan_type]['byte']
self.logger.info('starting scan process in %s mode' % scan_type)
if scan_type == 'express':
self._send_payload_cmd(cmd, b'\x00\x00\x00\x00\x00')
else:
self._send_cmd(cmd)
dsize, is_single, dtype = self._read_descriptor()
if dsize != _SCAN_TYPE[scan_type]['size']:
raise RPLidarException('Wrong get_info reply length')
if is_single:
raise RPLidarException('Not a multiple response mode')
if dtype != _SCAN_TYPE[scan_type]['response']:
raise RPLidarException('Wrong response data type')
self.scanning = [True, dsize, scan_type]
def reset(self):
'''Resets sensor core, reverting it to a similar state as it has
just been powered up.'''
self.logger.info('Reseting the sensor')
self._send_cmd(RESET_BYTE)
time.sleep(2)
self.clean_input()
def iter_measures(self, scan_type='normal', max_buf_meas=3000):
'''Iterate over measures. Note that consumer must be fast enough,
otherwise data will be accumulated inside buffer and consumer will get
data with increasing lag.
Parameters
----------
max_buf_meas : int or False if you want unlimited buffer
Maximum number of bytes to be stored inside the buffer. Once
numbe exceeds this limit buffer will be emptied out.
Yields
------
new_scan : bool
True if measures belongs to a new scan
quality : int
Reflected laser pulse strength
angle : float
The measure heading angle in degree unit [0, 360)
distance : float
Measured object distance related to the sensor's rotation center.
In millimeter unit. Set to 0 when measure is invalid.
'''
self.start_motor()
if not self.scanning[0]:
self.start(scan_type)
while True:
dsize = self.scanning[1]
if max_buf_meas:
data_in_buf = self._serial.inWaiting()
if data_in_buf > max_buf_meas:
self.logger.warning(
'Too many bytes in the input buffer: %d/%d. '
'Cleaning buffer...',
data_in_buf, max_buf_meas)
self.stop()
self.start(self.scanning[2])
if self.scanning[2] == 'normal':
raw = self._read_response(dsize)
yield _process_scan(raw)
if self.scanning[2] == 'express':
if self.express_trame == 32:
self.express_trame = 0
if not self.express_data:
self.logger.debug('reading first time bytes')
self.express_data = ExpressPacket.from_string(
self._read_response(dsize))
self.express_old_data = self.express_data
self.logger.debug('set old_data with start_angle %f',
self.express_old_data.start_angle)
self.express_data = ExpressPacket.from_string(
self._read_response(dsize))
self.logger.debug('set new_data with start_angle %f',
self.express_data.start_angle)
self.express_trame += 1
self.logger.debug('process scan of frame %d with angle : '
'%f and angle new : %f', self.express_trame,
self.express_old_data.start_angle,
self.express_data.start_angle)
yield _process_express_scan(self.express_old_data,
self.express_data.start_angle,
self.express_trame)
def iter_scans(self, scan_type='normal', max_buf_meas=3000, min_len=5):
'''Iterate over scans. Note that consumer must be fast enough,
otherwise data will be accumulated inside buffer and consumer will get
data with increasing lag.
Parameters
----------
max_buf_meas : int
Maximum number of measures to be stored inside the buffer. Once
numbe exceeds this limit buffer will be emptied out.
min_len : int
Minimum number of measures in the scan for it to be yelded.
Yields
------
scan : list
List of the measures. Each measurment is tuple with following
format: (quality, angle, distance). For values description please
refer to `iter_measures` method's documentation.
'''
scan_list = []
iterator = self.iter_measures(scan_type, max_buf_meas)
for new_scan, quality, angle, distance in iterator:
if new_scan:
if len(scan_list) > min_len:
yield scan_list
scan_list = []
if distance > 0:
scan_list.append((quality, angle, distance))
class ExpressPacket(namedtuple('express_packet',
'distance angle new_scan start_angle')):
sync1 = 0xa
sync2 = 0x5
sign = {0: 1, 1: -1}
@classmethod
def from_string(cls, data):
packet = bytearray(data)
if (packet[0] >> 4) != cls.sync1 or (packet[1] >> 4) != cls.sync2:
raise ValueError('try to parse corrupted data ({})'.format(packet))
checksum = 0
for b in packet[2:]:
checksum ^= b
if checksum != (packet[0] & 0b00001111) + ((
packet[1] & 0b00001111) << 4):
raise ValueError('Invalid checksum ({})'.format(packet))
new_scan = packet[3] >> 7
start_angle = (packet[2] + ((packet[3] & 0b01111111) << 8)) / 64
d = a = ()
for i in range(0,80,5):
d += ((packet[i+4] >> 2) + (packet[i+5] << 6),)
a += (((packet[i+8] & 0b00001111) + ((
packet[i+4] & 0b00000001) << 4))/8*cls.sign[(
packet[i+4] & 0b00000010) >> 1],)
d += ((packet[i+6] >> 2) + (packet[i+7] << 6),)
a += (((packet[i+8] >> 4) + (
(packet[i+6] & 0b00000001) << 4))/8*cls.sign[(
packet[i+6] & 0b00000010) >> 1],)
return cls(d, a, new_scan, start_angle) | /rplidar-roboticia-0.9.5.tar.gz/rplidar-roboticia-0.9.5/rplidar.py | 0.514156 | 0.190856 | rplidar.py | pypi |
import logging
import sys
import time
import codecs
from typing import Tuple, List
import serial
import struct
from collections import namedtuple
SYNC_BYTE = b"\xA5"
SYNC_BYTE2 = b"\x5A"
GET_INFO_BYTE = b"\x50"
GET_HEALTH_BYTE = b"\x52"
STOP_BYTE = b"\x25"
RESET_BYTE = b"\x40"
_SCAN_TYPE = {
"normal": {"byte": b"\x20", "response": 129, "size": 5},
"force": {"byte": b"\x21", "response": 129, "size": 5},
"express": {"byte": b"\x82", "response": 130, "size": 84},
}
DESCRIPTOR_LEN = 7
INFO_LEN = 20
HEALTH_LEN = 3
INFO_TYPE = 4
HEALTH_TYPE = 6
# Constants & Command to start A2 motor
MAX_MOTOR_PWM = 1023
DEFAULT_MOTOR_PWM = 660
SET_PWM_BYTE = b"\xF0"
_HEALTH_STATUSES = {
0: "Good",
1: "Warning",
2: "Error",
}
class RPLidarException(Exception):
"""Basic exception class for RPLidar"""
def _b2i(byte):
"""Converts byte to integer (for Python 2 compatability)"""
return byte if int(sys.version[0]) == 3 else ord(byte)
def _showhex(signal):
"""Converts string bytes to hex representation (useful for debugging)"""
return [format(_b2i(b), "#02x") for b in signal]
def _process_scan(raw):
"""Processes input raw data and returns measurement data"""
new_scan = bool(_b2i(raw[0]) & 0b1)
inversed_new_scan = bool((_b2i(raw[0]) >> 1) & 0b1)
quality = _b2i(raw[0]) >> 2
if new_scan == inversed_new_scan:
raise RPLidarException("New scan flags mismatch")
check_bit = _b2i(raw[1]) & 0b1
if check_bit != 1:
raise RPLidarException("Check bit not equal to 1")
angle = ((_b2i(raw[1]) >> 1) + (_b2i(raw[2]) << 7)) / 64.0
distance = (_b2i(raw[3]) + (_b2i(raw[4]) << 8)) / 4.0
return new_scan, quality, angle, distance
def _process_express_scan(
data: "ExpressPacket", w_i1: float
) -> List[Tuple[bool, None, float, float]]:
"""See page 31 of https://bucket-download.slamtec.com/6494fd238cf5e0d881f56d914c6d1f355c0f582a/LR001_SLAMTEC_rplidar_protocol_v2.4_en.pdf"""
w_i = data.start_angle
angle_diff = w_i1 - w_i if w_i <= w_i1 else 360 + w_i1 - w_i
angles = []
for k in range(1, len(data.angle) + 1):
dtheta_k = data.angle[k - 1]
angle = w_i + (angle_diff / 32.0) * k - dtheta_k
angles.append(angle)
points = []
found_new_scan = False
for angle, distance in zip(angles, data.distance):
points.append((angle > 360 and not found_new_scan, None, angle, distance))
if angle > 360:
found_new_scan = True
return points
class RPLidar(object):
"""Class for communicating with RPLidar rangefinder scanners"""
def __init__(self, port, baudrate=115200, timeout=1, logger=None):
"""Initilize RPLidar object for communicating with the sensor.
Parameters
----------
port : str
Serial port name to which sensor is connected
baudrate : int, optional
Baudrate for serial connection (the default is 115200)
timeout : float, optional
Serial port connection timeout in seconds (the default is 1)
logger : logging.Logger instance, optional
Logger instance, if none is provided new instance is created
"""
self._serial = None
self.port = port
self.baudrate = baudrate
self.timeout = timeout
self._motor_speed = DEFAULT_MOTOR_PWM
self.scanning = [False, 0, "normal"]
self.express_trame = 32
self.express_data = False
self.motor_running = None
if logger is None:
logger = logging.getLogger("rplidar")
self.logger = logger
self.connect()
def connect(self):
"""Connects to the serial port with the name `self.port`. If it was
connected to another serial port disconnects from it first."""
if self._serial is not None:
self.disconnect()
try:
self._serial = serial.Serial(
self.port,
self.baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=self.timeout,
)
except serial.SerialException as err:
raise RPLidarException(
"Failed to connect to the sensor " "due to: %s" % err
)
def disconnect(self):
"""Disconnects from the serial port"""
if self._serial is None:
return
self._serial.close()
def _set_pwm(self, pwm):
payload = struct.pack("<H", pwm)
self._send_payload_cmd(SET_PWM_BYTE, payload)
@property
def motor_speed(self):
return self._motor_speed
@motor_speed.setter
def motor_speed(self, pwm):
assert 0 <= pwm <= MAX_MOTOR_PWM
self._motor_speed = pwm
if self.motor_running:
self._set_pwm(self._motor_speed)
def start_motor(self):
"""Starts sensor motor"""
self.logger.info("Starting motor")
# For A1
self._serial.setDTR(False)
# For A2
self._set_pwm(self._motor_speed)
self.motor_running = True
def stop_motor(self):
"""Stops sensor motor"""
self.logger.info("Stoping motor")
# For A2
self._set_pwm(0)
time.sleep(0.001)
# For A1
self._serial.setDTR(True)
self.motor_running = False
def _send_payload_cmd(self, cmd, payload):
"""Sends `cmd` command with `payload` to the sensor"""
size = struct.pack("B", len(payload))
req = SYNC_BYTE + cmd + size + payload
checksum = 0
for v in struct.unpack("B" * len(req), req):
checksum ^= v
req += struct.pack("B", checksum)
self._serial.write(req)
self.logger.debug("Command sent: %s" % _showhex(req))
def _send_cmd(self, cmd):
"""Sends `cmd` command to the sensor"""
req = SYNC_BYTE + cmd
self._serial.write(req)
self.logger.debug("Command sent: %s" % _showhex(req))
def _read_descriptor(self):
"""Reads descriptor packet"""
descriptor = self._serial.read(DESCRIPTOR_LEN)
self.logger.debug("Received descriptor: %s", _showhex(descriptor))
if len(descriptor) != DESCRIPTOR_LEN:
raise RPLidarException("Descriptor length mismatch")
elif not descriptor.startswith(SYNC_BYTE + SYNC_BYTE2):
raise RPLidarException("Incorrect descriptor starting bytes")
is_single = _b2i(descriptor[-2]) == 0
return _b2i(descriptor[2]), is_single, _b2i(descriptor[-1])
def _read_response(self, dsize):
"""Reads response packet with length of `dsize` bytes"""
self.logger.debug("Trying to read response: %d bytes", dsize)
while self._serial.inWaiting() < dsize:
time.sleep(0.001)
data = self._serial.read(dsize)
self.logger.debug("Received data: %s", _showhex(data))
return data
def get_info(self):
"""Get device information
Returns
-------
dict
Dictionary with the sensor information
"""
if self._serial.inWaiting() > 0:
return (
"Data in buffer, you can't have info ! "
"Run clean_input() to emptied the buffer."
)
self._send_cmd(GET_INFO_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != INFO_LEN:
raise RPLidarException("Wrong get_info reply length")
if not is_single:
raise RPLidarException("Not a single response mode")
if dtype != INFO_TYPE:
raise RPLidarException("Wrong response data type")
raw = self._read_response(dsize)
serialnumber = codecs.encode(raw[4:], "hex").upper()
serialnumber = codecs.decode(serialnumber, "ascii")
data = {
"model": _b2i(raw[0]),
"firmware": (_b2i(raw[2]), _b2i(raw[1])),
"hardware": _b2i(raw[3]),
"serialnumber": serialnumber,
}
return data
def get_health(self):
"""Get device health state. When the core system detects some
potential risk that may cause hardware failure in the future,
the returned status value will be 'Warning'. But sensor can still work
as normal. When sensor is in the Protection Stop state, the returned
status value will be 'Error'. In case of warning or error statuses
non-zero error code will be returned.
Returns
-------
status : str
'Good', 'Warning' or 'Error' statuses
error_code : int
The related error code that caused a warning/error.
"""
if self._serial.inWaiting() > 0:
return (
"Data in buffer, you can't have info ! "
"Run clean_input() to emptied the buffer."
)
self.logger.info("Asking for health")
self._send_cmd(GET_HEALTH_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != HEALTH_LEN:
raise RPLidarException("Wrong get_info reply length")
if not is_single:
raise RPLidarException("Not a single response mode")
if dtype != HEALTH_TYPE:
raise RPLidarException("Wrong response data type")
raw = self._read_response(dsize)
status = _HEALTH_STATUSES[_b2i(raw[0])]
error_code = (_b2i(raw[1]) << 8) + _b2i(raw[2])
return status, error_code
def clean_input(self):
"""Clean input buffer by reading all available data"""
if self.scanning[0]:
return "Cleanning not allowed during scanning process active !"
self._serial.flushInput()
self.express_trame = 32
self.express_data = False
def stop(self):
"""Stops scanning process, disables laser diode and the measurement
system, moves sensor to the idle state."""
self.logger.info("Stopping scanning")
self._send_cmd(STOP_BYTE)
time.sleep(0.1)
self.scanning[0] = False
self.clean_input()
def start(self, scan_type="normal"):
"""Start the scanning process
Parameters
----------
scan : normal, force or express.
"""
if self.scanning[0]:
return "Scanning already running !"
"""Start the scanning process, enable laser diode and the
measurement system"""
status, error_code = self.get_health()
self.logger.debug("Health status: %s [%d]", status, error_code)
if status == _HEALTH_STATUSES[2]:
self.logger.warning(
"Trying to reset sensor due to the error. " "Error code: %d", error_code
)
self.reset()
status, error_code = self.get_health()
if status == _HEALTH_STATUSES[2]:
raise RPLidarException(
"RPLidar hardware failure. " "Error code: %d" % error_code
)
elif status == _HEALTH_STATUSES[1]:
self.logger.warning(
"Warning sensor status detected! " "Error code: %d", error_code
)
cmd = _SCAN_TYPE[scan_type]["byte"]
self.logger.info("starting scan process in %s mode" % scan_type)
if scan_type == "express":
self._send_payload_cmd(cmd, b"\x00\x00\x00\x00\x00")
else:
self._send_cmd(cmd)
dsize, is_single, dtype = self._read_descriptor()
if dsize != _SCAN_TYPE[scan_type]["size"]:
raise RPLidarException("Wrong get_info reply length")
if is_single:
raise RPLidarException("Not a multiple response mode")
if dtype != _SCAN_TYPE[scan_type]["response"]:
raise RPLidarException("Wrong response data type")
self.scanning = [True, dsize, scan_type]
def reset(self):
"""Resets sensor core, reverting it to a similar state as it has
just been powered up."""
self.logger.info("Reseting the sensor")
self._send_cmd(RESET_BYTE)
time.sleep(2)
self.clean_input()
def iter_measures(self, scan_type="normal", max_buf_meas=3000):
"""Iterate over measures. Note that consumer must be fast enough,
otherwise data will be accumulated inside buffer and consumer will get
data with increasing lag.
Parameters
----------
max_buf_meas : int or False if you want unlimited buffer
Maximum number of bytes to be stored inside the buffer. Once
numbe exceeds this limit buffer will be emptied out.
Yields
------
new_scan : bool
True if measures belongs to a new scan
quality : int
Reflected laser pulse strength
angle : float
The measure heading angle in degree unit [0, 360)
distance : float
Measured object distance related to the sensor's rotation center.
In millimeter unit. Set to 0 when measure is invalid.
"""
self.start_motor()
if not self.scanning[0]:
self.start(scan_type)
while True:
dsize = self.scanning[1]
if max_buf_meas:
data_in_buf = self._serial.inWaiting()
if data_in_buf > max_buf_meas:
self.logger.warning(
"Too many bytes in the input buffer: %d/%d. "
"Cleaning buffer...",
data_in_buf,
max_buf_meas,
)
self.stop()
self.start(self.scanning[2])
if self.scanning[2] == "normal":
raw = self._read_response(dsize)
yield _process_scan(raw)
if self.scanning[2] == "express":
if self.express_trame == 32:
self.express_trame = 0
if not self.express_data:
self.logger.debug("reading first time bytes")
self.express_data = ExpressPacket.from_string(
self._read_response(dsize)
)
self.express_old_data = self.express_data
self.logger.debug(
"set old_data with start_angle %f",
self.express_old_data.start_angle,
)
self.express_data = ExpressPacket.from_string(
self._read_response(dsize)
)
self.logger.debug(
"set new_data with start_angle %f",
self.express_data.start_angle,
)
self.express_scan = _process_express_scan(
self.express_old_data, self.express_data.start_angle
)
yield self.express_scan[self.express_trame]
self.express_trame += 1
def iter_scans(self, scan_type="normal", max_buf_meas=3000, min_len=5):
"""Iterate over scans. Note that consumer must be fast enough,
otherwise data will be accumulated inside buffer and consumer will get
data with increasing lag.
Parameters
----------
max_buf_meas : int
Maximum number of measures to be stored inside the buffer. Once
numbe exceeds this limit buffer will be emptied out.
min_len : int
Minimum number of measures in the scan for it to be yelded.
Yields
------
scan : list
List of the measures. Each measurment is tuple with following
format: (quality, angle, distance). For values description please
refer to `iter_measures` method's documentation.
"""
scan_list = []
iterator = self.iter_measures(scan_type, max_buf_meas)
for new_scan, quality, angle, distance in iterator:
if new_scan:
if len(scan_list) > min_len:
yield scan_list
scan_list = []
if distance > 0:
scan_list.append((quality, angle, distance))
class ExpressPacket(
namedtuple("express_packet", "distance angle new_scan start_angle")
):
sync1 = 0xA
sync2 = 0x5
sign = {0: 1, 1: -1}
@classmethod
def from_string(cls, data):
packet = bytearray(data)
if (packet[0] >> 4) != cls.sync1 or (packet[1] >> 4) != cls.sync2:
raise ValueError("try to parse corrupted data ({})".format(packet))
checksum = 0
for b in packet[2:]:
checksum ^= b
if checksum != (packet[0] & 0b00001111) + ((packet[1] & 0b00001111) << 4):
raise ValueError("Invalid checksum ({})".format(packet))
new_scan = packet[3] >> 7
start_angle = (packet[2] + ((packet[3] & 0b01111111) << 8)) / 64
d = a = ()
for i in range(0, 80, 5):
d += ((packet[i + 4] >> 2) + (packet[i + 5] << 6),)
a += (
((packet[i + 8] & 0b00001111) + ((packet[i + 4] & 0b00000001) << 4))
/ 8
* cls.sign[(packet[i + 4] & 0b00000010) >> 1],
)
d += ((packet[i + 6] >> 2) + (packet[i + 7] << 6),)
a += (
((packet[i + 8] >> 4) + ((packet[i + 6] & 0b00000001) << 4))
/ 8
* cls.sign[(packet[i + 6] & 0b00000010) >> 1],
)
return cls(d, a, new_scan, start_angle) | /rplidar-sharpattack-0.9.6.tar.gz/rplidar-sharpattack-0.9.6/rplidar.py | 0.584983 | 0.292368 | rplidar.py | pypi |
import logging
import sys
import time
import codecs
import serial
import struct
SYNC_BYTE = b'\xA5'
SYNC_BYTE2 = b'\x5A'
GET_INFO_BYTE = b'\x50'
GET_HEALTH_BYTE = b'\x52'
STOP_BYTE = b'\x25'
RESET_BYTE = b'\x40'
SCAN_BYTE = b'\x20'
FORCE_SCAN_BYTE = b'\x21'
DESCRIPTOR_LEN = 7
INFO_LEN = 20
HEALTH_LEN = 3
INFO_TYPE = 4
HEALTH_TYPE = 6
SCAN_TYPE = 129
#Constants & Command to start A2 motor
MAX_MOTOR_PWM = 1023
DEFAULT_MOTOR_PWM = 660
SET_PWM_BYTE = b'\xF0'
_HEALTH_STATUSES = {
0: 'Good',
1: 'Warning',
2: 'Error',
}
class RPLidarException(Exception):
'''Basic exception class for RPLidar'''
def _b2i(byte):
'''Converts byte to integer (for Python 2 compatability)'''
return byte if int(sys.version[0]) == 3 else ord(byte)
def _process_scan(raw):
'''Processes input raw data and returns measurment data'''
new_scan = bool(_b2i(raw[0]) & 0b1)
inversed_new_scan = bool((_b2i(raw[0]) >> 1) & 0b1)
quality = _b2i(raw[0]) >> 2
if new_scan == inversed_new_scan:
raise RPLidarException('New scan flags mismatch')
check_bit = _b2i(raw[1]) & 0b1
if check_bit != 1:
raise RPLidarException('Check bit not equal to 1')
angle = ((_b2i(raw[1]) >> 1) + (_b2i(raw[2]) << 7)) / 64.
distance = (_b2i(raw[3]) + (_b2i(raw[4]) << 8)) / 4.
return new_scan, quality, angle, distance
class RPLidar(object):
'''Class for communicating with RPLidar rangefinder scanners'''
_serial_port = None #: serial port connection
port = '' #: Serial port name, e.g. /dev/ttyUSB0
timeout = 1 #: Serial port timeout
motor = False #: Is motor running?
baudrate = 115200 #: Baudrate for serial port
def __init__(self, port, baudrate=115200, timeout=1, logger=None):
'''Initilize RPLidar object for communicating with the sensor.
Parameters
----------
port : str
Serial port name to which sensor is connected
baudrate : int, optional
Baudrate for serial connection (the default is 115200)
timeout : float, optional
Serial port connection timeout in seconds (the default is 1)
logger : logging.Logger instance, optional
Logger instance, if none is provided new instance is created
'''
self._serial_port = None
self.port = port
self.baudrate = baudrate
self.timeout = timeout
self.motor_running = None
if logger is None:
logger = logging.getLogger('rplidar')
self.logger = logger
self.connect()
self.start_motor()
def connect(self):
'''Connects to the serial port with the name `self.port`. If it was
connected to another serial port disconnects from it first.'''
if self._serial_port is not None:
self.disconnect()
try:
self._serial_port = serial.Serial(
self.port, self.baudrate,
parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
timeout=self.timeout)
except serial.SerialException as err:
raise RPLidarException('Failed to connect to the sensor '
'due to: %s' % err)
def disconnect(self):
'''Disconnects from the serial port'''
if self._serial_port is None:
return
self._serial_port.close()
def set_pwm(self, pwm):
assert(0 <= pwm <= MAX_MOTOR_PWM)
payload = struct.pack("<H", pwm)
self._send_payload_cmd(SET_PWM_BYTE, payload)
def start_motor(self):
'''Starts sensor motor'''
self.logger.info('Starting motor')
# For A1
self._serial_port.setDTR(False)
# For A2
self.set_pwm(DEFAULT_MOTOR_PWM)
self.motor_running = True
def stop_motor(self):
'''Stops sensor motor'''
self.logger.info('Stoping motor')
# For A2
self.set_pwm(0)
time.sleep(.001)
# For A1
self._serial_port.setDTR(True)
self.motor_running = False
def _send_payload_cmd(self, cmd, payload):
'''Sends `cmd` command with `payload` to the sensor'''
size = struct.pack('B', len(payload))
req = SYNC_BYTE + cmd + size + payload
checksum = 0
for v in struct.unpack('B'*len(req), req):
checksum ^= v
req += struct.pack('B', checksum)
self._serial_port.write(req)
self.logger.debug('Command sent: %s' % req)
def _send_cmd(self, cmd):
'''Sends `cmd` command to the sensor'''
req = SYNC_BYTE + cmd
self._serial_port.write(req)
self.logger.debug('Command sent: %s' % req)
def _read_descriptor(self):
'''Reads descriptor packet'''
descriptor = self._serial_port.read(DESCRIPTOR_LEN)
self.logger.debug('Recieved descriptor: %s', descriptor)
if len(descriptor) != DESCRIPTOR_LEN:
raise RPLidarException('Descriptor length mismatch')
elif not descriptor.startswith(SYNC_BYTE + SYNC_BYTE2):
raise RPLidarException('Incorrect descriptor starting bytes')
is_single = _b2i(descriptor[-2]) == 0
return _b2i(descriptor[2]), is_single, _b2i(descriptor[-1])
def _read_response(self, dsize):
'''Reads response packet with length of `dsize` bytes'''
self.logger.debug('Trying to read response: %d bytes', dsize)
data = self._serial_port.read(dsize)
self.logger.debug('Recieved data: %s', data)
if len(data) != dsize:
raise RPLidarException('Wrong body size')
return data
def get_info(self):
'''Get device information
Returns
-------
dict
Dictionary with the sensor information
'''
self._send_cmd(GET_INFO_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != INFO_LEN:
raise RPLidarException('Wrong get_info reply length')
if not is_single:
raise RPLidarException('Not a single response mode')
if dtype != INFO_TYPE:
raise RPLidarException('Wrong response data type')
raw = self._read_response(dsize)
serialnumber = codecs.encode(raw[4:], 'hex').upper()
serialnumber = codecs.decode(serialnumber, 'ascii')
data = {
'model': _b2i(raw[0]),
'firmware': (_b2i(raw[2]), _b2i(raw[1])),
'hardware': _b2i(raw[3]),
'serialnumber': serialnumber,
}
return data
def get_health(self):
'''Get device health state. When the core system detects some
potential risk that may cause hardware failure in the future,
the returned status value will be 'Warning'. But sensor can still work
as normal. When sensor is in the Protection Stop state, the returned
status value will be 'Error'. In case of warning or error statuses
non-zero error code will be returned.
Returns
-------
status : str
'Good', 'Warning' or 'Error' statuses
error_code : int
The related error code that caused a warning/error.
'''
self._send_cmd(GET_HEALTH_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != HEALTH_LEN:
raise RPLidarException('Wrong get_info reply length')
if not is_single:
raise RPLidarException('Not a single response mode')
if dtype != HEALTH_TYPE:
raise RPLidarException('Wrong response data type')
raw = self._read_response(dsize)
status = _HEALTH_STATUSES[_b2i(raw[0])]
error_code = (_b2i(raw[1]) << 8) + _b2i(raw[2])
return status, error_code
def clear_input(self):
'''Clears input buffer by reading all available data'''
self._serial_port.read_all()
def stop(self):
'''Stops scanning process, disables laser diode and the measurment
system, moves sensor to the idle state.'''
self.logger.info('Stoping scanning')
self._send_cmd(STOP_BYTE)
time.sleep(.001)
self.clear_input()
def reset(self):
    '''Resets sensor core, reverting it to a similar state as it has
    just been powered up.'''
    self.logger.info('Reseting the sensor')
    self._send_cmd(RESET_BYTE)
    # A core reset takes longer than a plain stop; wait before issuing
    # any further commands.
    time.sleep(.002)
def iter_measurments(self, max_buf_meas=500):
'''Iterate over measurments. Note that consumer must be fast enough,
otherwise data will be accumulated inside buffer and consumer will get
data with increaing lag.
Parameters
----------
max_buf_meas : int
Maximum number of measurments to be stored inside the buffer. Once
numbe exceeds this limit buffer will be emptied out.
Yields
------
new_scan : bool
True if measurment belongs to a new scan
quality : int
Reflected laser pulse strength
angle : float
The measurment heading angle in degree unit [0, 360)
distance : float
Measured object distance related to the sensor's rotation center.
In millimeter unit. Set to 0 when measurment is invalid.
'''
self.start_motor()
status, error_code = self.get_health()
self.logger.debug('Health status: %s [%d]', status, error_code)
if status == _HEALTH_STATUSES[2]:
self.logger.warning('Trying to reset sensor due to the error. '
'Error code: %d', error_code)
self.reset()
status, error_code = self.get_health()
if status == _HEALTH_STATUSES[2]:
raise RPLidarException('RPLidar hardware failure. '
'Error code: %d' % error_code)
elif status == _HEALTH_STATUSES[1]:
self.logger.warning('Warning sensor status detected! '
'Error code: %d', error_code)
cmd = SCAN_BYTE
self._send_cmd(cmd)
dsize, is_single, dtype = self._read_descriptor()
if dsize != 5:
raise RPLidarException('Wrong get_info reply length')
if is_single:
raise RPLidarException('Not a multiple response mode')
if dtype != SCAN_TYPE:
raise RPLidarException('Wrong response data type')
while True:
raw = self._read_response(dsize)
self.logger.debug('Recieved scan response: %s' % raw)
if max_buf_meas:
data_in_buf = self._serial_port.in_waiting
if data_in_buf > max_buf_meas*dsize:
self.logger.warning(
'Too many measurments in the input buffer: %d/%d. '
'Clearing buffer...',
data_in_buf//dsize, max_buf_meas)
self._serial_port.read(data_in_buf//dsize*dsize)
yield _process_scan(raw)
def iter_scans(self, max_buf_meas=500, min_len=5):
    '''Iterate over complete scans. The consumer must be fast enough,
    otherwise data accumulates in the buffer and arrives with growing lag.

    Parameters
    ----------
    max_buf_meas : int
        Maximum number of measurements kept in the buffer; passed through
        to :meth:`iter_measurments`.
    min_len : int
        A scan is only yielded when it holds more than this many
        measurements.

    Yields
    ------
    scan : list
        List of (quality, angle, distance) tuples; see
        :meth:`iter_measurments` for the meaning of each value.
    '''
    current = []
    for new_scan, quality, angle, distance in self.iter_measurments(max_buf_meas):
        if new_scan:
            # A new rotation started: emit the previous scan if it is
            # long enough, then start collecting afresh.
            if len(current) > min_len:
                yield current
            current = []
        # Zero quality or distance marks an invalid measurement; skip it.
        if quality > 0 and distance > 0:
            current.append((quality, angle, distance))
from datetime import datetime, timedelta
from typing import Dict, Any, Tuple
import jwt
class JWTToken:
    """Issue and validate JSON Web Tokens.

    Produces token pairs: an "expiry" token carrying an ``exp`` claim and
    a "lifetime" token without one.
    """

    def __init__(
        self,
        jwt_algorithm: str,
        jwt_secret: str,
        jwt_lifetime_hour: int,
    ):
        self.algorithm = jwt_algorithm
        self.secret = jwt_secret
        self.lifetime_hour = jwt_lifetime_hour

    @classmethod
    def from_config(
        cls,
        jwt_algorithm: str,
        jwt_secret: str,
        jwt_lifetime_hour: int = 1,
    ) -> "JWTToken":
        """Initialize a JWTToken based on the specified config.

        :param jwt_algorithm: string name of the signing algorithm
        :param jwt_secret: string secret used to sign tokens
        :param jwt_lifetime_hour: token lifetime in hours
        :return: new JWTToken instance
        """
        return cls(jwt_algorithm, jwt_secret, jwt_lifetime_hour)

    def encode_token(self, data: Dict[str, Any]) -> Tuple[str, str]:
        """Generate a (lifetime_token, expiry_token) pair from *data*.

        BUGFIX: the payload is copied first — previously the caller's dict
        was mutated in place (an ``exp`` key was added and/or popped).

        :param data: dictionary payload
        :return: tuple of (token without ``exp``, token with ``exp``)
        """
        payload = dict(data)
        if payload.get("exp", None) is None:
            payload["exp"] = datetime.utcnow() + timedelta(hours=self.lifetime_hour)
        # Token that expires after the configured lifetime.
        expiry_token = jwt.encode(
            payload=payload,
            key=self.secret,
            algorithm=self.algorithm,
        )
        # Token without an expiry claim.
        payload.pop("exp", None)
        lifetime_token = jwt.encode(
            payload=payload,
            key=self.secret,
            algorithm=self.algorithm,
        )
        return lifetime_token, expiry_token

    def decode_token(self, token: str) -> Dict[str, Any]:
        """Decode a string token into its dictionary payload.

        :param token: string token
        :return: dictionary data
        :raises jwt.PyJWTError: if the token is invalid or expired
        """
        return jwt.decode(
            jwt=token.encode("utf-8"),
            key=self.secret,
            algorithms=[self.algorithm],
        )

    def is_token_is_valid(self, token: str) -> Tuple[bool, str]:
        """Check whether *token* is valid.

        :param token: string token
        :return: tuple of (is_valid, human-readable message)
        """
        try:
            resp = jwt.decode(
                token.encode("utf-8"),
                self.secret,
                algorithms=[self.algorithm],
            )
            if len(resp) > 0:
                return True, "valid"
            return False, "invalid token"
        except jwt.ExpiredSignatureError:
            # Signature has expired
            return False, "token is expired"
        except jwt.exceptions.DecodeError:
            return False, "invalid token"
import io
from typing import ClassVar, Dict, Any, List, Set
import boto3
import pandas as pd
from botocore.exceptions import ClientError
from tinydb import Storage, TinyDB
from tinydb.storages import MemoryStorage
from rplus_constants.rplus_utils_module import AWS_ACCESS_KEY, AWS_SECRET_KEY, REGION_NAME, BUCKET_NAME
class S3WrapperStorage(Storage):
    """TinyDB storage backend persisting tables as gzip CSV/pickle on S3."""

    def __init__(self, bucket: str, file: str, table_name: str):
        self.bucket = bucket
        self.file = file
        self.table_name = table_name.lower()
        self.client = boto3.resource(
            "s3",
            region_name=REGION_NAME,
            aws_access_key_id=AWS_ACCESS_KEY,
            aws_secret_access_key=AWS_SECRET_KEY,
        )

    def construct_load_csv(self, records: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Convert from pandas record format to tinydb format.

        BUGFIX: a record whose ``internal_table`` differed from
        ``self.table_name`` previously raised KeyError; tables are now
        created lazily via ``setdefault``.

        :param records: list of dictionary data
        :return: {table_name: {record_id: record}} mapping
        """
        tmp = {self.table_name: {}}
        for idx, value in enumerate(records):
            rec_id = value.pop("internal_id", idx)
            table_name = value.pop("internal_table", self.table_name)
            tmp.setdefault(table_name, {})[rec_id] = value
        return tmp

    def read_from_s3_csv(self) -> pd.DataFrame:
        """Read data from the bucket and convert it to a dataframe.

        :return: pandas dataframe (empty when the object is missing or a
            client error occurs)
        """
        try:
            obj = self.client.Object(self.bucket, self.file)
            resp = obj.get()
            data = io.BytesIO(resp["Body"].read())
            if "csv" in self.file:
                return pd.read_csv(data, compression="gzip")
            return pd.read_pickle(data, compression="gzip")
        except ClientError as e:
            # A missing object (404) and any other client error both map
            # to an empty frame; callers treat this as "no data yet".
            if e.response["Error"]["Code"] == "404":
                return pd.DataFrame()
            return pd.DataFrame()

    def read(self) -> Dict[str, Any]:
        """Read data from S3 into tinydb's in-memory format.

        :return: dictionary data ({} when the backing object is empty)
        """
        df = self.read_from_s3_csv()
        if len(df) == 0:
            return {}
        resp = df.to_dict(orient="records")
        tinydb_formatted = self.construct_load_csv(resp)
        return tinydb_formatted

    def construct_write_csv(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Convert from tinydb format to pandas record format.

        :param data: dictionary data
        :return: flat records carrying internal_table/internal_id columns
        """
        tmp = []
        for key, value in data[self.table_name].items():
            tmp.append({"internal_table": self.table_name, "internal_id": key, **value})
        return tmp

    def create_writer(self, data: Dict[str, Any]) -> io.BytesIO:
        """Create an in-memory gzip CSV/pickle buffer from *data*.

        :param data: dictionary data
        :return: BytesIO positioned at the start of the serialized payload
        """
        csv_format = self.construct_write_csv(data)
        df = pd.DataFrame(csv_format)
        writer = io.BytesIO()
        if "csv" in self.file:
            df.to_csv(writer, index=False, compression="gzip")
        if "pkl" in self.file:
            df.to_pickle(writer, compression="gzip")
        writer.seek(0)
        return writer

    def write(self, data: Dict[str, Any]) -> bool:
        """Write data to the S3 storage file.

        :param data: dictionary data
        :return: boolean status (always True)
        """
        if len(data[self.table_name]) == 0:
            # Nothing to persist; skip the S3 round-trip.
            return True
        writer = self.create_writer(data)
        self.client.Object(self.bucket, self.file).put(Body=writer.getvalue())
        return True

    def close(self):
        # No persistent connection to release.
        pass
class S3Database:
    """Thin convenience wrapper around a TinyDB instance backed by S3."""

    def __init__(self, db: TinyDB):
        self.db = db

    @classmethod
    def init_db_from_s3(cls, file_path: str, table_name: str) -> "S3Database":
        """Init tiny db from s3 storage.

        :param file_path: string file path name
        :param table_name: string table name
        :return: current object class
        """
        db = TinyDB(
            bucket=BUCKET_NAME,
            file=file_path,
            storage=S3WrapperStorage,
            table_name=table_name.lower(),
        )
        return cls(db)

    @classmethod
    def init_from_memory(cls) -> "S3Database":
        """Initialize a new purely in-memory database (no S3 round-trips).

        Return annotation fixed: it was ``ClassVar``, which is a typing
        construct, not this class.

        :return: current class object
        """
        return cls(TinyDB(storage=MemoryStorage))

    def table(self, table_name: str) -> TinyDB:
        """Ensure *table_name* exists and return the underlying TinyDB.

        NOTE(review): this returns the whole TinyDB object rather than the
        table handle produced by ``db.table()`` — existing callers appear
        to rely on that, so the behavior is kept.

        :param table_name: string table name
        :return: the wrapped TinyDB object
        """
        self.db.table(table_name.lower())
        return self.db

    def tables(self) -> Set[str]:
        """Return the names of all tables in the database.

        :return: set of table names
        """
        return self.db.tables()

    def insert_multi(self, table_name: str, datas: List[Dict[str, Any]]) -> bool:
        """Insert multiple records into *table_name* in one go.

        :param table_name: string of table name
        :param datas: list of dictionary data
        :return: boolean true (always)
        """
        self.db.table(table_name.lower()).insert_multiple(datas)
        return True
import re
from rply.分词器 import 分词器
class 词模式(object):
    """A named token rule: a compiled regex plus the token name it yields."""
    _attrs_ = ['词名', '匹配参数', '_模式']

    def __init__(自身, 词名, 模式, 匹配参数=0):
        自身.词名 = 词名
        自身.正则 = re.compile(模式, flags=匹配参数)

    def 匹配(自身, 源码, 起点, 终点=None):
        """Try to match at 起点; return a 范围 span on success, else None."""
        if 终点:
            命中 = 自身.正则.match(源码, 起点, 终点)
        else:
            命中 = 自身.正则.match(源码, 起点)
        if 命中 is None:
            return None
        return 范围(*命中.span(0))

    def __repr__(自身):
        return "词名:{0}, 模式={1}".format(自身.词名, 自身.正则)
class 范围(object):
    """Half-open [起, 止) span of matched source text."""
    _attrs_ = ["起", "止"]

    def __init__(自身, 起, 止):
        自身.起, 自身.止 = 起, 止
class 分词器母机(object):
    r"""
    A LexerGenerator holds a set of rules describing pieces of text that
    should either be turned into tokens or ignored by the lexer.

    Rules are registered with :meth:`添了` (add) and :meth:`略过` (ignore);
    patterns are handed to :func:`re.compile`, and extra flags such as
    :const:`re.DOTALL` can be passed via the ``匹配参数`` argument.
    Call :meth:`产出` to build a lexer whose ``分词`` method lexes a string
    into an iterator of :class:`~rply.Token` instances.
    """

    def __init__(自身):
        自身.规则 = []
        自身.略过规则 = []

    def 添了(自身, 词名, 模式, 匹配参数=0):
        """
        Adds a rule with the given `词名` and `模式`. In case of ambiguity,
        the first rule added wins.
        """
        新规则 = 词模式(词名, 模式, 匹配参数=匹配参数)
        自身.规则.append(新规则)

    def 略过(自身, 模式, 匹配参数=0):
        """
        Adds a rule whose matched value will be ignored. Ignored rules are
        matched before regular ones.
        """
        新规则 = 词模式("", 模式, 匹配参数=匹配参数)
        自身.略过规则.append(新规则)

    def 产出(自身):
        """
        Returns a lexer instance, which provides a `分词` method that must
        be called with a string and returns an iterator yielding
        :class:`~rply.Token` instances.
        """
        return 分词器(自身.规则, 自身.略过规则)
class BaseBox(object):
    """
    A base class for polymorphic boxes that wrap parser results. Simply use
    this as a base class for anything you return in a production function of a
    parser. This is necessary because RPython, unlike Python, expects functions
    to always return objects of the same type.

    NOTE: since RPython is no longer supported, this structure should be
    unnecessary; kept pending cleanup.
    """
    _attrs_ = []
class 词(BaseBox):
    """
    Represents a syntactically relevant piece of text (a token).

    :param name: A string describing the kind of text represented.
    :param value: The actual text represented.
    :param 源码位置: A :class:`SourcePosition` object representing the
                     position of the first character in the source from
                     which this token was generated.
    """

    def __init__(自身, name, value, 源码位置=None):
        自身.name = name
        自身.value = value
        自身.source_pos = 源码位置

    def __repr__(自身):
        return "Token(%r, %r)" % (自身.name, 自身.value)

    def __eq__(自身, other):
        if not isinstance(other, 词):
            return NotImplemented
        return (自身.name, 自身.value) == (other.name, other.value)

    def gettokentype(自身):
        """Returns the type or name of the token."""
        return 自身.name

    def getsourcepos(自身):
        """
        Returns a :class:`SourcePosition` instance, describing the position
        of this token's first character in the source.
        """
        return 自身.source_pos

    def getstr(自身):
        """Returns the string represented by this token."""
        return 自身.value
class 字符位置(object):
    """
    Position of a character within the source text.

    :param idx: The index of the character in the source.
    :param lineno: The number of the line in which the character occurs.
    :param colno: The number of the column in which the character occurs.

    The values passed to this object can be retrieved using the identically
    named attributes.
    """

    def __init__(自身, idx, lineno, colno):
        自身.idx = idx
        自身.lineno = lineno
        自身.colno = colno

    def __repr__(自身):
        return "SourcePosition(idx={0}, lineno={1}, colno={2})".format(
            自身.idx, 自身.lineno, 自身.colno
        )
import binascii
import struct
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
import cryptography.hazmat.primitives.hashes as crypto_hashes
import cryptography.hazmat.primitives.asymmetric.ec as crypto_ec
from .extract_rpm_with_filesigs import _extract_filesigs
def parse_ima_signature(sig):
    """Parse an IMA EVM raw signature blob into a descriptive dict.

    The 9-byte header is: type (1), version (1), algorithm id (1),
    key id (4), signature size (2, big-endian), followed by the signature
    payload.

    Returns None when *sig* is too short; otherwise a dict that always
    carries an ``error`` key (None on success).
    """
    if not isinstance(sig, bytearray):
        sig = bytearray(sig)
    if len(sig) < 10:
        return None
    sig_type, version, alg_id, key_id, sig_size = struct.unpack(
        ">BBB4sH", bytes(sig[:9])
    )
    info = {
        "type": sig_type,
        "version": version,
        "alg_id": alg_id,
        "key_id": key_id,
        "sig_size": sig_size,
        "error": "Did not finish parsing",
    }
    # Hex key id; the str check keeps Python 2 compatibility.
    readable_key_id = binascii.hexlify(sig[3:7])
    if not isinstance(readable_key_id, str):
        readable_key_id = readable_key_id.decode("utf8")
    info["user_readable_key_id"] = readable_key_id
    if sig_type != 3:
        info["error"] = "Unsupported type"
        return info
    if version != 2:
        info["error"] = "Unsupported version"
        return info
    # Only resolve cryptography objects on a supported-algorithm path.
    if alg_id == 7:
        info["alg_name"] = "SHA224"
        digest = crypto_hashes.SHA224()
    elif alg_id == 4:
        info["alg_name"] = "SHA256"
        digest = crypto_hashes.SHA256()
    elif alg_id == 5:
        info["alg_name"] = "SHA384"
        digest = crypto_hashes.SHA384()
    elif alg_id == 6:
        info["alg_name"] = "SHA512"
        digest = crypto_hashes.SHA512()
    else:
        info["error"] = "Unsupported algorithm %d" % alg_id
        return info
    info["hashing_algorithm"] = digest
    info["algorithm"] = crypto_ec.ECDSA(Prehashed(digest))
    payload_len = len(sig) - 9
    if payload_len != sig_size:
        info["error"] = "Signature length mismatch: %d (actual) != %d (expected)" % (
            payload_len,
            sig_size,
        )
        return info
    info["signature"] = bytes(sig[9:])
    info["error"] = None
    return info
def get_rpm_ima_signature_info(rpm_path):
    """Extract and parse the per-file IMA signatures of an RPM.

    Returns None when no signatures can be extracted, otherwise a mapping
    of file path -> parsed signature info dict.
    """
    signatures = _extract_filesigs(rpm_path)
    if signatures is None:
        return None
    return {path: parse_ima_signature(raw) for path, raw in signatures.items()}
import base64
import binascii
import struct
from .insertlib import insert_signatures as insertlib_insert_signatures
def _fix_sig_size_byteorder(signature):
    """Normalise the sig_size field (bytes 6-7) to big-endian.

    The field must equal the payload length (total length minus the 8-byte
    header). A value that only matches when read little-endian is rewritten
    big-endian; anything else is rejected.
    """
    expected = len(signature) - 8
    (size_be,) = struct.unpack(">H", signature[6:8])
    if size_be == expected:
        # The byte order was already correct, don't change it
        return signature
    (size_le,) = struct.unpack("<H", signature[6:8])
    if size_le == expected:
        print("Reversed byte-order sig_size encountered, fixing")
        return signature[:6] + struct.pack(">H", size_le) + signature[8:]
    raise Exception(
        "Signature with invalid sig_size encountered: %d != %d"
        % (size_be, expected)
    )
def insert_signature(rpm_path, sig_path, ima_presigned_path=None, return_header=False):
    """
    Insert a signature back into an RPM.

    Either writes the signature back to the RPM (return_header=False) or returns
    the signature header blob (return_header=True).

    :param rpm_path: path of the RPM file to update
    :param sig_path: path of the detached header signature; falsy to skip
        adding an RSA header record
    :param ima_presigned_path: optional path to a file with one
        "<algo> <digest> <base64-signature>" triple per line
    :param return_header: when True, return the header blob instead of
        rewriting the RPM in place
    """
    # Convert bool to int for insertlib (presumably a C extension that
    # expects an int flag) — TODO confirm.
    if return_header:
        return_header = 1
    else:
        return_header = 0
    if sig_path:
        # Add RSA Header record
        with open(sig_path, "rb") as sigfile:
            rpm_signature = bytearray(sigfile.read())
    else:
        rpm_signature = None
    # Add IMA signature record
    if ima_presigned_path is None:
        return insertlib_insert_signatures(
            return_header,
            rpm_path,
            rpm_signature,
        )
    else:
        # Build a lookup of {algo: {hex-digest: hex-encoded signature}}.
        ima_signature_lookup = {}
        with open(ima_presigned_path, "r") as sigpath:
            for line in sigpath.readlines():
                algo, digest, signature = line.strip().split(" ")
                signature = base64.b64decode(signature)
                # Normalise sig_size endianness before prefixing the IMA
                # EVM type byte (0x03) and hex-encoding.
                signature = _fix_sig_size_byteorder(signature)
                signature = binascii.hexlify(b"\x03" + signature)
                if algo not in ima_signature_lookup:
                    ima_signature_lookup[algo] = {}
                ima_signature_lookup[algo][digest.lower()] = signature
        return insertlib_insert_signatures(
            return_header,
            rpm_path,
            rpm_signature,
            ima_signature_lookup,
        )
def _main():
    """Command-line entry point: insert a header signature (and optional
    IMA signatures) into the RPM named on the command line."""
    import sys

    if len(sys.argv) == 3:
        ima_presigned_path = None
    elif len(sys.argv) == 5:
        ima_presigned_path = sys.argv[4]
    else:
        # BUGFIX: the %s placeholder was never substituted.
        raise Exception(
            "Call: %s <rpm-path> <header-signature> [ima_presigned_path]"
            % sys.argv[0]
        )
    insert_signature(sys.argv[1], sys.argv[2], ima_presigned_path)


if __name__ == "__main__":
    _main()
import re, os
from abc import (ABCMeta, abstractmethod)
#__all__ = ['Spec', 'replace_macros', 'Package']
class _Tag(metaclass=ABCMeta):
    """Base class for spec-file tag parsers.

    A tag knows how to recognise its line (:meth:`test`) and how to fold
    the matched value into a :class:`Spec` instance (:meth:`update`).
    """

    def __init__(self, name, pattern_obj, attr_type):
        self.name = name
        self.pattern_obj = pattern_obj
        # Python type used to initialise/convert the attribute value.
        self.attr_type = attr_type

    def test(self, line):
        """Return a match object if *line* contains this tag, else None."""
        return re.search(self.pattern_obj, line)

    def update(self, spec_obj, context, match_obj, line):
        """Update given spec object and parse context and return them again.

        :param spec_obj: An instance of Spec class
        :param context: The parse context
        :param match_obj: The re.match object
        :param line: The original line
        :return: Given updated Spec instance and parse context dictionary.
        """
        assert spec_obj
        assert context
        assert match_obj
        assert line
        return self.update_impl(spec_obj, context, match_obj, line)

    @abstractmethod
    def update_impl(self, spec_obj, context, match_obj, line):
        """Tag-specific update; implemented by subclasses."""
        pass

    @staticmethod
    def current_target(spec_obj, context):
        # Attribute updates go to the currently open %package sub-package
        # when there is one, otherwise to the spec itself.
        target_obj = spec_obj
        if context['current_subpackage'] is not None:
            target_obj = context['current_subpackage']
        return target_obj
class _NameValue(_Tag):
    """Parse a simple name → value tag (e.g. ``Version: 1.0``)."""

    def __init__(self, name, pattern_obj, attr_type=None):
        super().__init__(name, pattern_obj, attr_type if attr_type else str)

    def update_impl(self, spec_obj, context, match_obj, line):
        target_obj = _Tag.current_target(spec_obj, context)
        value = match_obj.group(1)
        # Sub-packages: the Name: tag starts the base package, so the
        # package list is (re)initialised here.
        if self.name == 'name':
            spec_obj.packages = []
            spec_obj.packages.append(Package(value))
        setattr(target_obj, self.name, self.attr_type(value))
        return spec_obj, context
class _MacroDef(_Tag):
    """Parse global macro definitions (``%define``/``%global``)."""

    def __init__(self, name, pattern_obj):
        super().__init__(name, pattern_obj, str)

    def update_impl(self, spec_obj, context, match_obj, line):
        name, value = match_obj.groups()
        # Macros always live on the Spec itself (never on a sub-package).
        setattr(spec_obj, name, str(value))
        return spec_obj, context
class _List(_Tag):
    """Parse a tag that expands to a list (repeatable tags)."""

    def __init__(self, name, pattern_obj):
        super().__init__(name, pattern_obj, list)

    def update_impl(self, spec_obj, context, match_obj, line):
        target_obj = _Tag.current_target(spec_obj, context)
        if not hasattr(target_obj, self.name):
            setattr(target_obj, self.name, list())
        value = match_obj.group(1)
        if self.name == 'packages':
            # A %package directive opens a new sub-package; with -n the
            # name is taken verbatim from the end of the line, otherwise it
            # is prefixed with the base package name.
            if value == '-n':
                subpackage_name = line.rsplit(' ', 1)[-1].rstrip()
            else:
                subpackage_name = '{}-{}'.format(spec_obj.name, value)
            package = Package(subpackage_name)
            context['current_subpackage'] = package
            package.is_subpackage = True
            spec_obj.packages.append(package)
        elif self.name in ['requires', 'build_requires']:
            # Dependency lines are parsed into Requirement objects.
            requirement = Requirement(value)
            getattr(target_obj, self.name).append(requirement)
        else:
            getattr(target_obj, self.name).append(value)
        return spec_obj, context
class _ListAndDict(_Tag):
    """Parse a tag that expands to both a list and a dict
    (numbered Source/Patch entries)."""

    def __init__(self, name, pattern_obj):
        super().__init__(name, pattern_obj, list)

    def update_impl(self, spec_obj, context, match_obj, line):
        source_name, value = match_obj.groups()
        # Record the value both keyed by its tag name (e.g. 'Source0') and
        # in declaration order on the current target.
        dictionary = getattr(spec_obj, '{}_dict'.format(self.name))
        dictionary[source_name] = value
        target_obj = _Tag.current_target(spec_obj, context)
        getattr(target_obj, self.name).append(value)
        return spec_obj, context
# Registry of all recognised spec-file tags, tried in order by _parse().
# _NameValue entries capture scalar header fields, _ListAndDict entries
# capture numbered Source/Patch lines, _List entries accumulate repeatable
# tags, and _MacroDef entries record %define/%global macros.
_tags = [
    _NameValue('name', re.compile(r'^Name:\s*(\S+)')),
    _NameValue('version', re.compile(r'^Version:\s*(\S+)')),
    _NameValue('epoch', re.compile(r'^Epoch:\s*(\S+)'), attr_type=int),
    _NameValue('release', re.compile(r'^Release:\s*(\S+)')),
    _NameValue('summary', re.compile(r'^Summary:\s*(.+)')),
    _NameValue('license', re.compile(r'^License:\s*(.+)')),
    _NameValue('group', re.compile(r'^Group:\s*(\S+)')),
    _NameValue('url', re.compile(r'^URL:\s*(\S+)')),
    _NameValue('buildroot', re.compile(r'^BuildRoot:\s*(\S+)')),
    _NameValue('buildarch', re.compile(r'^BuildArch:\s*(\S+)')),
    _ListAndDict('sources', re.compile(r'^(Source\d*):\s*(\S+)')),
    _ListAndDict('patches', re.compile(r'^(Patch\d*):\s*(\S+)')),
    _List('build_requires', re.compile(r'^BuildRequires:\s*(.+)')),
    _List('requires', re.compile(r'^Requires:\s*(.+)')),
    _List('requires_post', re.compile(r'^Requires\(post\):\s*(.+)')),
    _List('packages', re.compile(r'^%package\s+(\S+)')),
    _List('files', re.compile(r'%attr\(.*\)\s+(\S+)')),
    _MacroDef('define', re.compile(r'^%define\s+(\S+)\s+(\S+)')),
    _MacroDef('global', re.compile(r'^%global\s+(\S+)\s+(\S+)'))
]

# Matches %{...} macro references for replace_macros().
_macro_pattern = re.compile(r'%{(\S+?)\}')
def _parse(spec_obj, context, line):
    """Try every registered tag against *line*; the first match updates the
    spec and context. Unmatched lines leave both unchanged."""
    for candidate in _tags:
        hit = candidate.test(line)
        if hit is not None:
            return candidate.update(spec_obj, context, hit, line)
    return spec_obj, context
class Requirement:
    """A single ``Requires:``/``BuildRequires:`` entry of an RPM spec file.

    A requirement is either a bare package name, or a name followed by a
    comparison operator and a version, e.g.::

        Requires: %{name}%{?_isa} = %{version}-%{release}
        BuildRequires: gstreamer%{?_isa} >= 0.1.0

    The attributes ``name``, ``operator`` and ``version`` expose the parsed
    parts; ``operator`` and ``version`` are None for a bare name. ``line``
    keeps the original text.
    """

    # name, whitespace, one of < <= > >= =, whitespace, version
    expr = re.compile(r'(.*?)\s+([<>]=?|=)\s+(\S+)')

    def __init__(self, name):
        assert isinstance(name, str)
        self.line = name
        parsed = Requirement.expr.match(name)
        if parsed is None:
            self.name = name
            self.operator = None
            self.version = None
        else:
            self.name, self.operator, self.version = parsed.groups()

    def __repr__(self):
        return self.line
class Package:
    """Represents a single package in a RPM spec file.

    Each spec file describes at least one package and can contain one or more
    subpackages (described by the %package directive). For example, consider
    the following spec file::

        Name: foo
        Version: 0.1

        %description
        %{name} is the library that everyone needs.

        %package devel
        Summary: Header files, libraries and development documentation for %{name}
        Group: Development/Libraries
        Requires: %{name}%{?_isa} = %{version}-%{release}

        %description devel
        This package contains the header files, static libraries, and development
        documentation for %{name}. If you like to develop programs using %{name}, you
        will need to install %{name}-devel.

        %package -n bar
        Summary: A command line client for foo.
        License: GPLv2+

        %description -n bar
        This package contains a command line client for foo.

    This spec file will create three packages:

    * A package named foo, the base package.
    * A package named foo-devel, a subpackage.
    * A package named bar, also a subpackage, but without the foo- prefix.

    As you can see above, the name of a subpackage normally includes the main
    package name. When the -n option is added to the %package directive, the
    prefix of the base package name is omitted and a completely new name is
    used.
    """

    def __init__(self, name):
        assert isinstance(name, str)
        # Pre-create the dependency list attributes so they exist even when
        # the corresponding tag never appears in the spec file.
        for tag in _tags:
            if tag.attr_type is list and tag.name in ["build_requires", "requires", "conflicts", "obsoletes",
                                                      "provides"]:
                setattr(self, tag.name, tag.attr_type())
        self.name = name
        # True for %package sub-packages, False for the base package.
        self.is_subpackage = False

    def __repr__(self):
        return "Package('{}')".format(self.name)
class Spec:
    """Represents a single spec file.

    Attributes are created dynamically from the ``_tags`` registry:
    list-type tags start as empty lists, everything else as ``None``.
    """

    def __init__(self):
        for tag in _tags:
            if tag.attr_type is list:
                setattr(self, tag.name, tag.attr_type())
            else:
                setattr(self, tag.name, None)
        # Numbered Source/Patch tags are additionally kept as
        # name -> value mappings (e.g. {'Source0': 'foo.tar.gz'}).
        self.sources_dict = dict()
        self.patches_dict = dict()

    @property
    def packages_dict(self):
        """All packages in this RPM spec as a dictionary.

        You can access the individual packages by their package name, e.g.,

        git_spec.packages_dict['git-doc']
        """
        assert self.packages
        return dict(zip([package.name for package in self.packages], self.packages))

    @staticmethod
    def from_file(filename):
        """Creates a new Spec object from a given file.

        :param filename: The path to the spec file.
        :return: A new Spec object.
        """
        str = open(filename, 'r', encoding='utf-8').read()
        return Spec.from_string(str, os.path.dirname(os.path.abspath(filename)))

    @staticmethod
    def from_string(string: str, relpath: str):
        """Creates a new Spec object from a given string.

        :param string: The contents of a spec file.
        :param relpath: The relative path to use for expanding include directives
        :return: A new Spec object.
        """
        spec = Spec()
        parse_context = {
            'current_subpackage': None
        }
        # first pass: expand the "%include" lines:
        content_lines = Spec.__resolve_includes(string.splitlines(), relpath)
        # second pass: parse the SPEC file:
        for line in content_lines:
            spec, parse_context = _parse(spec, parse_context, line)
        return spec

    @staticmethod
    def __resolve_includes(content_lines, relpath):
        # Repeatedly scan for %include directives and splice the referenced
        # file's lines in place of the directive; the scan restarts after
        # each splice so nested includes are expanded too.
        include_pattern = re.compile(r'^%include\s+(\S+)')
        while True:
            restart_processing = False
            for nline in range(0,len(content_lines)):
                line = content_lines[nline]
                match = include_pattern.match(line)
                if match:
                    filename = match.group(1)
                    included_content = open(os.path.join(relpath,filename), 'r', encoding='utf-8').read()
                    # remove the current line and replace it with included content:
                    del content_lines[nline]
                    content_lines[nline:nline] = included_content.splitlines()
                    restart_processing = True
                    break
                # NOTE(review): the two statements below are leftovers —
                # restart_processing cannot be True here and the loop
                # variable is managed by the for statement.
                if restart_processing:
                    break
                nline = nline+1
            if not restart_processing:
                break  # no need of another pass
        return content_lines
def replace_macros(string, spec=None):
    """Replace all macros in given string with corresponding values.

    For example: a string '%{name}-%{version}.tar.gz' will be transformed to
    'foo-2.0.tar.gz'.

    Conditional macros are supported when *spec* is given:
    ``%{?name:alt}`` expands to *alt* (or the attribute value) when the
    attribute exists, ``%{!name:alt}`` when it does not; both expand to the
    empty string otherwise.

    :param string: A string containing macros that you want to be replaced.
    :param spec: An optional Spec instance whose attributes supply the
        macro values.
    :return: A string where all macros are substituted as good as possible.
    """
    if spec:
        assert isinstance(spec, Spec)

    def _is_conditional(macro: str) -> bool:
        return macro.startswith("?") or macro.startswith("!")

    def _test_conditional(macro: str) -> bool:
        if macro[0] == "?":
            return True
        if macro[0] == "!":
            return False
        raise Exception("Given string is not a conditional macro")

    def _macro_repl(match):
        macro_name = match.group(1)
        if _is_conditional(macro_name) and spec:
            parts = macro_name[1:].split(sep=":", maxsplit=1)
            assert len(parts) > 0
            if _test_conditional(macro_name):  # %{?name[:alt]}
                if hasattr(spec, parts[0]):
                    if len(parts) == 2:
                        return parts[1]
                    value = getattr(spec, parts[0], None)
                    # BUGFIX: re.sub requires the replacement callback to
                    # return a string; the attribute may be None (unset
                    # scalar tag) or a non-str value such as an int epoch.
                    return str(value) if value is not None else ""
                return ""
            else:  # %{!name[:alt]}
                if not hasattr(spec, parts[0]):
                    if len(parts) == 2:
                        return parts[1]
                    # BUGFIX: previously this fell through to
                    # getattr(spec, parts[0], None), which returned None
                    # for the missing attribute and crashed re.sub.
                    return ""
                return ""
        if spec:
            value = getattr(spec, macro_name, None)
            if value:
                return str(value)
        # Unknown macro: leave the original text untouched.
        return match.string[match.start():match.end()]

    return re.sub(_macro_pattern, _macro_repl, string)
from __future__ import print_function
from __future__ import unicode_literals
import re
class Vercmp(object):
    """Pure-Python implementation of RPM's version comparison (rpmvercmp).

    Versions are compared segment by segment: runs of digits compare
    numerically, runs of letters lexicographically, a numeric segment
    always beats an alphabetic one, and a tilde sorts before everything
    (including the end of the string).
    """

    R_NONALNUMTILDE = re.compile(br"^([^a-zA-Z0-9~]*)(.*)$")
    R_NUM = re.compile(br"^([\d]+)(.*)$")
    R_ALPHA = re.compile(br"^([a-zA-Z]+)(.*)$")

    @classmethod
    def compare(cls, first, second):
        """Return -1, 0 or 1 as *first* sorts before, equal to or after
        *second*."""
        a = first.encode("ascii", "ignore")
        b = second.encode("ascii", "ignore")
        while a or b:
            junk_a, a = cls.R_NONALNUMTILDE.match(a).groups()
            junk_b, b = cls.R_NONALNUMTILDE.match(b).groups()
            if junk_a or junk_b:
                # Ignore separator junk at the beginning of either side.
                continue
            # The tilde separator sorts before everything else.
            if a.startswith(b'~') or b.startswith(b'~'):
                if not a.startswith(b'~'):
                    return 1
                if not b.startswith(b'~'):
                    return -1
                a, b = a[1:], b[1:]
                continue
            # If either side ran out, the loop is finished.
            if not a or not b:
                break
            # Grab the first completely numeric or completely alpha segment.
            num_a = cls.R_NUM.match(a)
            if num_a:
                num_b = cls.R_NUM.match(b)
                if not num_b:
                    # Numeric segments are always newer than alpha segments.
                    return 1
                seg_a, a = num_a.groups()
                seg_b, b = num_b.groups()
                # It's a number: drop leading zeros; more digits wins.
                seg_a = seg_a.lstrip(b'0')
                seg_b = seg_b.lstrip(b'0')
                if len(seg_a) != len(seg_b):
                    return 1 if len(seg_a) > len(seg_b) else -1
            else:
                alpha_a = cls.R_ALPHA.match(a)
                alpha_b = cls.R_ALPHA.match(b)
                if not alpha_a:
                    # Cannot happen: a is non-empty and alnum here.
                    return -1
                if not alpha_b:
                    # b is numeric-free and alpha-free: alpha side loses.
                    return -1
                seg_a, a = alpha_a.groups()
                seg_b, b = alpha_b.groups()
            # Same-kind segments of equal digit length compare byte-wise
            # (for numbers this equals numeric order once zeros are gone).
            if seg_a < seg_b:
                return -1
            if seg_a > seg_b:
                return 1
            # Equal segments: keep going.
        if not a and not b:
            return 0
        return 1 if a else -1
def vercmp(first, second):
    """Module-level convenience wrapper around :meth:`Vercmp.compare`."""
    return Vercmp.compare(first, second)
import math
from fractions import Fraction
from decimal import Decimal
import yaml
def get_number(num):
    """Convert *num* to int or float if possible, else return it unchanged.

    Plain decimal forms are tried first; strings starting with "0" are
    additionally tried as binary, octal and hexadecimal literals
    (e.g. "0b101", "0o17", "0x1F").

    :param num: string token read from user input
    :return: int, float, or the original string when no conversion applies
    """
    for cast in (int, float):
        try:
            return cast(num)
        except ValueError:
            pass
    # BUGFIX: guard against the empty string, which previously raised
    # IndexError on num[0].
    if num.startswith("0"):
        for base in (2, 8, 16):
            try:
                return int(num, base)
            except ValueError:
                pass
    return num
class Calculator:
"""Class Calculator"""
def __init__(self):
    """Set up an empty operand stack and the operation dispatch table."""
    # Operand stack; numbers are pushed/popped here in RPN order.
    self.stack = []
    # Presumably the most recently removed value (used by "back"?) —
    # TODO confirm against the handler methods.
    self.last = None
    # Main-loop sentinel; presumably the "q" command sets it to False.
    self.loop_flag = True
    # Optional rounding precision, loaded from a config file.
    self.rounding_value = None
    # Dispatch table mapping an input token to its handler method.
    self.operation = {
        "+": self.add,
        "-": self.sub,
        "*": self.mul,
        "/": self.div,
        "//": self.int_div,
        "%": self.modulo,
        "**": self.pow,
        "sqrt": self.sqrt,
        "exp": self.exp,
        "log10": self.log10,
        "log2": self.log2,
        "ln": self.loge,
        "and": self.and_,
        "or": self.or_,
        "xor": self.xor,
        "<<": self.shift_left,
        ">>": self.shift_right,
        "abs": self.absolute_value,
        "inv": self.inv,
        "neg": self.neg,
        "sin": self.sin,
        "cos": self.cos,
        "tan": self.tan,
        "asin": self.asin,
        "acos": self.acos,
        "atan": self.atan,
        "atan2": self.atan2,
        "sinh": self.sinh,
        "cosh": self.cosh,
        "tanh": self.tanh,
        "asinh": self.asinh,
        "acosh": self.acosh,
        "atanh": self.atanh,
        "torad": self.to_radian,
        "todeg": self.to_degree,
        "switch": self.switch,
        "del": self.del_,
        "copy": self.copy,
        "pi": self.const_pi,
        "tau": self.const_tau,
        "e": self.const_e,
        "sum": self.sum,
        "fact": self.factorial,
        "round": self.round,
        "ave": self.average,
        "dec": self.print_dec,
        "hex": self.print_hex,
        "bin": self.print_bin,
        "oct": self.print_oct,
        "ratio": self.ratio,
        "s": self.print_stack,
        "clear": self.clear_stack,
        "back": self.back,
        "help": self.help,
        "q": self.quit
    }
    # User-defined shortcuts loaded via add_config().
    self.custom_commands = {}
def loop(self):
"""loop for getting input from user"""
print("Reverse polish notation calculator")
try:
while self.loop_flag:
data = input(">")
self.evaluate(data)
except (KeyboardInterrupt, EOFError):
pass
def evaluate(self, string):
"""Evaluate the string and calls adequate method"""
for i in string.split():
i = get_number(i)
if isinstance(i, int):
self.stack.append(i)
elif isinstance(i, float):
if i.is_integer():
self.stack.append(int(i))
else:
self.stack.append(i)
elif isinstance(i, str):
if i in self.operation:
self.operation[i]()
elif i in self.custom_commands:
self.evaluate(self.custom_commands[i])
else:
print("Unknow command: {}".format(i))
else:
raise RuntimeError("Should never happend")
def add_config(self, existing_path):
"""Add command from existing path
Command must be on the format "{name_of_command} = {command}" """
with open(existing_path, "r", encoding="UTF-8") as file:
try:
config = yaml.safe_load(file)
except yaml.scanner.ScannerError as err:
print("Error in config file : {}", err)
return
if self.rounding_value is None and "rounding" in config:
self.rounding_value = config["rounding"]
if "shortcut" in config:
for i in config["shortcut"]:
try:
name, command = i.split("=")
except ValueError:
print("""Wrong command "{}" in file "{}" """.format(
i, existing_path))
print(
"""Command must be of the format "{name_of_command = command}"\n""")
else:
name = name.strip()
self.custom_commands[name] = command
def check_stack(self, num, command):
"""Check if enough number are in the stack"""
if len(self.stack) < num:
print("Not enough numbers in the stack for {} command".format(command))
return False
return True
def add_stack(self, val):
"""Convert to int if possible and add to stack"""
if isinstance(val, int):
self.stack.append(val)
elif isinstance(val, float):
if val.is_integer():
self.stack.append(int(val))
else:
self.stack.append(val)
else:
raise ValueError("Wrong data type")
def add(self):
"""Take 2 numbers from the stack, add them and put the result in the stack"""
if self.check_stack(2, "+"):
value1 = self.stack.pop()
value2 = self.stack.pop()
self.add_stack(value1 + value2)
def sub(self):
"""Take 2 numbers from the stack, substracte them and put the result in the stack"""
if self.check_stack(2, "-"):
value1 = self.stack.pop()
value2 = self.stack.pop()
self.add_stack(value2 - value1)
def mul(self):
"""Take 2 numbers from the stack, mul them and put the result in the stack"""
if self.check_stack(2, "*"):
value1 = self.stack.pop()
value2 = self.stack.pop()
self.add_stack(value1 * value2)
def div(self):
"""Take 2 numbers from the stack, divise them and put the result in the stack"""
if self.check_stack(2, "/"):
value1 = self.stack.pop()
if value1 == 0:
print("Impossible to divise by 0")
self.stack.append(value1)
else:
value2 = self.stack.pop()
d_res = Decimal(value2) / Decimal(value1)
if d_res.to_integral_value() == d_res:
self.stack.append(int(d_res))
else:
self.stack.append(float(d_res))
def int_div(self):
"""Take 2 numbers from the stack, divise them and put the integer result in the stack"""
if self.check_stack(2, "//"):
value1 = self.stack.pop()
if value1 == 0:
print("Impossible to divise by 0")
self.stack.append(value1)
else:
value2 = self.stack.pop()
self.add_stack(value2 // value1)
def modulo(self):
"""Take 2 numbers from the stack, divise them and put the remainder in the stack"""
if self.check_stack(2, "%"):
value1 = self.stack.pop()
if value1 == 0:
print("Impossible to divise by 0")
self.stack.append(value1)
else:
value2 = self.stack.pop()
self.add_stack(value2 % value1)
def pow(self):
"""Take 2 numbers from the stack, apply power and put the result in the stack"""
if self.check_stack(2, "**"):
if self.stack[-2] == 0 and self.stack[-1] < 0:
print("Impossible to divise by 0")
else:
value1 = self.stack.pop()
value2 = self.stack.pop()
self.add_stack(value2 ** value1)
def sqrt(self):
"""Replace the last number in the stack with the square root of itself"""
if self.check_stack(1, "sqrt"):
value = self.stack.pop()
if value < 0:
print("Square root require non-negative value")
self.stack.append(value)
else:
self.add_stack(math.sqrt(value))
def exp(self):
"""Apply e**x to the last number of the stack"""
if self.check_stack(1, "exp"):
value = self.stack.pop()
self.add_stack(math.exp(value))
def log10(self):
"""Apply log10 to the last number of the stack"""
if self.check_stack(1, "log10"):
value = self.stack.pop()
if value > 0:
self.add_stack(math.log10(value))
else:
print("Number out of domain for logarithm")
self.stack.append(value)
def log2(self):
"""Apply log2 to the last number of the stack"""
if self.check_stack(1, "log2"):
value = self.stack.pop()
if value > 0:
self.add_stack(math.log2(value))
else:
print("Number out of domain for logarithm")
self.stack.append(value)
def loge(self):
"""Apply natural logarithm to the last number of the stack"""
if self.check_stack(1, "loge"):
value = self.stack.pop()
if value > 0:
self.add_stack(math.log(value))
else:
print("Number out of domain for logarithm")
self.stack.append(value)
def and_(self):
"""Take 2 numbers from the stack, apply a bitwise "and" and put the result in the stack"""
if self.check_stack(2, "and"):
value1 = self.stack.pop()
value2 = self.stack.pop()
if isinstance(value1, int) and isinstance(value2, int):
self.stack.append(value1 & value2)
else:
print("This operation requires 2 int")
self.stack.append(value2)
self.stack.append(value1)
def or_(self):
"""Take 2 numbers from the stack, apply a bitwise "or" and put the result in the stack"""
if self.check_stack(2, "or"):
value1 = self.stack.pop()
value2 = self.stack.pop()
if isinstance(value1, int) and isinstance(value2, int):
self.stack.append(value1 | value2)
else:
print("This operation requires 2 int")
self.stack.append(value2)
self.stack.append(value1)
def xor(self):
"""Take 2 numbers from the stack, apply a bitwise "xor" and put the result in the stack"""
if self.check_stack(2, "xor"):
value1 = self.stack.pop()
value2 = self.stack.pop()
if isinstance(value1, int) and isinstance(value2, int):
self.stack.append(value1 ^ value2)
else:
print("This operation requires 2 int")
self.stack.append(value2)
self.stack.append(value1)
def shift_left(self):
"""Take 2 numbers from the stack, apply a left shift and put the result in the stack"""
if self.check_stack(2, "<<"):
value2 = self.stack.pop()
value1 = self.stack.pop()
if isinstance(value1, int) and isinstance(value2, int):
self.stack.append(value1 << value2)
else:
print("This operation requires 2 int")
self.stack.append(value1)
self.stack.append(value2)
def shift_right(self):
"""Take 2 numbers from the stack, apply a right shift and put the result in the stack"""
if self.check_stack(2, ">>"):
value2 = self.stack.pop()
value1 = self.stack.pop()
if isinstance(value1, int) and isinstance(value2, int):
self.stack.append(value1 >> value2)
else:
print("This operation requires 2 int")
self.stack.append(value1)
self.stack.append(value2)
def absolute_value(self):
"""Make absolute the last value of the stack"""
if self.check_stack(1, "abs"):
self.stack.append(abs(self.stack.pop()))
def inv(self):
"""Inverse the last number of the stack"""
if self.check_stack(1, "inv"):
value = self.stack.pop()
self.add_stack(1 / value)
def neg(self):
"""Change the sign of the last number in the stack"""
if self.check_stack(1, "neg"):
value = self.stack.pop()
if value < 0:
self.stack.append(abs(value))
else:
self.stack.append(0 - value)
def sin(self):
"""Replace the last number in the stack with the sine of itself (measured in radians)"""
if self.check_stack(1, "sin"):
self.add_stack(math.sin(self.stack.pop()))
def cos(self):
"""Replace the last number in the stack with the cosine of itself (measured in radians)"""
if self.check_stack(1, "cos"):
self.add_stack(math.cos(self.stack.pop()))
def tan(self):
"""Replace the last number in the stack with the tangent of itself (measured in radians)"""
if self.check_stack(1, "tan"):
self.add_stack(math.tan(self.stack.pop()))
def asin(self):
"""Replace the last number in the stack with the arc sine of itself (measured in radians)"""
if self.check_stack(1, "asin"):
value = self.stack.pop()
if value < -1 or value > 1:
print("Number out of domain for asin")
self.stack.append(value)
else:
self.add_stack(math.asin(value))
def acos(self):
"""Replace the last number in the stack with the arc cosine of itself
(measured in radians)"""
if self.check_stack(1, "acos"):
value = self.stack.pop()
if value < -1 or value > 1:
print("Number out of domain for acos")
self.stack.append(value)
else:
self.add_stack(math.acos(value))
def atan(self):
"""Replace the last number in the stack with the arc tangent of itself
(measured in radians)"""
if self.check_stack(1, "atan"):
value = self.stack.pop()
self.add_stack(math.atan(value))
def atan2(self):
"""Take 2 numbers from the stack, apply a atan2 function and put the result in the stack"""
if self.check_stack(2, "atan2"):
x_val = self.stack.pop()
y_val = self.stack.pop()
self.add_stack(math.atan2(y_val, x_val))
def sinh(self):
"""Replace the last number in the stack with the hyperbolic sine of itself"""
if self.check_stack(1, "sinh"):
value = self.stack.pop()
self.add_stack(math.sinh(value))
def cosh(self):
"""Replace the last number in the stack with the hyperbolic cosine of itself"""
if self.check_stack(1, "cosh"):
value = self.stack.pop()
self.add_stack(math.cosh(value))
def tanh(self):
"""Replace the last number in the stack with the hyperbolic tangent of itself"""
if self.check_stack(1, "tanh"):
value = self.stack.pop()
self.add_stack(math.tanh(value))
def asinh(self):
"""Replace the last number in the stack with the asinh of itself"""
if self.check_stack(1, "asinh"):
value = self.stack.pop()
self.add_stack(math.asinh(value))
def acosh(self):
"""Replace the last number in the stack with the acosh of itself"""
if self.check_stack(1, "acosh"):
value = self.stack.pop()
self.add_stack(math.acosh(value))
def atanh(self):
"""Replace the last number in the stack with the atanh of itself"""
if self.check_stack(1, "atanh"):
value = self.stack.pop()
self.add_stack(math.atanh(value))
def to_radian(self):
"""Convert the last number from degree to radian"""
if self.check_stack(1, "torad"):
value = self.stack.pop()
self.add_stack(value / 180 * math.pi)
def to_degree(self):
"""Convert the last number from radian to degree"""
if self.check_stack(1, "todeg"):
value = self.stack.pop()
self.add_stack(value * 180 / math.pi)
def switch(self):
"""Switch the last 2 numbers of the stack"""
if self.check_stack(2, "switch"):
value1 = self.stack.pop()
value2 = self.stack.pop()
self.stack.append(value1)
self.stack.append(value2)
def del_(self):
"""Delete the last number in the stack"""
if self.check_stack(1, "del"):
self.stack.pop()
def copy(self):
"""Copy the last number of the stack and add it to the stack"""
if self.check_stack(1, "copy"):
self.stack.append(self.stack[-1])
def const_pi(self):
"""Add pi to the stack"""
self.stack.append(math.pi)
def const_tau(self):
"""Add tau to the stack"""
self.stack.append(math.tau)
def const_e(self):
"""Add e to the stack"""
self.stack.append(math.e)
def sum(self):
"""Take all the number of the stack and add the sum"""
if self.check_stack(1, "sum"):
total = sum(self.stack)
self.stack.clear()
self.add_stack(total)
def factorial(self):
"""Replace the last number in the stack with its factorial"""
if self.check_stack(1, "fact"):
value = self.stack.pop()
if value < 0:
print("Impossible to compute factorial for negative number")
self.stack.append(value)
elif isinstance(value, float):
print("Impossible to compute factorial for float number")
self.stack.append(value)
else:
self.stack.append(math.factorial(value))
def round(self):
"""Round the last number in the stack"""
if self.check_stack(1, "round"):
value = self.stack.pop()
self.stack.append(round(value, self.rounding_value))
def average(self):
"""Take all the number of the stack and add the average"""
if self.check_stack(1, "ave"):
size = len(self.stack)
total = sum(self.stack)
self.stack.clear()
self.add_stack(total / size)
def print_dec(self):
"""Print the last number of the stack and remove it"""
if self.check_stack(1, "dec"):
val = self.stack.pop()
self.last = val
print("{}".format(val))
def print_hex(self):
"""Print in hexadecimal format the last number of the stack and remove it"""
if self.check_stack(1, "hex"):
i = self.stack.pop()
self.last = i
if isinstance(i, int):
print("0x{:X}".format(i))
else:
print(float.hex(i))
def print_bin(self):
"""Print in binary format the last number of the stack and remove it"""
if self.check_stack(1, "bin"):
i = self.stack.pop()
if isinstance(i, int):
print("0b{:b}".format(i))
self.last = i
else:
self.stack.append(i)
print("Impossible to print a float in binary")
def print_oct(self):
"""Print in octal format the last number of the stack and remove it"""
if self.check_stack(1, "oct"):
i = self.stack.pop()
if isinstance(i, int):
print("0o{:o}".format(i))
self.last = i
else:
self.stack.append(i)
print("Impossible to print a float in octal")
def ratio(self):
"""Print in integer ratio format the last number of the stack and remove it"""
if self.check_stack(1, "ratio"):
value = self.stack.pop()
self.last = value
if isinstance(value, float):
print(Fraction(value).limit_denominator())
else:
print("{}/1".format(value))
def print_stack(self):
"""Print the stack"""
if len(self.stack) > 0:
for i in range(len(self.stack) - 1):
print("{}, ".format(self.stack[i]), end="")
print("{}".format(self.stack[-1]))
else:
print("Stack is empty")
def clear_stack(self):
"""Empty the stack"""
self.stack = []
self.last = None
def back(self):
"""Put back on the stack the last value that was printed"""
if self.last is not None:
self.stack.append(self.last)
self.last = None
else:
print("No value was remove from the stack")
def help(self):
"""Print help; Same as pol --list"""
doc = ""
for command, method in self.operation.items():
doc += "`{}` : {}\n".format(command, method.__doc__)
print(doc)
def quit(self):
"""Quit the program"""
self.loop_flag = False | /rpn_calc-0.2.3-py3-none-any.whl/rpn_calc/calculator.py | 0.479504 | 0.329351 | calculator.py | pypi |
from math import sqrt, log2, ceil, floor
import random
import sys
if sys.version_info[ 0 ] == 3 and sys.version_info[ 1 ] >= 9:
from math import gcd
else:
from fractions import gcd
import sys
from builtins import ValueError
"""This script factorises a natural number given as a command line
parameter into its prime factors. It first attempts to use trial
division to find very small factors, then uses Brent's version of the
Pollard rho algorithm [1] to find slightly larger factors. If any large
factors remain, it uses the Self-Initializing Quadratic Sieve (SIQS) [2]
to factorise those.
[1] Brent, Richard P. 'An improved Monte Carlo factorization algorithm.'
BIT Numerical Mathematics 20.2 (1980): 176-184.
[2] Contini, Scott Patrick. 'Factoring integers with the self-
initializing quadratic sieve.' (1997).
"""
# Some tuning parameters
# NOTE(review): MAX_DIGITS_POLLARD is consumed outside this excerpt; its
# meaning is inferred from the name -- confirm against usage.
MAX_DIGITS_POLLARD = 30
# Brent-rho iteration budget for numbers below MIN_DIGITS_POLLARD_QUICK2
# digits; larger numbers get POLLARD_QUICK2_ITERATIONS (see
# pollard_brent_quick).
POLLARD_QUICK_ITERATIONS = 20
MIN_DIGITS_POLLARD_QUICK2 = 45
POLLARD_QUICK2_ITERATIONS = 25
# Slack subtracted from the sieve log threshold in siqs_trial_division.
SIQS_TRIAL_DIVISION_EPS = 25
# Range of factor-base primes multiplied into the SIQS polynomial
# coefficient a (see siqs_find_first_poly).
SIQS_MIN_PRIME_POLYNOMIAL = 400
SIQS_MAX_PRIME_POLYNOMIAL = 4000
# Number of iterations for the Miller-Rabin primality test
MILLER_RABIN_ITERATIONS = 50
# Module-wide flag enabling progress output.
verbose = False
class Polynomial:
    """A polynomial used for the Self-Initializing Quadratic Sieve.

    coeff holds the coefficients lowest order first; a and b carry the
    SIQS parameters the polynomial was built from.
    """

    def __init__(self, coeff=None, a=None, b=None):
        # Default must stay None (not []) to avoid a shared mutable default.
        self.coeff = [] if coeff is None else coeff
        self.a = a
        self.b = b

    def eval(self, x):
        """Evaluate the polynomial at x via Horner's scheme."""
        total = 0
        for coefficient in reversed(self.coeff):
            total = total * x + coefficient
        return total
class FactorBasePrime:
    """A factor base prime for the Self-Initializing Quadratic Sieve."""

    def __init__(self, p, tmem, lp):
        self.p = p          # the prime itself
        self.soln1 = None   # first sieve root for the current polynomial
        self.soln2 = None   # second sieve root for the current polynomial
        self.tmem = tmem    # t with t^2 = n (mod p), from sqrt_mod_prime
        self.lp = lp        # round(log2(p)): the sieve-array increment
        self.ainv = None    # a^-1 mod p for the current polynomial's a
def lowest_set_bit(a):
    """Return the index of the lowest set bit of a, or -1 when a == 0."""
    # a & -a isolates the lowest set bit; bit_length gives its position.
    return (a & -a).bit_length() - 1
def to_bits(k):
    """Return a generator that returns the bits of k, starting from the
    least significant bit, using True for 1s, and False for 0s.
    """
    # k == 0 still yields a single False, matching bin(0) == '0b0'.
    width = max(k.bit_length(), 1)
    return (bool((k >> i) & 1) for i in range(width))
def pow_mod(a, k, m):
    """Return a^k mod m by binary exponentiation.

    Note: unlike the builtin pow, this returns 1 (not 0) when k == 0
    and m == 1, so the hand-rolled loop is kept.
    """
    result = 1
    base = a
    while k:
        if k & 1:
            result = result * base % m
        base = base * base % m
        k >>= 1
    return result
def is_quadratic_residue(a, p):
    """Return whether a is a quadratic residue modulo a prime p.

    Uses Euler's criterion: a^((p-1)/2) mod p is 1 exactly for residues.
    """
    euler_exponent = (p - 1) // 2
    return 1 == legendre(a, euler_exponent, 1, p)
def legendre(a, q, l, n):
x = q ** l
if x == 0:
return 1
z = 1
a %= n
while x != 0:
if x % 2 == 0:
a = (a ** 2) % n
x //= 2
else:
x -= 1
z = (z * a) % n
return z
def sqrt_mod_prime(a, p):
    """Return the square root of a modulo the prime p. Behaviour is
    undefined if a is not a quadratic residue mod p."""
    # Algorithm from http://www.mersennewiki.org/index.php/Modular_Square_Root
    assert a < p
    assert is_probable_prime(p)
    if a == 0:
        return 0
    if p == 2:
        return a
    if p % 2 == 0:
        # Unreachable for prime p > 2; kept as a defensive guard.
        return None
    p_mod_8 = p % 8
    if p_mod_8 == 1:
        # Shanks method
        # Write p - 1 = 2^e * q with q odd (p = 8q' + 1 gives e >= 3).
        q = p // 8
        e = 3
        while q % 2 == 0:
            q //= 2
            e += 1
        # Find a quadratic non-residue x by random sampling.
        while True:
            x = random.randint(2, p - 1)
            z = pow_mod(x, q, p)
            if pow_mod(z, 2 ** (e - 1), p) != 1:
                break
        y = z
        r = e
        x = pow_mod(a, (q - 1) // 2, p)
        v = (a * x) % p            # candidate root
        w = (v * x) % p            # "error" term; root found when w == 1
        while True:
            if w == 1:
                return v
            # Smallest k with w^(2^k) == 1, then reduce the error order.
            k = 1
            while pow_mod(w, 2 ** k, p) != 1:
                k += 1
            d = pow_mod(y, 2 ** (r - k - 1), p)
            y = (d ** 2) % p
            r = k
            v = (d * v) % p
            w = (w * y) % p
    elif p_mod_8 == 5:
        # Atkin-style direct formula for p = 5 (mod 8).
        v = pow_mod(2 * a, (p - 5) // 8, p)
        i = (2 * a * v * v) % p
        return (a * v * (i - 1)) % p
    else:
        # p = 3 (mod 4): a^((p+1)/4) is a root.
        return pow_mod(a, (p + 1) // 4, p)
def inv_mod(a, m):
    """Return the modular inverse of a mod m (a must be coprime to m)."""
    x, _, _ = eea(a, m)
    return x % m
def eea(a, b):
    """Solve the equation a*x + b*y = gcd(a,b).

    Return (x, y, +/-gcd(a,b)). Iterative extended Euclid; the update
    (a, b) -> (b % a, a) mirrors the recursive formulation.
    """
    xa, ya = 1, 0   # coefficients expressing the current a in the inputs
    xb, yb = 0, 1   # coefficients expressing the current b in the inputs
    while a != 0:
        quotient = b // a
        a, b, xa, ya, xb, yb = (
            b - quotient * a, a,
            xb - quotient * xa, yb - quotient * ya,
            xa, ya,
        )
    return (xb, yb, b)
def is_probable_prime(a):
    """Perform the Miller-Rabin primality test to determine whether the
    given number a is a prime. Return True if the number is a prime
    with very high probability, and False if it is definitely composite.
    """
    if a % 2 == 0:
        return a == 2       # 2 is prime; every other even number is not
    if a == 1:
        return False
    return primality_test_miller_rabin(a, MILLER_RABIN_ITERATIONS)
def primality_test_miller_rabin(a, iterations):
    """Run the given number of Miller-Rabin rounds on the odd number a > 2.

    Returns False as soon as a witness proves a composite, True if every
    random base passes.
    """
    # Write a - 1 = 2^lb * m with m odd.
    m = a - 1
    lb = lowest_set_bit(m)
    m >>= lb
    for _ in range(iterations):
        b = random.randint(2, a - 1)   # random base
        j = 0
        z = pow_mod(b, m, a)
        # Square z up to lb times; a passes this round if the sequence
        # starts at 1 or reaches a - 1 before exhausting the squarings.
        while not ((j == 0 and z == 1) or z == a - 1):
            if (j > 0 and z == 1 or j + 1 == lb):
                # Nontrivial square root of 1, or ran out of squarings:
                # b witnesses that a is composite.
                return False
            j += 1
            z = (z * z) % a
    return True
def siqs_factor_base_primes(n, nf):
    """Compute and return nf factor base primes suitable for a Quadratic
    Sieve on the number n.

    Only primes for which n is a quadratic residue are usable; for each
    one the modular square root of n and round(log2(p)) are precomputed.
    """
    global small_primes
    factor_base = []
    for p in small_primes:
        if len(factor_base) >= nf:
            break
        if is_quadratic_residue(n, p):
            root = sqrt_mod_prime(n % p, p)
            factor_base.append(FactorBasePrime(p, root, round(log2(p))))
    return factor_base
def siqs_find_first_poly(n, m, factor_base):
    """Compute the first of a set of polynomials for the Self-
    Initialising Quadratic Sieve.

    Returns (g, h, B): g(x) = (a*x + b)^2 - n expanded, h(x) = a*x + b,
    and the list B from which siqs_find_next_poly derives the sibling
    polynomials sharing the same a.
    """
    # Restrict the primes multiplied into a to the index window whose
    # primes lie in [SIQS_MIN_PRIME_POLYNOMIAL, SIQS_MAX_PRIME_POLYNOMIAL].
    p_min_i = None
    p_max_i = None
    for i, fb in enumerate(factor_base):
        if p_min_i is None and fb.p >= SIQS_MIN_PRIME_POLYNOMIAL:
            p_min_i = i
        if p_max_i is None and fb.p > SIQS_MAX_PRIME_POLYNOMIAL:
            p_max_i = i - 1
            break
    # The following may happen if the factor base is small, make sure
    # that we have enough primes.
    if p_max_i is None:
        p_max_i = len(factor_base) - 1
    if p_min_i is None or p_max_i - p_min_i < 20:
        # NOTE(review): min(None, 5) raises TypeError when p_min_i is
        # still None (possible for a very small factor base) -- confirm.
        p_min_i = min(p_min_i, 5)
    target = sqrt(2 * float(n)) / m
    target1 = target / ((factor_base[p_min_i].p +
                         factor_base[p_max_i].p) / 2) ** 0.5
    # find q such that the product of factor_base[q_i] is approximately
    # sqrt(2 * n) / m; try a few different sets to find a good one
    best_q, best_a, best_ratio = None, None, None
    for _ in range(30):
        a = 1
        q = []
        while a < target1:
            p_i = 0
            # index 0 is excluded and indices may not repeat
            while p_i == 0 or p_i in q:
                p_i = random.randint(p_min_i, p_max_i)
            p = factor_base[p_i].p
            a *= p
            q.append(p_i)
        ratio = a / target
        # ratio too small seems to be not good
        if (best_ratio is None or (ratio >= 0.9 and ratio < best_ratio) or
                best_ratio < 0.9 and ratio > best_ratio):
            best_q = q
            best_a = a
            best_ratio = ratio
    a = best_a
    q = best_q
    s = len(q)
    B = []
    # Build b via CRT-style terms B[l] so that b^2 = n (mod a).
    for l in range(s):
        fb_l = factor_base[q[l]]
        q_l = fb_l.p
        assert a % q_l == 0
        gamma = (fb_l.tmem * inv_mod(a // q_l, q_l)) % q_l
        if gamma > q_l // 2:
            gamma = q_l - gamma
        B.append(a // q_l * gamma)
    b = sum(B) % a
    b_orig = b
    if (2 * b > a):
        b = a - b
    assert 0 < b
    assert 2 * b <= a
    assert ((b * b - n) % a == 0)
    g = Polynomial([b * b - n, 2 * a * b, a * a], a, b_orig)
    h = Polynomial([b, a])
    # Precompute sieve roots (and a^-1 mod p) for every prime not dividing a.
    for fb in factor_base:
        if a % fb.p != 0:
            fb.ainv = inv_mod(a, fb.p)
            fb.soln1 = (fb.ainv * (fb.tmem - b)) % fb.p
            fb.soln2 = (fb.ainv * (-fb.tmem - b)) % fb.p
    return g, h, B
def siqs_find_next_poly(n, factor_base, i, g, B):
    """Compute the (i+1)-th polynomials for the Self-Initialising
    Quadratic Sieve, given that g is the i-th polynomial.

    The coefficient a is reused; only b changes, by flipping the sign of
    one B term selected from the bit pattern of i.
    """
    v = lowest_set_bit(i) + 1
    z = -1 if ceil(i / (2 ** v)) % 2 == 1 else 1
    b = (g.b + 2 * z * B[v - 1]) % g.a
    a = g.a
    b_orig = b
    if (2 * b > a):
        b = a - b
    assert ((b * b - n) % a == 0)
    g = Polynomial([b * b - n, 2 * a * b, a * a], a, b_orig)
    h = Polynomial([b, a])
    for fb in factor_base:
        if a % fb.p != 0:
            # fb.ainv was set by siqs_find_first_poly; still valid since
            # a is unchanged.
            fb.soln1 = (fb.ainv * (fb.tmem - b)) % fb.p
            fb.soln2 = (fb.ainv * (-fb.tmem - b)) % fb.p
    return g, h
def siqs_sieve(factor_base, m):
    """Perform the sieving step of the SIQS. Return the sieve array."""
    # sieve_array[x + m] accumulates round(log2(p)) for every factor-base
    # prime p dividing g(x), for x in [-m, m].
    sieve_array = [0] * (2 * m + 1)
    for fb in factor_base:
        if fb.soln1 is None:
            # Prime divides the polynomial coefficient a; no sieve roots.
            continue
        p = fb.p
        i_start_1 = -((m + fb.soln1) // p)
        a_start_1 = fb.soln1 + i_start_1 * p
        lp = fb.lp
        # NOTE(review): primes <= 20 are skipped entirely here; their
        # missing log contributions appear to be absorbed by the
        # SIQS_TRIAL_DIVISION_EPS slack -- confirm.
        if p > 20:
            for a in range(a_start_1 + m, 2 * m + 1, p):
                sieve_array[a] += lp
            i_start_2 = -((m + fb.soln2) // p)
            a_start_2 = fb.soln2 + i_start_2 * p
            for a in range(a_start_2 + m, 2 * m + 1, p):
                sieve_array[a] += lp
    return sieve_array
def siqs_trial_divide(a, factor_base):
    """Determine whether the given number a can be fully factorised into
    primes from the factors base. If so, return the indices of the
    factors from the factor base. If not, return None.
    """
    divisor_indices = []
    remaining = a
    for idx, fb in enumerate(factor_base):
        if remaining % fb.p == 0:
            exponent = 0
            while remaining % fb.p == 0:
                remaining //= fb.p
                exponent += 1
            divisor_indices.append((idx, exponent))
            if remaining == 1:
                return divisor_indices
    return None
def siqs_trial_division(n, sieve_array, factor_base, smooth_relations, g, h, m,
                        req_relations):
    """Perform the trial division step of the Self-Initializing
    Quadratic Sieve.

    Appends (u, v, divisors_idx) tuples with u^2 = v (mod n) to
    smooth_relations; returns True once req_relations are collected.
    """
    sqrt_n = sqrt(float(n))
    # Candidates must have accumulated roughly log2(|g(x)|) in the sieve;
    # SIQS_TRIAL_DIVISION_EPS loosens the threshold.
    limit = log2(m * sqrt_n) - SIQS_TRIAL_DIVISION_EPS
    for (i, sa) in enumerate(sieve_array):
        if sa >= limit:
            x = i - m
            gx = g.eval(x)
            divisors_idx = siqs_trial_divide(gx, factor_base)
            if divisors_idx is not None:
                u = h.eval(x)
                v = gx
                # Holds by construction: g(x) = h(x)^2 - n (mod n).
                assert (u * u) % n == v % n
                smooth_relations.append((u, v, divisors_idx))
                if (len(smooth_relations) >= req_relations):
                    return True
    return False
def siqs_build_matrix(factor_base, smooth_relations):
    """Build the matrix for the linear algebra step of the Quadratic Sieve.

    Row r holds, for each factor-base prime, the parity of its exponent
    in the r-th smooth relation.
    """
    width = len(factor_base)
    matrix = []
    for _, _, divisors_idx in smooth_relations:
        row = [0] * width
        for column, exponent in divisors_idx:
            row[column] = exponent & 1
        matrix.append(row)
    return matrix
def siqs_build_matrix_opt(M):
    """Convert the given matrix M of 0s and 1s into a list of numbers m
    that correspond to the columns of the matrix.

    The j-th number encodes the j-th column of matrix M in binary:
    the i-th bit of m[j] is equal to M[i][j].
    """
    n_cols = len(M[0])
    columns = [0] * n_cols
    for row_idx, row in enumerate(M):
        bit = 1 << row_idx
        for col_idx, entry in enumerate(row):
            if entry:
                columns[col_idx] |= bit
    return columns, len(M), n_cols
def add_column_opt(M_opt, tgt, src):
    """For a matrix produced by siqs_build_matrix_opt, add the column
    src to the column target (mod 2).

    Addition mod 2 of bit-packed columns is a single XOR.
    """
    M_opt[tgt] = M_opt[tgt] ^ M_opt[src]
def find_pivot_column_opt(M_opt, j):
    """For a matrix produced by siqs_build_matrix_opt, return the row of
    the first non-zero entry in column j, or None if no such row exists.
    """
    column = M_opt[j]
    return None if column == 0 else lowest_set_bit(column)
def siqs_solve_matrix_opt(M_opt, n, m):
    """
    Perform the linear algebra step of the SIQS. Perform fast
    Gaussian elimination to determine pairs of perfect squares mod n.
    Use the optimisations described in [1].

    [1] Koç, Çetin K., and Sarath N. Arachchige. 'A Fast Algorithm for
    Gaussian Elimination over GF (2) and its Implementation on the
    GAPP.' Journal of Parallel and Distributed Computing 13.1
    (1991): 118-122.
    """
    # M_opt stores the matrix column-wise: bit i of M_opt[j] is M[i][j].
    # n is the number of rows (relations), m the number of columns.
    row_is_marked = [False] * n
    pivots = [-1] * m
    for j in range(m):
        i = find_pivot_column_opt(M_opt, j)
        if i is not None:
            pivots[j] = i
            row_is_marked[i] = True
            # Clear row i from every other column.
            for k in range(m):
                if k != j and (M_opt[k] >> i) & 1:  # test M[i][k] == 1
                    add_column_opt(M_opt, k, j)
    perf_squares = []
    for i in range(n):
        if not row_is_marked[i]:
            # Every unmarked row yields a linear dependency: relation i
            # combined with the pivot relations of its set columns.
            perfect_sq_indices = [i]
            for j in range(m):
                if (M_opt[j] >> i) & 1:  # test M[i][j] == 1
                    perfect_sq_indices.append(pivots[j])
            perf_squares.append(perfect_sq_indices)
    return perf_squares
def siqs_calc_sqrts(square_indices, smooth_relations):
    """Given one of the solutions returned by siqs_solve_matrix_opt and
    the corresponding smooth relations, calculate the pair [a, b], such
    that a^2 = b^2 (mod n).
    """
    u_product = 1
    v_product = 1
    for idx in square_indices:
        u, v, _ = smooth_relations[idx]
        u_product *= u
        v_product *= v
    # v_product is a perfect square by construction of the dependency.
    return [u_product, sqrt_int(v_product)]
def sqrt_int(n):
    """Return the square root of the given integer, rounded down to the
    nearest integer.

    n must be a non-negative perfect square; an AssertionError is raised
    otherwise.
    """
    if n == 0:
        # BUG FIX: log2(0) raised ValueError although 0 is a perfect square.
        return 0
    remainder = n
    root = 0
    # Highest power of 4 not exceeding n (digit-by-digit binary method).
    bit = 1 << (floor(log2(n)) & ~1)
    while bit != 0:
        candidate = root + bit
        if remainder >= candidate:
            remainder -= candidate
            root = (root >> 1) + bit
        else:
            root >>= 1
        bit >>= 2
    assert root * root == n
    return root
def kth_root_int(n, k):
    """Return the k-th root of the given integer n, rounded down to the
    nearest integer.

    Integer Newton iteration: the sequence decreases monotonically until
    it stabilises at floor(n ** (1/k)).
    """
    guess = n
    previous = n + 1
    while guess < previous:
        previous = guess
        newton_step = (k - 1) * previous + n // pow(previous, k - 1)
        guess = newton_step // k
    return previous
def siqs_factor_from_square(n, square_indices, smooth_relations):
    """Given one of the solutions returned by siqs_solve_matrix_opt,
    return the factor f determined by f = gcd(a - b, n), where
    a, b are calculated from the solution such that a*a = b*b (mod n).
    Return f, a factor of n (possibly a trivial one).
    """
    root_a, root_b = siqs_calc_sqrts(square_indices, smooth_relations)
    assert (root_a * root_a) % n == (root_b * root_b) % n
    return gcd(abs(root_a - root_b), n)
def siqs_find_factors(n, perfect_squares, smooth_relations):
    """Perform the last step of the Self-Initialising Quadratic Field.
    Given the solutions returned by siqs_solve_matrix_opt, attempt to
    identify a number of (not necessarily prime) factors of n, and
    return them.
    """
    factors = []
    rem = n                      # part of n not yet accounted for
    non_prime_factors = set()    # composite factors kept for gcd post-processing
    prime_factors = set()
    for square_indices in perfect_squares:
        fact = siqs_factor_from_square(n, square_indices, smooth_relations)
        if fact != 1 and fact != rem:
            if is_probable_prime(fact):
                if fact not in prime_factors:
                    if verbose:
                        print("SIQS: Prime factor found: %d" % fact)
                    prime_factors.add(fact)
                # Divide the prime out of rem as often as possible.
                while rem % fact == 0:
                    factors.append(fact)
                    rem //= fact
                if rem == 1:
                    break
                if is_probable_prime(rem):
                    factors.append(rem)
                    rem = 1
                    break
            else:
                if fact not in non_prime_factors:
                    if verbose:
                        print("SIQS: Non-prime factor found: %d" % fact)
                    non_prime_factors.add(fact)
    if rem != 1 and non_prime_factors:
        # Try to split the composite leftovers against each other via gcd.
        non_prime_factors.add(rem)
        for fact in sorted(siqs_find_more_factors_gcd(non_prime_factors)):
            while fact != 1 and rem % fact == 0:
                if verbose:
                    print("SIQS: Prime factor found: %d" % fact)
                factors.append(fact)
                rem //= fact
            if rem == 1 or is_probable_prime(rem):
                break
    if rem != 1:
        factors.append(rem)
    return factors
def siqs_find_more_factors_gcd(numbers):
    """Split the given composites against each other via pairwise gcds.

    Returns the input numbers plus every non-trivial gcd found and the
    corresponding cofactors.
    """
    res = set()
    for n in numbers:
        res.add(n)
        for m in numbers:
            if n != m:
                fact = gcd(n, m)
                # A gcd different from 1, n and m splits both numbers.
                if fact != 1 and fact != n and fact != m:
                    if fact not in res:
                        if verbose:
                            print("SIQS: GCD found non-trivial factor: %d" % fact)
                        res.add(fact)
                        res.add(n // fact)
                        res.add(m // fact)
    return res
def siqs_choose_nf_m(d):
    """Choose parameters nf (sieve of factor base) and m (for sieving
    in [-m,m]) for a number with d decimal digits.
    """
    # Using similar parameters as msieve-1.52: (max digits, nf, m).
    thresholds = (
        (34, 200, 65536),
        (36, 300, 65536),
        (38, 400, 65536),
        (40, 500, 65536),
        (42, 600, 65536),
        (44, 700, 65536),
        (48, 1000, 65536),
        (52, 1200, 65536),
        (56, 2000, 65536 * 3),
        (60, 4000, 65536 * 3),
        (66, 6000, 65536 * 3),
        (74, 10000, 65536 * 3),
        (80, 30000, 65536 * 3),
        (88, 50000, 65536 * 3),
        (94, 60000, 65536 * 9),
    )
    for digit_limit, nf, m in thresholds:
        if d <= digit_limit:
            return nf, m
    return 100000, 65536 * 9
def siqs_factorise(n):
    """Use the Self-Initializing Quadratic Sieve algorithm to identify
    one or more non-trivial factors of the given number n. Return the
    factors as a list.
    """
    dig = len(str(n))
    nf, m = siqs_choose_nf_m(dig)
    factor_base = siqs_factor_base_primes(n, nf)
    # Collect slightly more relations than factor-base primes so the
    # GF(2) matrix has dependencies; grow the ratio on failure.
    required_relations_ratio = 1.05
    success = False
    smooth_relations = []
    prev_cnt = 0
    i_poly = 0
    factors = None
    B = []
    g = None
    while not success:
        if verbose:
            print("*** Step 1/2: Finding smooth relations ***")
        required_relations = round(len(factor_base) * required_relations_ratio)
        if verbose:
            print("Target: %d relations" % required_relations)
        enough_relations = False
        while not enough_relations:
            if i_poly == 0:
                g, h, B = siqs_find_first_poly(n, m, factor_base)
            else:
                g, h = siqs_find_next_poly(n, factor_base, i_poly, g, B)
            i_poly += 1
            # 2^(len(B)-1) polynomials share one a; then start a fresh set.
            if i_poly >= 2 ** (len(B) - 1):
                i_poly = 0
            sieve_array = siqs_sieve(factor_base, m)
            enough_relations = siqs_trial_division(
                n, sieve_array, factor_base, smooth_relations,
                g, h, m, required_relations)
            # Progress report (roughly every 8th polynomial).
            if (len(smooth_relations) >= required_relations or
                    i_poly % 8 == 0 and len(smooth_relations) > prev_cnt):
                if verbose:
                    print("Total %d/%d relations." % (len(smooth_relations), required_relations))
                prev_cnt = len(smooth_relations)
        if verbose:
            print("*** Step 2/2: Linear Algebra ***")
            print("Building matrix for linear algebra step...")
        M = siqs_build_matrix(factor_base, smooth_relations)
        M_opt, M_n, M_m = siqs_build_matrix_opt(M)
        if verbose:
            print("Finding perfect squares using matrix...")
        perfect_squares = siqs_solve_matrix_opt(M_opt, M_n, M_m)
        if verbose:
            print("Finding factors from perfect squares...")
        factors = siqs_find_factors(n, perfect_squares, smooth_relations)
        if len(factors) > 1:
            success = True
        else:
            if verbose:
                print("Failed to find a solution. Finding more relations...")
            required_relations_ratio += 0.05
    return factors
def check_factor(n, i, factors):
    """Divide every power of i out of n, appending each occurrence to
    factors. Return the remaining cofactor, or 1 if that cofactor is
    itself (probably) prime, in which case it is appended too.
    """
    while n % i == 0:
        factors.append(i)
        n //= i
    if is_probable_prime(n):
        factors.append(n)
        return 1
    return n
def trial_div_init_primes(n, upper_bound):
    """Perform trial division on the given number n using all primes up
    to upper_bound. Initialise the global variable small_primes with a
    list of all primes <= upper_bound. Return (factors, rem), where
    factors is the list of identified prime factors of n, and rem is the
    remaining factor. If rem = 1, the function terminates early, without
    fully initialising small_primes.
    """
    if verbose:
        print("Trial division and initialising small primes...")
    global small_primes
    # Sieve of Eratosthenes over [0, upper_bound].
    is_prime = [True] * (upper_bound + 1)
    is_prime[0:2] = [False] * 2
    factors = []
    small_primes = []
    max_i = sqrt_int(upper_bound)   # upper_bound must be a perfect square here
    rem = n
    # First pass: primes up to sqrt(upper_bound) also drive the sieve.
    for i in range(2, max_i + 1):
        if is_prime[i]:
            small_primes.append(i)
            rem = check_factor(rem, i, factors)
            if rem == 1:
                return factors, 1
            for j in (range(i ** 2, upper_bound + 1, i)):
                is_prime[j] = False
    # Second pass: the remaining primes need no sieving of their own.
    for i in range(max_i + 1, upper_bound + 1):
        if is_prime[i]:
            small_primes.append(i)
            rem = check_factor(rem, i, factors)
            if rem == 1:
                return factors, 1
    if verbose:
        print("Primes initialised.")
    return factors, rem
def pollard_brent_f(c, n, x):
    """Return f(x) = (x^2 + c)%n. Assume c < n."""
    # Since 0 <= (x*x) % n < n and 0 <= c < n, a single reduction of the
    # sum is equivalent to the conditional subtraction it replaces.
    return (x * x + c) % n
def pollard_brent_find_factor(n, max_iter=None):
    """Perform Brent's variant of the Pollard rho factorisation
    algorithm to attempt to a non-trivial factor of the given number n.
    If max_iter > 0, return None if no factors were found within
    max_iter iterations.

    May return n itself (a trivial factor); callers must check.
    """
    # Random starting point, additive constant and gcd batch size.
    y, c, m = (random.randint(1, n - 1) for _ in range(3))
    r, q, g = 1, 1, 1
    i = 0
    ys = 0
    x = 0
    while g == 1:
        x = y
        # Advance the "hare" r steps so x and y are 2^k apart (Brent cycle
        # detection by powers of two).
        for _ in range(r):
            y = pollard_brent_f(c, n, y)
        k = 0
        while k < r and g == 1:
            ys = y   # remember position for the backtracking phase
            # Batch up to m differences into q before taking one gcd.
            for _ in range(min(m, r - k)):
                y = pollard_brent_f(c, n, y)
                q = (q * abs(x - y)) % n
            g = gcd(q, n)
            k += m
        r *= 2
        if max_iter:
            i += 1
            if (i == max_iter):
                return None
    if g == n:
        # The batched gcd overshot; replay from ys one step at a time.
        while True:
            ys = pollard_brent_f(c, n, ys)
            g = gcd(abs(x - ys), n)
            if g > 1:
                break
    return g
def pollard_brent_quick(n, factors):
    """Perform up to max_iter iterations of Brent's variant of the
    Pollard rho factorisation algorithm to attempt to find small
    prime factors. Restart the algorithm each time a factor was found.
    Add all identified prime factors to factors, and return 1 if all
    prime factors were found, or otherwise the remaining factor.
    """
    rem = n
    while True:
        if rem == 1:
            break
        if is_probable_prime(rem):
            factors.append(rem)
            rem = 1
            break
        # Choose the iteration budget from the size of the input.
        # NOTE(review): this uses the digit count of the original n, not
        # of the current rem -- confirm that is intended.
        digits = len(str(n))
        if digits < MIN_DIGITS_POLLARD_QUICK2:
            max_iter = POLLARD_QUICK_ITERATIONS
        else:
            max_iter = POLLARD_QUICK2_ITERATIONS
        f = pollard_brent_find_factor(rem, max_iter)
        # f may be None (budget exhausted) or rem itself (trivial gcd);
        # only a non-None, strictly smaller factor counts as progress.
        if f and f < rem:
            if is_probable_prime(f):
                if verbose:
                    print("Pollard rho (Brent): Prime factor found: %s" % f)
                factors.append(f)
                assert rem % f == 0
                rem //= f
            else:
                if verbose:
                    print("Pollard rho (Brent): Non-prime factor found: %s" % f)
                # Recurse to split the composite factor, then fold its
                # own remainder back into rem.
                rem_f = pollard_brent_quick(f, factors)
                rem = (rem // f) * rem_f
        else:
            if verbose:
                print("No (more) small factors found.")
            break
    return rem
def check_perfect_power(n):
    """Test whether n is a perfect power r**b for some prime exponent b.

    Returns (r, b) with r**b == n, or None when no such representation
    exists. Assumes the global small_primes has been initialised and
    that n has no prime factor from small_primes.
    """
    limit = small_primes[-1]
    for exponent in small_primes:
        root = kth_root_int(n, exponent)
        # Once the integer root drops below the largest trial-division
        # prime, a genuine base would already have been stripped by
        # trial division, so larger exponents cannot succeed either.
        if root < limit:
            break
        if root ** exponent == n:
            return (root, exponent)
    return None
def find_prime_factors(n):
    """Return one or more prime factors of the given number n. Assume
    that n is not a prime and does not have very small factors, and that
    the global small_primes has already been initialised. Do not return
    duplicate factors.
    """
    if verbose:
        print("Checking whether %d is a perfect power..." % n)
    perfect_power = check_perfect_power(n)
    if perfect_power:
        if verbose:
            print("%d is %d^%d" % (n, perfect_power[0], perfect_power[1]))
        # For n == r^b, every prime factor of n is a prime factor of r,
        # so it suffices to factorise the base.
        factors = [perfect_power[0]]
    else:
        if verbose:
            print("Not a perfect power.")
        digits = len(str(n))
        if digits <= MAX_DIGITS_POLLARD:
            if verbose:
                print("Using Pollard rho (Brent's variant) to factorise %d (%d digits)..." % (n, digits))
            # No max_iter is passed, so this loops until a factor is
            # found (it cannot return None here).
            factors = [pollard_brent_find_factor(n)]
        else:
            if verbose:
                print("Using Self-Initializing Quadratic Sieve to factorise" +
                      " %d (%d digits)..." % (n, digits))
            factors = siqs_factorise(n)
    prime_factors = []
    # The found factors may themselves be composite; fully factorise
    # each distinct one.
    for f in set(factors):
        for pf in find_all_prime_factors(f):
            prime_factors.append(pf)
    return prime_factors
def find_all_prime_factors(n):
    """Return all prime factors of the given number n. Assume that n
    does not have very small factors and that the global small_primes
    has already been initialised.

    Repeated prime factors are returned with their multiplicity.
    """
    rem = n
    factors = []
    while rem > 1:
        if is_probable_prime(rem):
            factors.append(rem)
            break
        # Mutual recursion: find_prime_factors may call back into this
        # function to fully split composite factors.
        for f in find_prime_factors(rem):
            if verbose:
                print("Prime factor found: %d" % f)
            assert is_probable_prime(f)
            assert rem % f == 0
            # Divide out every occurrence so each repeated prime factor
            # is recorded once per occurrence.
            while rem % f == 0:
                rem //= f
                factors.append(f)
    return factors
def product(factors):
    """Return the product of all numbers in the given list.

    An empty list yields 1, the multiplicative identity.
    """
    result = 1
    for factor in factors:
        result = result * factor
    return result
def factorise(n):
    """Factorise the given integer n >= 1 into its prime factors.

    Returns the sorted list of prime factors (with multiplicity).
    Raises ValueError for non-int input or n < 1.
    """
    # NOTE: type(n) != int also rejects int subclasses such as bool.
    if type(n) != int or n < 1:
        raise ValueError("Number needs to be an integer >= 1")
    if verbose:
        print("Factorising %d (%d digits)..." % (n, len(str(n))))
    if n == 1:
        return []
    if is_probable_prime(n):
        return [n]
    # Strip all prime factors below one million and initialise the
    # global small_primes list as a side effect.
    factors, rem = trial_div_init_primes(n, 1000000)
    if verbose:
        if factors:
            print("Prime factors found so far: %s" % factors)
        else:
            print("No small factors found.")
    if rem != 1:
        digits = len(str(rem))
        if digits > MAX_DIGITS_POLLARD:
            if verbose:
                print("Attempting quick Pollard rho (Brent's variant) to find slightly " +
                      "larger factors...")
            rem = pollard_brent_quick(rem, factors)
        if rem > 1:
            for fr in find_all_prime_factors(rem):
                factors.append(fr)
    factors.sort()
    # Sanity checks: the factors must multiply back to n and each must
    # (probably) be prime.
    assert product(factors) == n
    for p in factors:
        assert is_probable_prime(p)
    return factors
if __name__ == '__main__':
    # Command-line entry point: factorise the integer given as the
    # single argument.
    if len(sys.argv) > 1:
        N = int(sys.argv[1])
        print("\nSuccess. Prime factors: %s" % factorise(N))
    else:
        print("Usage: factorize.py <N>", file=sys.stderr) | /rpnChilada-8.5.6-py3-none-any.whl/rpn/factorise.py | 0.693577 | 0.571468 | factorise.py | pypi |
__all__ = ()
#-----------------------------------------------------------------------------
# IMPORTANT NOTE: DO NOT import the clnum module into the global namespace of
# this module. This module is imported during the initialization of clnum and
# would create a circular import.
#-----------------------------------------------------------------------------
import re
#-----------------------------------------------------------------------------
# Regular expression patterns to match the various types of numbers.
# Core float grammar: optional sign, digits, optional decimal point, optional
# exponent. Every piece is optional, so degenerate strings such as '', '+',
# or '.E2' also match and must be rejected separately (see _degen_lst).
_base_flt = r'[+-]?\d*\.?\d*(?:E[+-]?\d+)?'
# Pattern for degenerate forms that match _base_flt
_degen_lst = [
    r'(^[+-]?\.?E?[+-]?$)',
    r'(^[+-]?\.?E[+-]?\d+$)',
]
mpf_degen_re = re.compile('|'.join(_degen_lst))
integer_re = re.compile(r'^[+-]?\d+$')
mpf_re = re.compile(r'^%s$' % _base_flt)
# Complex float: one or two float components followed by J (input is
# upper-cased by _clean_str before matching).
cmpf_re = re.compile(r'^(%s)(%s)?J$' % (_base_flt, _base_flt))
# Rational grammar: optional sign, numerator, optional '/denominator'.
_base_ra = r'[+-]?\d+/?\d*'
# A trailing slash or divide by zero are not allowed
mpq_degen_re = re.compile(r'.*/0*$')
mpq_re = re.compile(r'^%s$' % _base_ra)
cmpq_re = re.compile(r'^(%s)(%s)?J$' % (_base_ra, _base_ra))
#-----------------------------------------------------------------------------
def _clean_str(s):
    """Normalise raw input: require a string, strip whitespace, upper-case.

    NOTE: this module targets Python 2 -- ``basestring`` here (and
    ``long`` in number_str below) do not exist on Python 3.
    """
    if not isinstance(s, basestring):
        raise ValueError('Input must be a string')
    # The string could be unicode but the C++ routines are expecting a regular
    # string so convert it and remove any leading and trailing white space.
    return str(s).strip().upper()
#-----------------------------------------------------------------------------
def _mpf_clean_str(s):
    """Validate and normalise a floating point literal for CLN parsing.

    Raises ValueError for strings that do not match the float grammar or
    that are degenerate (e.g. a bare sign or exponent marker).
    """
    cleaned = _clean_str(s)
    valid = mpf_re.match(cleaned) and not mpf_degen_re.match(cleaned)
    if not valid:
        raise ValueError('Invalid floating point format')
    # CLN rejects a bare integer or a trailing decimal point as float
    # input, so pad the string into a form it accepts.
    if integer_re.match(cleaned):
        return cleaned + '.0'
    if cleaned.endswith('.'):
        return cleaned + '0'
    return cleaned
#-----------------------------------------------------------------------------
def _mpq_clean_str(s):
    """Validate a rational literal; raise ValueError when malformed.

    A trailing slash or a zero denominator is rejected through the
    degenerate-form pattern.
    """
    cleaned = _clean_str(s)
    if mpq_degen_re.match(cleaned) or not mpq_re.match(cleaned):
        raise ValueError('Invalid rational format')
    return cleaned
#-----------------------------------------------------------------------------
# NOTE: The CLN library is never allowed to parse a complex input because it
# uses i to designate the imaginary part. Want to make the interface
# compatible with Python complex conventions.
def _cmpf_clean_str(s):
    """Validate a complex floating point literal and split it into parts.

    Returns a (real, imag) pair of strings, each in a form CLN's float
    parser accepts. Pure-real input yields imag '0.0'; a single
    component followed by J is pure imaginary and yields real '0.0'.
    Raises ValueError for malformed input.
    """
    def _pad(part):
        # NOTE: CLN rejects a bare integer or a trailing decimal point
        # as float input, so append an acceptable suffix.
        if integer_re.match(part):
            return part + '.0'
        elif part.endswith('.'):
            return part + '0'
        return part
    s = _clean_str(s)
    m = cmpf_re.match(s)
    msg = 'Invalid complex floating point format'
    if not m:
        # The input could be floating point format in which case the
        # imaginary part is zero.
        try:
            return _mpf_clean_str(s), '0.0'
        except ValueError:
            raise ValueError(msg)
    real, imag = m.groups()
    if mpf_degen_re.match(real):
        raise ValueError(msg)
    if imag:
        if mpf_degen_re.match(imag):
            raise ValueError(msg)
    else:
        # Only one component was given, so the result is pure imaginary;
        # move it into the imaginary slot and zero the real part.
        imag = real
        real = '0.0'
    # Pair of strings that convert to floats using CLN syntax.
    return _pad(real), _pad(imag)
#-----------------------------------------------------------------------------
# NOTE: The CLN library is never allowed to parse a complex input because it
# uses i to designate the imaginary part. Want to make the interface
# compatible with Python complex conventions.
def _cmpq_clean_str(s):
    """Validate a complex rational literal and split it into parts.

    Returns a (real, imag) pair of strings in CLN rational syntax.
    Pure-rational input yields imag '0'; a single component followed by
    J is treated as pure imaginary. Raises ValueError otherwise.
    """
    msg = 'Invalid complex rational format'
    cleaned = _clean_str(s)
    match = cmpq_re.match(cleaned)
    if match is None:
        # No J suffix: the whole string may be a plain rational with a
        # zero imaginary part.
        try:
            return _mpq_clean_str(cleaned), '0'
        except ValueError:
            raise ValueError(msg)
    real, imag = match.groups()
    if mpq_degen_re.match(real) or (imag and mpq_degen_re.match(imag)):
        raise ValueError(msg)
    if not imag:
        # Pure imaginary: the single captured component is the imaginary
        # part, so zero out the real part.
        real, imag = '0', real
    # Pair of strings that convert to rationals using CLN syntax.
    return real, imag
#-----------------------------------------------------------------------------
def number_str(s, prec=0, prefer_cmpf=False):
    '''Given a string, try to convert it to one of the supported number types.
    The following are applied in order: int, long, mpq, mpf, cmpq, cmpf unless
    prefer_cmpf is True. Then cmpq and cmpf are swapped.
    Integers (int,long) can have an optional base prefix:
    0x - hex
    0o - octal
    0b - binary
    The floating point forms (mpf,cmpf) accept an optional prec parameter which
    defaults to the current clnum module default.
    '''
    s = _clean_str(s)
    # Save and remove the sign since it interferes with the base recognition.
    sign = 1
    si = s
    if s.startswith('-'):
        sign = -1
        si = s[1:]
    elif s.startswith('+'):
        si = s[1:]
    # Identify the base to use for the conversion.
    bases = {'0X':16, '0O':8, '0B':2}
    prefix = si[:2]
    base = 10
    if prefix in bases:
        si = si[2:]
        base = bases[prefix]
    try:
        return sign*int(si, base)
    except ValueError:
        pass
    # NOTE(review): Python 2 only -- on Python 2, int() already promotes
    # to long on overflow, so this branch is rarely reached; ``long``
    # does not exist on Python 3.
    try:
        return sign*long(si, base)
    except ValueError:
        pass
    if base != 10:
        raise ValueError('Cannot apply base prefix to non-integers')
    # Import the clnum module here to avoid circular import when clnum is
    # initializing.
    import clnum
    # NOTE: The original string is used from here on because the modified form
    # would cause errors in complex numbers.
    try:
        return clnum.mpq(s)
    except ValueError:
        pass
    try:
        return clnum.mpf(s, prec=prec)
    except ValueError:
        pass
    # Try the complex forms last, in the order selected by prefer_cmpf.
    if prefer_cmpf:
        try:
            return clnum.cmpf(s, prec=prec)
        except ValueError:
            pass
        try:
            return clnum.cmpq(s)
        except ValueError:
            pass
    else:
        try:
            return clnum.cmpq(s)
        except ValueError:
            pass
        try:
            return clnum.cmpf(s, prec=prec)
        except ValueError:
            pass
    raise ValueError('Cannot convert string to a number') | /rpncalc-2.7.tar.gz/rpncalc-2.7/clnum/_clnum_str.py | 0.578567 | 0.2174 | _clnum_str.py | pypi |
from __future__ import print_function
import sys
import os
import readline
import argparse
from rpnpy import Calculator
def setupReadline():
    """Initialize the readline library and command history.
    @return: A C{bool} to indicate whether standard input is a terminal
    (and therefore interactive).
    """
    if not os.isatty(0):
        # Standard input is closed or is a pipe etc. So there's no user
        # typing at us, and so no point in setting up readline.
        return False
    # Readline code from https://docs.python.org/3.7/library/readline.html
    histfile = os.path.join(os.path.expanduser('~'), '.pycalc_history')
    try:
        readline.read_history_file(histfile)
        historyLen = readline.get_current_history_length()
    except FileNotFoundError:
        # First run: create an empty history file so later appends work.
        open(histfile, 'wb').close()
        historyLen = 0
    try:
        # Probe for append_history_file, which not every build provides.
        readline.append_history_file
    except AttributeError:
        # We won't be able to save readline history. This can happen on
        # Python 3.5 at least - not sure why.
        pass
    else:
        import atexit
        def saveHistory(prevHistoryLen, histfile):
            # Append only the entries added during this session.
            newHistoryLen = readline.get_current_history_length()
            readline.set_history_length(1000)
            readline.append_history_file(newHistoryLen - prevHistoryLen,
                                         histfile)
        atexit.register(saveHistory, historyLen, histfile)
    return True
def parseArgs():
    """Build the command-line parser and parse sys.argv.

    Returns an argparse.Namespace carrying all calculator options.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=(
            'An RPN calculator for Python. Reads commands from standard input '
            'and/or files or interactively with a read-eval-print loop and '
            '(optionally) writes the final stack to standard output.'))
    parser.add_argument(
        '--prompt', default='--> ',
        help='The prompt to print at the start of each line when interactive.')
    parser.add_argument(
        'files', nargs='*',
        help=('Files to read input from. If you use this option and you also '
              'want standard input to be read at some point, use "-" as a '
              'name in the list of file names.'))
    parser.add_argument(
        '--separator',
        help=('The character to use to split lines of standard input into '
              'separate commands (unless --noSplit is given).'))
    parser.add_argument(
        '--debug', action='store_true', default=False,
        # Fixed typo in the help text ("commandsa" -> "commands").
        help='Print verbose information about how commands are run.')
    parser.add_argument(
        '--print', action='store_true', default=False,
        help='Print the result of each command.')
    parser.add_argument(
        '--version', action='store_true', default=False,
        help='Print the version number and exit.')
    parser.add_argument(
        '--noSplit', action='store_false', default=True, dest='splitLines',
        help=('If given, do not split lines read from standard input into '
              'separate commands, treat each line as an entire command.'))
    parser.add_argument(
        '--noFinalPrint', action='store_false', default=True,
        dest='finalPrint',
        help=('If given, do not print the stack after processing all commands '
              'from standard input.'))
    parser.add_argument(
        '--stdin', action='store_true', default=False,
        help=('If the arguments on the command line are passed as input to '
              'the calculator, you can use this option to also read commands '
              'from standard input once the command line has been executed.'))
    parser.add_argument(
        '--startupFile',
        help=('Python file to be parsed at startup. Usually used to define '
              'custom functions.'))
    return parser.parse_args()
if __name__ == '__main__':
    args = parseArgs()
    if args.version:
        from rpnpy import __version__
        print(__version__)
    else:
        calc = Calculator(autoPrint=args.print, splitLines=args.splitLines,
                          separator=args.separator, debug=args.debug)
        if args.startupFile:
            try:
                with open(args.startupFile) as f:
                    # Run the startup file with the calculator's variable
                    # dict as locals so its definitions become available.
                    exec(f.read(), globals(), calc._variables)
            except FileNotFoundError:
                calc.err('Startup file %s not found' % args.startupFile)
        interactive = setupReadline()
        if args.files:
            if all(os.path.exists(f) or f == '-' for f in args.files):
                # All arguments are existing files (or are '-', for stdin).
                for filename in args.files:
                    if filename == '-':
                        calc.repl(args.prompt)
                    else:
                        with open(filename) as fp:
                            calc.batch(fp, False)
            else:
                # Execute the command line as a set of commands, following
                # great suggestion by David Pattinson.
                calc.batch((' '.join(args.files),), args.finalPrint)
            if args.stdin:
                calc.repl(args.prompt)
        elif interactive:
            calc.repl(args.prompt)
        else:
            calc.batch(sys.stdin, args.finalPrint) | /rpnpy-1.0.31.tar.gz/rpnpy-1.0.31/rpn.py | 0.45423 | 0.158858 | rpn.py | pypi |
# rportion - data structure and operations for rectilinear polygons
[](https://test.pypi.org/project/rportion/)
[](https://github.com/tilmann-bartsch/portion/actions/workflows/test.yaml)
[](https://coveralls.io/github/tilmann-bartsch/rportion?branch=master)
[](https://lgtm.com/projects/g/tilmann-bartsch/rportion)
[](https://opensource.org/licenses/MIT)
[](https://github.com/tilmann-bartsch/rportion/commits/master)
The `rportion` library provides data structure to represent
2D [rectilinear polygons](https://en.wikipedia.org/wiki/Rectilinear_polygon) (unions of 2D-intervals) in Python 3.7+.
It is built upon the library [`portion`](https://github.com/AlexandreDecan/portion) and follows its concepts.
The following features are provided:
- 2D-Intervals (rectangles) which can be open/closed and finite/infinite at every boundary
- intersection, union, complement and difference of rectilinear polygons
- iterator over all maximum rectangles inside and outside a given polygon
In the case of integers/floats it can be used to keep track of the area resulting
from the union/difference of rectangles:
<p align="center">
<img width="65%" src="https://github.com/tilmann-bartsch/rportion/raw/master/docu/simple-example_solid.gif">
</p>
Internally the library uses an [interval tree](https://en.wikipedia.org/wiki/Interval_tree) to represent a polygon.
## Table of contents
* [Installation](#installation)
* [Documentation & usage](#documentation--usage)
* [Polygon creation](#polygon-creation)
* [Polygon bounds & attributes](#polygon-bounds--attributes)
* [Polygon operations](#polygon-operations)
* [Rectangle partitioning iterator](#rectangle-partitioning-iterator)
* [Maximum rectangle iterator](#maximum-rectangle-iterators)
* [Boundary](#boundary)
* [Internal data structure](#internal-data-structure)
* [Changelog](#changelog)
* [Contributions](#contributions)
* [License](#license)
## Installation
Install `rportion` from [PyPi-test](https://test.pypi.org/project/rportion/) with `pip` using
`pip install -i https://test.pypi.org/simple/ rportion`.
Install `rportion` with the development environment using `pip install -e ".[test]"`.
## Documentation & usage
### Polygon creation
Atomic polygons (rectangles) can be created by one of the following:
```python
>>> import rportion as rp
>>> rp.ropen(0, 2, 0, 1)
(x=(0,2), y=(0,1))
>>> rp.rclosed(0, 2, 0, 1)
(x=[0,2], y=[0,1])
>>> rp.ropenclosed(0, 2, 0, 1)
(x=(0,2], y=(0,1])
>>> rp.rclosedopen(0, 2, 0, 1)
(x=[0,2), y=[0,1))
>>> rp.rsingleton(0, 1)
(x=[0], y=[1])
>>> rp.rempty()
(x=(), y=())
```
Polygons can also be created by using two intervals of the underlying library
[`portion`](https://github.com/AlexandreDecan/portion):
```python
>>> import portion as P
>>> import rportion as rp
>>> rp.RPolygon.from_interval_product(P.openclosed(0, 2), P.closedopen(0, 1))
(x=(0,2], y=[0,1))
```
[↑ back to top](#table-of-contents)
### Polygon bounds & attributes
An `RPolygon` defines the following properties
- `empty` is true if the polygon is empty.
```python
>>> rp.rclosed(0, 2, 1, 2).empty
False
>>> rp.rempty().empty
True
```
- `atomic` is true if the polygon can be expressed by a single rectangle.
```python
>>> rp.rempty().atomic
True
>>> rp.rclosedopen(0, 2, 1, 2).atomic
True
>>> (rp.rclosed(0, 2, 1, 2) | rp.rclosed(0, 2, 1, 3)).atomic
True
>>> (rp.rclosed(0, 2, 1, 2) | rp.rclosed(1, 2, 1, 3)).atomic
False
```
- `enclosure` is the smallest rectangle containing the polygon.
```python
>>> (rp.rclosed(0, 2, 0, 2) | rp.rclosed(1, 3, 0, 1)).enclosure
(x=[0,3], y=[0,2])
>>> (rp.rclosed(0, 1, -3, 3) | rp.rclosed(-P.inf, P.inf, -1, 1)).enclosure
(x=(-inf,+inf), y=[-3,3])
```
- `enclosure_x_interval` is the smallest rectangle containing the polygon's extension in x-dimension.
```python
>>> (rp.rclosed(0, 2, 0, 2) | rp.rclosed(1, 3, 0, 1)).x_enclosure_interval
x=[0,3]
>>> (rp.rclosed(0, 1, -3, 3) | rp.rclosed(-P.inf, P.inf, -1, 1)).x_enclosure_interval
(-inf,+inf)
```
- `enclosure_y_interval` is the smallest interval containing the polygon's extension in y-dimension.
```python
>>> (rp.rclosed(0, 2, 0, 2) | rp.rclosed(1, 3, 0, 1)).y_enclosure_interval
[0,2]
>>> (rp.rclosed(0, 1, -3, 3) | rp.rclosed(-P.inf, P.inf, -1, 1)).y_enclosure_interval
[-3,3]
```
- `x_lower`, `x_upper`, `y_lower` and `y_upper` yield the boundaries of the rectangle enclosing
the polygon.
```python
>>> p = rp.rclosedopen(0, 2, 1, 3)
>>> p.x_lower, p.x_upper, p.y_lower, p.y_upper
(0, 2, 1, 3)
```
- `x_left`, `x_right`, `y_left` and `y_right` yield the type of the boundaries of the rectangle enclosing
the polygon.
```python
>>> p = rp.rclosedopen(0, 2, 1, 3)
>>> p.x_left, p.x_right, p.y_left, p.y_right
(CLOSED, OPEN, CLOSED, OPEN)
```
### Polygon operations
`RPolygon` instances support the following operations:
- `p.intersection(other)` and `p & other` return the intersection of two rectilinear polygons.
```python
>>> rp.rclosed(0, 2, 0, 2) & rp.rclosed(1, 3, 0, 1)
(x=[1,2], y=[0,1])
```
- `p.union(other)` and `p | other` return the union of two rectilinear polygons.
```python
>>> rp.rclosed(0, 2, 0, 2) | rp.rclosed(1, 3, 0, 1)
(x=[0,3], y=[0,1]) | (x=[0,2], y=[0,2])
```
Note that the resulting polygon is represented by the union of all maximal rectangles contained in
in the polygon, see [Maximum rectangle iterators](#maximum-rectangle-iterators).
- `p.complement()` and `~p` return the complement of the rectilinear polygon.
```python
>>> ~rp.ropen(-P.inf, 0, -P.inf, P.inf)
(x=[0,+inf), y=(-inf,+inf))
```
- `p.difference(other)` and `p - other` return the difference of two rectilinear polygons.
```python
>>> rp.rclosed(0, 3, 0, 2) - rp.ropen(2, 4, 1, 3)
(x=[0,3], y=[0,1]) | (x=[0,2], y=[0,2])
```
Note that the resulting polygon is represented by the union of all maximal rectangles contained in
in the polygon, see [Maximum rectangle iterators](#maximum-rectangle-iterators).
[↑ back to top](#table-of-contents)
### Rectangle partitioning iterator
The method `rectangle_partitioning` of a `RPolygon` instance returns an iterator
over rectangles contained in the rectilinear polygon which disjunctively cover it. I.e.
```python
>>> poly = rp.rclosedopen(2, 5, 1, 4) | rp.rclosedopen(1, 8, 2, 3) | rp.rclosedopen(6, 8, 1, 3)
>>> poly = poly - rp.rclosedopen(4, 7, 2, 4)
>>> list(poly.rectangle_partitioning())
[(x=[1,4), y=[2,3)), (x=[2,5), y=[1,2)), (x=[6,8), y=[1,2)), (x=[2,4), y=[3,4)), (x=[7,8), y=[2,3))]
```
which can be visualized as follows:
<p align="center">
<img width="95%" src="https://github.com/tilmann-bartsch/rportion/raw/master/docu/simple-example_partitioning.png">
</p>
**Left:** Simple Rectilinear polygon. The red areas are part of the polygon.<br>
**Right:** Rectangles in the portion are shown with black borderlines. As it is visible
`rectangle_partitioning` prefers rectangles with long x-interval over
rectangles with long y-interval.
[↑ back to top](#table-of-contents)
### Maximum rectangle iterator
The method `maximal_rectangles` of a `RPolygon` instance returns an iterator over all maximal rectangles contained
in the rectilinear polygon.
A maximal rectangle is rectangle in the polygon which is not a real subset of any other rectangle contained in
the rectilinear polygon. I.e.
```python
>>> poly = rp.rclosedopen(2, 5, 1, 4) | rp.rclosedopen(1, 8, 2, 3) | rp.rclosedopen(6, 8, 1, 3)
>>> poly = poly - rp.rclosedopen(4, 7, 2, 4)
>>> list(poly.maximal_rectangles())
[(x=[1,4), y=[2,3)), (x=[2,5), y=[1,2)), (x=[6,8), y=[1,2)), (x=[2,4), y=[1,4)), (x=[7,8), y=[1,3))]
```
which can be visualized as follows:
<p align="center">
<img width="95%" src="https://github.com/tilmann-bartsch/rportion/raw/master/docu/simple-example_max-rectangles.png">
</p>
**Left:** Simple Rectilinear polygon. The red areas are part of the polygon.<br>
**Right:** Maximal contained rectangles are drawn above each other transparently.
[↑ back to top](#table-of-contents)
## Boundary
The method `boundary` of a `RPolygon` instance returns another `RPolygon` instance representing the boundary of
the polygon. I.e.
```python
>>> poly = rp.rclosed(1, 2, 3, 4)
>>> poly.boundary()
(x=[1,2], y=[3]) | (x=[1,2], y=[4]) | (x=[1], y=[3,4]) | (x=[2], y=[3,4])
```
[↑ back to top](#table-of-contents)
## Internal data structure
The polygon is internally stored using an [interval tree](https://en.wikipedia.org/wiki/Interval_tree). Every
node of the tree corresponds to an interval in x-dimension which is representable by boundaries (in x-dimension)
present in the polygon. Each node contains an 1D-interval (by using the library
[`portion`](https://github.com/AlexandreDecan/portion)) in y-dimension. Combining those 1D-intervals
yields a rectangle contained in the polygon.
I.e. for the rectangle `(x=[0, 2), y=[1, 3))` this can be visualized as follows.
```
interval tree with x-interval corresponding y-interval stored in
a lattice-like shape to each node each node
┌─x─┐ ┌─(-∞,+∞)─┐ ┌─()──┐
│ │ │ │ │ │
┌─x─┬─x─┐ ┌─(-∞,2)──┬──[0,+∞)─┐ ┌─()──┬──()─┐
│ │ │ │ │ │ │ │ │
x x x (-∞,0] [0,2) [2,+∞) () [1,3) ()
```
The class `RPolygon` used this model by holding three data structures.
- `_x_boundaries`: Sorted list of necessary boundaries in x-dimension with type (`OPEN` or `CLOSED`)
- `_used_y_ranges`: List of lists in a triangular shape representing the interval tree for the
space occupied by the rectilinear polygon.
- `_free_y_ranges`: List of list in a triangular shape representing the interval tree of
for the space not occupied by the rectilinear polygon.
Note that a separate data structure for the area outside the polygon is kept.
This is done in order to be able to obtain the complement of a polygon efficiently.
For the example shown above this is:
```python
>>> poly = rp.rclosedopen(0, 2, 1, 3)
>>> poly._x_boundaries
SortedList([(-inf, OPEN), (0, OPEN), (2, OPEN), (+inf, OPEN)])
>>> poly._used_y_ranges
[[(), (), ()],
[(), [1,3)],
[()]]
>>> poly._free_y_ranges
[[(-inf,1) | [3,+inf), (-inf,1) | [3,+inf), (-inf,+inf)],
[(-inf,1) | [3,+inf), (-inf,1) | [3,+inf)],
[(-inf,+inf)]]
```
You can use the function `data_tree_to_string` as noted below to print the internal data structure in a tabular format:
```python
>>> poly = rp.rclosedopen(0, 2, 1, 3)
>>> print(data_tree_to_string(poly._x_boundaries, poly._used_y_ranges, 6))
| +inf 2 0
----------------+------------------
-inf (OPEN)| () () ()
0 (CLOSED)| () [1,3)
2 (CLOSED)| ()
```
```python
>>> poly = rp.rclosedopen(2, 5, 1, 4) | rp.rclosedopen(1, 8, 2, 3) | rp.rclosedopen(6, 8, 1, 3)
>>> poly = poly - rp.rclosedopen(4, 7, 2, 4)
>>> print(data_tree_to_string(poly._x_boundaries, poly._used_y_ranges, 6))
| +inf 8 7 6 5 4 2 1
----------------+------------------------------------------------
-inf (OPEN)| () () () () () () () ()
1 (CLOSED)| () () () () () [2,3) [2,3)
2 (CLOSED)| () () () () [1,2) [1,4)
4 (CLOSED)| () () () () [1,2)
5 (CLOSED)| () () () ()
6 (CLOSED)| () [1,2) [1,2)
7 (CLOSED)| () [1,3)
```
```python
def data_tree_to_string(x_boundaries,
y_intervals,
spacing: int):
col_space = 10
n = len(y_intervals)
msg = " " * (spacing + col_space) + "|"
for x_b in x_boundaries[-1:0:-1]:
msg += f"{str(x_b.val):>{spacing}}"
msg += "\n" + f"-" * (spacing+col_space) + "+"
for i in range(n):
msg += f"-" * spacing
msg += "\n"
for i, row in enumerate(y_intervals):
x_b = x_boundaries[i]
msg += f"{str((~x_b).val) + ' (' + str((~x_b).btype) + ')':>{spacing+ col_space}}|"
for val in row:
msg += f"{str(val):>{spacing}}"
msg += "\n"
return msg
```
[↑ back to top](#table-of-contents)
## Changelog
This library adheres to a [semantic versioning](https://semver.org/) scheme.
See [CHANGELOG.md](https://github.com/tilmann-bartsch/rportion/blob/master/CHANGELOG.md) for the list of changes.
## Contributions
Contributions are very welcome! Feel free to report bugs or suggest new features using GitHub issues and/or pull requests.
## License
Distributed under [MIT License](https://github.com/tilmann-bartsch/rportion/blob/master/LICENSE).
from .RpqLua import RpqLua
class RpqQueue:
    # Client for a single named Redis priority queue. All operations are
    # delegated to the redis-priority-queue LUA script through RpqLua.
    def __init__(self, redis, queueName):
        """
        Registers RpqLua queue from a Redis connection
        """
        # RpqLua instance
        self.queue = RpqLua(redis)
        # Set queue name
        self.setqueueName(queueName)
    def eval(self, args):
        # Unpack the argument list and evaluate the LUA script with it.
        return self.eval if False else self.queue.eval(*args)
    def bytesDecode(self, item):
        """
        Decode Bytes to utf-8 for Python 3 compatibility
        """
        return item.decode("utf-8")
    def setqueueName(self, queueName):
        """
        Define loaded queue name at class level
        """
        self.queueName = queueName
    def push(self, item, priority=None):
        """
        Push an item
        """
        # Build arg list
        args = ['push', self.queueName, item]
        # Priority is optional; when omitted the LUA script decides.
        if priority is not None:
            args.append(priority)
        return self.eval(args)
    def pop(self, orderBy='desc'):
        """
        alias for popOne()
        """
        return self.popOne(orderBy)
    def popOne(self, orderBy='desc'):
        """
        Pop an item
        """
        item = self.popMany(orderBy, 1)
        if item: # There is an item
            return item[0]
        else:
            return None
    def popMany(self, orderBy='desc', numberOfItems=1):
        """
        Pop many items
        """
        items = self.eval(['pop', self.queueName, orderBy, numberOfItems])
        if items:
            # Decode all items from bytes to utf-8
            items = tuple(map(self.bytesDecode, items))
            return items
        else:
            # None when the queue is empty (empty reply from the script).
            return None
    def peek(self, orderBy='desc'):
        """
        alias for peekOne()
        """
        return self.peekOne(orderBy)
    def peekOne(self, orderBy='desc'):
        """
        Peek an item
        """
        item = self.peekMany(orderBy, 1)
        if item: # There is an item
            return item[0]
        else:
            return None
    def peekMany(self, orderBy='desc', numberOfItems=1):
        """
        Peek many items
        """
        items = self.eval(['peek', self.queueName, orderBy, numberOfItems])
        if items:
            # Decode all items from bytes to utf-8
            items = tuple(map(self.bytesDecode, items))
            return items
        else:
            return None
    def count(self, priorityMin=None, priorityMax=None):
        """
        Get queue size
        """
        # Build arg list
        args = ['size', self.queueName]
        # NOTE(review): passing priorityMax without priorityMin would
        # shift the positional arguments -- confirm against the LUA
        # script's expectations.
        if priorityMin is not None:
            args.append(priorityMin)
        if priorityMax is not None:
            args.append(priorityMax)
        return self.eval(args) | /rpq-2.2.tar.gz/rpq-2.2/clients/python/lib/RpqQueue.py | 0.674694 | 0.176707 | RpqQueue.py | pypi |
import hashlib
class RpqLua:
    # Loader/runner for the redis-priority-queue LUA script: reads the
    # script source from the installed package, registers it with Redis
    # under its SHA1, and evaluates it on demand.
    def __init__(self, redisConnection):
        '''Sets Redis connection, load LUA from path'''
        # Set Redis connection
        self.setRedisConnection(redisConnection)
        # Load LUA source
        self.loadSource(self.getLuaPath())
        # Register LUA script
        self.register()
    def getLuaPath(self):
        """
        Returns the LUA path in the filesystem
        """
        import rpq_src
        import pkg_resources
        # NOTE(review): pkg_resources is deprecated in favour of
        # importlib.resources; kept as-is for compatibility.
        return pkg_resources.resource_filename('rpq_src', 'redis-priority-queue.lua')
    def file_get_contents(self, filename):
        """
        Get a file content
        """
        with open(filename) as f:
            return f.read()
    def setRedisConnection(self, connection):
        """
        Set a Redis connection
        """
        self.connection = connection
        return True
    def loadSource(self, path):
        """
        Load LUA script source code from a file
        """
        self.source = self.file_get_contents(path)
        return True
    def getSha1(self):
        """
        Calculate sha1 of the LUA source
        """
        d = hashlib.sha1(self.source.encode('utf-8'))
        # NOTE(review): the digest() result below is discarded;
        # hexdigest() alone would suffice.
        d.digest()
        return d.hexdigest()
    def exists(self):
        """
        Returns `True` if the LUA script is already loaded into Redis
        """
        # Check if the script exists
        # SCRIPT EXISTS returns a list of flags, one per queried sha.
        t = self.connection.script_exists(self.sha1)
        if t and t[0]:
            return True
        return False
    def load(self):
        """
        Load the script into Redis
        """
        self.connection.script_load(self.source)
        return True
    def register(self):
        """
        Registers the script
        """
        # Set LUA sha1
        self.sha1 = self.getSha1()
        # Load if needed
        if not self.exists():
            self.load()
        return True
    def eval(self, *args):
        """
        Call the LUA script with the desired arguments and returns the output
        """
        return self.connection.evalsha(self.sha1, 0, *args) | /rpq-2.2.tar.gz/rpq-2.2/clients/python/lib/RpqLua.py | 0.739705 | 0.18939 | RpqLua.py | pypi |
import numpy as np
import typing as ty
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import FloatVector
def symmetry_test(
    x: ty.Union[tuple, list, np.ndarray],
    test_statistic: str = 'MI',
    test_k: int = 1,
    _module: str = 'symmetry',
    _method: str = 'symmetry_test',
    **kw,
) -> dict:
    """Run the R 'symmetry' package's symmetry test on dataset *x*.

    The R function named *_method* from package *_module* is invoked with
    the data converted to an R float vector. *test_statistic* selects the
    statistic (see
    https://cran.r-project.org/web/packages/symmetry/symmetry.pdf);
    *test_k* is only meaningful for some statistics (not the default MI)
    and is passed through regardless. Extra keyword arguments are
    forwarded to the R call.

    Returns the htest result converted into a plain Python dict.
    """
    r_package = get_module(_module)
    r_function = getattr(r_package, _method)
    r_result = r_function(
        x=FloatVector(x),
        stat=test_statistic,
        k=test_k,
        **kw,
    )
    return _result_to_dict(r_result)
def p_symmetry(x: np.ndarray, **kw) -> float:
    """Return the p-value reported by symmetry_test for dataset *x*."""
    result = symmetry_test(x, **kw)
    return float(result['p.value'])
def get_module(name: str = 'symmetry'):
    """Import and return the R package *name* via rpy2.

    Only the 'symmetry' package is installed automatically when missing;
    any other missing package raises ModuleNotFoundError instead.
    """
    if not rpackages.isinstalled(name):
        if name == 'symmetry':
            _install_package_on_the_fly(name)
        else:
            # I'm not doing this for any package for safety reasons
            raise ModuleNotFoundError(
                f'{name} is not installed in R, you can use '
                f'_install_package_on_the_fly(\'{name}\')"'
            )
    return rpackages.importr(name)
def _install_package_on_the_fly(package: str) -> None:
    """Install an R package through R's utils::install.packages.

    Raises RuntimeError (chained from the R runtime error) when the
    installation fails, e.g. because R itself is not properly installed.
    """
    from rpy2.robjects.vectors import StrVector
    from rpy2.rinterface_lib.embedded import RRuntimeError
    utils = rpackages.importr('utils')
    packnames = (package,)
    # Select the first CRAN mirror non-interactively.
    utils.chooseCRANmirror(ind=1)
    try:
        utils.install_packages(StrVector(packnames))
    except RRuntimeError as e:
        raise RuntimeError(
            f'Cannot install {package} on the fly, please make sure that R is properly installed'
        ) from e
def _float_or_str(x) -> ty.Union[str, float]:
"""Try making x a float, or a string otherwise"""
try:
return float(x)
except ValueError:
pass
return str(x)
def _result_to_dict(res) -> dict:
    """Extract htest results from a rpy2.robjects.vectors.ListVector object"""
    # Map each named component of the R result onto a Python value.
    res_dict = {}
    for i, n in enumerate(res.names):
        v = res[i]
        if len(v) == 1:
            # Length-one R vectors unwrap to a scalar float or str.
            v = _float_or_str(v[0])
        res_dict[n] = v
    return res_dict | /rpy_symmetry-0.1.0.tar.gz/rpy_symmetry-0.1.0/rpy_symmetry/rpy_symmetry.py | 0.703651 | 0.646125 | rpy_symmetry.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.