| repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
pixyz | pixyz-main/pixyz/flows/conv.py |
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import scipy as sp
from .flows import Flow
class ChannelConv(Flow):
"""
Invertible 1 × 1 convolution.
Notes
-----
This is implemented with reference to the following code.
https://github.com/chaiyujin/glow-pytorch/blob/master/glow/modules.py
"""
def __init__(self, in_channels, decomposed=False):
super().__init__(in_channels)
w_shape = [in_channels, in_channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32)
if not decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
# LU decomposition
np_p, np_l, np_u = sp.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1)
eye = np.eye(*w_shape, dtype=np.float32)
self.register_buffer('p', torch.Tensor(np_p.astype(np.float32)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(np.float32)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.decomposed = decomposed
def get_parameters(self, x, inverse):
w_shape = self.w_shape
pixels = np.prod(x.size()[2:])
device = x.device
if not self.decomposed:
logdet_jacobian = torch.slogdet(self.weight.cpu())[1].to(device) * pixels
if not inverse:
weight = self.weight.view(w_shape[0], w_shape[1], 1, 1)
else:
weight = torch.inverse(self.weight.double()).float().view(w_shape[0], w_shape[1], 1, 1)
return weight, logdet_jacobian
else:
self.p = self.p.to(device)
self.sign_s = self.sign_s.to(device)
self.l_mask = self.l_mask.to(device)
self.eye = self.eye.to(device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
logdet_jacobian = torch.sum(self.log_s) * pixels
if not inverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1], 1, 1), logdet_jacobian
def forward(self, x, y=None, compute_jacobian=True):
weight, logdet_jacobian = self.get_parameters(x, inverse=False)
z = F.conv2d(x, weight)
if compute_jacobian:
self._logdet_jacobian = logdet_jacobian
return z
def inverse(self, x, y=None):
weight, _ = self.get_parameters(x, inverse=True)
z = F.conv2d(x, weight)
return z
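# A minimal smoke-test sketch for ChannelConv; the 4-channel 8x8 input and the
# printed quantities are illustrative assumptions, not taken from the original file.
if __name__ == "__main__":
    x = torch.randn(2, 4, 8, 8)
    for decomposed in (False, True):
        f = ChannelConv(4, decomposed=decomposed)
        z = f(x)              # invertible 1x1 convolution
        x_rec = f.inverse(z)  # apply the inverse convolution
        # log-det Jacobian is a scalar (already multiplied by the number of pixels);
        # the reconstruction error should be near zero up to float precision.
        print(decomposed, f.logdet_jacobian.item(), (x - x_rec).abs().max().item())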
| 3,370 | 36.455556 | 115 | py |
pixyz | pixyz-main/pixyz/flows/normalizations.py |
import torch
from torch import nn
import numpy as np
from .flows import Flow
from ..utils import epsilon
class BatchNorm1d(Flow):
"""
A batch normalization with the inverse transformation.
Notes
-----
This is implemented with reference to the following code.
https://github.com/ikostrikov/pytorch-flows/blob/master/flows.py#L205
Examples
--------
>>> x = torch.randn(20, 100)
>>> f = BatchNorm1d(100)
>>> # transformation
>>> z = f(x)
>>> # reconstruction
>>> _x = f.inverse(f(x))
>>> # check this reconstruction
>>> diff = torch.sum(torch.abs(_x-x)).item()
>>> diff < 0.1
True
"""
def __init__(self, in_features, momentum=0.0):
super().__init__(in_features)
self.log_gamma = nn.Parameter(torch.zeros(in_features))
self.beta = nn.Parameter(torch.zeros(in_features))
self.momentum = momentum
self.register_buffer('running_mean', torch.zeros(in_features))
self.register_buffer('running_var', torch.ones(in_features))
def forward(self, x, y=None, compute_jacobian=True):
if self.training:
self.batch_mean = x.mean(0)
self.batch_var = (x - self.batch_mean).pow(2).mean(0) + epsilon()
self.running_mean = self.running_mean * self.momentum
self.running_var = self.running_var * self.momentum
self.running_mean = self.running_mean + (self.batch_mean.data * (1 - self.momentum))
self.running_var = self.running_var + (self.batch_var.data * (1 - self.momentum))
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (x - mean) / var.sqrt()
z = torch.exp(self.log_gamma) * x_hat + self.beta
if compute_jacobian:
self._logdet_jacobian = (self.log_gamma - 0.5 * torch.log(var)).sum(-1)
return z
def inverse(self, z, y=None):
if self.training:
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (z - self.beta) / torch.exp(self.log_gamma)
x = x_hat * var.sqrt() + mean
return x
class BatchNorm2d(BatchNorm1d):
"""
A batch normalization with the inverse transformation.
Notes
-----
This is implemented with reference to the following code.
https://github.com/ikostrikov/pytorch-flows/blob/master/flows.py#L205
Examples
--------
>>> x = torch.randn(20, 100, 35, 45)
>>> f = BatchNorm2d(100)
>>> # transformation
>>> z = f(x)
>>> # reconstruction
>>> _x = f.inverse(f(x))
>>> # check this reconstruction
>>> diff = torch.sum(torch.abs(_x-x)).item()
>>> diff < 0.1
True
"""
def __init__(self, in_features, momentum=0.0):
super().__init__(in_features, momentum)
self.log_gamma = nn.Parameter(self._unsqueeze(self.log_gamma.data))
self.beta = nn.Parameter(self._unsqueeze(self.beta.data))
self.register_buffer('running_mean', self._unsqueeze(self.running_mean))
self.register_buffer('running_var', self._unsqueeze(self.running_var))
def _unsqueeze(self, x):
return x.unsqueeze(1).unsqueeze(2)
class ActNorm2d(Flow):
"""
Activation Normalization
Initialize the bias and scale with a given minibatch,
so that each channel of the output has zero mean and unit variance for that minibatch.
After initialization, `bias` and `logs` will be trained as parameters.
Notes
-----
This is implemented with reference to the following code.
https://github.com/chaiyujin/glow-pytorch/blob/master/glow/modules.py
"""
def __init__(self, in_features, scale=1.):
super().__init__(in_features)
# register mean and scale
size = [1, in_features, 1, 1]
self.register_parameter("bias", nn.Parameter(torch.zeros(*size)))
self.register_parameter("logs", nn.Parameter(torch.zeros(*size)))
self.scale = float(scale)
self.inited = False
def initialize_parameters(self, x):
if not self.training:
return
assert x.device == self.bias.device
with torch.no_grad():
bias = torch.mean(x.clone(), dim=[0, 2, 3], keepdim=True) * -1.0
vars = torch.mean((x.clone() + bias) ** 2, dim=[0, 2, 3], keepdim=True)
logs = torch.log(self.scale / (torch.sqrt(vars) + epsilon()))
self.bias.data.copy_(bias.data)
self.logs.data.copy_(logs.data)
self.inited = True
def _center(self, x, inverse=False):
if not inverse:
return x + self.bias
else:
return x - self.bias
def _scale(self, x, compute_jacobian=True, inverse=False):
logs = self.logs
if not inverse:
x = x * torch.exp(logs)
else:
x = x * torch.exp(-logs)
if compute_jacobian:
"""
logs is log_std of `mean of channels`
so we need to multiply pixels
"""
pixels = np.prod(x.size()[2:])
logdet_jacobian = torch.sum(logs) * pixels
return x, logdet_jacobian
return x, None
def forward(self, x, y=None, compute_jacobian=True):
if not self.inited:
self.initialize_parameters(x)
# center and scale
x = self._center(x, inverse=False)
x, logdet_jacobian = self._scale(x, compute_jacobian, inverse=False)
if compute_jacobian:
self._logdet_jacobian = logdet_jacobian
return x
def inverse(self, x, y=None):
if not self.inited:
self.initialize_parameters(x)
# scale and center
x, _ = self._scale(x, compute_jacobian=False, inverse=True)
x = self._center(x, inverse=True)
return x
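# A short sketch of ActNorm2d's data-dependent initialization; the input shape
# and the shift/scale of the fake data are illustrative assumptions.
if __name__ == "__main__":
    x = torch.randn(16, 3, 8, 8) * 2.0 + 5.0
    f = ActNorm2d(3)
    z = f(x)  # first call in training mode triggers initialize_parameters
    # per-channel statistics of z should be roughly zero mean / unit variance
    z_flat = z.transpose(0, 1).reshape(3, -1)
    print(z_flat.mean(dim=1), z_flat.std(dim=1))
    # the inverse transformation reconstructs the input
    print((f.inverse(z) - x).abs().max())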
| 5,961 | 29.731959 | 96 | py |
pixyz | pixyz-main/pixyz/flows/coupling.py |
import torch
import numpy as np
from .flows import Flow
class AffineCoupling(Flow):
r"""
Affine coupling layer
.. math::
:nowrap:
\begin{eqnarray*}
\mathbf{y}_{1:d} &=& \mathbf{x}_{1:d} \\
\mathbf{y}_{d+1:D} &=& \mathbf{x}_{d+1:D} \odot \exp(s(\mathbf{x}_{1:d})) + t(\mathbf{x}_{1:d})
\end{eqnarray*}
"""
def __init__(self, in_features, mask_type="channel_wise",
scale_net=None, translate_net=None, scale_translate_net=None,
inverse_mask=False):
super().__init__(in_features)
# mask initializations
if mask_type in ["checkerboard", "channel_wise"]:
self.mask_type = mask_type
else:
raise ValueError
self.inverse_mask = inverse_mask
self.scale_net = None
self.translate_net = None
self.scale_translate_net = None
if scale_net and translate_net:
self.scale_net = scale_net
self.translate_net = translate_net
elif scale_translate_net:
self.scale_translate_net = scale_translate_net
else:
raise ValueError
def build_mask(self, x):
"""
Parameters
----------
x : torch.Tensor
Returns
-------
mask : torch.tensor
Examples
--------
>>> scale_translate_net = lambda x: (x, x)
>>> f1 = AffineCoupling(4, mask_type="channel_wise", scale_translate_net=scale_translate_net,
... inverse_mask=False)
>>> x1 = torch.randn([1,4,3,3])
>>> f1.build_mask(x1)
tensor([[[[1.]],
<BLANKLINE>
[[1.]],
<BLANKLINE>
[[0.]],
<BLANKLINE>
[[0.]]]])
>>> f2 = AffineCoupling(2, mask_type="checkerboard", scale_translate_net=scale_translate_net,
... inverse_mask=True)
>>> x2 = torch.randn([1,2,5,5])
>>> f2.build_mask(x2)
tensor([[[[0., 1., 0., 1., 0.],
[1., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.]]]])
"""
if x.dim() == 4:
[_, channels, height, width] = x.shape
if self.mask_type == "checkerboard":
mask = checkerboard_mask(height, width, self.inverse_mask)
return torch.from_numpy(mask).view(1, 1, height, width).to(x.device)
else:
mask = channel_wise_mask(channels, self.inverse_mask)
return torch.from_numpy(mask).view(1, channels, 1, 1).to(x.device)
elif x.dim() == 2:
[_, n_features] = x.shape
if self.mask_type != "checkerboard":
mask = channel_wise_mask(n_features, self.inverse_mask)
return torch.from_numpy(mask).view(1, n_features).to(x.device)
raise ValueError
def get_parameters(self, x, y=None):
r"""
Parameters
----------
x : torch.tensor
y : torch.tensor
Returns
-------
s : torch.tensor
t : torch.tensor
Examples
--------
>>> # In case of using scale_translate_net
>>> scale_translate_net = lambda x: (x, x)
>>> f1 = AffineCoupling(4, mask_type="channel_wise", scale_translate_net=scale_translate_net,
... inverse_mask=False)
>>> x1 = torch.randn([1,4,3,3])
>>> log_s, t = f1.get_parameters(x1)
>>> # In case of using scale_net and translate_net
>>> scale_net = lambda x: x
>>> translate_net = lambda x: x
>>> f2 = AffineCoupling(4, mask_type="channel_wise", scale_net=scale_net, translate_net=translate_net,
... inverse_mask=False)
>>> x2 = torch.randn([1,4,3,3])
>>> log_s, t = f2.get_parameters(x2)
"""
if self.scale_translate_net:
if y is None:
log_s, t = self.scale_translate_net(x)
else:
log_s, t = self.scale_translate_net(x, y)
else:
if y is None:
log_s = self.scale_net(x)
t = self.translate_net(x)
else:
log_s = self.scale_net(x, y)
t = self.translate_net(x, y)
return log_s, t
def forward(self, x, y=None, compute_jacobian=True):
mask = self.build_mask(x)
x_masked = mask * x
x_inv_masked = (1 - mask) * x
log_s, t = self.get_parameters(x_masked, y)
log_s = log_s * (1 - mask)
t = t * (1 - mask)
x = x_masked + x_inv_masked * torch.exp(log_s) + t
if compute_jacobian:
self._logdet_jacobian = log_s.contiguous().view(log_s.size(0), -1).sum(-1)
return x
def inverse(self, z, y=None):
mask = self.build_mask(z)
z_masked = mask * z
z_inv_masked = (1 - mask) * z
log_s, t = self.get_parameters(z_masked, y)
log_s = log_s * (1 - mask)
t = t * (1 - mask)
z = z_masked + (z_inv_masked - t) * torch.exp(-log_s)
return z
def extra_repr(self):
return 'in_features={}, mask_type={}, inverse_mask={}'.format(
self.in_features, self.mask_type, self.inverse_mask
)
def checkerboard_mask(height, width, inverse_mask=False):
r"""
Parameters
----------
height : int
width : int
inverse_mask : bool
Returns
-------
mask : np.array
Examples
--------
>>> checkerboard_mask(5, 4, False)
array([[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.]], dtype=float32)
>>> checkerboard_mask(5, 4, True)
array([[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)
"""
mask = np.arange(height).reshape(-1, 1) + np.arange(width)
mask = np.mod((inverse_mask is False) + mask, 2)
return mask.astype(np.float32)
def channel_wise_mask(channels, inverse_mask=False):
r"""
Parameters
----------
channels : int
inverse_mask : bool
Returns
-------
mask : np.array
Examples
--------
>>> channel_wise_mask(6, False)
array([1., 1., 1., 0., 0., 0.], dtype=float32)
>>> channel_wise_mask(6, True)
array([0., 0., 0., 1., 1., 1.], dtype=float32)
"""
mask = np.zeros(channels).astype(np.float32)
if inverse_mask:
mask[channels // 2:] = 1
else:
mask[:channels // 2] = 1
return mask
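# A round-trip sketch for AffineCoupling with a small convolutional
# scale/translate network; the layer sizes and input shape are illustrative
# assumptions, not taken from the original file.
if __name__ == "__main__":
    from torch import nn

    st_net = nn.Conv2d(4, 8, kernel_size=3, padding=1)

    def scale_translate_net(h):
        # split the 8 output channels into log-scale and translation parts
        log_s, t = torch.chunk(st_net(h), 2, dim=1)
        return log_s, t

    f = AffineCoupling(4, mask_type="channel_wise",
                       scale_translate_net=scale_translate_net)
    x = torch.randn(2, 4, 8, 8)
    z = f(x)
    print(f.logdet_jacobian.shape)         # one log-det value per sample
    print((f.inverse(z) - x).abs().max())  # ~0: the coupling is invertible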
| 6,754 | 27.263598 | 110 | py |
pixyz | pixyz-main/pixyz/flows/normalizing_flows.py |
import math
import torch
from torch import nn
from torch.nn import functional as F
from ..utils import epsilon
from .flows import Flow
class PlanarFlow(Flow):
r"""
Planar flow.
.. math::
f(\mathbf{x}) = \mathbf{x} + \mathbf{u} h( \mathbf{w}^T \mathbf{x} + \mathbf{b})
"""
def __init__(self, in_features, constraint_u=False):
super().__init__(in_features)
self.w = nn.Parameter(torch.Tensor(1, in_features))
self.b = nn.Parameter(torch.Tensor(1))
self.u = nn.Parameter(torch.Tensor(1, in_features))
self.reset_parameters()
self.constraint_u = constraint_u
def deriv_tanh(self, x):
return 1 - torch.tanh(x) ** 2
def reset_parameters(self):
std = 1. / math.sqrt(self.w.size(1))
self.w.data.uniform_(-std, std)
self.b.data.uniform_(-std, std)
self.u.data.uniform_(-std, std)
def forward(self, x, y=None, compute_jacobian=True):
if self.constraint_u:
# modify :attr:`u` so that this flow can be invertible.
wu = torch.mm(self.w, self.u.t()) # (1, 1)
m_wu = -1. + F.softplus(wu)
w_normalized = self.w / torch.norm(self.w, keepdim=True)
u_hat = self.u + ((m_wu - wu) * w_normalized) # (1, in_features)
else:
u_hat = self.u
# compute the flow transformation
linear_output = F.linear(x, self.w, self.b) # (n_batch, 1)
z = x + u_hat * torch.tanh(linear_output)
if compute_jacobian:
# compute the log-det Jacobian (logdet|dz/dx|)
psi = self.deriv_tanh(linear_output) * self.w # (n_batch, in_features)
det_jacobian = 1. + torch.mm(psi, u_hat.t()).squeeze() # (n_batch, 1) -> (n_batch)
logdet_jacobian = torch.log(torch.abs(det_jacobian) + epsilon())
self._logdet_jacobian = logdet_jacobian
return z
def inverse(self, z, y=None):
raise NotImplementedError()
def extra_repr(self):
return 'in_features={}, constraint_u={}'.format(
self.in_features, self.constraint_u
)
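# A minimal sketch of PlanarFlow on toy inputs; the feature and batch sizes are
# illustrative assumptions. Note that inverse() is not implemented for this flow.
if __name__ == "__main__":
    f = PlanarFlow(5, constraint_u=True)
    x = torch.randn(3, 5)
    z = f(x)
    print(z.shape)                  # torch.Size([3, 5])
    print(f.logdet_jacobian.shape)  # one log-det value per sample: torch.Size([3])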
| 2,136 | 29.971014 | 95 | py |
pixyz | pixyz-main/pixyz/flows/flows.py |
from torch import nn
class Flow(nn.Module):
"""Flow class. In Pixyz, all flows are required to inherit this class."""
def __init__(self, in_features):
"""
Parameters
----------
in_features : int
Size of input data.
"""
super().__init__()
self._in_features = in_features
self._logdet_jacobian = None
@property
def in_features(self):
return self._in_features
def forward(self, x, y=None, compute_jacobian=True):
"""
Forward propagation of flow layers.
Parameters
----------
x : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
compute_jacobian : bool, defaults to True
Whether to calculate and store log-determinant Jacobian.
If true, calculated Jacobian values are stored in :attr:`logdet_jacobian`.
Returns
-------
z : torch.Tensor
"""
z = x
return z
def inverse(self, z, y=None):
"""
Backward (inverse) propagation of flow layers.
In this method, log-determinant Jacobian is not calculated.
Parameters
----------
z : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
Returns
-------
x : torch.Tensor
"""
x = z
return x
@property
def logdet_jacobian(self):
"""
Get log-determinant Jacobian.
Before calling this, you should run :attr:`forward` or :attr:`update_jacobian` methods to calculate and
store log-determinant Jacobian.
"""
return self._logdet_jacobian
class FlowList(Flow):
def __init__(self, flow_list):
"""
Hold flow modules in a list.
Once initialized, it can be handled as a single flow module.
Notes
-----
Indexing is not supported for now.
Parameters
----------
flow_list : list
"""
super().__init__(flow_list[0].in_features)
self.flow_list = nn.ModuleList(flow_list)
def forward(self, x, y=None, compute_jacobian=True):
logdet_jacobian = 0
for flow in self.flow_list:
x = flow.forward(x, y, compute_jacobian)
if compute_jacobian:
logdet_jacobian = logdet_jacobian + flow.logdet_jacobian
if compute_jacobian:
self._logdet_jacobian = logdet_jacobian
return x
def inverse(self, z, y=None):
for flow in self.flow_list[::-1]:
z = flow.inverse(z, y)
return z
def __repr__(self):
# rename "ModuleList" to "FlowList"
flow_list_repr = self.flow_list.__repr__().replace("ModuleList", "FlowList")
return flow_list_repr
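# A sketch of composing flows with FlowList, using a toy volume-preserving
# shift flow defined here only for illustration (it is not part of pixyz).
if __name__ == "__main__":
    import torch

    class Shift(Flow):
        """Toy flow: add a constant offset (log-det Jacobian is zero)."""
        def __init__(self, in_features, offset):
            super().__init__(in_features)
            self.offset = offset

        def forward(self, x, y=None, compute_jacobian=True):
            if compute_jacobian:
                self._logdet_jacobian = torch.zeros(x.size(0))
            return x + self.offset

        def inverse(self, z, y=None):
            return z - self.offset

    f = FlowList([Shift(4, 1.0), Shift(4, -0.5)])
    x = torch.randn(3, 4)
    z = f(x)
    print(f.logdet_jacobian)               # zeros: both shifts preserve volume
    print((f.inverse(z) - x).abs().max())  # ~0: inverse undoes the composition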
| 2,914 | 23.291667 | 111 | py |
pixyz | pixyz-main/pixyz/flows/operations.py |
import torch
import torch.nn.functional as F
import numpy as np
from .flows import Flow
from ..utils import sum_samples
class Squeeze(Flow):
"""
Squeeze operation.
c * s * s -> 4c * s/2 * s/2
Examples
--------
>>> import torch
>>> a = torch.tensor([i+1 for i in range(16)]).view(1,1,4,4)
>>> print(a)
tensor([[[[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]]]])
>>> f = Squeeze()
>>> print(f(a))
tensor([[[[ 1, 3],
[ 9, 11]],
<BLANKLINE>
[[ 2, 4],
[10, 12]],
<BLANKLINE>
[[ 5, 7],
[13, 15]],
<BLANKLINE>
[[ 6, 8],
[14, 16]]]])
>>> print(f.inverse(f(a)))
tensor([[[[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]]]])
"""
def __init__(self):
super().__init__(None)
self._logdet_jacobian = 0
def forward(self, x, y=None, compute_jacobian=True):
[_, channels, height, width] = x.shape
if height % 2 != 0 or width % 2 != 0:
raise ValueError
x = x.permute(0, 2, 3, 1)
x = x.view(-1, height // 2, 2, width // 2, 2, channels)
x = x.permute(0, 1, 3, 5, 2, 4)
x = x.contiguous().view(-1, height // 2, width // 2, channels * 4)
z = x.permute(0, 3, 1, 2)
return z
def inverse(self, z, y=None):
[_, channels, height, width] = z.shape
if channels % 4 != 0:
raise ValueError
z = z.permute(0, 2, 3, 1)
z = z.view(-1, height, width, channels // 4, 2, 2)
z = z.permute(0, 1, 4, 2, 5, 3)
z = z.contiguous().view(-1, 2 * height, 2 * width, channels // 4)
x = z.permute(0, 3, 1, 2)
return x
class Unsqueeze(Squeeze):
"""
Unsqueeze operation.
c * s * s -> c/4 * 2s * 2s
Examples
--------
>>> import torch
>>> a = torch.tensor([i+1 for i in range(16)]).view(1,4,2,2)
>>> print(a)
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
>>> f = Unsqueeze()
>>> print(f(a))
tensor([[[[ 1, 5, 2, 6],
[ 9, 13, 10, 14],
[ 3, 7, 4, 8],
[11, 15, 12, 16]]]])
>>> print(f.inverse(f(a)))
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
"""
def forward(self, x, y=None, compute_jacobian=True):
return super().inverse(x)
def inverse(self, z, y=None):
return super().forward(z)
class Permutation(Flow):
"""
Examples
--------
>>> import torch
>>> a = torch.tensor([i+1 for i in range(16)]).view(1,4,2,2)
>>> print(a)
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
>>> perm = [0,3,1,2]
>>> f = Permutation(perm)
>>> f(a)
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[13, 14],
[15, 16]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]]]])
>>> f.inverse(f(a))
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
"""
def __init__(self, permute_indices):
super().__init__(len(permute_indices))
self.permute_indices = permute_indices
self.inv_permute_indices = np.argsort(self.permute_indices)
self._logdet_jacobian = 0
def forward(self, x, y=None, compute_jacobian=True):
if x.dim() == 2:
return x[:, self.permute_indices]
elif x.dim() == 4:
return x[:, self.permute_indices, :, :]
raise ValueError
def inverse(self, z, y=None):
if z.dim() == 2:
return z[:, self.inv_permute_indices]
elif z.dim() == 4:
return z[:, self.inv_permute_indices, :, :]
raise ValueError
class Shuffle(Permutation):
def __init__(self, in_features):
permute_indices = np.random.permutation(in_features)
super().__init__(permute_indices)
class Reverse(Permutation):
def __init__(self, in_features):
permute_indices = np.array(np.arange(0, in_features)[::-1])
super().__init__(permute_indices)
class Flatten(Flow):
def __init__(self, in_size=None):
super().__init__(None)
self.in_size = in_size
self._logdet_jacobian = 0
def forward(self, x, y=None, compute_jacobian=True):
self.in_size = x.shape[1:]
return x.view(x.size(0), -1)
def inverse(self, z, y=None):
if self.in_size is None:
raise ValueError
return z.view(z.size(0), self.in_size[0], self.in_size[1], self.in_size[2])
class Preprocess(Flow):
def __init__(self):
super().__init__(None)
self.register_buffer('data_constraint', torch.tensor([0.05], dtype=torch.float32))
@staticmethod
def logit(x):
return x.log() - (1. - x).log()
def forward(self, x, y=None, compute_jacobian=True):
# 1. transform the domain of x from [0, 1] to [0, 255]
x = x * 255
# 2-1. add noise to pixels to dequantize them and transform its domain ([0, 255]->[0, 1]).
x = (x + torch.rand_like(x)) / 256.
# 2-2. squeeze pixel values toward 0.5 using data_constraint so they stay in the open interval ([0, 1]->(0, 1)).
x = (1 + (2 * x - 1) * (1 - self.data_constraint)) / 2.
# 2-3. apply the logit function ((0, 1)->(-inf, inf)).
z = self.logit(x)
if compute_jacobian:
# log-det Jacobian of transformation
logdet_jacobian = F.softplus(z) + F.softplus(-z) \
- F.softplus(self.data_constraint.log() - (1. - self.data_constraint).log())
logdet_jacobian = sum_samples(logdet_jacobian)
self._logdet_jacobian = logdet_jacobian
return z
def inverse(self, z, y=None):
# transform the domain of z from (-inf, inf) to (0, 1).
return torch.sigmoid(z)
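# A short sketch of Preprocess on fake pixel data in [0, 1]; the shape is an
# illustrative assumption. The inverse maps back through a sigmoid, so the
# reconstruction matches the input only up to dequantization noise and the
# data_constraint squeeze.
if __name__ == "__main__":
    x = torch.rand(2, 3, 4, 4)
    f = Preprocess()
    z = f(x)  # dequantize, squeeze to (0, 1), then apply the logit
    print(z.shape, f.logdet_jacobian.shape)
    print((f.inverse(z) - x).abs().max())  # small, but not exactly zero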
| 6,742 | 24.541667 | 98 | py |
pixyz | pixyz-main/pixyz/models/vi.py |
from torch import optim
from ..models.model import Model
from ..utils import tolist
from ..losses import ELBO
class VI(Model):
"""
Variational Inference (Amortized inference)
The negative ELBO for the given distributions (p, approximate_dist) is set as the loss class of this model.
"""
def __init__(self, p, approximate_dist,
other_distributions=[],
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
p : pixyz.distributions.Distribution
Generative model (distribution).
approximate_dist : pixyz.distributions.Distribution
Approximate posterior distribution.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p, approximate_dist] + tolist(other_distributions)
# set losses
elbo = ELBO(p, approximate_dist)
loss = -elbo.mean()
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, **kwargs):
return super().train(train_x_dict, **kwargs)
def test(self, test_x_dict={}, **kwargs):
return super().test(test_x_dict, **kwargs)
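# A minimal VI sketch, reusing the toy Generator/Inference pattern from the
# Model docstring (models/model.py); the 128/64 layer sizes, the joint
# p(x,z) = p(x|z)p_prior(z) construction, and the pseudo data are illustrative
# assumptions rather than part of this file.
if __name__ == "__main__":
    import torch
    from torch.nn import functional as F
    from pixyz.distributions import Bernoulli, Normal

    class Inference(Normal):  # q(z|x)
        def __init__(self):
            super().__init__(var=["z"], cond_var=["x"], name="q")
            self.model_loc = torch.nn.Linear(128, 64)
            self.model_scale = torch.nn.Linear(128, 64)

        def forward(self, x):
            return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}

    class Generator(Bernoulli):  # p(x|z)
        def __init__(self):
            super().__init__(var=["x"], cond_var=["z"], name="p")
            self.model = torch.nn.Linear(64, 128)

        def forward(self, z):
            return {"probs": torch.sigmoid(self.model(z))}

    prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
                   var=["z"], features_shape=[64], name="p_{prior}")
    p_joint = Generator() * prior  # joint distribution p(x,z)
    q = Inference()
    model = VI(p_joint, q, optimizer_params={"lr": 1e-3})
    data = torch.bernoulli(torch.rand(4, 128))  # pseudo binary data
    print(model.train({"x": data}))             # negative mean ELBO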
| 1,794 | 31.636364 | 98 | py |
pixyz | pixyz-main/pixyz/models/model.py |
from torch import optim, nn
import torch
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
import re
from ..utils import tolist
from ..distributions.distributions import Distribution
class Model(object):
"""
This class is for training and testing a loss class.
It requires a defined loss class, distributions to train, and an optimizer for initialization.
Examples
--------
>>> import torch
>>> from torch import optim
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Bernoulli, Normal
>>> from pixyz.losses import KullbackLeibler
...
>>> # Set distributions (Distribution API)
>>> class Inference(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x"],name="q")
... self.model_loc = torch.nn.Linear(128, 64)
... self.model_scale = torch.nn.Linear(128, 64)
... def forward(self, x):
... return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}
...
>>> class Generator(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z"],name="p")
... self.model = torch.nn.Linear(64, 128)
... def forward(self, z):
... return {"probs": torch.sigmoid(self.model(z))}
...
>>> p = Generator()
>>> q = Inference()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[64], name="p_{prior}")
...
>>> # Define a loss function (Loss API)
>>> reconst = -p.log_prob().expectation(q)
>>> kl = KullbackLeibler(q,prior)
>>> loss_cls = (reconst - kl).mean()
>>> print(loss_cls)
mean \\left(- D_{KL} \\left[q(z|x)||p_{prior}(z) \\right] - \\mathbb{E}_{q(z|x)} \\left[\\log p(x|z) \\right] \\right)
>>>
>>> # Set a model (Model API)
>>> model = Model(loss=loss_cls, distributions=[p, q],
... optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
>>> # Train and test the model
>>> data = torch.randn(1, 128) # Pseudo data
>>> train_loss = model.train({"x": data})
>>> test_loss = model.test({"x": data})
"""
def __init__(self, loss,
test_loss=None,
distributions=[],
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None,
retain_graph=False):
"""
Parameters
----------
loss : pixyz.losses.Loss
Loss class for training.
test_loss : pixyz.losses.Loss
Loss class for testing.
distributions : list
List of :class:`pixyz.distributions.Distribution`.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set losses
self.loss_cls = None
self.test_loss_cls = None
self.set_loss(loss, test_loss)
# set distributions (for training)
self.distributions = nn.ModuleList(tolist(distributions))
# set params and optim
params = self.distributions.parameters()
self.optimizer = optimizer(params, **optimizer_params)
self.clip_norm = clip_grad_norm
self.clip_value = clip_grad_value
self.retain_graph = retain_graph
def __str__(self):
prob_text = []
func_text = []
for prob in self.distributions._modules.values():
if isinstance(prob, Distribution):
prob_text.append(prob.prob_text)
else:
func_text.append(prob.__str__())
text = "Distributions (for training):\n {}\n".format(", ".join(prob_text))
if len(func_text) > 0:
text += "Deterministic functions (for training):\n {}\n".format(", ".join(func_text))
text += "Loss function:\n {}\n".format(str(self.loss_cls))
optimizer_text = re.sub('^', ' ' * 2, str(self.optimizer), flags=re.MULTILINE)
text += "Optimizer:\n{}".format(optimizer_text)
return text
def set_loss(self, loss, test_loss=None):
self.loss_cls = loss
if test_loss:
self.test_loss_cls = test_loss
else:
self.test_loss_cls = loss
def train(self, train_x_dict={}, **kwargs):
"""Train the model.
Parameters
----------
train_x_dict : dict
Input data.
**kwargs
Returns
-------
loss : torch.Tensor
Train loss value
"""
self.distributions.train()
self.optimizer.zero_grad()
loss = self.loss_cls.eval(train_x_dict, **kwargs)
# backprop
loss.backward(retain_graph=self.retain_graph)
if self.clip_norm:
clip_grad_norm_(self.distributions.parameters(), self.clip_norm)
if self.clip_value:
clip_grad_value_(self.distributions.parameters(), self.clip_value)
# update params
self.optimizer.step()
return loss
def test(self, test_x_dict={}, **kwargs):
"""Test the model.
Parameters
----------
test_x_dict : dict
Input data
**kwargs
Returns
-------
loss : torch.Tensor
Test loss value
"""
self.distributions.eval()
with torch.no_grad():
loss = self.test_loss_cls.eval(test_x_dict, **kwargs)
return loss
def save(self, path):
"""Save the model. The only parameters that are saved are those that are included in the distribution.
Parameters such as device, optimizer, placement of clip_grad, etc. are not saved.
Parameters
----------
path : str
Target file path
"""
torch.save({
'distributions': self.distributions.state_dict(),
}, path)
def load(self, path):
"""Load the model.
Parameters
----------
path : str
Target file path
"""
checkpoint = torch.load(path)
self.distributions.load_state_dict(checkpoint['distributions'])
| 6,465 | 29.790476 | 122 | py |
pixyz | pixyz-main/pixyz/models/vae.py |
from torch import optim
from ..models.model import Model
from ..utils import tolist
class VAE(Model):
"""
Variational Autoencoder.
In the VAE class, the reconstruction loss on the given distributions (encoder and decoder) is set as the default loss class.
If you want to add additional terms, e.g., the KL divergence between the encoder and the prior,
set them via the `regularizer` argument, which defaults to None.
References
----------
[Kingma+ 2013] Auto-Encoding Variational Bayes
"""
def __init__(self, encoder, decoder,
other_distributions=[],
regularizer=None,
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
encoder : pixyz.distributions.Distribution
Encoder distribution.
decoder : pixyz.distributions.Distribution
Decoder distribution.
regularizer : pixyz.losses.Loss, defaults to None
If you want to add additional terms to the loss, set them to this argument.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [encoder, decoder] + tolist(other_distributions)
# set losses
reconstruction = -decoder.log_prob().expectation(encoder)
loss = (reconstruction + regularizer).mean()
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, **kwargs):
return super().train(train_x_dict, **kwargs)
def test(self, test_x_dict={}, **kwargs):
return super().test(test_x_dict, **kwargs)
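# A minimal VAE sketch with a KL regularizer, following the toy encoder/decoder
# pattern from the Model docstring; layer sizes and pseudo data are illustrative
# assumptions.
if __name__ == "__main__":
    import torch
    from torch.nn import functional as F
    from pixyz.distributions import Bernoulli, Normal
    from pixyz.losses import KullbackLeibler

    class Encoder(Normal):  # q(z|x)
        def __init__(self):
            super().__init__(var=["z"], cond_var=["x"], name="q")
            self.model_loc = torch.nn.Linear(128, 64)
            self.model_scale = torch.nn.Linear(128, 64)

        def forward(self, x):
            return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}

    class Decoder(Bernoulli):  # p(x|z)
        def __init__(self):
            super().__init__(var=["x"], cond_var=["z"], name="p")
            self.model = torch.nn.Linear(64, 128)

        def forward(self, z):
            return {"probs": torch.sigmoid(self.model(z))}

    q, p = Encoder(), Decoder()
    prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
                   var=["z"], features_shape=[64], name="p_{prior}")
    kl = KullbackLeibler(q, prior)               # regularizer term
    model = VAE(q, p, regularizer=kl, optimizer_params={"lr": 1e-3})
    data = torch.bernoulli(torch.rand(4, 128))   # pseudo binary data
    print(model.train({"x": data}))              # (reconstruction + KL).mean()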
| 2,227 | 34.935484 | 116 | py |
pixyz | pixyz-main/pixyz/models/gan.py |
from torch import optim
from ..models.model import Model
from ..losses import AdversarialJensenShannon
from ..distributions import EmpiricalDistribution
class GAN(Model):
r"""
Generative Adversarial Network
(Adversarial) Jensen-Shannon divergence between given distributions (p_data, p)
is set as the loss class of this model.
Examples
--------
>>> import torch
>>> from torch import nn, optim
>>> from pixyz.distributions import Deterministic
>>> from pixyz.distributions import Normal
>>> from pixyz.models import GAN
>>> from pixyz.utils import print_latex
>>> x_dim = 128
>>> z_dim = 100
...
>>> # Set distributions (Distribution API)
...
>>> # generator model p(x|z)
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Sequential(
... nn.Linear(z_dim, x_dim),
... nn.Sigmoid()
... )
... def forward(self, z):
... x = self.model(z)
... return {"x": x}
...
>>> # prior model p(z)
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[z_dim], name="p_{prior}")
...
>>> # generative model
>>> p_g = Generator()
>>> p = (p_g*prior).marginalize_var("z")
...
>>> # discriminator model p(t|x)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Sequential(
... nn.Linear(x_dim, 1),
... nn.Sigmoid()
... )
... def forward(self, x):
... t = self.model(x)
... return {"t": t}
...
>>> d = Discriminator()
>>> # Set a model (Model API)
>>> model = GAN(p, d, optimizer_params={"lr":0.0002}, d_optimizer_params={"lr":0.0002})
>>> print(model)
Distributions (for training):
p(x)
Loss function:
mean(D_{JS}^{Adv} \left[p_{data}(x)||p(x) \right])
Optimizer:
Adam (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
eps: 1e-08
lr: 0.0002
weight_decay: 0
)
>>> # Train and test the model
>>> data = torch.randn(1, x_dim) # Pseudo data
>>> train_loss = model.train({"x": data})
>>> test_loss = model.test({"x": data})
"""
def __init__(self, p, discriminator,
optimizer=optim.Adam,
optimizer_params={},
d_optimizer=optim.Adam,
d_optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
p : pixyz.distributions.Distribution
Generative model (generator).
discriminator : pixyz.distributions.Distribution
Critic (discriminator).
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p]
p_data = EmpiricalDistribution(p.var)
# set losses
loss = AdversarialJensenShannon(p_data, p, discriminator, optimizer=d_optimizer,
optimizer_params=d_optimizer_params)
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, adversarial_loss=True, **kwargs):
"""Train the model.
Parameters
----------
train_x_dict : dict, defaults to {}
Input data.
adversarial_loss : bool, defaults to True
Whether to train the discriminator.
**kwargs
Returns
-------
loss : torch.Tensor
Train loss value.
d_loss : torch.Tensor
Train loss value of the discriminator (if :attr:`adversarial_loss` is True).
"""
if adversarial_loss:
d_loss = self.loss_cls.loss_train(train_x_dict, **kwargs)
loss = super().train(train_x_dict, **kwargs)
if adversarial_loss:
return loss, d_loss
return loss
def test(self, test_x_dict={}, adversarial_loss=True, **kwargs):
"""Train the model.
Parameters
----------
test_x_dict : dict, defaults to {}
Input data.
adversarial_loss : bool, defaults to True
Whether to return the discriminator loss.
**kwargs
Returns
-------
loss : torch.Tensor
Test loss value.
d_loss : torch.Tensor
Test loss value of the discriminator (if :attr:`adversarial_loss` is True).
"""
loss = super().test(test_x_dict, **kwargs)
if adversarial_loss:
d_loss = self.loss_cls.loss_test(test_x_dict, **kwargs)
return loss, d_loss
return loss
| 5,491 | 30.745665 | 91 | py |
pixyz | pixyz-main/pixyz/models/__init__.py |
from .model import Model
from .vae import VAE
from .vi import VI
from .ml import ML
from .gan import GAN
__all__ = [
'Model',
'ML',
'VAE',
'VI',
'GAN',
]
| 175 | 11.571429 | 24 | py |
pixyz | pixyz-main/pixyz/models/ml.py |
from torch import optim
from ..models.model import Model
from ..utils import tolist
class ML(Model):
"""
Maximum Likelihood (log-likelihood)
The negative log-likelihood of a given distribution (p) is set as the loss class of this model.
"""
def __init__(self, p,
other_distributions=[],
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=False,
clip_grad_value=False):
"""
Parameters
----------
p : pixyz.distributions.Distribution
Distribution to be trained (e.g., a classifier or generative model).
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p] + tolist(other_distributions)
# set losses
self.nll = -p.log_prob(sum_features=True)
loss = self.nll.mean()
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, **kwargs):
return super().train(train_x_dict, **kwargs)
def test(self, test_x_dict={}, **kwargs):
return super().test(test_x_dict, **kwargs)
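# A minimal maximum-likelihood sketch with a toy Bernoulli classifier p(t|x),
# mirroring the Distribution API usage in the Model docstring; the layer size
# and pseudo data are illustrative assumptions.
if __name__ == "__main__":
    import torch
    from pixyz.distributions import Bernoulli

    class Classifier(Bernoulli):  # p(t|x)
        def __init__(self):
            super().__init__(var=["t"], cond_var=["x"], name="p")
            self.model = torch.nn.Linear(16, 1)

        def forward(self, x):
            return {"probs": torch.sigmoid(self.model(x))}

    model = ML(Classifier(), optimizer_params={"lr": 1e-3})
    x = torch.randn(8, 16)                   # pseudo inputs
    t = torch.randint(0, 2, (8, 1)).float()  # pseudo binary labels
    print(model.train({"x": x, "t": t}))     # mean negative log-likelihood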
| 1,624 | 30.862745 | 99 | py |
pixyz | pixyz-main/pixyz/layers/resnet.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .norm_util import WNConv2d
class ResidualBlock(nn.Module):
"""ResNet basic block with weight norm."""
def __init__(self, in_channels, out_channels):
super().__init__()
self.in_norm = nn.BatchNorm2d(in_channels)
self.in_conv = WNConv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False)
self.out_norm = nn.BatchNorm2d(out_channels)
self.out_conv = WNConv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=True)
def forward(self, x):
skip = x
x = self.in_norm(x)
x = F.relu(x)
x = self.in_conv(x)
x = self.out_norm(x)
x = F.relu(x)
x = self.out_conv(x)
x = x + skip
return x
class ResNet(nn.Module):
"""ResNet for scale and translate factors in Real NVP.
Args:
in_channels (int): Number of channels in the input.
mid_channels (int): Number of channels in the intermediate layers.
out_channels (int): Number of channels in the output.
num_blocks (int): Number of residual blocks in the network.
kernel_size (int): Side length of each filter in convolutional layers.
padding (int): Padding for convolutional layers.
double_after_norm (bool): Double input after input BatchNorm.
"""
def __init__(self, in_channels, mid_channels, out_channels,
num_blocks, kernel_size, padding, double_after_norm):
super().__init__()
self.in_norm = nn.BatchNorm2d(in_channels)
self.double_after_norm = double_after_norm
self.in_conv = WNConv2d(2 * in_channels, mid_channels, kernel_size, padding, bias=True)
self.in_skip = WNConv2d(mid_channels, mid_channels, kernel_size=1, padding=0, bias=True)
self.blocks = nn.ModuleList([ResidualBlock(mid_channels, mid_channels)
for _ in range(num_blocks)])
self.skips = nn.ModuleList([WNConv2d(mid_channels, mid_channels, kernel_size=1, padding=0, bias=True)
for _ in range(num_blocks)])
self.out_norm = nn.BatchNorm2d(mid_channels)
self.out_conv = WNConv2d(mid_channels, out_channels, kernel_size=1, padding=0, bias=True)
def forward(self, x):
x = self.in_norm(x)
if self.double_after_norm:
x *= 2.
x = torch.cat((x, -x), dim=1)
x = F.relu(x)
x = self.in_conv(x)
x_skip = self.in_skip(x)
for block, skip in zip(self.blocks, self.skips):
x = block(x)
x_skip += skip(x)
x = self.out_norm(x_skip)
x = F.relu(x)
x = self.out_conv(x)
return x
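# A shape-check sketch for the Real NVP ResNet; the channel counts, spatial size,
# and hyperparameters below are illustrative assumptions.
if __name__ == "__main__":
    net = ResNet(in_channels=3, mid_channels=8, out_channels=6,
                 num_blocks=2, kernel_size=3, padding=1,
                 double_after_norm=True)
    x = torch.randn(2, 3, 8, 8)
    print(net(x).shape)  # torch.Size([2, 6, 8, 8]): spatial size is preserved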
| 2,757 | 33.475 | 109 | py |
pixyz | pixyz-main/pixyz/layers/norm_util.py |
import torch.nn as nn
class WNConv2d(nn.Module):
"""Weight-normalized 2d convolution.
Args:
in_channels (int): Number of channels in the input.
out_channels (int): Number of channels in the output.
kernel_size (int): Side length of each convolutional kernel.
padding (int): Padding to add on edges of input.
bias (bool): Use bias in the convolution operation.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True):
super(WNConv2d, self).__init__()
self.conv = nn.utils.weight_norm(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias))
def forward(self, x):
x = self.conv(x)
return x
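# A small sketch of WNConv2d; the channel counts and input shape are
# illustrative assumptions.
if __name__ == "__main__":
    import torch

    conv = WNConv2d(3, 8, kernel_size=3, padding=1)
    x = torch.randn(2, 3, 16, 16)
    print(conv(x).shape)  # torch.Size([2, 8, 16, 16])
    # weight_norm reparameterizes the kernel into magnitude/direction factors
    print([name for name, _ in conv.conv.named_parameters()])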
| 746 | 32.954545 | 90 | py |
pixyz | pixyz-main/pixyz/layers/__init__.py |
from .resnet import ResNet
__all__ = [
'ResNet',
]
| 56 | 8.5 | 26 | py |
pixyz | pixyz-main/pixyz/losses/losses.py |
import abc
import sympy
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
import numbers
from copy import deepcopy
from ..utils import get_dict_values
class Loss(torch.nn.Module, metaclass=abc.ABCMeta):
"""Loss class. In Pixyz, all loss classes are required to inherit this class.
Examples
--------
>>> import torch
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Bernoulli, Normal
>>> from pixyz.losses import KullbackLeibler
...
>>> # Set distributions
>>> class Inference(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x"],name="q")
... self.model_loc = torch.nn.Linear(128, 64)
... self.model_scale = torch.nn.Linear(128, 64)
... def forward(self, x):
... return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}
...
>>> class Generator(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z"],name="p")
... self.model = torch.nn.Linear(64, 128)
... def forward(self, z):
... return {"probs": torch.sigmoid(self.model(z))}
...
>>> p = Generator()
>>> q = Inference()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[64], name="p_{prior}")
...
>>> # Define a loss function (VAE)
>>> reconst = -p.log_prob().expectation(q)
>>> kl = KullbackLeibler(q,prior)
>>> loss_cls = (reconst - kl).mean()
>>> print(loss_cls)
mean \\left(- D_{KL} \\left[q(z|x)||p_{prior}(z) \\right] - \\mathbb{E}_{q(z|x)} \\left[\\log p(x|z) \\right] \\right)
>>> # Evaluate this loss function
>>> data = torch.randn(1, 128) # Pseudo data
>>> loss = loss_cls.eval({"x": data})
>>> print(loss) # doctest: +SKIP
tensor(65.5939, grad_fn=<MeanBackward0>)
"""
def __init__(self, input_var=None):
"""
Parameters
----------
input_var : :obj:`list` of :obj:`str`, defaults to None
Input variables of this loss function.
In general, users do not need to set them explicitly
because these depend on the given distributions and each loss function.
"""
super().__init__()
self._input_var = deepcopy(input_var)
@property
def input_var(self):
"""list: Input variables of this distribution."""
return self._input_var
@property
@abc.abstractmethod
def _symbol(self):
raise NotImplementedError()
@property
def loss_text(self):
return sympy.latex(self._symbol)
def __str__(self):
return self.loss_text
def __repr__(self):
return self.loss_text
def __add__(self, other):
return AddLoss(self, other)
def __radd__(self, other):
return AddLoss(other, self)
def __sub__(self, other):
return SubLoss(self, other)
def __rsub__(self, other):
return SubLoss(other, self)
def __mul__(self, other):
return MulLoss(self, other)
def __rmul__(self, other):
return MulLoss(other, self)
def __truediv__(self, other):
return DivLoss(self, other)
def __rtruediv__(self, other):
return DivLoss(other, self)
def __neg__(self):
return NegLoss(self)
def abs(self):
"""Return an instance of :class:`pixyz.losses.losses.AbsLoss`.
Returns
-------
pixyz.losses.losses.AbsLoss
An instance of :class:`pixyz.losses.losses.AbsLoss`
"""
return AbsLoss(self)
def mean(self):
"""Return an instance of :class:`pixyz.losses.losses.BatchMean`.
Returns
-------
pixyz.losses.losses.BatchMean
An instance of :class:`pixyz.losses.BatchMean`
"""
return BatchMean(self)
def sum(self):
"""Return an instance of :class:`pixyz.losses.losses.BatchSum`.
Returns
-------
pixyz.losses.losses.BatchSum
An instance of :class:`pixyz.losses.losses.BatchSum`
"""
return BatchSum(self)
def detach(self):
"""Return an instance of :class:`pixyz.losses.losses.Detach`.
Returns
-------
pixyz.losses.losses.Detach
An instance of :class:`pixyz.losses.losses.Detach`
"""
return Detach(self)
def expectation(self, p, sample_shape=torch.Size()):
"""Return an instance of :class:`pixyz.losses.Expectation`.
Parameters
----------
p : pixyz.distributions.Distribution
Distribution for sampling.
sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
Shape of generating samples.
Returns
-------
pixyz.losses.Expectation
An instance of :class:`pixyz.losses.Expectation`
"""
return Expectation(p, self, sample_shape=sample_shape)
def constant_var(self, constant_dict):
"""Return an instance of :class:`pixyz.losses.ConstantVar`.
Parameters
----------
constant_dict : dict
constant variables.
Returns
-------
pixyz.losses.ConstantVar
An instance of :class:`pixyz.losses.ConstantVar`
"""
return ConstantVar(self, constant_dict)
def eval(self, x_dict={}, return_dict=False, return_all=True, **kwargs):
"""Evaluate the value of the loss function given inputs (:attr:`x_dict`).
Parameters
----------
x_dict : :obj:`dict`, defaults to {}
Input variables.
return_dict : bool, defaults to False.
Whether to return samples along with the evaluated value of the loss function.
return_all : bool, defaults to True.
Whether to return all samples, including those that have not been updated.
Returns
-------
loss : torch.Tensor
the evaluated value of the loss function.
x_dict : :obj:`dict`
All samples generated when evaluating the loss function.
If :attr:`return_dict` is False, it is not returned.
"""
if not(set(list(x_dict.keys())) >= set(self._input_var)):
raise ValueError("Input keys are not valid, expected {} but got {}.".format(self._input_var,
list(x_dict.keys())))
input_dict = get_dict_values(x_dict, self.input_var, return_dict=True)
loss, eval_dict = self(input_dict, **kwargs)
if return_dict:
output_dict = x_dict.copy() if return_all else {}
output_dict.update(eval_dict)
return loss, output_dict
return loss
@abc.abstractmethod
def forward(self, x_dict, **kwargs):
"""
Parameters
----------
x_dict : dict
Input variables.
Returns
-------
tuple of torch.Tensor and dict
The deterministically calculated loss and a dict of all updated samples.
"""
raise NotImplementedError()
class Divergence(Loss, abc.ABC):
def __init__(self, p, q=None):
"""
Parameters
----------
p : pixyz.distributions.Distribution
Distribution.
q : pixyz.distributions.Distribution, defaults to None
Distribution.
"""
_input_var = deepcopy(p.input_var)
if q is not None:
_input_var += deepcopy(q.input_var)
_input_var = sorted(set(_input_var), key=_input_var.index)
super().__init__(_input_var)
self.p = p
self.q = q
class ValueLoss(Loss):
"""
This class contains a scalar as a loss value.
If multiplying a scalar by an arbitrary loss class, this scalar is converted to the :class:`ValueLoss`.
Examples
--------
>>> loss_cls = ValueLoss(2)
>>> print(loss_cls)
2
>>> loss = loss_cls.eval()
>>> print(loss)
tensor(2.)
"""
def __init__(self, loss1):
super().__init__()
self.original_value = loss1
self.register_buffer('value', torch.tensor(loss1, dtype=torch.float))
self._input_var = []
def forward(self, x_dict={}, **kwargs):
return self.value, {}
@property
def _symbol(self):
return self.original_value
class Parameter(Loss):
"""
This class defines a single variable as a loss class.
It can be used such as a coefficient parameter of a loss class.
Examples
--------
>>> loss_cls = Parameter("x")
>>> print(loss_cls)
x
>>> loss = loss_cls.eval({"x": 2})
>>> print(loss)
2
"""
def __init__(self, input_var):
if not isinstance(input_var, str):
raise ValueError()
super().__init__([input_var])
def forward(self, x_dict={}, **kwargs):
return x_dict[self._input_var[0]], {}
@property
def _symbol(self):
return sympy.Symbol(self._input_var[0])
class ConstantVar(Loss):
"""
This loss class fixes the value of a variable to a constant before evaluation.
It can be used to fix coefficient parameters of a loss class or to condition random variables.
Examples
--------
>>> loss_cls = Parameter('x').constant_var({'x': 1})
>>> print(loss_cls)
x
>>> loss = loss_cls.eval()
>>> print(loss)
1
"""
def __init__(self, base_loss, constant_dict):
_input_var = set(base_loss.input_var) - set(constant_dict.keys())
super().__init__(_input_var)
self.constant_dict = constant_dict
self.base_loss = base_loss
def forward(self, x_dict={}, **kwargs):
input_dict = dict(x_dict)
input_dict.update(self.constant_dict)
return self.base_loss.eval(input_dict, return_dict=True)
@property
def _symbol(self):
return self.base_loss._symbol
class LossOperator(Loss):
def __init__(self, loss1, loss2):
super().__init__()
_input_var = []
if isinstance(loss1, Loss):
_input_var += deepcopy(loss1.input_var)
elif isinstance(loss1, numbers.Number):
loss1 = ValueLoss(loss1)
elif isinstance(loss2, type(None)):
pass
else:
raise ValueError("{} cannot be operated with {}.".format(type(loss1), type(loss2)))
if isinstance(loss2, Loss):
_input_var += deepcopy(loss2.input_var)
elif isinstance(loss2, numbers.Number):
loss2 = ValueLoss(loss2)
elif isinstance(loss2, type(None)):
pass
else:
raise ValueError("{} cannot be operated with {}.".format(type(loss2), type(loss1)))
_input_var = sorted(set(_input_var), key=_input_var.index)
self._input_var = _input_var
self.loss1 = loss1
self.loss2 = loss2
def forward(self, x_dict={}, **kwargs):
if not isinstance(self.loss1, type(None)):
loss1, x1 = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
else:
loss1 = 0
x1 = {}
if not isinstance(self.loss2, type(None)):
loss2, x2 = self.loss2.eval(x_dict, return_dict=True, return_all=False, **kwargs)
else:
loss2 = 0
x2 = {}
x1.update(x2)
return loss1, loss2, x1
class AddLoss(LossOperator):
"""
Apply the `add` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 + loss_cls_2 # equals to AddLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
x + 2
>>> loss = loss_cls.eval({"x": 3})
>>> print(loss)
tensor(5.)
"""
@property
def _symbol(self):
return self.loss1._symbol + self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 + loss2, x_dict
class SubLoss(LossOperator):
"""
Apply the `sub` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 - loss_cls_2 # equals to SubLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
2 - x
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(-2.)
>>> loss_cls = loss_cls_2 - loss_cls_1 # equals to SubLoss(loss_cls_2, loss_cls_1)
>>> print(loss_cls)
x - 2
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(2.)
"""
@property
def _symbol(self):
return self.loss1._symbol - self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 - loss2, x_dict
class MulLoss(LossOperator):
"""
Apply the `mul` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 * loss_cls_2 # equals to MulLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
2 x
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(8.)
"""
@property
def _symbol(self):
return self.loss1._symbol * self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 * loss2, x_dict
class DivLoss(LossOperator):
"""
Apply the `div` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 / loss_cls_2 # equals to DivLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
\\frac{2}{x}
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(0.5000)
>>> loss_cls = loss_cls_2 / loss_cls_1 # equals to DivLoss(loss_cls_2, loss_cls_1)
>>> print(loss_cls)
\\frac{x}{2}
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(2.)
"""
@property
def _symbol(self):
return self.loss1._symbol / self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 / loss2, x_dict
class MinLoss(LossOperator):
r"""
Apply the `min` operation to the loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses.losses import ValueLoss, Parameter, MinLoss
>>> loss_min= MinLoss(ValueLoss(3), ValueLoss(1))
>>> print(loss_min)
min \left(3, 1\right)
>>> print(loss_min.eval())
tensor(1.)
"""
def __init__(self, loss1, loss2):
super().__init__(loss1, loss2)
@property
def _symbol(self):
return sympy.Symbol("min \\left({}, {}\\right)".format(self.loss1.loss_text, self.loss2.loss_text))
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return torch.min(loss1, loss2), x_dict
class MaxLoss(LossOperator):
r"""
Apply the `max` operation to the loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses.losses import ValueLoss, MaxLoss
>>> loss_max= MaxLoss(ValueLoss(3), ValueLoss(1))
>>> print(loss_max)
max \left(3, 1\right)
>>> print(loss_max.eval())
tensor(3.)
"""
def __init__(self, loss1, loss2):
super().__init__(loss1, loss2)
@property
def _symbol(self):
return sympy.Symbol("max \\left({}, {}\\right)".format(self.loss1.loss_text, self.loss2.loss_text))
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return torch.max(loss1, loss2), x_dict
class LossSelfOperator(Loss):
def __init__(self, loss1):
super().__init__()
_input_var = []
if isinstance(loss1, type(None)):
raise ValueError()
if isinstance(loss1, Loss):
_input_var = deepcopy(loss1.input_var)
elif isinstance(loss1, numbers.Number):
loss1 = ValueLoss(loss1)
else:
raise ValueError()
self._input_var = _input_var
self.loss1 = loss1
def loss_train(self, x_dict={}, **kwargs):
return self.loss1.loss_train(x_dict, **kwargs)
def loss_test(self, x_dict={}, **kwargs):
return self.loss1.loss_test(x_dict, **kwargs)
class NegLoss(LossSelfOperator):
"""
Apply the `neg` operation to the loss.
Examples
--------
>>> loss_cls_1 = Parameter("x")
>>> loss_cls = -loss_cls_1 # equals to NegLoss(loss_cls_1)
>>> print(loss_cls)
- x
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
-4
"""
@property
def _symbol(self):
return -self.loss1._symbol
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return -loss, x_dict
class AbsLoss(LossSelfOperator):
"""
Apply the `abs` operation to the loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses import LogProb
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p).abs() # equals to AbsLoss(LogProb(p))
>>> print(loss_cls)
|\\log p(x)|
>>> sample_x = torch.randn(2, 10) # Psuedo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([12.9894, 15.5280])
"""
@property
def _symbol(self):
return sympy.Symbol("|{}|".format(self.loss1.loss_text))
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.abs(), x_dict
class BatchMean(LossSelfOperator):
r"""
Average a loss class over given batch data.
.. math::
\mathbb{E}_{p_{data}(x)}[\mathcal{L}(x)] \approx \frac{1}{N}\sum_{i=1}^N \mathcal{L}(x_i),
where :math:`x_i \sim p_{data}(x)` and :math:`\mathcal{L}` is a loss function.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses import LogProb
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p).mean() # equals to BatchMean(LogProb(p))
>>> print(loss_cls)
mean \left(\log p(x) \right)
>>> sample_x = torch.randn(2, 10) # Psuedo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(-14.5038)
"""
@property
def _symbol(self):
return sympy.Symbol("mean \\left({} \\right)".format(self.loss1.loss_text)) # TODO: fix it
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.mean(), x_dict
class BatchSum(LossSelfOperator):
r"""
Sum a loss class over given batch data.
.. math::
\sum_{i=1}^N \mathcal{L}(x_i),
where :math:`x_i \sim p_{data}(x)` and :math:`\mathcal{L}` is a loss function.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses import LogProb
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p).sum() # equals to BatchSum(LogProb(p))
>>> print(loss_cls)
sum \left(\log p(x) \right)
>>> sample_x = torch.randn(2, 10) # Psuedo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(-31.9434)
"""
@property
def _symbol(self):
return sympy.Symbol("sum \\left({} \\right)".format(self.loss1.loss_text)) # TODO: fix it
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.sum(), x_dict
class Detach(LossSelfOperator):
r"""
Apply the `detach` method to the loss.
"""
@property
def _symbol(self):
return sympy.Symbol("detach \\left({} \\right)".format(self.loss1.loss_text)) # TODO: fix it?
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.detach(), x_dict
class Expectation(Loss):
r"""
Expectation of a given function (Monte Carlo approximation).
.. math::
\mathbb{E}_{p(x)}[f(x)] \approx \frac{1}{L}\sum_{l=1}^L f(x_l),
\quad \text{where}\quad x_l \sim p(x).
    Note that :math:`f` itself does not need to be sampled from; this fact is known as the law of the
    unconscious statistician (LOTUS).
    Therefore, in this class, :math:`f` is assumed to be a :attr:`pixyz.Loss` instance.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal, Bernoulli
>>> from pixyz.losses import LogProb
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"],
... features_shape=[10]) # q(z|x)
>>> p = Normal(loc="z", scale=torch.tensor(1.), var=["x"], cond_var=["z"],
... features_shape=[10]) # p(x|z)
>>> loss_cls = LogProb(p).expectation(q) # equals to Expectation(q, LogProb(p))
>>> print(loss_cls)
\mathbb{E}_{p(z|x)} \left[\log p(x|z) \right]
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([-12.8181, -12.6062])
>>> loss_cls = LogProb(p).expectation(q,sample_shape=(5,))
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
>>> q = Bernoulli(probs=torch.tensor(0.5), var=["x"], cond_var=[], features_shape=[10]) # q(x)
>>> p = Bernoulli(probs=torch.tensor(0.3), var=["x"], cond_var=[], features_shape=[10]) # p(x)
>>> loss_cls = p.log_prob().expectation(q,sample_shape=[64])
>>> train_loss = loss_cls.eval()
>>> print(train_loss) # doctest: +SKIP
tensor([46.7559])
>>> eval_loss = loss_cls.eval(test_mode=True)
>>> print(eval_loss) # doctest: +SKIP
tensor([-7.6047])
"""
def __init__(self, p, f, sample_shape=torch.Size([1]), reparam=True):
input_var = list(set(p.input_var) | set(f.input_var) - set(p.var))
super().__init__(input_var=input_var)
self.p = p
self.f = f
self.sample_shape = torch.Size(sample_shape)
self.reparam = reparam
@property
def _symbol(self):
p_text = "{" + self.p.prob_text + "}"
return sympy.Symbol("\\mathbb{{E}}_{} \\left[{} \\right]".format(p_text, self.f.loss_text))
def forward(self, x_dict={}, **kwargs):
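        # Monte Carlo estimate of the expectation: draw `sample_shape.numel()` sample sets from p,
        # evaluate f on each of them (merged with the observed inputs), and average the results.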
samples_dicts = [self.p.sample(x_dict, reparam=self.reparam, return_all=False, **kwargs)
for i in range(self.sample_shape.numel())]
loss_and_dicts = []
for samples_dict in samples_dicts:
input_dict = x_dict.copy()
input_dict.update(samples_dict)
loss_and_dicts.append(self.f.eval(input_dict, return_dict=True, return_all=False, **kwargs))
losses = [loss for loss, loss_sample_dict in loss_and_dicts]
        # average over the samples drawn according to sample_shape
loss = torch.stack(losses).mean(dim=0)
output_dict = {}
output_dict.update(samples_dicts[0])
output_dict.update(loss_and_dicts[0][1])
return loss, output_dict
def REINFORCE(p, f, b=ValueLoss(0), sample_shape=torch.Size([1]), reparam=True):
r"""
Surrogate Loss for Policy Gradient Method (REINFORCE) with a given reward function :math:`f` and a given baseline :math:`b`.
.. math::
\mathbb{E}_{p(x)}[detach(f(x)-b(x))\log p(x)+f(x)-b(x)].
    In this function, :math:`f` and :math:`b` are assumed to be :attr:`pixyz.Loss` instances.
Parameters
----------
p : :class:`pixyz.distributions.Distribution`
Distribution for expectation.
f : :class:`pixyz.losses.Loss`
reward function
    b : :class:`pixyz.losses.Loss`, defaults to pixyz.losses.ValueLoss(0)
        baseline function
    sample_shape : :class:`torch.Size`, defaults to torch.Size([1])
        sample size for the expectation
    reparam : bool, defaults to True
        whether to use reparameterization in internal sampling
Returns
-------
surrogate_loss : :class:`pixyz.losses.Loss`
        the policy gradient can be calculated from the gradient of this surrogate loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal, Bernoulli
>>> from pixyz.losses import LogProb
>>> q = Bernoulli(probs=torch.tensor(0.5), var=["x"], cond_var=[], features_shape=[10]) # q(x)
>>> p = Bernoulli(probs=torch.tensor(0.3), var=["x"], cond_var=[], features_shape=[10]) # p(x)
>>> loss_cls = REINFORCE(q,p.log_prob(),sample_shape=[64])
>>> train_loss = loss_cls.eval(test_mode=True)
>>> print(train_loss) # doctest: +SKIP
tensor([46.7559])
>>> loss_cls = p.log_prob().expectation(q,sample_shape=[64])
>>> test_loss = loss_cls.eval()
>>> print(test_loss) # doctest: +SKIP
tensor([-7.6047])
"""
return Expectation(p, (f - b).detach() * p.log_prob() + (f - b), sample_shape, reparam=reparam)
class DataParalleledLoss(Loss):
r"""
Loss class wrapper of torch.nn.DataParallel. It can be used as the original loss class.
`eval` & `forward` methods support data-parallel running.
Examples
--------
>>> import torch
>>> from torch import optim
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Bernoulli, Normal
>>> from pixyz.losses import KullbackLeibler, DataParalleledLoss
>>> from pixyz.models import Model
>>> used_gpu_i = set()
>>> used_gpu_g = set()
>>> # Set distributions (Distribution API)
>>> class Inference(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x"],name="q")
... self.model_loc = torch.nn.Linear(12, 6)
... self.model_scale = torch.nn.Linear(12, 6)
... def forward(self, x):
... used_gpu_i.add(x.device.index)
... return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}
>>> class Generator(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z"],name="p")
... self.model = torch.nn.Linear(6, 12)
... def forward(self, z):
... used_gpu_g.add(z.device.index)
... return {"probs": torch.sigmoid(self.model(z))}
>>> p = Generator()
>>> q = Inference()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[6], name="p_{prior}")
>>> # Define a loss function (Loss API)
>>> reconst = -p.log_prob().expectation(q)
>>> kl = KullbackLeibler(q,prior)
>>> batch_loss_cls = (reconst - kl)
>>> # device settings
>>> device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
>>> device_count = torch.cuda.device_count()
>>> expected = set(range(device_count)) if torch.cuda.is_available() else {None}
>>> if device_count > 1:
... loss_cls = DataParalleledLoss(batch_loss_cls, device_ids=list(expected)).mean().to(device)
... else:
... loss_cls = batch_loss_cls.mean().to(device)
>>> # Set a model (Model API)
>>> model = Model(loss=loss_cls, distributions=[p, q],
... optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
>>> # Train and test the model
>>> data = torch.randn(10, 12).to(device) # Pseudo data
>>> train_loss = model.train({"x": data})
>>> assert used_gpu_i==expected
>>> assert used_gpu_g==expected
"""
def __init__(self, loss, distributed=False, **kwargs):
super().__init__(loss.input_var)
if distributed:
self.paralleled = DistributedDataParallel(loss, **kwargs)
else:
self.paralleled = DataParallel(loss, **kwargs)
def forward(self, x_dict, **kwargs):
return self.paralleled.forward(x_dict, **kwargs)
@property
def _symbol(self):
return self.paralleled.module._symbol
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.paralleled.module, name)
| 28,918
| 29.83049
| 128
|
py
|
pixyz
|
pixyz-main/pixyz/losses/mmd.py
|
import torch
import sympy
from .losses import Divergence
from ..utils import get_dict_values
class MMD(Divergence):
r"""
The Maximum Mean Discrepancy (MMD).
.. math::
D_{MMD^2}[p||q] = \mathbb{E}_{p(x), p(x')}[k(x, x')] + \mathbb{E}_{q(x), q(x')}[k(x, x')]
- 2\mathbb{E}_{p(x), q(x')}[k(x, x')]
where :math:`k(x, x')` is any positive definite kernel.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="p")
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="q")
>>> loss_cls = MMD(p, q, kernel="gaussian")
>>> print(loss_cls)
D_{MMD^2} \left[p(z|x)||q(z|x) \right]
>>> loss = loss_cls.eval({"x": torch.randn(1, 64)})
>>> # Use the inverse (multi-)quadric kernel
>>> loss = MMD(p, q, kernel="inv-multiquadratic").eval({"x": torch.randn(10, 64)})
"""
def __init__(self, p, q, kernel="gaussian", **kernel_params):
if set(p.var) != set(q.var):
raise ValueError("The two distribution variables must be the same.")
if len(p.var) != 1:
raise ValueError("A given distribution must have only one variable.")
super().__init__(p, q)
if len(p.input_var) > 0:
self.input_dist = p
elif len(q.input_var) > 0:
self.input_dist = q
else:
raise NotImplementedError()
if kernel == "gaussian":
self.kernel = gaussian_rbf_kernel
elif kernel == "inv-multiquadratic":
self.kernel = inverse_multiquadratic_rbf_kernel
else:
raise NotImplementedError()
self.kernel_params = kernel_params
@property
def _symbol(self):
return sympy.Symbol("D_{{MMD^2}} \\left[{}||{} \\right]".format(self.p.prob_text, self.q.prob_text))
def _get_batch_n(self, x_dict):
return get_dict_values(x_dict, self.input_dist.input_var[0])[0].shape[0]
def forward(self, x_dict={}, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample from distributions
p_x = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.p.var)[0]
q_x = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.q.var)[0]
if p_x.shape != q_x.shape:
raise ValueError("The two distribution variables must have the same shape.")
if len(p_x.shape) != 2:
raise ValueError("The number of axes of a given sample must be 2, got %d" % len(p_x.shape))
p_x_dim = p_x.shape[1]
q_x_dim = q_x.shape[1]
# estimate the squared MMD (unbiased estimator)
p_kernel = self.kernel(p_x, p_x, **self.kernel_params).sum() / (p_x_dim * (p_x_dim - 1))
q_kernel = self.kernel(q_x, q_x, **self.kernel_params).sum() / (q_x_dim * (q_x_dim - 1))
pq_kernel = self.kernel(p_x, q_x, **self.kernel_params).sum() / (p_x_dim * q_x_dim)
mmd_loss = p_kernel + q_kernel - 2 * pq_kernel
return mmd_loss, {}
def pairwise_distance_matrix(x, y, metric="euclidean"):
r"""
    Computes the pairwise squared Euclidean distance matrix between x and y.
"""
if metric == "euclidean":
return torch.sum((x[:, None, :] - y[None, :, :]) ** 2, dim=-1)
raise NotImplementedError()
def gaussian_rbf_kernel(x, y, sigma_sqr=2., **kwargs):
r"""
Gaussian radial basis function (RBF) kernel.
.. math::
        k(x, y) = \exp \left(-\frac{||x-y||^2}{\sigma^2}\right)
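    Examples
    --------
    A quick sanity check (identical points give a kernel value of 1):
    >>> import torch
    >>> x = torch.zeros(1, 3)
    >>> gaussian_rbf_kernel(x, x)
    tensor([[1.]])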
"""
return torch.exp(-pairwise_distance_matrix(x, y) / (1. * sigma_sqr))
def inverse_multiquadratic_rbf_kernel(x, y, sigma_sqr=2., **kwargs):
r"""
Inverse multi-quadratic radial basis function (RBF) kernel.
.. math::
k(x, y) = \frac{\sigma^2}{||x-y||^2 + \sigma^2}
"""
return sigma_sqr / (pairwise_distance_matrix(x, y) + sigma_sqr)
| 3,976
| 31.598361
| 109
|
py
|
pixyz
|
pixyz-main/pixyz/losses/adversarial_loss.py
|
import sympy
from torch import optim, nn
import torch
from .losses import Divergence
from ..utils import get_dict_values, detach_dict
class AdversarialLoss(Divergence):
def __init__(self, p, q, discriminator, optimizer=optim.Adam, optimizer_params={}):
if set(p.var) != set(q.var):
raise ValueError("The two distribution variables must be the same.")
super().__init__(p, q)
if len(p.input_var) > 0:
self.input_dist = p
elif len(q.input_var) > 0:
self.input_dist = q
else:
raise NotImplementedError()
self.loss_optimizer = optimizer
self.loss_optimizer_params = optimizer_params
self.d = discriminator
params = discriminator.parameters()
self.d_optimizer = optimizer(params, **optimizer_params)
def _get_batch_n(self, x_dict):
return get_dict_values(x_dict, self.input_dist.input_var)[0].shape[0]
def d_loss(self, y_p, y_q, batch_n):
"""Evaluate a discriminator loss given outputs of the discriminator.
Parameters
----------
y_p : torch.Tensor
Output of discriminator given sample from p.
y_q : torch.Tensor
Output of discriminator given sample from q.
batch_n : int
Batch size of inputs.
Returns
-------
torch.Tensor
"""
raise NotImplementedError()
def g_loss(self, y_p, y_q, batch_n):
"""Evaluate a generator loss given outputs of the discriminator.
Parameters
----------
y_p : torch.Tensor
Output of discriminator given sample from p.
y_q : torch.Tensor
Output of discriminator given sample from q.
batch_n : int
Batch size of inputs.
Returns
-------
torch.Tensor
"""
raise NotImplementedError()
def loss_train(self, train_x_dict, **kwargs):
"""Train the evaluation metric (discriminator).
Parameters
----------
train_x_dict : dict
Input variables.
**kwargs
Arbitrary keyword arguments.
Returns
-------
loss : torch.Tensor
"""
self.d.train()
self.d_optimizer.zero_grad()
loss = self.eval(train_x_dict, discriminator=True)
# backprop
loss.backward()
# update params
self.d_optimizer.step()
return loss
def loss_test(self, test_x_dict, **kwargs):
"""Test the evaluation metric (discriminator).
Parameters
----------
test_x_dict : dict
Input variables.
**kwargs
Arbitrary keyword arguments.
Returns
-------
loss : torch.Tensor
"""
self.d.eval()
with torch.no_grad():
loss = self.eval(test_x_dict, discriminator=True)
return loss
class AdversarialJensenShannon(AdversarialLoss):
r"""
Jensen-Shannon divergence (adversarial training).
.. math::
        2 \cdot D_{JS}[p(x)||q(x)] - 2 \log 2
         = \mathbb{E}_{p(x)}[\log d^*(x)] + \mathbb{E}_{q(x)}[\log (1-d^*(x))],
where :math:`d^*(x) = \arg\max_{d} \mathbb{E}_{p(x)}[\log d(x)] + \mathbb{E}_{q(x)}[\log (1-d(x))]`.
This class acts as a metric that evaluates a given distribution (generator).
    If you want to learn this evaluation metric itself, i.e., the discriminator (critic), use the :attr:`loss_train` method.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Deterministic, EmpiricalDistribution, Normal
>>> # Generator
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Linear(32, 64)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p_g = Generator()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[32], name="p_{prior}")
>>> p = (p_g*prior).marginalize_var("z")
>>> print(p)
Distribution:
p(x) = \int p(x|z)p_{prior}(z)dz
Network architecture:
p_{prior}(z):
Normal(
name=p_{prior}, distribution_name=Normal,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([32])
(loc): torch.Size([1, 32])
(scale): torch.Size([1, 32])
)
p(x|z):
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=32, out_features=64, bias=True)
)
>>> # Data distribution (dummy distribution)
>>> p_data = EmpiricalDistribution(["x"])
>>> print(p_data)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> # Discriminator (critic)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Linear(64, 1)
... def forward(self, x):
... return {"t": torch.sigmoid(self.model(x))}
>>> d = Discriminator()
>>> print(d)
Distribution:
d(t|x)
Network architecture:
Discriminator(
name=d, distribution_name=Deterministic,
var=['t'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=1, bias=True)
)
>>>
>>> # Set the loss class
>>> loss_cls = AdversarialJensenShannon(p, p_data, discriminator=d)
>>> print(loss_cls)
mean(D_{JS}^{Adv} \left[p(x)||p_{data}(x) \right])
>>>
    >>> sample_x = torch.randn(2, 64) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(1.3723, grad_fn=<AddBackward0>)
>>> # For evaluating a discriminator loss, set the `discriminator` option to True.
>>> loss_d = loss_cls.eval({"x": sample_x}, discriminator=True)
>>> print(loss_d) # doctest: +SKIP
tensor(1.4990, grad_fn=<AddBackward0>)
    >>> # When training the evaluation metric (discriminator), use the loss_train method.
>>> train_loss = loss_cls.loss_train({"x": sample_x})
References
----------
[Goodfellow+ 2014] Generative Adversarial Networks
"""
def __init__(self, p, q, discriminator, optimizer=optim.Adam, optimizer_params={}, inverse_g_loss=True):
super().__init__(p, q, discriminator, optimizer=optimizer, optimizer_params=optimizer_params)
self.bce_loss = nn.BCELoss()
self._inverse_g_loss = inverse_g_loss
@property
def _symbol(self):
return sympy.Symbol("mean(D_{{JS}}^{{Adv}} \\left[{}||{} \\right])".format(self.p.prob_text,
self.q.prob_text))
def forward(self, x_dict, discriminator=False, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample x_p from p
x_p_dict = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
# sample x_q from q
x_q_dict = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
if discriminator:
# sample y_p from d
y_p = get_dict_values(self.d.sample(detach_dict(x_p_dict), **kwargs), self.d.var)[0]
# sample y_q from d
y_q = get_dict_values(self.d.sample(detach_dict(x_q_dict), **kwargs), self.d.var)[0]
return self.d_loss(y_p, y_q, batch_n), x_dict
# sample y_p from d
y_p_dict = self.d.sample(x_p_dict, **kwargs)
# sample y_q from d
y_q_dict = self.d.sample(x_q_dict, **kwargs)
y_p = get_dict_values(y_p_dict, self.d.var)[0]
y_q = get_dict_values(y_q_dict, self.d.var)[0]
return self.g_loss(y_p, y_q, batch_n), x_dict
def d_loss(self, y_p, y_q, batch_n):
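        # Binary cross-entropy with label 1 for samples from p and label 0 for samples from q;
        # minimizing it drives d(x) towards the optimal discriminator p(x) / (p(x) + q(x)).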
# set labels
t_p = torch.ones(batch_n, 1).to(y_p.device)
t_q = torch.zeros(batch_n, 1).to(y_p.device)
return self.bce_loss(y_p, t_p) + self.bce_loss(y_q, t_q)
def g_loss(self, y_p, y_q, batch_n):
# set labels
t1 = torch.ones(batch_n, 1).to(y_p.device)
t2 = torch.zeros(batch_n, 1).to(y_p.device)
if self._inverse_g_loss:
y_p_loss = self.bce_loss(y_p, t2)
y_q_loss = self.bce_loss(y_q, t1)
else:
y_p_loss = -self.bce_loss(y_p, t1)
y_q_loss = -self.bce_loss(y_q, t2)
if self.p.distribution_name == "Data distribution":
y_p_loss = y_p_loss.detach()
if self.q.distribution_name == "Data distribution":
y_q_loss = y_q_loss.detach()
return y_p_loss + y_q_loss
def loss_train(self, train_x_dict, **kwargs):
return super().loss_train(train_x_dict, **kwargs)
def loss_test(self, test_x_dict, **kwargs):
return super().loss_test(test_x_dict, **kwargs)
class AdversarialKullbackLeibler(AdversarialLoss):
r"""
Kullback-Leibler divergence (adversarial training).
.. math::
D_{KL}[p(x)||q(x)] = \mathbb{E}_{p(x)}\left[\log \frac{p(x)}{q(x)}\right]
\approx \mathbb{E}_{p(x)}\left[\log \frac{d^*(x)}{1-d^*(x)}\right],
    where :math:`d^*(x) = \arg\max_{d} \mathbb{E}_{p(x)}[\log d(x)] + \mathbb{E}_{q(x)}[\log (1-d(x))]`.
    Note that this divergence is minimized so that :math:`p` moves closer to :math:`q`.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Deterministic, EmpiricalDistribution, Normal
>>> # Generator
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Linear(32, 64)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p_g = Generator()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[32], name="p_{prior}")
>>> p = (p_g*prior).marginalize_var("z")
>>> print(p)
Distribution:
p(x) = \int p(x|z)p_{prior}(z)dz
Network architecture:
p_{prior}(z):
Normal(
name=p_{prior}, distribution_name=Normal,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([32])
(loc): torch.Size([1, 32])
(scale): torch.Size([1, 32])
)
p(x|z):
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=32, out_features=64, bias=True)
)
>>> # Data distribution (dummy distribution)
>>> p_data = EmpiricalDistribution(["x"])
>>> print(p_data)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> # Discriminator (critic)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Linear(64, 1)
... def forward(self, x):
... return {"t": torch.sigmoid(self.model(x))}
>>> d = Discriminator()
>>> print(d)
Distribution:
d(t|x)
Network architecture:
Discriminator(
name=d, distribution_name=Deterministic,
var=['t'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=1, bias=True)
)
>>>
>>> # Set the loss class
>>> loss_cls = AdversarialKullbackLeibler(p, p_data, discriminator=d)
>>> print(loss_cls)
mean(D_{KL}^{Adv} \left[p(x)||p_{data}(x) \right])
>>>
    >>> sample_x = torch.randn(2, 64) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> # The evaluation value might be negative if the discriminator training is incomplete.
>>> print(loss) # doctest: +SKIP
tensor(-0.8377, grad_fn=<AddBackward0>)
>>> # For evaluating a discriminator loss, set the `discriminator` option to True.
>>> loss_d = loss_cls.eval({"x": sample_x}, discriminator=True)
>>> print(loss_d) # doctest: +SKIP
tensor(1.9321, grad_fn=<AddBackward0>)
    >>> # When training the evaluation metric (discriminator), use the loss_train method.
>>> train_loss = loss_cls.loss_train({"x": sample_x})
References
----------
[Kim+ 2018] Disentangling by Factorising
"""
def __init__(self, p, q, discriminator, **kwargs):
super().__init__(p, q, discriminator, **kwargs)
self.bce_loss = nn.BCELoss()
@property
def _symbol(self):
return sympy.Symbol("mean(D_{{KL}}^{{Adv}} \\left[{}||{} \\right])".format(self.p.prob_text,
self.q.prob_text))
def forward(self, x_dict, discriminator=False, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample x_p from p
x_p_dict = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
if discriminator:
# sample x_q from q
x_q_dict = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
# sample y_p from d
y_p = get_dict_values(self.d.sample(detach_dict(x_p_dict), **kwargs), self.d.var)[0]
# sample y_q from d
y_q = get_dict_values(self.d.sample(detach_dict(x_q_dict), **kwargs), self.d.var)[0]
return self.d_loss(y_p, y_q, batch_n), {}
# sample y from d
y_p = get_dict_values(self.d.sample(x_p_dict, **kwargs), self.d.var)[0]
return self.g_loss(y_p, batch_n), {}
def g_loss(self, y_p, batch_n):
"""Evaluate a generator loss given an output of the discriminator.
Parameters
----------
y_p : torch.Tensor
Output of discriminator given sample from p.
batch_n : int
Batch size of inputs.
Returns
-------
torch.Tensor
"""
# set labels
t_p = torch.ones(batch_n, 1).to(y_p.device)
t_q = torch.zeros(batch_n, 1).to(y_p.device)
y_p_loss = -self.bce_loss(y_p, t_p) + self.bce_loss(y_p, t_q) # log (y_p) - log (1 - y_p)
return y_p_loss
def d_loss(self, y_p, y_q, batch_n):
# set labels
t_p = torch.ones(batch_n, 1).to(y_p.device)
t_q = torch.zeros(batch_n, 1).to(y_p.device)
return self.bce_loss(y_p, t_p) + self.bce_loss(y_q, t_q)
def loss_train(self, train_x_dict, **kwargs):
return super().loss_train(train_x_dict, **kwargs)
def loss_test(self, test_x_dict, **kwargs):
return super().loss_test(test_x_dict, **kwargs)
class AdversarialWassersteinDistance(AdversarialJensenShannon):
r"""
Wasserstein distance (adversarial training).
.. math::
W(p, q) = \sup_{||d||_{L} \leq 1} \mathbb{E}_{p(x)}[d(x)] - \mathbb{E}_{q(x)}[d(x)]
Examples
--------
>>> import torch
>>> from pixyz.distributions import Deterministic, EmpiricalDistribution, Normal
>>> # Generator
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Linear(32, 64)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p_g = Generator()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[32], name="p_{prior}")
>>> p = (p_g*prior).marginalize_var("z")
>>> print(p)
Distribution:
p(x) = \int p(x|z)p_{prior}(z)dz
Network architecture:
p_{prior}(z):
Normal(
name=p_{prior}, distribution_name=Normal,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([32])
(loc): torch.Size([1, 32])
(scale): torch.Size([1, 32])
)
p(x|z):
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=32, out_features=64, bias=True)
)
>>> # Data distribution (dummy distribution)
>>> p_data = EmpiricalDistribution(["x"])
>>> print(p_data)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> # Discriminator (critic)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Linear(64, 1)
... def forward(self, x):
... return {"t": self.model(x)}
>>> d = Discriminator()
>>> print(d)
Distribution:
d(t|x)
Network architecture:
Discriminator(
name=d, distribution_name=Deterministic,
var=['t'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=1, bias=True)
)
>>>
>>> # Set the loss class
>>> loss_cls = AdversarialWassersteinDistance(p, p_data, discriminator=d)
>>> print(loss_cls)
mean(W^{Adv} \left(p(x), p_{data}(x) \right))
>>>
    >>> sample_x = torch.randn(2, 64) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(-0.0060, grad_fn=<SubBackward0>)
>>> # For evaluating a discriminator loss, set the `discriminator` option to True.
>>> loss_d = loss_cls.eval({"x": sample_x}, discriminator=True)
>>> print(loss_d) # doctest: +SKIP
tensor(-0.3802, grad_fn=<NegBackward>)
    >>> # When training the evaluation metric (discriminator), use the loss_train method.
>>> train_loss = loss_cls.loss_train({"x": sample_x})
References
----------
[Arjovsky+ 2017] Wasserstein GAN
"""
def __init__(self, p, q, discriminator,
clip_value=0.01, **kwargs):
super().__init__(p, q, discriminator, **kwargs)
self._clip_value = clip_value
@property
def _symbol(self):
return sympy.Symbol("mean(W^{{Adv}} \\left({}, {} \\right))".format(self.p.prob_text, self.q.prob_text))
def d_loss(self, y_p, y_q, *args, **kwargs):
return - (torch.mean(y_p) - torch.mean(y_q))
def g_loss(self, y_p, y_q, *args, **kwargs):
if self.p.distribution_name == "Data distribution":
y_p = y_p.detach()
if self.q.distribution_name == "Data distribution":
y_q = y_q.detach()
return torch.mean(y_p) - torch.mean(y_q)
def loss_train(self, train_x_dict, **kwargs):
loss = super().loss_train(train_x_dict, **kwargs)
# Clip weights of discriminator
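        # (clipping keeps the critic weights in a compact set, a crude way of enforcing the
        #  Lipschitz constraint required by the Wasserstein formulation)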
for params in self.d.parameters():
params.data.clamp_(-self._clip_value, self._clip_value)
return loss
def loss_test(self, test_x_dict, **kwargs):
return super().loss_test(test_x_dict, **kwargs)
| 19,715
| 33.650264
| 116
|
py
|
pixyz
|
pixyz-main/pixyz/losses/divergences.py
|
import sympy
import torch
from torch.distributions import kl_divergence
from ..utils import get_dict_values
from .losses import Divergence
def KullbackLeibler(p, q, dim=None, analytical=True, sample_shape=torch.Size([1])):
r"""
    Kullback-Leibler divergence (analytical or Monte Carlo approximation).
.. math::
D_{KL}[p||q] &= \mathbb{E}_{p(x)}\left[\log \frac{p(x)}{q(x)}\right] \qquad \text{(analytical)}\\
&\approx \frac{1}{L}\sum_{l=1}^L \log\frac{p(x_l)}{q(x_l)},
\quad \text{where} \quad x_l \sim p(x) \quad \text{(MC approximation)}.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal, Beta
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["z"], features_shape=[64], name="p")
>>> q = Normal(loc=torch.tensor(1.), scale=torch.tensor(1.), var=["z"], features_shape=[64], name="q")
>>> loss_cls = KullbackLeibler(p,q,analytical=True)
>>> print(loss_cls)
D_{KL} \left[p(z)||q(z) \right]
>>> loss_cls.eval()
tensor([32.])
>>> loss_cls = KullbackLeibler(p,q,analytical=False,sample_shape=[64])
>>> print(loss_cls)
\mathbb{E}_{p(z)} \left[\log p(z) - \log q(z) \right]
>>> loss_cls.eval() # doctest: +SKIP
tensor([31.4713])
"""
if analytical:
loss = AnalyticalKullbackLeibler(p, q, dim)
else:
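        # Monte Carlo approximation: E_{p(x)}[log p(x) - log q(x)], estimated with
        # `sample_shape` samples drawn from p.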
loss = (p.log_prob() - q.log_prob()).expectation(p, sample_shape=sample_shape)
return loss
class AnalyticalKullbackLeibler(Divergence):
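    r"""
    Analytical Kullback-Leibler divergence.
    This is a thin wrapper around :func:`torch.distributions.kl_divergence`; the resulting
    divergence is summed over ``dim`` when it is given, otherwise over all non-batch dimensions.
    """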
def __init__(self, p, q, dim=None):
self.dim = dim
super().__init__(p, q)
@property
def _symbol(self):
return sympy.Symbol("D_{{KL}} \\left[{}||{} \\right]".format(self.p.prob_text, self.q.prob_text))
def forward(self, x_dict, **kwargs):
if (not hasattr(self.p, 'distribution_torch_class')) or (not hasattr(self.q, 'distribution_torch_class')):
raise ValueError("Divergence between these two distributions cannot be evaluated, "
"got %s and %s." % (self.p.distribution_name, self.q.distribution_name))
input_dict = get_dict_values(x_dict, self.p.input_var, True)
self.p.set_dist(input_dict)
input_dict = get_dict_values(x_dict, self.q.input_var, True)
self.q.set_dist(input_dict)
divergence = kl_divergence(self.p.dist, self.q.dist)
if self.dim:
divergence = torch.sum(divergence, dim=self.dim)
return divergence, {}
dim_list = list(torch.arange(divergence.dim()))
divergence = torch.sum(divergence, dim=dim_list[1:])
return divergence, {}
"""
if (self._p1.distribution_name == "vonMisesFisher" and \
self._p2.distribution_name == "HypersphericalUniform"):
inputs = get_dict_values(x, self._p1.input_var, True)
params1 = self._p1.get_params(inputs, **kwargs)
hyu_dim = self._p2.dim
return vmf_hyu_kl(params1["loc"], params1["scale"],
hyu_dim, self.device), x
raise Exception("You cannot use these distributions, "
"got %s and %s." % (self._p1.distribution_name,
self._p2.distribution_name))
#inputs = get_dict_values(x, self._p2.input_var, True)
#self._p2.set_dist(inputs)
#divergence = kl_divergence(self._p1.dist, self._p2.dist)
if self.dim:
_kl = torch.sum(divergence, dim=self.dim)
return divergence, x
"""
"""
def vmf_hyu_kl(vmf_loc, vmf_scale, hyu_dim, device):
__m = vmf_loc.shape[-1]
vmf_entropy = vmf_scale * ive(__m/2, vmf_scale) / ive((__m/2)-1, vmf_scale)
vmf_log_norm = ((__m / 2 - 1) * torch.log(vmf_scale) - (__m / 2) * math.log(2 * math.pi) - (
vmf_scale + torch.log(ive(__m / 2 - 1, vmf_scale))))
vmf_log_norm = vmf_log_norm.view(*(vmf_log_norm.shape[:-1]))
vmf_entropy = vmf_entropy.view(*(vmf_entropy.shape[:-1])) + vmf_log_norm
hyu_entropy = math.log(2) + ((hyu_dim + 1) / 2) * math.log(math.pi) - torch.lgamma(
torch.Tensor([(hyu_dim + 1) / 2])).to(device)
return - vmf_entropy + hyu_entropy
"""
| 4,222
| 37.045045
| 114
|
py
|
pixyz
|
pixyz-main/pixyz/losses/iteration.py
|
from copy import deepcopy
import sympy
from .losses import Loss
from ..utils import get_dict_values, replace_dict_keys
class IterativeLoss(Loss):
r"""
Iterative loss.
This class allows implementing an arbitrary model which requires iteration.
.. math::
\mathcal{L} = \sum_{t=0}^{T-1}\mathcal{L}_{step}(x_t, h_t),
where :math:`x_t = f_{slice\_step}(x, t)`.
Examples
--------
>>> import torch
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Normal, Bernoulli, Deterministic
>>>
>>> # Set distributions
>>> x_dim = 128
>>> z_dim = 64
>>> h_dim = 32
>>>
>>> # p(x|z,h_{prev})
>>> class Decoder(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z", "h_prev"],name="p")
... self.fc = torch.nn.Linear(z_dim + h_dim, x_dim)
... def forward(self, z, h_prev):
... return {"probs": torch.sigmoid(self.fc(torch.cat((z, h_prev), dim=-1)))}
...
>>> # q(z|x,h_{prev})
>>> class Encoder(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x", "h_prev"],name="q")
... self.fc_loc = torch.nn.Linear(x_dim + h_dim, z_dim)
... self.fc_scale = torch.nn.Linear(x_dim + h_dim, z_dim)
... def forward(self, x, h_prev):
... xh = torch.cat((x, h_prev), dim=-1)
... return {"loc": self.fc_loc(xh), "scale": F.softplus(self.fc_scale(xh))}
...
>>> # f(h|x,z,h_{prev}) (update h)
>>> class Recurrence(Deterministic):
... def __init__(self):
... super().__init__(var=["h"], cond_var=["x", "z", "h_prev"], name="f")
... self.rnncell = torch.nn.GRUCell(x_dim + z_dim, h_dim)
... def forward(self, x, z, h_prev):
... return {"h": self.rnncell(torch.cat((z, x), dim=-1), h_prev)}
>>>
>>> p = Decoder()
>>> q = Encoder()
>>> f = Recurrence()
>>>
>>> # Set the loss class
>>> step_loss_cls = p.log_prob().expectation(q * f).mean()
>>> print(step_loss_cls)
mean \left(\mathbb{E}_{q(z,h|x,h_{prev})} \left[\log p(x|z,h_{prev}) \right] \right)
>>> loss_cls = IterativeLoss(step_loss=step_loss_cls,
... series_var=["x"], update_value={"h": "h_prev"})
>>> print(loss_cls)
\sum_{t=0}^{t_{max} - 1} mean \left(\mathbb{E}_{q(z,h|x,h_{prev})} \left[\log p(x|z,h_{prev}) \right] \right)
>>>
>>> # Evaluate
>>> x_sample = torch.randn(30, 2, 128) # (timestep_size, batch_size, feature_size)
>>> h_init = torch.zeros(2, 32) # (batch_size, h_dim)
>>> loss = loss_cls.eval({"x": x_sample, "h_prev": h_init})
>>> print(loss) # doctest: +SKIP
    tensor(-2826.0906, grad_fn=<AddBackward0>)
"""
def __init__(self, step_loss, max_iter=None,
series_var=(), update_value={}, slice_step=None, timestep_var=()):
super().__init__()
self.step_loss = step_loss
self.max_iter = max_iter
self.update_value = update_value
self.timestep_var = timestep_var
if timestep_var:
self.timpstep_symbol = sympy.Symbol(self.timestep_var[0])
else:
self.timpstep_symbol = sympy.Symbol("t")
if not series_var and (max_iter is None):
raise ValueError()
self.slice_step = slice_step
if self.slice_step:
self.step_loss = self.step_loss.expectation(self.slice_step)
_input_var = []
_input_var += deepcopy(self.step_loss.input_var)
_input_var += series_var
_input_var += update_value.values()
self._input_var = sorted(set(_input_var), key=_input_var.index)
if timestep_var:
self._input_var.remove(timestep_var[0]) # delete a time-step variable from input_var
self.series_var = series_var
@property
def _symbol(self):
# TODO: naive implementation
dummy_loss = sympy.Symbol("dummy_loss")
if self.max_iter:
max_iter = self.max_iter
else:
max_iter = sympy.Symbol(sympy.latex(self.timpstep_symbol) + "_{max}")
_symbol = sympy.Sum(dummy_loss, (self.timpstep_symbol, 0, max_iter - 1))
_symbol = _symbol.subs({dummy_loss: self.step_loss._symbol})
return _symbol
def slice_step_fn(self, t, x):
return {k: v[t] for k, v in x.items()}
def forward(self, x_dict, **kwargs):
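        # At each step t: slice (or sample) the series variables, evaluate the step loss,
        # accumulate it (optionally weighted by a mask), and rename variables according to
        # `update_value` (e.g. "h" -> "h_prev") so that they feed the next iteration.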
series_x_dict = get_dict_values(x_dict, self.series_var, return_dict=True)
updated_x_dict = get_dict_values(x_dict, list(self.update_value.values()), return_dict=True)
step_loss_sum = 0
# set max_iter
if self.max_iter:
max_iter = self.max_iter
else:
max_iter = len(series_x_dict[self.series_var[0]])
if "mask" in kwargs.keys():
mask = kwargs["mask"].float()
else:
mask = None
for t in range(max_iter):
if self.timestep_var:
x_dict.update({self.timestep_var[0]: t})
if not self.slice_step:
# update series inputs & use slice_step_fn
x_dict.update(self.slice_step_fn(t, series_x_dict))
# evaluate
step_loss, samples = self.step_loss.eval(x_dict, return_dict=True, return_all=False)
x_dict.update(samples)
if mask is not None:
step_loss *= mask[t]
step_loss_sum += step_loss
# update
x_dict = replace_dict_keys(x_dict, self.update_value)
loss = step_loss_sum
# Restore original values
x_dict.update(series_x_dict)
x_dict.update(updated_x_dict)
        # TODO: x_dict contains non-updated variables.
return loss, x_dict
| 5,840
| 34.186747
| 113
|
py
|
pixyz
|
pixyz-main/pixyz/losses/wasserstein.py
|
from torch.nn.modules.distance import PairwiseDistance
import sympy
from .losses import Divergence
from ..utils import get_dict_values
class WassersteinDistance(Divergence):
r"""
Wasserstein distance.
.. math::
W(p, q) = \inf_{\Gamma \in \mathcal{P}(x_p\sim p, x_q\sim q)} \mathbb{E}_{(x_p, x_q) \sim \Gamma}[d(x_p, x_q)]
However, instead of the above true distance, this class computes the following one.
.. math::
W'(p, q) = \mathbb{E}_{x_p\sim p, x_q \sim q}[d(x_p, x_q)].
    Here, :math:`W'` is an upper bound of :math:`W` (i.e., :math:`W\leq W'`), and these are equal when both :math:`p`
and :math:`q` are degenerate (deterministic) distributions.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="p")
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="q")
>>> loss_cls = WassersteinDistance(p, q)
>>> print(loss_cls)
W^{upper} \left(p(z|x), q(z|x) \right)
>>> loss = loss_cls.eval({"x": torch.randn(1, 64)})
"""
def __init__(self, p, q, metric=PairwiseDistance(p=2)):
if set(p.var) != set(q.var):
raise ValueError("The two distribution variables must be the same.")
if len(p.var) != 1:
raise ValueError("A given distribution must have only one variable.")
super().__init__(p, q)
if len(p.input_var) > 0:
self.input_dist = p
elif len(q.input_var) > 0:
self.input_dist = q
else:
raise NotImplementedError()
self.metric = metric
@property
def _symbol(self):
return sympy.Symbol("W^{{upper}} \\left({}, {} \\right)".format(self.p.prob_text, self.q.prob_text))
def _get_batch_n(self, x_dict):
return get_dict_values(x_dict, self.input_dist.input_var[0])[0].shape[0]
def forward(self, x_dict, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample from distributions
p_x = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.p.var)[0]
q_x = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.q.var)[0]
if p_x.shape != q_x.shape:
raise ValueError("The two distribution variables must have the same shape.")
distance = self.metric(p_x, q_x)
return distance, {}
| 2,504
| 32.4
| 119
|
py
|
pixyz
|
pixyz-main/pixyz/losses/pdf.py
|
import sympy
import torch
from .losses import Loss
class LogProb(Loss):
r"""
The log probability density/mass function.
.. math::
\log p(x)
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p) # or p.log_prob()
>>> print(loss_cls)
\log p(x)
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([12.9894, 15.5280])
"""
def __init__(self, p, sum_features=True, feature_dims=None):
input_var = p.var + p.cond_var
super().__init__(input_var=input_var)
self.sum_features = sum_features
self.feature_dims = feature_dims
self.p = p
@property
def _symbol(self):
return sympy.Symbol("\\log {}".format(self.p.prob_text))
def forward(self, x={}, **kwargs):
log_prob = self.p.get_log_prob(x, sum_features=self.sum_features, feature_dims=self.feature_dims, **kwargs)
return log_prob, {}
class Prob(LogProb):
r"""
The probability density/mass function.
.. math::
p(x) = \exp(\log p(x))
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = Prob(p) # or p.prob()
>>> print(loss_cls)
p(x)
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([3.2903e-07, 5.5530e-07])
"""
@property
def _symbol(self):
return sympy.Symbol(self.p.prob_text)
def forward(self, x={}, **kwargs):
log_prob, x = super().forward(x, **kwargs)
return torch.exp(log_prob), {}
| 1,978
| 25.039474
| 115
|
py
|
pixyz
|
pixyz-main/pixyz/losses/elbo.py
|
import torch
def ELBO(p, q, sample_shape=torch.Size([1])):
r"""
The evidence lower bound (Monte Carlo approximation).
.. math::
        \mathbb{E}_{q(z|x)}\left[\log \frac{p(x,z)}{q(z|x)}\right] \approx \frac{1}{L}\sum_{l=1}^L \left[\log p(x, z_l) - \log q(z_l|x)\right],
\quad \text{where} \quad z_l \sim q(z|x).
Note:
        This loss is a special case of the :attr:`Expectation` class.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64]) # q(z|x)
>>> p = Normal(loc="z", scale=torch.tensor(1.), var=["x"], cond_var=["z"], features_shape=[64]) # p(x|z)
>>> loss_cls = ELBO(p,q)
>>> print(loss_cls)
\mathbb{E}_{p(z|x)} \left[\log p(x|z) - \log p(z|x) \right]
>>> loss = loss_cls.eval({"x": torch.randn(1, 64)})
"""
loss = (p.log_prob() - q.log_prob()).expectation(q, sample_shape=sample_shape)
return loss
| 985
| 33
| 114
|
py
|
pixyz
|
pixyz-main/pixyz/losses/__init__.py
|
from .divergences import (
KullbackLeibler,
)
from .entropy import (
Entropy,
CrossEntropy,
)
from .elbo import (
ELBO,
)
from .pdf import (
LogProb,
Prob,
)
from .adversarial_loss import (
AdversarialJensenShannon,
AdversarialKullbackLeibler,
AdversarialWassersteinDistance
)
from .losses import (
Parameter,
ValueLoss,
ConstantVar,
MinLoss,
MaxLoss,
Expectation,
REINFORCE,
DataParalleledLoss,
)
from .iteration import (
IterativeLoss,
)
from .mmd import (
MMD,
)
from .wasserstein import (
WassersteinDistance,
)
__all__ = [
'Parameter',
'ValueLoss',
'ConstantVar',
'MinLoss',
'MaxLoss',
'Entropy',
'CrossEntropy',
'Expectation',
'REINFORCE',
'DataParalleledLoss',
'KullbackLeibler',
'LogProb',
'Prob',
'ELBO',
'AdversarialJensenShannon',
'AdversarialKullbackLeibler',
'AdversarialWassersteinDistance',
'IterativeLoss',
'MMD',
'WassersteinDistance',
]
| 1,024
| 13.642857
| 37
|
py
|
pixyz
|
pixyz-main/pixyz/losses/entropy.py
|
import sympy
import torch
from pixyz.losses.losses import Loss
from pixyz.losses.divergences import KullbackLeibler
def Entropy(p, analytical=True, sample_shape=torch.Size([1])):
r"""
Entropy (Analytical or Monte Carlo approximation).
.. math::
H(p) &= -\mathbb{E}_{p(x)}[\log p(x)] \qquad \text{(analytical)}\\
&\approx -\frac{1}{L}\sum_{l=1}^L \log p(x_l), \quad \text{where} \quad x_l \sim p(x) \quad \text{(MC approximation)}.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], features_shape=[64])
>>> loss_cls = Entropy(p,analytical=True)
>>> print(loss_cls)
H \left[ {p(x)} \right]
>>> loss_cls.eval()
tensor([90.8121])
>>> loss_cls = Entropy(p,analytical=False,sample_shape=[10])
>>> print(loss_cls)
- \mathbb{E}_{p(x)} \left[\log p(x) \right]
>>> loss_cls.eval() # doctest: +SKIP
tensor([90.5991])
"""
if analytical:
loss = AnalyticalEntropy(p)
else:
loss = -p.log_prob().expectation(p, sample_shape=sample_shape)
return loss
class AnalyticalEntropy(Loss):
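    r"""
    Analytical entropy, evaluated via ``p.get_entropy``.
    The wrapped distribution must be backed by a PyTorch distribution class
    (``distribution_torch_class``); otherwise a ``ValueError`` is raised.
    """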
def __init__(self, p):
_input_var = p.input_var.copy()
super().__init__(_input_var)
self.p = p
@property
def _symbol(self):
p_text = "{" + self.p.prob_text + "}"
return sympy.Symbol("H \\left[ {} \\right]".format(p_text))
def forward(self, x_dict, **kwargs):
if not hasattr(self.p, 'distribution_torch_class'):
raise ValueError("Entropy of this distribution cannot be evaluated, "
"got %s." % self.p.distribution_name)
entropy = self.p.get_entropy(x_dict)
return entropy, {}
def CrossEntropy(p, q, analytical=False, sample_shape=torch.Size([1])):
r"""
    Cross entropy, a.k.a. the negative expected log-likelihood (analytical or Monte Carlo approximation).
.. math::
H(p,q) &= -\mathbb{E}_{p(x)}[\log q(x)] \qquad \text{(analytical)}\\
&\approx -\frac{1}{L}\sum_{l=1}^L \log q(x_l), \quad \text{where} \quad x_l \sim p(x) \quad \text{(MC approximation)}.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], features_shape=[64], name="p")
>>> q = Normal(loc=torch.tensor(1.), scale=torch.tensor(1.), var=["x"], features_shape=[64], name="q")
>>> loss_cls = CrossEntropy(p,q,analytical=True)
>>> print(loss_cls)
D_{KL} \left[p(x)||q(x) \right] + H \left[ {p(x)} \right]
>>> loss_cls.eval()
tensor([122.8121])
>>> loss_cls = CrossEntropy(p,q,analytical=False,sample_shape=[10])
>>> print(loss_cls)
- \mathbb{E}_{p(x)} \left[\log q(x) \right]
>>> loss_cls.eval() # doctest: +SKIP
tensor([123.2192])
"""
if analytical:
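        # Analytical cross entropy via the identity H(p, q) = H(p) + D_KL[p || q].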
loss = Entropy(p) + KullbackLeibler(p, q)
else:
loss = -q.log_prob().expectation(p, sample_shape=sample_shape)
return loss
class StochasticReconstructionLoss(Loss):
def __init__(self, encoder, decoder, sample_shape=torch.Size([1])):
raise NotImplementedError("This function is obsolete."
" please use `-decoder.log_prob().expectation(encoder)` instead of it.")
| 3,375
| 33.44898
| 126
|
py
|
pixyz
|
pixyz-main/pixyz/autoregressions/__init__.py
| 0
| 0
| 0
|
py
|
|
pixyz
|
pixyz-main/tests/test_example_usage.py
|
# flake8: noqa: F841
from __future__ import print_function
# To run all tests (including the ones below), run: pytest -m "performance or not performance"
import pytest
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
from pixyz.distributions import Deterministic
from pixyz.models import GAN
from pixyz.distributions import InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, Squeeze, Unsqueeze, Preprocess, ActNorm2d, ChannelConv
from pixyz.layers import ResNet
from pixyz.models import ML
from pixyz.distributions.mixture_distributions import MixtureModel
from pixyz.models import VI
from pixyz.utils import get_dict_values
from pixyz.distributions import Normal, Bernoulli, Categorical, ProductOfNormal
from pixyz.losses import KullbackLeibler
from pixyz.models import VAE
from pixyz.utils import print_latex
seed = 1
torch.manual_seed(seed)
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
batch_size = 2
epochs = 2
mock_mnist = [(torch.zeros(28 * 28), 0), (torch.ones(28 * 28), 1)]
mock_mnist_targets = torch.tensor([0, 1])
mock_cifar10 = [(torch.ones(3, 32, 32), 3), (torch.ones(3, 32, 32), 1)]
# # Conditional variational autoencoder (using the VAE class)
@pytest.mark.performance
def test_run_cvae():
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x,y)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x", "y"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim + y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x, y):
h = F.relu(self.fc1(torch.cat([x, y], 1)))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z,y)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z", "y"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim + y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z, y):
h = F.relu(self.fc1(torch.cat([z, y], 1)))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# In[8]:
kl = KullbackLeibler(q, prior)
print(kl)
print_latex(kl)
# In[9]:
model = VAE(q, p, regularizer=kl, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x, y):
with torch.no_grad():
z = q.sample({"x": x, "y": y}, return_all=False)
z.update({"y": y})
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
recon = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return recon
def plot_image_from_latent(z, y):
with torch.no_grad():
sample = p.sample_mean({"z": z, "y": y}).view(-1, 1, 28, 28).cpu()
return sample
def plot_reconstrunction_changing_y(x, y):
y_change = torch.eye(10)[range(7)].to(device)
batch_dummy = torch.ones(x.size(0))[:, None].to(device)
recon_all = []
with torch.no_grad():
for _y in y_change:
z = q.sample({"x": x, "y": y}, return_all=False)
z.update({"y": batch_dummy * _y[None, :]})
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
recon_all.append(recon_batch)
recon_changing_y = torch.cat(recon_all)
recon_changing_y = torch.cat([x.view(-1, 1, 28, 28), recon_changing_y]).cpu()
return recon_changing_y
# In[13]:
# writer = SummaryWriter()
plot_number = 1
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
y_sample = torch.eye(10)[[plot_number] * 64].to(device)
    _x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_latent(z_sample, y_sample)
recon_changing_y = plot_reconstrunction_changing_y(_x[:8], _y[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_change_y', recon_changing_y, epoch)
#
# writer.close()
# In[ ]:
# # Examples of creating and operating distributions in Pixyz
@pytest.mark.performance
def test_run_distributions():
# In[1]:
# In[2]:
# In[3]:
x_dim = 20
y_dim = 30
z_dim = 40
a_dim = 50
batch_n = 2
class P1(Normal):
def __init__(self):
super(P1, self).__init__(cond_var=["y", "a"], var=["x"], name="p_{1}")
self.fc1 = nn.Linear(y_dim, 10)
self.fc2 = nn.Linear(a_dim, 10)
self.fc21 = nn.Linear(10 + 10, 20)
self.fc22 = nn.Linear(10 + 10, 20)
def forward(self, a, y):
h1 = F.relu(self.fc1(y))
h2 = F.relu(self.fc2(a))
h12 = torch.cat([h1, h2], 1)
return {"loc": self.fc21(h12), "scale": F.softplus(self.fc22(h12))}
class P2(Normal):
def __init__(self):
super(P2, self).__init__(cond_var=["x", "y"], var=["z"], name="p_{2}")
self.fc3 = nn.Linear(x_dim, 30)
self.fc4 = nn.Linear(30 + y_dim, 400)
self.fc51 = nn.Linear(400, 20)
self.fc52 = nn.Linear(400, 20)
def forward(self, x, y):
h3 = F.relu(self.fc3(x))
h4 = F.relu(self.fc4(torch.cat([h3, y], 1)))
return {"loc": self.fc51(h4), "scale": F.softplus(self.fc52(h4))}
p4 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["a"], features_shape=[a_dim], name="p_{4}")
p6 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["y"], features_shape=[y_dim], name="p_{6}")
x = torch.from_numpy(np.random.random((batch_n, x_dim)).astype("float32"))
y = torch.from_numpy(np.random.random((batch_n, y_dim)).astype("float32"))
a = torch.from_numpy(np.random.random((batch_n, a_dim)).astype("float32"))
# In[4]:
p1 = P1()
p2 = P2()
p3 = p2 * p1
p3.name = "p_{3}"
p5 = p3 * p4
p5.name = "p_{5}"
p_all = p1 * p2 * p4 * p6
p_all.name = "p_{all}"
# In[5]:
print(p1)
print_latex(p1)
# In[6]:
print(p2)
print_latex(p2)
# In[7]:
print(p3)
print_latex(p3)
# In[8]:
print(p4)
print_latex(p4)
# In[9]:
print(p5)
print_latex(p5)
# In[10]:
print(p_all)
print_latex(p_all)
# In[11]:
for param in p3.parameters():
print(type(param.data), param.size())
# In[12]:
p1.sample({"a": a, "y": y}, return_all=False)
# In[13]:
p1.sample({"a": a, "y": y}, sample_shape=[5], return_all=False)
# In[14]:
p1.sample({"a": a, "y": y}, return_all=True)
# In[15]:
p1_log_prob = p1.log_prob()
print(p1_log_prob)
print_latex(p1_log_prob)
# In[16]:
outputs = p1.sample({"y": y, "a": a})
print(p1_log_prob.eval(outputs))
# In[17]:
outputs = p2.sample({"x": x, "y": y})
print(p2.log_prob().eval(outputs))
# In[18]:
outputs = p1.sample({"y": y, "a": a})
print(outputs)
# In[19]:
p2.sample(outputs)
# In[20]:
outputs = p3.sample({"y": y, "a": a}, batch_n=batch_n)
print(p3.log_prob().eval(outputs))
# In[21]:
outputs = p_all.sample(batch_n=batch_n)
print(p_all.log_prob().eval(outputs))
# In[ ]:
# # Generative adversarial network (using the GAN class)
@pytest.mark.performance
def test_run_gan():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
z_dim = 100
# generator model p(x|z)
class Generator(Deterministic):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(z_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, x_dim),
nn.Sigmoid()
)
def forward(self, z):
x = self.model(z)
return {"x": x}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# generative model
p_g = Generator()
p = (p_g * prior).marginalize_var("z").to(device)
# In[5]:
print(p)
print_latex(p)
# In[6]:
# discriminator model p(t|x)
class Discriminator(Deterministic):
def __init__(self):
super(Discriminator, self).__init__(cond_var=["x"], var=["t"], name="d")
self.model = nn.Sequential(
nn.Linear(x_dim, 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid()
)
def forward(self, x):
t = self.model(x)
return {"t": t}
d = Discriminator().to(device)
# In[7]:
print(d)
print_latex(d)
# In[8]:
model = GAN(p, d,
optimizer=optim.Adam, optimizer_params={"lr": 0.0002},
d_optimizer=optim.Adam, d_optimizer_params={"lr": 0.0002})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
train_d_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss, d_loss = model.train({"x": x})
train_loss += loss
train_d_loss += d_loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
train_d_loss = train_d_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}, {:.4f}'.format(epoch, train_loss.item(), train_d_loss.item()))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
test_d_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss, d_loss = model.test({"x": x})
test_loss += loss
test_d_loss += d_loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
test_d_loss = test_d_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}, {:.4f}'.format(test_loss, test_d_loss.item()))
return test_loss
# In[11]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p_g.sample({"z": z_sample})["x"].view(-1, 1, 28, 28).cpu()
return sample
# In[12]:
# writer = SummaryWriter()
z_sample = torch.randn(64, z_dim).to(device)
    _x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = _y.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
#
# writer.close()
# In[ ]:
# # Glow (CIFAR10)
@pytest.mark.performance
def test_run_glow():
# In[1]:
# In[2]:
root = '../data'
num_workers = 8
# transform_train = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
# transform_test = transforms.Compose([transforms.ToTensor()])
#
# train_loader = DataLoader(datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train),
# batch_size=batch_size, shuffle=True, num_workers=num_workers)
#
# test_loader = DataLoader(datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test),
# batch_size=batch_size, shuffle=False, num_workers=num_workers)
train_loader = DataLoader(mock_cifar10, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(mock_cifar10, batch_size=batch_size, shuffle=False, num_workers=num_workers)
# In[3]:
# In[4]:
in_channels = 3
mid_channels = 64
num_scales = 2
input_dim = 32
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[in_channels, input_dim, input_dim], name="p_prior")
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_channels, mid_channels):
super().__init__()
self.resnet = ResNet(in_channels=in_channels, mid_channels=mid_channels, out_channels=in_channels * 2,
num_blocks=8, kernel_size=3, padding=1,
double_after_norm=True)
def forward(self, x):
s_t = self.resnet(x)
log_s, t = torch.chunk(s_t, 2, dim=1)
log_s = torch.tanh(log_s)
return log_s, t
# In[7]:
flow_list = []
flow_list.append(Preprocess())
# Squeeze -> 3x (ActNorm -> invertible 1x1 conv -> channel-wise affine coupling)
flow_list.append(Squeeze())
for i in range(3):
flow_list.append(ActNorm2d(in_channels * 4))
flow_list.append(ChannelConv(in_channels * 4))
flow_list.append(AffineCoupling(in_features=in_channels * 4, mask_type="channel_wise",
scale_translate_net=ScaleTranslateNet(in_channels * 4, mid_channels * 2),
inverse_mask=False))
flow_list.append(Unsqueeze())
f = FlowList(flow_list)
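# Each loop iteration above stacks one Glow-style step on the squeezed
# (in_channels * 4)-channel representation: ActNorm (per-channel affine normalization),
# an invertible 1x1 convolution that mixes channels, and a channel-wise affine coupling.
# A sketch of how the resulting flow is used once wrapped in a distribution
# (assuming the forward/inverse API used later in this file):
# z = f(x)                 # x -> latent, accumulating log|det J|
# x_recon = f.inverse(z)   # latent -> x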
# In[9]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print(p)
print_latex(p)
# In[10]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).cpu()
return sample
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z)
comparison = torch.cat([x.view(-1, 3, 32, 32), recon_batch]).cpu()
return comparison
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, 3, 32, 32).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# # Gaussian Mixture Model
@pytest.mark.performance
def test_run_gmm():
# In[1]:
# import matplotlib.pyplot as plt
# from matplotlib import cm
# from mpl_toolkits.mplot3d import Axes3D
# ### toy dataset
# In[2]:
# https://angusturner.github.io/generative_models/2017/11/03/pytorch-gaussian-mixture-model.html
def sample(mu, var, nb_samples=500):
"""
Return a tensor of (nb_samples, features), sampled
        from the parameterized Gaussian.
        :param mu: torch.Tensor of the means
        :param var: torch.Tensor of variances (NOTE: off-diagonal covariances are zero)
"""
out = []
for i in range(nb_samples):
out += [
torch.normal(mu, var.sqrt())
]
return torch.stack(out, dim=0)
# generate some clusters
cluster1 = sample(
torch.Tensor([1.5, 2.5]),
torch.Tensor([1.2, .8]),
nb_samples=150
)
cluster2 = sample(
torch.Tensor([7.5, 7.5]),
torch.Tensor([.75, .5]),
nb_samples=50
)
cluster3 = sample(
torch.Tensor([8, 1.5]),
torch.Tensor([.6, .8]),
nb_samples=100
)
def plot_2d_sample(sample_dict):
x = sample_dict["x"][:, 0].data.numpy()
y = sample_dict["x"][:, 1].data.numpy()
# plt.plot(x, y, 'gx')
# plt.show()
# In[3]:
# create the dummy dataset, by combining the clusters.
samples = torch.cat([cluster1, cluster2, cluster3])
samples = (samples - samples.mean(dim=0)) / samples.std(dim=0)
samples_dict = {"x": samples}
plot_2d_sample(samples_dict)
# ## GMM
# In[4]:
z_dim = 3 # the number of mixture
x_dim = 2
distributions = []
for i in range(z_dim):
loc = torch.randn(x_dim)
scale = torch.empty(x_dim).fill_(0.6)
distributions.append(Normal(loc=loc, scale=scale, var=["x"], name="p_%d" % i))
probs = torch.empty(z_dim).fill_(1. / z_dim)
prior = Categorical(probs=probs, var=["z"], name="p_{prior}")
# In[5]:
p = MixtureModel(distributions=distributions, prior=prior)
print(p)
print_latex(p)
# In[6]:
post = p.posterior()
print(post)
print_latex(post)
# In[7]:
def get_density(N=200, x_range=(-5, 5), y_range=(-5, 5)):
x = np.linspace(*x_range, N)
y = np.linspace(*y_range, N)
x, y = np.meshgrid(x, y)
# get the design matrix
points = np.concatenate([x.reshape(-1, 1), y.reshape(-1, 1)], axis=1)
points = torch.from_numpy(points).float()
pdf = p.prob().eval({"x": points}).data.numpy().reshape([N, N])
return x, y, pdf
# In[8]:
# def plot_density_3d(x, y, loglike):
# fig = plt.figure(figsize=(10, 10))
# ax = fig.gca(projection='3d')
# ax.plot_surface(x, y, loglike, rstride=3, cstride=3, linewidth=1, antialiased=True,
# cmap=cm.inferno)
# cset = ax.contourf(x, y, loglike, zdir='z', offset=-0.15, cmap=cm.inferno)
#
# # adjust the limits, ticks and view angle
# ax.set_zlim(-0.15, 0.2)
# ax.set_zticks(np.linspace(0, 0.2, 5))
# ax.view_init(27, -21)
# plt.show()
# In[9]:
def plot_density_2d(x, y, pdf):
# fig = plt.figure(figsize=(5, 5))
# plt.plot(samples_dict["x"][:, 0].data.numpy(), samples_dict["x"][:, 1].data.numpy(), 'gx')
#
# for d in distributions:
# plt.scatter(d.loc[0, 0], d.loc[0, 1], c='r', marker='o')
#
# cs = plt.contour(x, y, pdf, 10, colors='k', linewidths=2)
# plt.show()
pass
# In[10]:
eps = 1e-6
min_scale = 1e-6
# plot_density_3d(*get_density())
plot_density_2d(*get_density())
print("Epoch: {}, log-likelihood: {}".format(0, p.log_prob().mean().eval(samples_dict)))
for epoch in range(20):
# E-step
posterior = post.prob().eval(samples_dict)
# M-step
N_k = posterior.sum(dim=1) # (n_mix,)
# update probs
probs = N_k / N_k.sum() # (n_mix,)
prior.probs[0] = probs
# update loc & scale
loc = (posterior[:, None] @ samples[None]).squeeze(1) # (n_mix, n_dim)
loc /= (N_k[:, None] + eps)
    cov = (samples[None, :, :] - loc[:, None, :]) ** 2  # squared deviations; off-diagonal covariances are assumed to be zero
var = (posterior[:, None, :] @ cov).squeeze(1) # (n_mix, n_dim)
var /= (N_k[:, None] + eps)
scale = var.sqrt()
for i, d in enumerate(distributions):
d.loc[0] = loc[i]
d.scale[0] = scale[i]
# plot_density_3d(*get_density())
plot_density_2d(*get_density())
print("Epoch: {}, log-likelihood: {}".format(epoch + 1, p.log_prob().mean().eval({"x": samples}).mean()))
# In[11]:
pseudo_sample_dict = p.sample(batch_n=200)
plot_2d_sample(pseudo_sample_dict)
# In[ ]:
# # Variational inference on a hierarchical latent model
@pytest.mark.performance
def test_run_hvi():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
a_dim = 64
z_dim = 32
# inference models
class Q1(Normal):
def __init__(self):
super(Q1, self).__init__(cond_var=["x"], var=["a"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, a_dim)
self.fc32 = nn.Linear(512, a_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
class Q2(Normal):
def __init__(self):
super(Q2, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
q1 = Q1().to(device)
q2 = Q2().to(device)
q = q1 * q2
q.name = "q"
# generative models
class P2(Normal):
def __init__(self):
super(P2, self).__init__(cond_var=["z"], var=["a"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, a_dim)
self.fc32 = nn.Linear(512, a_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
class P3(Bernoulli):
def __init__(self):
super(P3, self).__init__(cond_var=["a"], var=["x"], name="p")
self.fc1 = nn.Linear(a_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, a):
h = F.relu(self.fc1(a))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p2 = P2().to(device)
p3 = P3().to(device)
p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
_p = p2 * p3
p = _p * p1
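# The joint generative model factorizes as p(x, a, z) = p(x|a) p(a|z) p_{prior}(z),
# while the inference model defined above factorizes as q(a, z|x) = q(a|x) q(z|x).
# A sketch of ancestral sampling through the hierarchy (assuming the sample API
# used elsewhere in this file):
# _joint_sample = p.sample(batch_n=4)  # draws z ~ p_{prior}, then a ~ p(a|z), then x ~ p(x|a)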
# In[5]:
print(p)
print_latex(p)
# In[6]:
print(_p)
print_latex(_p)
# In[7]:
print(q)
print_latex(q)
# In[8]:
model = VI(p, q, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[11]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x})
z = get_dict_values(z, _p.cond_var, return_dict=True) # select latent variables
recon_batch = _p.sample(z)["x"].view(-1, 1, 28, 28) # TODO: it should be sample_mean
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = _p.sample({"z": z_sample})["x"].view(-1, 1, 28, 28).cpu() # TODO: it should be sample_mean
return sample
# In[12]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# # JMVAE with a PoE encoder (using the VAE class)
# * JMVAE: Joint Multimodal Learning with Deep Generative Models
# * The PoE encoder is originally proposed in "Multimodal Generative Models for Scalable Weakly-Supervised Learning"
@pytest.mark.performance
def test_run_jmvae_poe():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x)
class InferenceX(Normal):
def __init__(self):
super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|y)
class InferenceY(Normal):
def __init__(self):
super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
self.fc1 = nn.Linear(y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, y):
h = F.relu(self.fc1(y))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class GeneratorX(Bernoulli):
def __init__(self):
super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
# generative model p(y|z)
class GeneratorY(Categorical):
def __init__(self):
super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": F.softmax(self.fc3(h), dim=1)}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_x = GeneratorX().to(device)
p_y = GeneratorY().to(device)
p = p_x * p_y
q_x = InferenceX().to(device)
q_y = InferenceY().to(device)
q = ProductOfNormal([q_x, q_y], name="q").to(device)
# In[5]:
print(q)
print_latex(q)
# In[6]:
print(p)
print_latex(p)
# In[7]:
kl = KullbackLeibler(q, prior)
kl_x = KullbackLeibler(q, q_x)
kl_y = KullbackLeibler(q, q_y)
regularizer = kl + kl_x + kl_y
print(regularizer)
print_latex(regularizer)
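# The regularizer follows the JMVAE-kl objective: besides the usual KL to the prior,
# KL(q(z|x,y) || q(z|x)) and KL(q(z|x,y) || q(z|y)) pull the unimodal encoders toward
# the joint (PoE) posterior, so that inference from a single modality stays consistent
# at test time (see plot_reconstrunction_missing below).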
# In[8]:
model = VAE(q, p, other_distributions=[q_x, q_y],
regularizer=regularizer, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[11]:
def plot_reconstrunction_missing(x):
with torch.no_grad():
z = q_x.sample({"x": x}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_label(x, y):
with torch.no_grad():
x_all = [x.view(-1, 1, 28, 28)]
for i in range(7):
z = q_y.sample({"y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
x_all.append(recon_batch)
comparison = torch.cat(x_all).cpu()
return comparison
def plot_reconstrunction(x, y):
with torch.no_grad():
z = q.sample({"x": x, "y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
# In[12]:
# writer = SummaryWriter()
plot_number = 1
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_label(_x[:8], _y[:8])
recon_missing = plot_reconstrunction_missing(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_missing', recon_missing, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Joint multimodal variational autoencoder (JMVAE, using the VAE class)
# Original paper: Joint Multimodal Learning with Deep Generative Models (https://arxiv.org/abs/1611.01891 )
@pytest.mark.performance
def test_run_jmvae():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x,y)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x", "y"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim + y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x, y):
h = F.relu(self.fc1(torch.cat([x, y], 1)))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|x)
class InferenceX(Normal):
def __init__(self):
super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|y)
class InferenceY(Normal):
def __init__(self):
super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
self.fc1 = nn.Linear(y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, y):
h = F.relu(self.fc1(y))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class GeneratorX(Bernoulli):
def __init__(self):
super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
# generative model p(y|z)
class GeneratorY(Categorical):
def __init__(self):
super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": F.softmax(self.fc3(h), dim=1)}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_x = GeneratorX().to(device)
p_y = GeneratorY().to(device)
q = Inference().to(device)
q_x = InferenceX().to(device)
q_y = InferenceY().to(device)
p = p_x * p_y
# In[5]:
print(p)
print_latex(p)
# In[6]:
kl = KullbackLeibler(q, prior)
kl_x = KullbackLeibler(q, q_x)
kl_y = KullbackLeibler(q, q_y)
regularizer = kl + kl_x + kl_y
print(regularizer)
print_latex(regularizer)
# In[7]:
model = VAE(q, p, other_distributions=[q_x, q_y],
regularizer=regularizer, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[8]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[9]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[10]:
def plot_reconstrunction_missing(x):
with torch.no_grad():
z = q_x.sample({"x": x}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_label(x, y):
with torch.no_grad():
x_all = [x.view(-1, 1, 28, 28)]
for i in range(7):
z = q_y.sample({"y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
x_all.append(recon_batch)
comparison = torch.cat(x_all).cpu()
return comparison
def plot_reconstrunction(x, y):
with torch.no_grad():
z = q.sample({"x": x, "y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
# In[11]:
# writer = SummaryWriter()
plot_number = 1
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_label(_x[:8], _y[:8])
recon_missing = plot_reconstrunction_missing(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_missing', recon_missing, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Semi-supervised learning with M2 model
@pytest.mark.performance
def test_run_m2():
# In[1]:
# In[2]:
# https://github.com/wohlert/semi-supervised-pytorch/blob/master/examples/notebooks/datautils.py
from functools import reduce
from operator import __or__
from torch.utils.data.sampler import SubsetRandomSampler
# from torchvision.datasets import MNIST
import numpy as np
# labels_per_class = 10
# n_labels = 10
labels_per_class = 1
n_labels = 2
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
#
# mnist_train = MNIST(root=root, train=True, download=True, transform=transform)
# mnist_valid = MNIST(root=root, train=False, transform=transform)
mnist_train = mock_mnist
mnist_valid = mock_mnist
def get_sampler(labels, n=None):
# Only choose digits in n_labels
(indices,) = np.where(reduce(__or__, [labels == i for i in np.arange(n_labels)]))
# Ensure uniform distribution of labels
np.random.shuffle(indices)
indices = np.hstack([list(filter(lambda idx: labels[idx] == i, indices))[:n] for i in range(n_labels)])
indices = torch.from_numpy(indices)
sampler = SubsetRandomSampler(indices)
return sampler
# Dataloaders for MNIST
# kwargs = {'num_workers': 1, 'pin_memory': True}
# labelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
# sampler=get_sampler(mnist_train.targets.numpy(), labels_per_class),
# **kwargs)
# unlabelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
# sampler=get_sampler(mnist_train.targets.numpy()), **kwargs)
# validation = torch.utils.data.DataLoader(mnist_valid, batch_size=batch_size,
# sampler=get_sampler(mnist_valid.targets.numpy()), **kwargs)
kwargs = {'num_workers': 1, 'pin_memory': True}
labelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
sampler=get_sampler(mock_mnist_targets.numpy(), labels_per_class),
**kwargs)
unlabelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
sampler=get_sampler(mock_mnist_targets.numpy()), **kwargs)
validation = torch.utils.data.DataLoader(mnist_valid, batch_size=batch_size,
sampler=get_sampler(mock_mnist_targets.numpy()), **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli, RelaxedCategorical, Categorical
from pixyz.models import Model
from pixyz.losses import ELBO
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x,y)
class Inference(Normal):
def __init__(self):
super().__init__(cond_var=["x", "y"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim + y_dim, 512)
self.fc21 = nn.Linear(512, z_dim)
self.fc22 = nn.Linear(512, z_dim)
def forward(self, x, y):
h = F.relu(self.fc1(torch.cat([x, y], 1)))
return {"loc": self.fc21(h), "scale": F.softplus(self.fc22(h))}
# generative model p(x|z,y)
class Generator(Bernoulli):
def __init__(self):
super().__init__(cond_var=["z", "y"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim + y_dim, 512)
self.fc2 = nn.Linear(512, x_dim)
def forward(self, z, y):
h = F.relu(self.fc1(torch.cat([z, y], 1)))
return {"probs": torch.sigmoid(self.fc2(h))}
# classifier p(y|x)
class Classifier(RelaxedCategorical):
def __init__(self):
super(Classifier, self).__init__(cond_var=["x"], var=["y"], name="p")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, y_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.softmax(self.fc2(h), dim=1)
return {"probs": h}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# distributions for supervised learning
p = Generator().to(device)
q = Inference().to(device)
f = Classifier().to(device)
p_joint = p * prior
# In[5]:
print(p_joint)
print_latex(p_joint)
# In[6]:
print(q)
print_latex(q)
# In[7]:
print(f)
print_latex(f)
# In[8]:
# distributions for unsupervised learning
_q_u = q.replace_var(x="x_u", y="y_u")
p_u = p.replace_var(x="x_u", y="y_u")
f_u = f.replace_var(x="x_u", y="y_u")
q_u = _q_u * f_u
p_joint_u = p_u * prior
p_joint_u.to(device)
q_u.to(device)
f_u.to(device)
print(p_joint_u)
print_latex(p_joint_u)
# In[9]:
print(q_u)
print_latex(q_u)
# In[10]:
print(f_u)
print_latex(f_u)
# In[11]:
elbo_u = ELBO(p_joint_u, q_u)
elbo = ELBO(p_joint, q)
nll = -f.log_prob() # or -LogProb(f)
rate = 1 * (len(unlabelled) + len(labelled)) / len(labelled)
loss_cls = -elbo_u.mean() - elbo.mean() + (rate * nll).mean()
print(loss_cls)
print_latex(loss_cls)
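# This matches the semi-supervised M2 objective: a negative ELBO on labelled pairs (x, y),
# a negative ELBO on unlabelled x_u with y_u treated as a latent variable inferred by the
# classifier, plus a weighted classification loss -log q(y|x) on labelled data
# (`rate` plays the role of the alpha weight in the original paper).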
# In[12]:
model = Model(loss_cls, test_loss=nll.mean(),
distributions=[p, q, f], optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[13]:
def train(epoch):
train_loss = 0
for x_u, y_u in unlabelled:
        x, y = next(iter(labelled))
x = x.to(device)
y = torch.eye(10)[y].to(device)
x_u = x_u.to(device)
loss = model.train({"x": x, "y": y, "x_u": x_u})
train_loss += loss
train_loss = train_loss * unlabelled.batch_size / len(unlabelled.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[14]:
def test(epoch):
test_loss = 0
correct = 0
total = 0
for x, y in validation:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
pred_y = f.sample_mean({"x": x})
total += y.size(0)
correct += (pred_y.argmax(dim=1) == y.argmax(dim=1)).sum().item()
test_loss = test_loss * validation.batch_size / len(validation.dataset)
test_accuracy = 100 * correct / total
print('Test loss: {:.4f}, Test accuracy: {:.4f}'.format(test_loss, test_accuracy))
return test_loss, test_accuracy
# In[15]:
# writer = SummaryWriter()
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss, test_accuracy = test(epoch)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
# writer.add_scalar('test_accuracy', test_accuracy, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Maximum likelihood estimation (using the ML class)
@pytest.mark.performance
def test_run_maximum_likelihood():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Categorical
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
y_dim = 10
# classifier p(y|x)
class Classifier(Categorical):
def __init__(self):
super(Classifier, self).__init__(cond_var=["x"], var=["y"])
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
h = F.softmax(self.fc3(h), dim=1)
return {"probs": h}
p = Classifier().to(device)
# In[5]:
print(p)
print_latex(p)
# In[6]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
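# ML fits the classifier by maximum likelihood, i.e. it minimizes the negative
# log-likelihood -E_data[log p(y|x)] of the one-hot labels passed in train/test below.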
# In[7]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[8]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[9]:
# writer = SummaryWriter()
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # MMD-VAE (using the Model class)
@pytest.mark.performance
def test_run_mmd_vae():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli, EmpiricalDistribution
from pixyz.losses import CrossEntropy, MMD
from pixyz.models import Model
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_data = EmpiricalDistribution(["x"]).to(device)
q_mg = (q * p_data).marginalize_var("x")
q_mg.name = "q"
# In[5]:
print(p)
print_latex(p)
# In[6]:
print(q_mg)
print_latex(q_mg)
# In[7]:
loss_cls = CrossEntropy(q, p).mean() + MMD(q_mg, prior, kernel="gaussian", sigma_sqr=z_dim / 2.)
print(loss_cls)
print_latex(loss_cls)
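# A minimal sketch of the quantity the MMD term estimates (assumption: a biased
# V-statistic estimator with a Gaussian kernel; pixyz.losses.MMD may use a different
# normalization), comparing one set of latent samples against another:
def _mmd_sketch(a, b, sigma_sqr=z_dim / 2.):
    def gaussian_kernel(u, v):
        sq_dist = ((u[:, None, :] - v[None, :, :]) ** 2).sum(-1)
        return torch.exp(-sq_dist / (2 * sigma_sqr))
    return (gaussian_kernel(a, a).mean()
            + gaussian_kernel(b, b).mean()
            - 2 * gaussian_kernel(a, b).mean())
_mmd_value = _mmd_sketch(torch.randn(8, z_dim), torch.randn(8, z_dim))  # scalar tensor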
# In[8]:
model = Model(loss=loss_cls, distributions=[p, q, q_mg], optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[11]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x}, return_all=False)
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[12]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # MVAE
@pytest.mark.performance
def test_run_mvae():
# * Original paper: Multimodal Generative Models for Scalable Weakly-Supervised Learning (https://papers.nips.cc/paper/7801-multimodal-generative-models-for-scalable-weakly-supervised-learning.pdf)
# * Original code: https://github.com/mhw32/multimodal-vae-public
#
# ### MVAE summary
# Multimodal variational autoencoder (MVAE) uses a product-of-experts inference network and a sub-sampled training paradigm to solve the multi-modal inference problem.
# - Product-of-experts
# In the multimodal setting we assume that the N modalities, $x_{1}, x_{2}, ..., x_{N}$, are conditionally independent given the common latent variable z. That is, we assume a generative model of the form $p_{\theta}(x_{1}, x_{2}, ..., x_{N}, z) = p(z)p_{\theta}(x_{1}|z)p_{\theta}(x_{2}|z) \cdots p_{\theta}(x_{N}|z)$. The conditional independence assumptions in the generative model imply a relation between the joint and single-modality posteriors: the joint posterior is a product of the individual posteriors, with an additional quotient by the prior.
#
# - Sub-sampled training
# MVAE sub-samples which ELBO terms to optimize at every gradient step, in order to capture the relationships between modalities and to train the individual inference networks.
# In[1]:
# In[2]:
# MNIST
# treat labels as a second modality
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.utils import print_latex
# ## Define probability distributions
# ### In the original paper
# Modalities: $x_{1}, x_{2}, ..., x_{N}$
# Generative model:
#
# $p_{\theta}\left(x_{1}, x_{2}, \ldots, x_{N}, z\right)=p(z) p_{\theta}\left(x_{1} | z\right) p_{\theta}\left(x_{2} | z\right) \cdots p_{\theta}\left(x_{N} | z\right)$
#
# Inference:
#
# $p\left(z | x_{1}, \ldots, x_{N}\right) \propto \frac{\prod_{i=1}^{N} p\left(z | x_{i}\right)}{\prod_{i=1}^{N-1} p(z)} \approx \frac{\prod_{i=1}^{N}\left[\tilde{q}\left(z | x_{i}\right) p(z)\right]}{\prod_{i=1}^{N-1} p(z)}=p(z) \prod_{i=1}^{N} \tilde{q}\left(z | x_{i}\right)$
#
# ### MNIST settings
# Modalities:
# - x for image modality
# - y for label modality
#
# Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$
# Generators:
# $p_{\theta}(x|z) = \cal B(x; \lambda = g_x(z))$ for image modality
# $p_{\theta}(y|z) = \cal Cat(y; \lambda = g_y(z))$ for label modality
# $p_{\theta}\left(x, y, z\right)=p(z) p_{\theta}(x| z) p_{\theta}(y | z)$
#
# Inferences:
# $q_{\phi}(z|x) = \cal N(z; \mu=fx_\mu(x), \sigma^2=fx_{\sigma^2}(x))$ for image modality
# $q_{\phi}(z|y) = \cal N(z; \mu=fy_\mu(y), \sigma^2=fy_{\sigma^2}(y))$ for label modality
# $p(z)q_{\phi}(z|x)q_{\phi}(z|y)$
#
# In[4]:
from pixyz.distributions import Normal, Bernoulli, Categorical, ProductOfNormal
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x) for image modality
class InferenceX(Normal):
def __init__(self):
super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|y) for label modality
class InferenceY(Normal):
def __init__(self):
super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
self.fc1 = nn.Linear(y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, y):
h = F.relu(self.fc1(y))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class GeneratorX(Bernoulli):
def __init__(self):
super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
# generative model p(y|z)
class GeneratorY(Categorical):
def __init__(self):
super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": F.softmax(self.fc3(h), dim=1)}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_x = GeneratorX().to(device)
p_y = GeneratorY().to(device)
p = p_x * p_y
q_x = InferenceX().to(device)
q_y = InferenceY().to(device)
# equation (4) in the paper
# "we can use a product of experts (PoE), including a “prior expert”, as the approximating distribution for the joint-posterior"
# Pixyz docs: https://docs.pixyz.io/en/latest/distributions.html#pixyz.distributions.ProductOfNormal
q = ProductOfNormal([q_x, q_y], name="q").to(device)
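# A minimal sketch of the product-of-Gaussian-experts fusion that ProductOfNormal
# performs (assumption: precision-weighted combination including a standard-normal
# "prior expert"; the library implementation may add clamping/epsilon terms):
_expert_loc = torch.stack([torch.zeros(z_dim), torch.full((z_dim,), 0.5)])   # expert means
_expert_var = torch.stack([torch.ones(z_dim), torch.full((z_dim,), 2.0)])    # expert variances
_expert_prec = 1.0 / _expert_var
_poe_var = 1.0 / (1.0 + _expert_prec.sum(0))               # "+1" is the unit precision of the prior expert
_poe_loc = _poe_var * (_expert_loc * _expert_prec).sum(0)  # the prior expert contributes zero mean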
# In[5]:
print(q)
print_latex(q)
# In[6]:
print(p)
print_latex(p)
# ## Define Loss function
# $\cal L = \mathrm{ELBO}\left(x_{1}, \ldots, x_{N}\right)+\sum_{i=1}^{N} \mathrm{ELBO}\left(x_{i}\right)+\sum_{j=1}^{k} \mathrm{ELBO}\left(X_{j}\right)$
# In[7]:
from pixyz.losses import KullbackLeibler
from pixyz.losses import LogProb
from pixyz.losses import Expectation as E
# In[8]:
ELBO = -E(q, LogProb(p)) + KullbackLeibler(q, prior)
ELBO_x = -E(q_x, LogProb(p_x)) + KullbackLeibler(q_x, prior)
ELBO_y = -E(q_y, LogProb(p_y)) + KullbackLeibler(q_y, prior)
loss = ELBO.mean() + ELBO_x.mean() + ELBO_y.mean()
print_latex(loss) # Note: Terms in the printed loss may be reordered
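# The three terms correspond to the sub-sampled training paradigm summarized above:
# one ELBO for the complete observation (x, y) and one ELBO per individual modality,
# so the unimodal encoders q(z|x) and q(z|y) are trained alongside the joint PoE posterior.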
# ## Define MVAE model using Model Class
# In[9]:
from pixyz.models import Model
model = Model(loss=loss, distributions=[p_x, p_y, q_x, q_y],
optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# ## Define Train and Test loop using model
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# ## Reconstruction and generation
# In[12]:
def plot_reconstrunction_missing_label_modality(x):
with torch.no_grad():
# infer from x (image modality) only
z = q_x.sample({"x": x}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_label(x, y):
with torch.no_grad():
x_all = [x.view(-1, 1, 28, 28)]
for i in range(7):
# infer from y (label modality) only
z = q_y.sample({"y": y}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
x_all.append(recon_batch)
comparison = torch.cat(x_all).cpu()
return comparison
def plot_reconstrunction(x, y):
with torch.no_grad():
# infer from x and y
z = q.sample({"x": x, "y": y}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
# In[13]:
# for visualising in TensorBoard
# writer = SummaryWriter()
plot_number = 1
# set-aside observation for watching generative model improvement
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_label(_x[:8], _y[:8])
recon_missing = plot_reconstrunction_missing_label_modality(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_missing_label', recon_missing, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # A toy example of variational inference with normalizing flow (using the VI class)
@pytest.mark.performance
def test_run_normalizing_flow_toy():
# In[1]:
# In[2]:
from pixyz.distributions import CustomProb, Normal, TransformedDistribution
from pixyz.models import VI
from pixyz.flows import PlanarFlow, FlowList
from pixyz.utils import print_latex
# In[3]:
# def plot_samples(points):
# X_LIMS = (-4, 4)
# Y_LIMS = (-4, 4)
#
# fig = plt.figure(figsize=(4, 4))
# ax = fig.add_subplot(111)
# ax.scatter(points[:, 0], points[:, 1], alpha=0.7, s=25)
# ax.set_xlim(*X_LIMS)
# ax.set_ylim(*Y_LIMS)
# ax.set_xlabel("p(z)")
#
# plt.show()
# In[4]:
import torch
x_dim = 2
def log_prob(z):
z1, z2 = torch.chunk(z, chunks=2, dim=1)
norm = torch.sqrt(z1 ** 2 + z2 ** 2)
exp1 = torch.exp(-0.5 * ((z1 - 2) / 0.6) ** 2)
exp2 = torch.exp(-0.5 * ((z1 + 2) / 0.6) ** 2)
u = 0.5 * ((norm - 2) / 0.4) ** 2 - torch.log(exp1 + exp2)
return -u
p = CustomProb(log_prob, var=["z"])
# In[5]:
# def plot_density(p):
# X_LIMS = (-4, 4)
# Y_LIMS = (-4, 4)
#
# x1 = np.linspace(*X_LIMS, 300)
# x2 = np.linspace(*Y_LIMS, 300)
# x1, x2 = np.meshgrid(x1, x2)
# shape = x1.shape
# x1 = x1.ravel()
# x2 = x2.ravel()
#
# z = np.c_[x1, x2]
# z = torch.FloatTensor(z)
#
# density_values = p.prob().eval({"z": z}).data.numpy().reshape(shape)
# plt.imshow(density_values, cmap='jet')
# plt.show()
# plot_density(p)
# In[6]:
# prior
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["x"], features_shape=[x_dim], name="prior").to(device)
# In[7]:
# flow
f = FlowList([PlanarFlow(x_dim) for _ in range(32)])
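# Each PlanarFlow applies f(z) = z + u * tanh(w^T z + b) with trainable u, w, b and
# contributes log|det(df/dz)| = log|1 + tanh'(w^T z + b) * u^T w| to the objective
# (a sketch of the standard planar flow; the layer's internals may differ in details).
# Stacking 32 of them yields a flexible approximate posterior q(z).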
# In[8]:
# transformed distribution (x -> f -> z)
q = TransformedDistribution(prior, f, var=["z"], name="q").to(device)
print(q)
print_latex(q)
# In[9]:
model = VI(p, q, optimizer=optim.Adam, optimizer_params={"lr": 1e-2})
print(model)
print_latex(model)
# In[10]:
for epoch in range(epochs):
loss = model.train(batch_size=batch_size)
if epoch % 100 == 0:
print('Epoch: {} Test loss: {:.4f}'.format(epoch, loss))
loss = model.test(batch_n=batch_size)
samples = q.sample(batch_n=1000)
# plot_samples(samples["z"].cpu().data.numpy())
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Real NVP (CIFAR10)
@pytest.mark.performance
def test_run_real_nvp_cifar():
# In[1]:
# In[2]:
# root = '../data'
# num_workers = 8
#
# transform_train = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
# transform_test = transforms.Compose([transforms.ToTensor()])
#
# train_loader = DataLoader(datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train),
# batch_size=batch_size, shuffle=True, num_workers=num_workers)
#
# test_loader = DataLoader(datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test),
# batch_size=batch_size, shuffle=False, num_workers=num_workers)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, Squeeze, Unsqueeze, Preprocess, Flow
from pixyz.layers import ResNet
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
in_channels = 3
mid_channels = 64
num_scales = 2
input_dim = 32
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[in_channels, input_dim, input_dim], name="p_prior")
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_channels, mid_channels):
super().__init__()
self.resnet = ResNet(in_channels=in_channels, mid_channels=mid_channels, out_channels=in_channels * 2,
num_blocks=8, kernel_size=3, padding=1,
double_after_norm=True)
def forward(self, x):
s_t = self.resnet(x)
log_s, t = torch.chunk(s_t, 2, dim=1)
log_s = torch.tanh(log_s)
return log_s, t
# In[7]:
flow_list = [Preprocess()]
# Coupling layer (checkerboard) x3
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels, mask_type="checkerboard",
scale_translate_net=ScaleTranslateNet(in_channels, mid_channels),
inverse_mask=(i % 2 != 0)))
# Squeeze -> 3x coupling (channel-wise)
flow_list.append(Squeeze())
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels * 4, mask_type="channel_wise",
scale_translate_net=ScaleTranslateNet(in_channels * 4, mid_channels * 2),
inverse_mask=(i % 2 != 0)))
flow_list.append(Unsqueeze())
f = FlowList(flow_list)
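# Each AffineCoupling above transforms only the unmasked part of its input as
# y = x * exp(log_s(x_masked)) + t(x_masked), with log_s and t produced by
# ScaleTranslateNet, so the Jacobian is triangular and log|det J| is simply the sum
# of log_s over the transformed dimensions (a sketch of the standard Real NVP
# coupling; masking details are handled inside the pixyz layer). Preprocess maps the
# discrete pixel values to continuous ones (the usual Real NVP dequantization/logit
# preprocessing is assumed) before the couplings are applied.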
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).cpu()
return sample
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z)
comparison = torch.cat([x.view(-1, 3, 32, 32), recon_batch]).cpu()
return comparison
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, 3, 32, 32).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Real NVP (CIFAR10)
@pytest.mark.performance
def test_run_real_nvp_cond():
# In[1]:
# In[2]:
# root = '../data'
# num_workers = 8
#
# transform_train = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
# transform_test = transforms.Compose([transforms.ToTensor()])
#
# train_loader = DataLoader(datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train),
# batch_size=batch_size, shuffle=True, num_workers=num_workers)
#
# test_loader = DataLoader(datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test),
# batch_size=batch_size, shuffle=False, num_workers=num_workers)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, Squeeze, Unsqueeze, Preprocess, Flow
from pixyz.layers import ResNet
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
in_channels = 3
mid_channels = 64
num_scales = 2
input_dim = 32
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[in_channels, input_dim, input_dim], name="p_prior")
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_channels, mid_channels):
super().__init__()
self.resnet = ResNet(in_channels=in_channels, mid_channels=mid_channels, out_channels=in_channels * 2,
num_blocks=8, kernel_size=3, padding=1,
double_after_norm=True)
def forward(self, x):
s_t = self.resnet(x)
log_s, t = torch.chunk(s_t, 2, dim=1)
log_s = torch.tanh(log_s)
return log_s, t
# In[7]:
flow_list = [Preprocess()]
# Coupling layer (checkerboard) x3
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels, mask_type="checkerboard",
scale_translate_net=ScaleTranslateNet(in_channels, mid_channels),
inverse_mask=(i % 2 != 0)))
# Squeeze -> 3x coupling (channel-wise)
flow_list.append(Squeeze())
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels * 4, mask_type="channel_wise",
scale_translate_net=ScaleTranslateNet(in_channels * 4, mid_channels * 2),
inverse_mask=(i % 2 != 0)))
flow_list.append(Unsqueeze())
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).cpu()
return sample
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z)
comparison = torch.cat([x.view(-1, 3, 32, 32), recon_batch]).cpu()
return comparison
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, 3, 32, 32).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Conditional Real NVP
@pytest.mark.performance
def test_run_real_nvp_cond_():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d, Shuffle, Preprocess, Reverse
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 28 * 28
y_dim = 10
z_dim = x_dim
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.fc1 = nn.Linear(in_features + y_dim, hidden_features)
self.fc2 = nn.Linear(hidden_features, hidden_features)
self.fc3_s = nn.Linear(hidden_features, in_features)
self.fc3_t = nn.Linear(hidden_features, in_features)
def forward(self, x, y):
hidden = F.relu(self.fc2(F.relu(self.fc1(torch.cat([x, y], 1)))))
log_s = torch.tanh(self.fc3_s(hidden))
t = self.fc3_t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
num_block = 5
flow_list.append(Preprocess())
for i in range(num_block):
flow_list.append(AffineCoupling(in_features=x_dim,
scale_translate_net=ScaleTranslateNet(x_dim, 1028),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(x_dim))
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"], cond_var=["y"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x, y):
with torch.no_grad():
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, y).view(-1, 1, 28, 28)
recon = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return recon
def plot_image_from_latent(z, y):
with torch.no_grad():
sample = p.inverse(z, y).view(-1, 1, 28, 28).cpu()
return sample
def plot_reconstrunction_changing_y(x, y):
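    # Encode x with its true label y, then decode the latent with each candidate one-hot label
    # y', visualising how the conditioning label changes the reconstruction.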
y_change = torch.eye(10)[range(7)].to(device)
batch_dummy = torch.ones(x.size(0))[:, None].to(device)
recon_all = []
with torch.no_grad():
for _y in y_change:
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, batch_dummy * _y[None, :]).view(-1, 1, 28, 28)
recon_all.append(recon_batch)
recon_changing_y = torch.cat(recon_all)
recon_changing_y = torch.cat([x.view(-1, 1, 28, 28), recon_changing_y]).cpu()
return recon_changing_y
# In[13]:
# writer = SummaryWriter()
plot_number = 5
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
y_sample = torch.eye(10)[[plot_number] * 64].to(device)
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_latent(z_sample, y_sample)
recon_changing_y = plot_reconstrunction_changing_y(_x[:8], _y[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_change_y', recon_changing_y, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Conditional Real NVP
@pytest.mark.performance
def test_run_real_nvp_cond__():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d, Shuffle, Preprocess, Reverse
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 28 * 28
y_dim = 10
z_dim = x_dim
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.fc1 = nn.Linear(in_features + y_dim, hidden_features)
self.fc2 = nn.Linear(hidden_features, hidden_features)
self.fc3_s = nn.Linear(hidden_features, in_features)
self.fc3_t = nn.Linear(hidden_features, in_features)
def forward(self, x, y):
hidden = F.relu(self.fc2(F.relu(self.fc1(torch.cat([x, y], 1)))))
log_s = torch.tanh(self.fc3_s(hidden))
t = self.fc3_t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
num_block = 5
flow_list.append(Preprocess())
for i in range(num_block):
flow_list.append(AffineCoupling(in_features=x_dim,
scale_translate_net=ScaleTranslateNet(x_dim, 1028),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(x_dim))
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"], cond_var=["y"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x, y):
with torch.no_grad():
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, y).view(-1, 1, 28, 28)
recon = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return recon
def plot_image_from_latent(z, y):
with torch.no_grad():
sample = p.inverse(z, y).view(-1, 1, 28, 28).cpu()
return sample
def plot_reconstrunction_changing_y(x, y):
y_change = torch.eye(10)[range(7)].to(device)
batch_dummy = torch.ones(x.size(0))[:, None].to(device)
recon_all = []
with torch.no_grad():
for _y in y_change:
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, batch_dummy * _y[None, :]).view(-1, 1, 28, 28)
recon_all.append(recon_batch)
recon_changing_y = torch.cat(recon_all)
recon_changing_y = torch.cat([x.view(-1, 1, 28, 28), recon_changing_y]).cpu()
return recon_changing_y
# In[13]:
# writer = SummaryWriter()
plot_number = 5
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
y_sample = torch.eye(10)[[plot_number] * 64].to(device)
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_latent(z_sample, y_sample)
recon_changing_y = plot_reconstrunction_changing_y(_x[:8], _y[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_change_y', recon_changing_y, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # A toy example of Real NVP (using the ML class)
@pytest.mark.performance
def test_run_real_nvp_toy():
# In[1]:
test_size = 5
# In[2]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d
from pixyz.models import ML
from pixyz.utils import print_latex
# In[3]:
# def plot_samples(points, noise):
# X_LIMS = (-1.5, 2.5)
# Y_LIMS = (-2.5, 2.5)
#
# fig = plt.figure(figsize=(8, 4))
# ax = fig.add_subplot(121)
# ax.scatter(points[:, 0], points[:, 1], alpha=0.7, s=25, c="b")
# ax.set_xlim(*X_LIMS)
# ax.set_ylim(*Y_LIMS)
# ax.set_xlabel("p(x)")
#
# X_LIMS = (-3, 3)
# Y_LIMS = (-3, 3)
#
# ax = fig.add_subplot(122)
# ax.scatter(noise[:, 0], noise[:, 1], alpha=0.7, s=25, c="r")
# ax.set_xlim(*X_LIMS)
# ax.set_ylim(*Y_LIMS)
# ax.set_xlabel("p(z)")
#
# plt.show()
# In[4]:
x_dim = 2
z_dim = x_dim
# In[5]:
# prior
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.layers = nn.Sequential(nn.Linear(in_features, hidden_features),
nn.ReLU(),
nn.Linear(hidden_features, hidden_features),
nn.ReLU())
self.log_s = nn.Linear(hidden_features, in_features)
self.t = nn.Linear(hidden_features, in_features)
def forward(self, x):
hidden = self.layers(x)
log_s = torch.tanh(self.log_s(hidden))
t = self.t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
for i in range(5):
scale_translate_net = nn.Sequential(nn.Linear(x_dim, 256),
nn.ReLU(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, x_dim * 2))
flow_list.append(AffineCoupling(in_features=2,
scale_translate_net=ScaleTranslateNet(x_dim, 256),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(2))
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-2})
print(model)
print_latex(model)
# In[10]:
# plot training set
from sklearn import datasets
x = datasets.make_moons(n_samples=test_size, noise=0.1)[0].astype("float32")
noise = prior.sample(batch_n=test_size)["z"].data.cpu()
# plot_samples(x, noise)
# In[11]:
for epoch in range(epochs):
x = datasets.make_moons(n_samples=batch_size, noise=0.1)[0].astype("float32")
x = torch.tensor(x).to(device)
loss = model.train({"x": x})
if epoch % 500 == 0:
        print('Epoch: {} Train loss: {:.4f}'.format(epoch, loss))
# samples
samples = p.sample(batch_n=test_size)["x"].data.cpu()
# inference
_x = datasets.make_moons(n_samples=test_size, noise=0.1)[0].astype("float32")
_x = torch.tensor(_x).to(device)
noise = p.inference({"x": _x})["z"].data.cpu()
# plot_samples(samples, noise)
# In[12]:
samples = p.sample(batch_n=test_size)["x"].data.cpu()
# inference
_x = datasets.make_moons(n_samples=test_size, noise=0.1)[0].astype("float32")
_x = torch.tensor(_x).to(device)
noise = p.inference({"x": _x})["z"].data.cpu()
# plot_samples(samples, noise)
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Real NVP
@pytest.mark.performance
def test_run_real_nvp():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 4, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d, Shuffle, Preprocess, Reverse
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 28 * 28
z_dim = x_dim
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.fc1 = nn.Linear(in_features, hidden_features)
self.fc2 = nn.Linear(hidden_features, hidden_features)
self.fc3_s = nn.Linear(hidden_features, in_features)
self.fc3_t = nn.Linear(hidden_features, in_features)
def forward(self, x):
hidden = F.relu(self.fc2(F.relu(self.fc1(x))))
log_s = torch.tanh(self.fc3_s(hidden))
t = self.fc3_t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
num_block = 5
flow_list.append(Preprocess())
for i in range(num_block):
flow_list.append(AffineCoupling(in_features=x_dim,
scale_translate_net=ScaleTranslateNet(x_dim, 1028),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(x_dim))
f = FlowList(flow_list)
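# The assembled flow is: Preprocess, followed by num_block affine coupling layers with
# alternating masks, each followed by BatchNorm1d, applied in order by FlowList.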
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder (using the Model class)
@pytest.mark.performance
def test_run_vae_model():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli
from pixyz.losses import KullbackLeibler, Expectation as E
from pixyz.models import Model
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# In[8]:
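# objective: the negative ELBO, KL(q(z|x) || p_prior(z)) - E_{q(z|x)}[log p(x|z)], averaged over the batch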
loss = (KullbackLeibler(q, prior) - E(q, p.log_prob())).mean()
print(loss)
print_latex(loss)
# In[9]:
model = Model(loss=loss, distributions=[p, q],
optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x}, return_all=False)
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# writer = SummaryWriter('/runs/vae_model')
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# In[ ]:
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder (using the VAE class)
@pytest.mark.performance
def test_run_vae_with_vae_class():
# * Original paper: Auto-Encoding Variational Bayes (https://arxiv.org/pdf/1312.6114.pdf)
# In[1]:
# In[2]:
# MNIST
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.utils import print_latex
# ## Define probability distributions
# Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$
# Generator: $p_{\theta}(x|z) = \cal B(x; \lambda = g(z))$
# Inference: $q_{\phi}(z|x) = \cal N(z; \mu=f_\mu(x), \sigma^2=f_{\sigma^2}(x))$
# In[4]:
from pixyz.distributions import Normal, Bernoulli
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
"""
parameterizes q(z | x)
    inferred z follows a Gaussian distribution with mean 'loc' and standard deviation 'scale'
z ~ N(loc, scale)
"""
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
"""
given the observation x,
        return the mean and standard deviation of the Gaussian distribution
"""
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
"""
    parameterizes the Bernoulli (for MNIST) observation likelihood p(x | z)
"""
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
"""
given the latent variable z,
return the probability of Bernoulli distribution
"""
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior p(z)
# z ~ N(0, 1)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# ## Define VAE model using VAE Model Class
# - https://docs.pixyz.io/en/latest/models.html#vae
# In[8]:
from pixyz.losses import KullbackLeibler
# define additional loss terms for regularizing representation of latent variables
kl = KullbackLeibler(q, prior)
print_latex(kl)
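# the VAE model class (see the docs link above) adds the reconstruction term -E_{q(z|x)}[log p(x|z)]
# to this regularizer, so the optimized objective is the mean negative ELBO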
# In[9]:
from pixyz.models import VAE
model = VAE(encoder=q, decoder=p, regularizer=kl, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# ## Define Train and Test loop using model
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# ## Reconstruct image and generate image
# In[12]:
def plot_reconstrunction(x):
"""
reconstruct image given input observation x
"""
with torch.no_grad():
# infer and sampling z using inference model q `.sample()` method
z = q.sample({"x": x}, return_all=False)
# reconstruct image from inferred latent variable z using Generator model p `.sample_mean()` method
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
# concatenate original image and reconstructed image for comparison
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
"""
generate new image given latent variable z
"""
with torch.no_grad():
# generate image from latent variable z using Generator model p `.sample_mean()` method
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# for visualising in TensorBoard
# writer = SummaryWriter()
# fix latent variable z for watching generative model improvement
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
# set-aside observation for watching generative model improvement
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder
@pytest.mark.performance
def test_run_vae():
# * Original paper: Auto-Encoding Variational Bayes (https://arxiv.org/pdf/1312.6114.pdf)
# In[1]:
# In[2]:
# MNIST
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.utils import print_latex
# ## Define probability distributions
# Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$
# Generator: $p_{\theta}(x|z) = \cal B(x; \lambda = g(z))$
# Inference: $q_{\phi}(z|x) = \cal N(z; \mu=f_\mu(x), \sigma^2=f_{\sigma^2}(x))$
# In[4]:
from pixyz.distributions import Normal, Bernoulli
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
"""
parameterizes q(z | x)
    inferred z follows a Gaussian distribution with mean 'loc' and standard deviation 'scale'
z ~ N(loc, scale)
"""
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
"""
given the observation x,
        return the mean and standard deviation of the Gaussian distribution
"""
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
"""
    parameterizes the Bernoulli (for MNIST) observation likelihood p(x | z)
"""
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
"""
given the latent variable z,
return the probability of Bernoulli distribution
"""
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior p(z)
# z ~ N(0, 1)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# ## Define Loss function
# Loss function:
#
# $\frac{1}{N} \sum_{i=1}^{N}\left[K L\left(q\left(z | x^{(i)}\right) \| p_{prior}(z)\right)-\mathbb{E}_{q\left(z | x^{(i)}\right)}\left[\log p\left(x^{(i)} | z\right)\right]\right]$
# In[8]:
from pixyz.losses import LogProb, KullbackLeibler, Expectation as E
loss = (KullbackLeibler(q, prior) - E(q, LogProb(p))).mean()
print_latex(loss)
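# Minimal sanity-check sketch (illustrative): a pixyz Loss can be evaluated directly on a
# dummy batch before training and returns a scalar tensor; _dummy_x is a throwaway all-zero batch.
_dummy_x = torch.zeros(8, x_dim).to(device)
print(loss.eval({"x": _dummy_x}))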
# ## Define VAE model using Model Class
# - https://docs.pixyz.io/en/latest/models.html#model
# In[9]:
from pixyz.models import Model
model = Model(loss=loss, distributions=[p, q],
optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# ## Define Train and Test loop using model
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# ## Reconstruct image and generate image
# In[12]:
def plot_reconstrunction(x):
"""
reconstruct image given input observation x
"""
with torch.no_grad():
# infer and sampling z using inference model q `.sample()` method
z = q.sample({"x": x}, return_all=False)
# reconstruct image from inferred latent variable z using Generator model p `.sample_mean()` method
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
# concatenate original image and reconstructed image for comparison
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
"""
generate new image given latent variable z
"""
with torch.no_grad():
# generate image from latent variable z using Generator model p `.sample_mean()` method
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# for visualising in TensorBoard
# writer = SummaryWriter()
# fix latent variable z for watching generative model improvement
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
# set-aside observation for watching generative model improvement
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder (using the VI class)
@pytest.mark.performance
def test_run_vi():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli
from pixyz.models import VI
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_joint = p * prior
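# p_joint is the joint generative model p(x, z) = p(x|z) p_{prior}(z); the VI model below fits
# q(z|x) to it by maximizing the ELBO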
# In[5]:
print(p_joint)
print_latex(p_joint)
# In[6]:
print(q)
print_latex(q)
# In[7]:
model = VI(p_joint, q, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[8]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[9]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[10]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x}, return_all=False)
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[11]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
| 126,799
| 29.517449
| 554
|
py
|
pixyz
|
pixyz-main/tests/distributions/test_distribution.py
|
import pytest
from os.path import join as pjoin
import torch
from pixyz.distributions import Normal, MixtureModel, Categorical, FactorizedBernoulli
from pixyz.utils import lru_cache_for_sample_dict
from pixyz.losses import KullbackLeibler
from pixyz.models import VAE
class TestGraph:
def test_rename_atomdist(self):
normal = Normal(var=['x'], name='p')
graph = normal.graph
assert graph.name == 'p'
normal.name = 'q'
assert graph.name == 'q'
def test_print(self):
normal = Normal(var=['x'], name='p')
print(normal.graph)
def test_set_option(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
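        # set_option attaches sampling options (batch_n, sample_shape) to the factor that
        # generates 'y', so they are applied whenever the graph samples that variable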
dist.graph.set_option(dict(batch_n=4, sample_shape=(2, 3)), ['y'])
sample = dist.sample()
assert sample['y'].shape == torch.Size([2, 3, 4])
assert sample['x'].shape == torch.Size([2, 3, 4])
dist.graph.set_option({}, ['y'])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=None).shape == torch.Size([2])
assert dist.get_log_prob(sample,
sum_features=False).shape == torch.Size([2, 3, 4])
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * FactorizedBernoulli(
var=['y'], probs=torch.tensor([0.3, 0.8]))
dist.graph.set_option(dict(batch_n=3, sample_shape=(4,)), ['y'])
sample = dist.sample()
assert sample['y'].shape == torch.Size([4, 3, 2])
assert sample['x'].shape == torch.Size([4, 3, 2])
dist.graph.set_option(dict(), ['y'])
assert dist.get_log_prob(sample, sum_features=True, feature_dims=[-1]).shape == torch.Size([4, 3])
def test_sample_mean(self):
dist = Normal(var=['x'], loc=0, scale=1) * Normal(var=['y'], cond_var=['x'], loc='x', scale=1)
assert dist.sample(sample_mean=True)['y'] == torch.zeros(1)
def test_input_extra_var(self):
normal = Normal(var=['x'], loc=0, scale=1) * Normal(var=['y'], loc=0, scale=1)
assert set(normal.sample({'z': torch.zeros(1)})) == set(('x', 'y', 'z'))
assert normal.get_log_prob({'y': torch.zeros(1), 'x': torch.zeros(1),
'z': torch.zeros(1)}).shape == torch.Size([1])
assert set(normal.sample({'x': torch.zeros(1)})) == set(('x', 'y'))
class TestDistributionBase:
def test_init_with_scalar_params(self):
normal = Normal(loc=0, scale=1, features_shape=[2])
assert normal.sample()['x'].shape == torch.Size([1, 2])
assert normal.features_shape == torch.Size([2])
normal = Normal(loc=0, scale=1)
assert normal.sample()['x'].shape == torch.Size([1])
assert normal.features_shape == torch.Size([])
def test_batch_n(self):
normal = Normal(loc=0, scale=1)
assert normal.sample(batch_n=3)['x'].shape == torch.Size([3])
def test_input_extra_var(self):
normal = Normal(loc=0, scale=1)
assert set(normal.sample({'y': torch.zeros(1)})) == set(('x', 'y'))
assert normal.get_log_prob({'y': torch.zeros(1), 'x': torch.zeros(1)}).shape == torch.Size([1])
assert set(normal.sample({'x': torch.zeros(1)})) == set(('x'))
def test_sample_mean(self):
dist = Normal(loc=0, scale=1)
assert dist.sample(sample_mean=True)['x'] == torch.zeros(1)
@pytest.mark.parametrize(
"dist", [
Normal(loc=0, scale=1),
Normal(var=['x'], loc=0, scale=1) * Normal(var=['y'], loc=0, scale=1),
# Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1),
],
)
def test_get_log_prob_feature_dims(self, dist):
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=None).shape == torch.Size([2])
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=[-2]).shape == torch.Size([2, 4])
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=[0, 1]).shape == torch.Size([4])
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=[]).shape == torch.Size([2, 3, 4])
def test_get_log_prob_feature_dims2(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
dist.graph.set_option(dict(batch_n=4, sample_shape=(2, 3)), ['y'])
sample = dist.sample()
assert sample['y'].shape == torch.Size([2, 3, 4])
list(dist.graph._factors_from_variable('y'))[0].option = {}
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=None).shape == torch.Size([2])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=[-2]).shape == torch.Size([2, 4])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=[0, 1]).shape == torch.Size([4])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=[]).shape == torch.Size([2, 3, 4])
@pytest.mark.parametrize(
"dist", [
Normal(loc=0, scale=1),
Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1),
])
def test_unknown_option(self, dist):
x_dict = dist.sample(unknown_opt=None)
dist.get_log_prob(x_dict, unknown_opt=None)
class TestReplaceVarDistribution:
def test_get_params(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1)
result = dist.get_params({'y': torch.ones(1)})
assert list(result.keys()) == ['loc', 'scale']
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
result = dist.get_params({'z': torch.ones(1)})
assert list(result.keys()) == ['loc', 'scale']
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
with pytest.raises(ValueError):
dist.get_params({'y': torch.ones(1)})
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(x='z')
result = dist.get_params({'y': torch.ones(1)})
assert list(result.keys()) == ['loc', 'scale']
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
with pytest.raises(NotImplementedError):
dist.get_params()
def test_sample_mean(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1)
result = dist.sample_mean({'y': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
result = dist.sample_mean({'z': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
with pytest.raises(ValueError):
dist.sample_mean({'y': torch.ones(1)})
def test_sample_variance(self):
dist = Normal(var=['x'], cond_var=['y'], loc=2, scale='y')
result = dist.sample_variance({'y': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc=2, scale='y').replace_var(y='z')
result = dist.sample_variance({'z': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc=2, scale='y').replace_var(y='z')
with pytest.raises(ValueError):
dist.sample_variance({'y': torch.ones(1)})
def test_get_entropy(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1)
truth = dist.get_entropy({'y': torch.ones(1)})
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z', x='y')
result = dist.get_entropy({'z': torch.ones(1)})
assert result == truth
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
with pytest.raises(ValueError):
dist.get_entropy({'y': torch.ones(1)})
class TestMixtureDistribution:
def test_sample_mean(self):
dist = MixtureModel([Normal(loc=0, scale=1), Normal(loc=1, scale=1)], Categorical(probs=torch.tensor([1., 2.])))
assert dist.sample(sample_mean=True)['x'] == torch.ones(1)
def test_memoization():
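    # get_params is wrapped with lru_cache_for_sample_dict, so repeated calls with the same
    # sample dict are served from the cache; each network's forward should therefore run exactly
    # once per training step, which is what the recorded exec_order asserts at the end.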
exec_order = []
class Encoder(Normal):
def __init__(self, exec_order):
super().__init__(var=["z"], cond_var=["x"], name="q")
self.linear = torch.nn.Linear(10, 10)
self.exec_order = exec_order
@lru_cache_for_sample_dict()
def get_params(self, params_dict={}, **kwargs):
return super().get_params(params_dict, **kwargs)
def forward(self, x):
exec_order.append("E")
return {"loc": self.linear(x), "scale": 1.0}
class Decoder(Normal):
def __init__(self, exec_order):
super().__init__(var=["x"], cond_var=["z"], name="p")
self.exec_order = exec_order
@lru_cache_for_sample_dict()
def get_params(self, params_dict={}, **kwargs):
return super().get_params(params_dict, **kwargs)
def forward(self, z):
self.exec_order.append("D")
return {"loc": z, "scale": 1.0}
def prior():
return Normal(var=["z"], name="p_{prior}", features_shape=[10], loc=torch.tensor(0.), scale=torch.tensor(1.))
q = Encoder(exec_order)
p = Decoder(exec_order)
prior = prior()
kl = KullbackLeibler(q, prior)
mdl = VAE(q, p, regularizer=kl, optimizer=torch.optim.Adam, optimizer_params={"lr": 1e-3})
x = torch.zeros((10, 10))
mdl.train({"x": x})
assert exec_order == ["E", "D"]
@pytest.mark.parametrize(
"no_contiguous_tensor", [
torch.zeros(2, 3),
torch.zeros(2, 3).T,
torch.zeros(1).expand(3),
]
)
def test_save_dist(tmpdir, no_contiguous_tensor):
# pull request:#110
ones = torch.ones_like(no_contiguous_tensor)
p = Normal(loc=no_contiguous_tensor, scale=ones)
save_path = pjoin(tmpdir, "tmp.pt")
torch.save(p.state_dict(), save_path)
q = Normal(loc=ones, scale=3 * ones)
assert not torch.all(no_contiguous_tensor == q.loc).item()
    # it needs a copy of the tensor
q = Normal(loc=ones, scale=ones)
q.load_state_dict(torch.load(save_path))
assert torch.all(no_contiguous_tensor == q.loc).item()
if __name__ == "__main__":
TestReplaceVarDistribution().test_get_entropy()
| 10,844
| 41.034884
| 120
|
py
|
pixyz
|
pixyz-main/tests/distributions/test_expornential_distributions.py
|
import pytest
import torch
from pixyz.distributions.exponential_distributions import RelaxedBernoulli, Normal
class TestNormal:
def test_init_with_same_param(self):
n = Normal(var=['x'], cond_var=['y'], loc='y', scale='y')
result = n.sample({'y': torch.ones(2, 3)})
assert result['x'].shape == (2, 3)
class TestRelaxedBernoulli:
def test_log_prob_of_hard_value(self):
rb = RelaxedBernoulli(var=['x'], temperature=torch.tensor(0.5), probs=torch.ones(2))
assert self.nearly_eq(rb.get_log_prob({'x': torch.tensor([0., 1.])}), torch.tensor([-15.9424]))
def nearly_eq(self, tensor1, tensor2):
return abs(tensor1.item() - tensor2.item()) < 0.001
def test_sample_mean(self):
rb = RelaxedBernoulli(var=['x'], temperature=torch.tensor(0.5), probs=torch.tensor([0.5, 0.8]))
with pytest.raises(NotImplementedError):
rb.sample(sample_mean=True)
| 932
| 34.884615
| 103
|
py
|
pixyz
|
pixyz-main/tests/models/test_model.py
|
import os
import torch
import torch.nn as nn
from pixyz.distributions import Normal
from pixyz.losses import CrossEntropy
from pixyz.models import Model
class TestModel:
def _make_model(self, loc):
class Dist(Normal):
def __init__(self):
super().__init__(loc=loc, scale=1)
self.module = nn.Linear(2, 2)
p = Dist()
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
loss = CrossEntropy(p, p).to(device)
model = Model(loss=loss, distributions=[p])
return model
def test_save_load(self, tmp_path):
model = self._make_model(0)
save_path = os.path.join(tmp_path, 'model.pth')
model.save(save_path)
model = self._make_model(1)
p: Normal = model.distributions[0]
assert p.get_params()['loc'] == 1
model.load(save_path)
p: Normal = model.distributions[0]
assert p.get_params()['loc'] == 0
| 1,009
| 24.897436
| 55
|
py
|
pixyz
|
pixyz-main/tests/losses/test_losses.py
|
from pixyz.distributions import Normal
from pixyz.losses import Expectation
class TestExpectation:
def test_sample_mean(self):
p = Normal(loc=0, scale=1)
f = p.log_prob()
e = Expectation(p, f)
e.eval({}, sample_mean=True)
| 260
| 22.727273
| 38
|
py
|
pixyz
|
pixyz-main/tests/losses/test_iteration.py
|
import torch
from pixyz.losses import IterativeLoss, Parameter, Expectation
from pixyz.distributions import Normal
class TestIterativeLoss:
def test_print_latex(self):
t_max = 3
itr = IterativeLoss(Parameter('t'), max_iter=t_max, timestep_var='t')
assert itr.loss_text == r"\sum_{t=0}^{" + str(t_max - 1) + "} t"
def test_time_specific_step_loss(self):
t_max = 3
itr = IterativeLoss(Parameter('t'), max_iter=t_max, timestep_var='t')
assert itr.eval() == sum(range(t_max))
def test_input_var(self):
q = Normal(var=['z'], cond_var=['x'], loc='x', scale=1)
p = Normal(var=['y'], cond_var=['z'], loc='z', scale=1)
e = Expectation(q, p.log_prob())
assert set(e.input_var) == set(('x', 'y'))
assert e.eval({'y': torch.zeros(1), 'x': torch.zeros(1)}).shape == torch.Size([1])
def test_input_extra_var(self):
q = Normal(var=['z'], cond_var=['x'], loc='x', scale=1)
p = Normal(var=['y'], cond_var=['z'], loc='z', scale=1)
e = Expectation(q, p.log_prob())
assert set(e.eval({'y': torch.zeros(1), 'x': torch.zeros(1),
'w': torch.zeros(1)}, return_dict=True)[1]) == set(('w', 'x', 'y', 'z'))
assert set(e.eval({'y': torch.zeros(1), 'x': torch.zeros(1),
'z': torch.zeros(1)}, return_dict=True)[1]) == set(('x', 'y', 'z'))
| 1,412
| 43.15625
| 99
|
py
|
pixyz
|
pixyz-main/docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'Pixyz'
copyright = '2019, masa-su'
author = 'masa-su'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.imgmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pixyzdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pixyz.tex', 'Pixyz Documentation',
'masa', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pixyz', 'Pixyz Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pixyz', 'Pixyz Documentation',
author, 'Pixyz', 'One line description of project.',
'Miscellaneous'),
]
autodoc_member_order = 'bysource'
autodoc_default_options = {'show-inheritance': True}
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
| 5,099
| 28.651163
| 79
|
py
|
pixyz
|
pixyz-main/tutorial/English/utils.py
|
from torch.utils.data import Dataset
import pickle
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
def imshow(img_tensors):
img = torchvision.utils.make_grid(img_tensors)
npimg = img.numpy()
plt.figure(figsize=(16, 12))
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class DMMDataset(Dataset):
def __init__(self, pickle_path="cartpole_28.pickle"):
with open(pickle_path, mode='rb') as f:
data = pickle.load(f)
episode_frames, actions = data
# episode_frames: np.array([episode_num, one_episode_length, height, width, Channels]) (10000, 30, 28, 28, 3)
# actions: np.array([episode_num, one_episode_length]) (10000, 30)
# HWC → CHW
episode_frames = episode_frames.transpose(0, 1, 4, 2, 3) / 1.0
# print(episode_frames.dtype)
actions = actions[:, :, np.newaxis]
self.episode_frames = torch.from_numpy(episode_frames.astype(np.float32))
self.actions = torch.from_numpy(actions.astype(np.float32))
self.mean = torch.zeros_like(self.episode_frames[0])
self.std = torch.zeros_like(self.episode_frames[0])
self.mean[:, 0, :, :] = 182.6091
self.mean[:, 1, :, :] = 182.6091
self.mean[:, 2, :, :] = 182.6091
self.std[:, 0, :, :] = 45.5565
self.std[:, 1, :, :] = 47.6260
self.std[:, 2, :, :] = 50.7284
def __len__(self):
return len(self.episode_frames)
def __getitem__(self, idx):
return {
"episode_frames": (self.episode_frames[idx] - self.mean) / self.std,
"actions": self.actions[idx]
}
def _calculate_mean_std(self):
print(self.episode_frames.shape)
std = torch.std(self.episode_frames, dim=(0, 1, 3, 4))
mean = torch.mean(self.episode_frames, dim=(0, 1, 3, 4))
print("mean: ", mean)
print(mean.shape)
print("std: ", std)
print(std.shape)
# mean: tensor([182.6091, 182.6091, 182.6091])
# torch.Size([3])
# std: tensor([45.5565, 47.6260, 50.7284])
# torch.Size([3])
def postprocess(image):
image_ = image.detach().clone()
# print(image_.shape)
mean = torch.ones_like(image_)
std = torch.ones_like(image_)
mean[:, 0, :, :] = 182.6091
mean[:, 1, :, :] = 182.6091
mean[:, 2, :, :] = 182.6091
std[:, 0, :, :] = 45.5565
std[:, 1, :, :] = 47.6260
std[:, 2, :, :] = 50.7284
image_ = image_ * std + mean
image_ = torch.clamp(image_, min=0.0, max=255.0) / 255.
return image_
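# A minimal usage sketch (illustrative; assumes "cartpole_28.pickle" is available):
# loader = torch.utils.data.DataLoader(DMMDataset(), batch_size=8, shuffle=True)
# batch = next(iter(loader))  # batch["episode_frames"]: (8, 30, 3, 28, 28), normalized
# imshow(postprocess(batch["episode_frames"][:, 0]))  # un-normalize and show each episode's first frame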
if __name__ == "__main__":
data_set = DMMDataset()
data_set._calculate_mean_std()
| 2,721
| 29.931818
| 117
|
py
|
pixyz
|
pixyz-main/tutorial/English/prepare_cartpole_dataset.py
|
import gym
import pickle
import numpy as np
import cv2
def main():
env = gym.make("CartPole-v1")
observation = env.reset()
episodes = {"frames": [], "actions": []}
# for 56 *56 episode num = 500
# for 28 * 28 episode num = 1000
for _episode in range(1000):
frames = []
actions = []
for _frame in range(30):
action = env.action_space.sample() # your agent here (this takes random actions)
frame = env.render(mode='rgb_array')
observation, reward, done, info = env.step(action)
img = frame
img = img[150:350, 200:400]
img = cv2.resize(img, (28, 28))
frames.append(img)
actions.append(action)
_ = env.reset()
episodes["frames"].append(frames)
episodes["actions"].append(actions)
env.close()
data = [np.array(episodes["frames"]), np.array(episodes["actions"])]
print(data[0].shape, data[1].shape)
with open('cartpole_28.pickle', mode='wb') as f:
pickle.dump(data, f)
if __name__ == "__main__":
main()
| 1,107
| 25.380952
| 93
|
py
|
pixyz
|
pixyz-main/tutorial/Japanese/utils.py
|
from torch.utils.data import Dataset
import pickle
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
def imshow(img_tensors):
img = torchvision.utils.make_grid(img_tensors)
npimg = img.numpy()
plt.figure(figsize=(16, 12))
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class DMMDataset(Dataset):
def __init__(self, pickle_path="cartpole_28.pickle"):
with open(pickle_path, mode='rb') as f:
data = pickle.load(f)
episode_frames, actions = data
# episode_frames: np.array([episode_num, one_episode_length, height, width, Channels]) (10000, 30, 28, 28, 3)
# actions: np.array([episode_num, one_episode_length]) (10000, 30)
# HWC → CHW
episode_frames = episode_frames.transpose(0, 1, 4, 2, 3) / 1.0
# print(episode_frames.dtype)
actions = actions[:, :, np.newaxis]
self.episode_frames = torch.from_numpy(episode_frames.astype(np.float32))
self.actions = torch.from_numpy(actions.astype(np.float32))
self.mean = torch.zeros_like(self.episode_frames[0])
self.std = torch.zeros_like(self.episode_frames[0])
self.mean[:, 0, :, :] = 182.6091
self.mean[:, 1, :, :] = 182.6091
self.mean[:, 2, :, :] = 182.6091
self.std[:, 0, :, :] = 45.5565
self.std[:, 1, :, :] = 47.6260
self.std[:, 2, :, :] = 50.7284
def __len__(self):
return len(self.episode_frames)
def __getitem__(self, idx):
return {
"episode_frames": (self.episode_frames[idx] - self.mean) / self.std,
"actions": self.actions[idx]
}
def _calculate_mean_std(self):
print(self.episode_frames.shape)
std = torch.std(self.episode_frames, dim=(0, 1, 3, 4))
mean = torch.mean(self.episode_frames, dim=(0, 1, 3, 4))
print("mean: ", mean)
print(mean.shape)
print("std: ", std)
print(std.shape)
# mean: tensor([182.6091, 182.6091, 182.6091])
# torch.Size([3])
# std: tensor([45.5565, 47.6260, 50.7284])
# torch.Size([3])
def postprocess(image):
image_ = image.detach().clone()
# print(image_.shape)
mean = torch.ones_like(image_)
std = torch.ones_like(image_)
mean[:, 0, :, :] = 182.6091
mean[:, 1, :, :] = 182.6091
mean[:, 2, :, :] = 182.6091
std[:, 0, :, :] = 45.5565
std[:, 1, :, :] = 47.6260
std[:, 2, :, :] = 50.7284
image_ = image_ * std + mean
image_ = torch.clamp(image_, min=0.0, max=255.0) / 255.
return image_
if __name__ == "__main__":
data_set = DMMDataset()
data_set._calculate_mean_std()
| 2,721
| 29.931818
| 117
|
py
|
pixyz
|
pixyz-main/tutorial/Japanese/prepare_cartpole_dataset.py
|
import gym
import pickle
import numpy as np
import cv2
def main():
env = gym.make("CartPole-v1")
observation = env.reset()
episodes = {"frames": [], "actions": []}
# for 56 *56 episode num = 500
# for 28 * 28 episode num = 1000
for _episode in range(1000):
frames = []
actions = []
for _frame in range(30):
action = env.action_space.sample() # your agent here (this takes random actions)
frame = env.render(mode='rgb_array')
observation, reward, done, info = env.step(action)
img = frame
img = img[150:350, 200:400]
img = cv2.resize(img, (28, 28))
frames.append(img)
actions.append(action)
_ = env.reset()
episodes["frames"].append(frames)
episodes["actions"].append(actions)
env.close()
data = [np.array(episodes["frames"]), np.array(episodes["actions"])]
print(data[0].shape, data[1].shape)
with open('cartpole_28.pickle', mode='wb') as f:
pickle.dump(data, f)
if __name__ == "__main__":
main()
| 1,107
| 25.380952
| 93
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/__main__.py
|
from archive_query_log.cli import main
if __name__ == "__main__":
main()
| 78
| 14.8
| 38
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/test_fastwarc.py
|
def test_fastwarc_installed():
import fastwarc
assert fastwarc
from fastwarc import GZipStream
assert GZipStream
from fastwarc import FileStream
assert FileStream
from fastwarc import ArchiveIterator
assert ArchiveIterator
from fastwarc import WarcRecordType
assert WarcRecordType
from fastwarc import WarcRecord
assert WarcRecord
# pylint: disable=no-name-in-module
from fastwarc.stream_io import PythonIOStreamAdapter
assert PythonIOStreamAdapter
| 515
| 21.434783
| 56
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/conftest.py
|
from approvaltests import set_default_reporter, DiffReporter
from pytest import fixture
def configure_approvaltests():
set_default_reporter(DiffReporter())
@fixture(scope="session", autouse=True)
def set_default_reporter_for_all_tests():
configure_approvaltests()
| 276
| 22.083333
| 60
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/generate_review_sample.py
|
from gzip import GzipFile
from io import TextIOWrapper
from pathlib import Path
from random import Random
from tqdm import tqdm
REVIEW_SAMPLE_SIZE = 1000
DATA_PATH = Path(
"/mnt/ceph/storage/data-in-progress/data-research/"
"web-search/web-archive-query-log/focused/"
)
SAMPLE_CORPUS_PATH = DATA_PATH / "sample-corpus"
SAMPLE_QUERIES_PATH = SAMPLE_CORPUS_PATH / "queries"
SAMPLE_DOCUMENTS_PATH = SAMPLE_CORPUS_PATH / "documents"
REVIEW_SAMPLE_CORPUS_PATH = DATA_PATH / "review-corpus-unfiltered"
REVIEW_SAMPLE_CORPUS_PATH.mkdir(exist_ok=True)
REVIEW_SAMPLE_QUERIES_PATH = REVIEW_SAMPLE_CORPUS_PATH / "queries.jsonl"
REVIEW_SAMPLE_DOCUMENTS_PATH = REVIEW_SAMPLE_CORPUS_PATH / "documents.jsonl"
def main():
lines = []
for path in tqdm(list(SAMPLE_QUERIES_PATH.glob("part*.gz"))):
# noinspection PyTypeChecker
with GzipFile(path, "rb") as gf, TextIOWrapper(gf) as f:
for line in f:
if "\"archived_query_url_location\": {" not in line:
continue
if "\"archived_raw_serp_location\": {" not in line:
continue
# if "\"archived_parsed_serp_location\": {" not in line:
# continue
lines.append(line)
lines = Random(0).sample(lines, REVIEW_SAMPLE_SIZE)
with REVIEW_SAMPLE_QUERIES_PATH.open("wt") as o:
for line in lines:
o.write(line)
if __name__ == '__main__':
main()
| 1,468
| 29.604167
| 76
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/service_stats.py
|
from archive_query_log.config import SERVICES
if __name__ == '__main__':
num_url_prefixes = sum(
len(service.focused_url_prefixes)
for service in SERVICES.values()
)
print(f"Number of URL prefixes: {num_url_prefixes}")
num_query_parsers = sum(
len(service.query_parsers)
for service in SERVICES.values()
)
print(f"Number of query parsers: {num_query_parsers}")
num_page_parsers = sum(
len(service.page_parsers)
for service in SERVICES.values()
)
print(f"Number of page parsers: {num_page_parsers}")
num_offset_parsers = sum(
len(service.offset_parsers)
for service in SERVICES.values()
)
print(f"Number of offset parsers: {num_offset_parsers}")
num_interpreted_query_parsers = sum(
len(service.interpreted_query_parsers)
for service in SERVICES.values()
)
print(
f"Number of interpreted query parsers: {num_interpreted_query_parsers}"
)
num_results_parsers = sum(
len(service.results_parsers)
for service in SERVICES.values()
)
print(f"Number of results parsers: {num_results_parsers}")
| 1,168
| 31.472222
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/config.py
|
from typing import Mapping
from archive_query_log import DATA_DIRECTORY_PATH
from archive_query_log.model import Service
from archive_query_log.services import read_services
# Load all services that have parsers and create the services for them.
SERVICES_PATH = DATA_DIRECTORY_PATH / "selected-services.yaml"
SERVICES: Mapping[str, Service] = read_services(SERVICES_PATH)
| 374
| 36.5
| 71
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/__init__.py
|
from logging import getLogger
from pathlib import Path
__version__ = "0.1.0"
PROJECT_DIRECTORY_PATH = Path(__file__).parent.parent
DATA_DIRECTORY_PATH = PROJECT_DIRECTORY_PATH / "data"
# DATA_DIRECTORY_PATH = Path("/mnt/ceph/storage/TODO")
CDX_API_URL = "https://web.archive.org/cdx/search/cdx"
LOGGER = getLogger(__name__)
| 328
| 24.307692
| 54
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/schema.py
|
from pyarrow import field, schema, string, timestamp, uint16, uint8, \
dictionary, int8, list_, struct, uint32
from pyarrow.dataset import partitioning
SERP_SCHEMA = schema(
fields=[
field(
"serp_id",
string(),
False,
metadata={
"description":
"Unique SERP ID (based on a hash of the URL and timestamp "
"of the SERP).",
},
),
field(
"serp_url",
string(),
False,
metadata={
"description": "Full URL of the SERP.",
},
),
field(
"serp_domain",
string(),
False,
metadata={
"description": "Domain of the SERP URL.",
},
),
field(
"serp_domain_public_suffix",
string(),
False,
metadata={
"description":
"Public suffix (https://publicsuffix.org/) of the SERP "
"domain.",
},
),
field(
"serp_timestamp",
timestamp("s"),
False,
metadata={
"description":
"Timestamp of the archived snapshot in the Wayback "
"Machine.",
},
),
field(
"serp_year",
uint16(),
False,
metadata={
"description":
"Year of the archived snapshot in the Wayback Machine.",
},
),
field(
"serp_month",
uint8(),
False,
metadata={
"description":
"Month of the archived snapshot in the Wayback Machine.",
},
),
field(
"serp_wayback_url",
string(),
False,
metadata={
"description":
"URL of the archived snapshot's contents in the Wayback "
"Machine.",
},
),
field(
"serp_wayback_raw_url",
string(),
False,
metadata={
"description":
"URL of the archived snapshot's raw contents in the "
"Wayback Machine.",
},
),
field(
"serp_page",
uint8(),
True,
metadata={
"description":
"SERP page number as parsed from the URL, e.g., 1, 2, "
"3 (zero-indexed).",
},
),
field(
"serp_offset",
uint16(),
True,
metadata={
"description":
"SERP results offset (start position) as parsed from the "
"URL, e.g., 10, 20 (zero-indexed).",
},
),
field(
"serp_query_text_url",
string(),
True,
metadata={
"description": "The SERP's query as parsed from the URL.",
},
),
field(
"serp_query_text_url_language",
dictionary(int8(), string()),
True,
metadata={
"description":
"Language identified in the query as parsed from the URL. "
"(Google's cld3; min threshold for 'hr' or 'bs': 0.5, for "
"others: 0.7.)",
},
),
field(
"serp_query_text_html",
string(),
True,
metadata={
"description":
"The SERP's query as parsed from the HTML contents. "
"(Can be different from the query parsed from the URL due "
"to spelling correction etc.)",
},
),
field(
"serp_warc_relative_path",
string(),
True,
metadata={
"description":
"Path of the SERP's WARC file relative to the corpus root "
"path.",
},
),
field(
"serp_warc_byte_offset",
uint32(),
True,
metadata={
"description":
"Position of the SERP's WARC record's first byte in the "
"compressed WARC file.",
},
),
field(
"serp_results",
list_(struct([
field(
"result_id",
string(),
False,
metadata={
"description":
"Unique document ID (based on a hash of the URL "
"and timestamp of the SERP and the result snippet "
"rank).",
},
),
field(
"result_url",
string(),
False,
metadata={
"description": "Full URL of the document.",
},
),
field(
"result_domain",
string(),
False,
metadata={
"description": "Domain of the document URL.",
},
),
field(
"result_domain_public_suffix",
string(),
False,
metadata={
"description":
"Public suffix (https://publicsuffix.org/) of the "
"document domain.",
},
),
field(
"result_wayback_url",
string(),
False,
metadata={
"description":
"URL of the document's nearest archived "
"snapshot's contents in the Wayback Machine. "
"Note that there might not be a snapshot for the "
"exact timestamp, but the Wayback Machine instead "
"redirects to the nearest available snapshot.",
},
),
field(
"result_wayback_raw_url",
string(),
False,
metadata={
"description":
"URL of the document's nearest archived "
"snapshot's raw contents in the Wayback Machine. "
"Note that there might not be a snapshot for the "
"exact timestamp, but the Wayback Machine instead "
"redirects to the nearest available snapshot.",
},
),
field(
"result_snippet_rank",
uint8(),
False,
metadata={
"description":
"Rank of the document's snippet on the SERP.",
},
),
field(
"result_snippet_title",
string(),
False,
metadata={
"description":
"Snippet title of the search result with optional "
"highlighting (normalized to ``<em>`` tags, other "
"tags removed).",
},
),
field(
"result_snippet_text",
string(),
True,
metadata={
"description":
"Snippet text of the search result with optional "
"highlighting (normalized to ``<em>`` tags, other "
"tags removed).",
},
),
field(
"result_warc_relative_path",
string(),
True,
metadata={
"description":
"Path of the SERP's WARC file relative to the "
"corpus root path.",
},
),
field(
"result_warc_byte_offset",
uint32(),
True,
metadata={
"description":
"Position of the SERP's WARC record's first byte "
"in the compressed WARC file.",
},
),
])),
True,
metadata={
"description":
"Retrieved results from the SERP in the same order as "
"they appear.",
},
),
field(
"search_provider_name",
string(),
False,
metadata={
"description":
"Search provider name (domain without the Public Suffix).",
},
),
field(
"search_provider_alexa_domain",
string(),
False,
metadata={
"description":
"Main domain of the search provider as it appears in "
"Alexa top-1M ranks.",
},
),
field(
"search_provider_alexa_domain_public_suffix",
string(),
False,
metadata={
"description":
"Public Suffix (https://publicsuffix.org/) of the search "
"provider's main domain.",
},
),
field(
"search_provider_alexa_rank",
uint32(),
True,
metadata={
"description":
"Rank of the search provider's main domain in fused Alexa "
"top-1M rankings.",
},
),
field(
"search_provider_category",
dictionary(uint8(), string()),
True,
metadata={
"description":
"Category of the search provider (manual annotation).",
},
),
],
metadata={
"description": "A single search engine result page.",
},
)
RESULT_SCHEMA = schema(
fields=[
field(
"result_id",
string(),
False,
metadata={
"description":
"Unique document ID (based on a hash of the URL and "
"timestamp of the SERP and the result snippet rank).",
},
),
field(
"result_url",
string(),
False,
metadata={
"description": "Full URL of the document.",
},
),
field(
"result_domain",
string(),
False,
metadata={
"description": "Domain of the document URL.",
},
),
field(
"result_domain_public_suffix",
string(),
False,
metadata={
"description":
"Public suffix (https://publicsuffix.org/) of the "
"document domain.",
},
),
field(
"result_wayback_url",
string(),
False,
metadata={
"description":
"URL of the document's nearest archived snapshot's "
"contents in the Wayback Machine. Note that there might "
"not be a snapshot for the exact timestamp, but the "
"Wayback Machine instead redirects to the nearest "
"available snapshot.",
},
),
field(
"result_wayback_raw_url",
string(),
False,
metadata={
"description":
"URL of the document's nearest archived snapshot's raw "
"contents in the Wayback Machine. Note that there might "
"not be a snapshot for the exact timestamp, but the "
"Wayback Machine instead redirects to the nearest "
"available snapshot.",
},
),
field(
"result_snippet_rank",
uint8(),
False,
metadata={
"description": "Rank of the document's snippet on the SERP.",
},
),
field(
"result_snippet_title",
string(),
False,
metadata={
"description":
"Snippet title of the search result with optional "
"highlighting (normalized to ``<em>`` tags, other tags "
"removed).",
},
),
field(
"result_snippet_text",
string(),
True,
metadata={
"description":
"Snippet text of the search result with optional "
"highlighting (normalized to ``<em>`` tags, other tags "
"removed).",
},
),
field(
"result_warc_relative_path",
string(),
True,
metadata={
"description":
"Path of the SERP's WARC file relative to the corpus root "
"path.",
},
),
field(
"result_warc_byte_offset",
uint32(),
True,
metadata={
"description":
"Position of the SERP's WARC record's first byte in the "
"compressed WARC file.",
},
),
field(
"serp_id",
string(),
False,
metadata={
"description":
"Unique SERP ID (based on a hash of the URL and timestamp "
"of the SERP).",
},
),
field(
"serp_url",
string(),
False,
metadata={
"description": "Full URL of the SERP.",
},
),
field(
"serp_domain",
string(),
False,
metadata={
"description": "Domain of the SERP URL.",
},
),
field(
"serp_domain_public_suffix",
string(),
False,
metadata={
"description":
"Public suffix (https://publicsuffix.org/) of the SERP "
"domain.",
},
),
field(
"serp_timestamp",
timestamp("s"),
False,
metadata={
"description":
"Timestamp of the archived snapshot in the Wayback "
"Machine.",
},
),
field(
"serp_year",
uint16(),
False,
metadata={
"description":
"Year of the archived snapshot in the Wayback Machine.",
},
),
field(
"serp_month",
uint8(),
False,
metadata={
"description":
"Month of the archived snapshot in the Wayback Machine.",
},
),
field(
"serp_wayback_url",
string(),
False,
metadata={
"description":
"URL of the archived snapshot's contents in the Wayback "
"Machine.",
},
),
field(
"serp_wayback_raw_url",
string(),
False,
metadata={
"description":
"URL of the archived snapshot's raw contents in the "
"Wayback Machine.",
},
),
field(
"serp_page",
uint8(),
True,
metadata={
"description":
"SERP page number as parsed from the URL, e.g., 1, 2, "
"3 (zero-indexed).",
},
),
field(
"serp_offset",
uint16(),
True,
metadata={
"description":
"SERP results offset (start position) as parsed from the "
"URL, e.g., 10, 20 (zero-indexed).",
},
),
field(
"serp_query_text_url",
string(),
True,
metadata={
"description": "The SERP's query as parsed from the URL.",
},
),
field(
"serp_query_text_url_language",
dictionary(int8(), string()),
True,
metadata={
"description":
"Language identified in the query as parsed from the URL. "
"(Google's cld3; min threshold for 'hr' or 'bs': 0.5, for "
"others: 0.7.)",
},
),
field(
"serp_query_text_html",
string(),
True,
metadata={
"description":
"The SERP's query as parsed from the HTML contents.",
},
),
field(
"serp_warc_relative_path",
string(),
True,
metadata={
"description":
"Path of the SERP's WARC file relative to the corpus root "
"path.",
},
),
field(
"serp_warc_byte_offset",
uint32(),
True,
metadata={
"description":
"Position of the SERP's WARC record's first byte in the "
"compressed WARC file.",
},
),
field(
"search_provider_name",
string(),
False,
metadata={
"description":
"Search provider name (domain without the Public Suffix).",
},
),
field(
"search_provider_alexa_domain",
string(),
False,
metadata={
"description":
"Main domain of the search provider as it appears in "
"Alexa top-1M ranks.",
},
),
field(
"search_provider_alexa_domain_public_suffix",
string(),
False,
metadata={
"description":
"Public Suffix (https://publicsuffix.org/) of the search "
"provider's main domain.",
},
),
field(
"search_provider_alexa_rank",
uint32(),
True,
metadata={
"description":
"Rank of the search provider's main domain in fused Alexa "
"top-1M rankings.",
},
),
field(
"search_provider_category",
dictionary(uint8(), string()),
True,
metadata={
"description":
"Category of the search provider (manual annotation).",
},
),
],
metadata={
"description": "A single result from a SERP.",
},
)
SERP_PARTITIONING = partitioning(
schema=schema(
fields=[
field(
"serp_domain_public_suffix",
string(),
False,
metadata={
"description":
"Public suffix (https://publicsuffix.org/) of the "
"SERP domain.",
},
),
field(
"serp_domain",
string(),
False,
metadata={
"description": "Domain of the SERP URL.",
},
),
field(
"serp_year",
uint16(),
False,
metadata={
"description":
"Year of the archived snapshot in the Wayback "
"Machine.",
},
),
field(
"serp_month",
uint8(),
False,
metadata={
"description":
"Month of the archived snapshot in the Wayback "
"Machine.",
},
),
],
),
flavor="hive",
)
RESULT_PARTITIONING = SERP_PARTITIONING
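# Hedged usage sketch (not part of the original module; the corpus path and filter
# values are assumptions): read a Hive-partitioned Parquet corpus written with
# SERP_SCHEMA, letting the partition columns prune which directories are scanned.
def _example_read_serps(base_dir="corpus/serps"):
    from pyarrow import dataset as ds
    serps = ds.dataset(
        base_dir,
        format="parquet",
        schema=SERP_SCHEMA,
        partitioning=SERP_PARTITIONING,
    )
    # Only partitions matching serp_domain/serp_year are read from disk.
    return serps.to_table(
        columns=["serp_id", "serp_query_text_url", "serp_timestamp"],
        filter=(ds.field("serp_domain") == "google.com")
        & (ds.field("serp_year") == 2021),
    )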
| 21,534
| 29.459689
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/main.py
|
from click import group
@group()
def main():
pass
| 56
| 7.142857
| 23
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/corpus.py
|
from concurrent.futures import ThreadPoolExecutor
from contextlib import ExitStack
from csv import writer
from datetime import datetime
from gzip import GzipFile
from pathlib import Path
from typing import Collection
from uuid import UUID
from click import option, BOOL
from tqdm.auto import tqdm
from archive_query_log import DATA_DIRECTORY_PATH
from archive_query_log.cli import main
from archive_query_log.cli.util import PathParam
from archive_query_log.index import ArchivedRawSerpIndex, \
ArchivedUrlIndex, ArchivedQueryUrlIndex, ArchivedParsedSerpIndex, \
ArchivedSearchResultSnippetIndex, ArchivedRawSearchResultIndex, \
LocatedRecord
from archive_query_log.model import ArchivedUrl, CorpusQueryUrl, \
ArchivedSearchResultSnippet, CorpusDocument, CorpusJsonlLocation, \
CorpusWarcLocation, ArchivedRawSerp, \
ArchivedQueryUrl, ArchivedParsedSerp, CorpusQuery, CorpusSearchResult
@main.command(
"corpus",
help="Generate corpus.",
)
@option(
"-d", "--data-directory", "--data-directory-path",
type=PathParam(
exists=True,
file_okay=False,
dir_okay=True,
writable=True,
readable=False,
resolve_path=True,
path_type=Path,
),
default=DATA_DIRECTORY_PATH
)
@option(
"-f", "--focused",
type=BOOL,
default=False,
is_flag=True,
)
@option(
"-q", "--queries",
type=BOOL,
default=False,
is_flag=True,
)
@option(
"-o", "--output-directory", "--output-directory-path",
type=PathParam(
exists=False,
file_okay=False,
dir_okay=True,
writable=True,
readable=True,
resolve_path=True,
path_type=Path,
),
required=False,
)
def corpus_command(
data_directory: Path,
focused: bool,
queries: bool,
output_directory: Path,
) -> None:
from archive_query_log.index import ArchivedUrlIndex, \
ArchivedQueryUrlIndex, ArchivedRawSerpIndex, ArchivedParsedSerpIndex, \
ArchivedSearchResultSnippetIndex, ArchivedRawSearchResultIndex
output_path: Path
if output_directory is not None:
output_path = output_directory
elif focused:
output_path = data_directory / "focused" / "corpus"
else:
output_path = data_directory / "corpus"
output_path.mkdir(parents=True, exist_ok=True)
timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
queries_path = output_path / f"queries-{timestamp}.jsonl.gz"
queries_path.touch(exist_ok=True)
queries_offsets_path = output_path / f"queries-{timestamp}.jsonl.offsets"
queries_offsets_path.touch(exist_ok=True)
documents_path = output_path / f"documents-{timestamp}.jsonl.gz"
documents_path.touch(exist_ok=True)
documents_offsets_path = \
output_path / f"documents-{timestamp}.jsonl.offsets"
documents_offsets_path.touch(exist_ok=True)
# Load indices.
with ExitStack() as stack:
archived_url_index = stack.enter_context(
ArchivedUrlIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_query_url_index = stack.enter_context(
ArchivedQueryUrlIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_raw_serp_index = stack.enter_context(
ArchivedRawSerpIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_parsed_serp_index = stack.enter_context(
ArchivedParsedSerpIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_search_result_snippet_index = stack.enter_context(
ArchivedSearchResultSnippetIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_raw_search_result_index = stack.enter_context(
ArchivedRawSearchResultIndex(
data_directory=data_directory,
focused=focused,
)
)
# archived_parsed_search_result_index = stack.enter_context(
# ArchivedParsedSearchResultIndex(
# data_directory=data_directory,
# focused=focused,
# )
# )
query_schema = CorpusQuery.schema()
document_schema = CorpusDocument.schema()
with queries_path.open("wb") as queries_file, \
queries_offsets_path.open("w") as queries_offsets_file, \
documents_path.open("wb") as documents_file, \
documents_offsets_path.open("w") as documents_offsets_file:
queries_offsets_writer = writer(queries_offsets_file)
documents_offsets_writer = writer(documents_offsets_file)
archived_ids: Collection[UUID]
if queries:
archived_ids = set(archived_query_url_index)
else:
archived_ids = set(archived_url_index)
archived_ids = tqdm(
archived_ids,
desc="Build corpus",
unit="ID",
)
for archived_id in archived_ids:
query_documents = _build_query_documents(
archived_url_index=archived_url_index,
archived_query_url_index=archived_query_url_index,
archived_raw_serp_index=archived_raw_serp_index,
archived_parsed_serp_index=archived_parsed_serp_index,
archived_search_result_snippet_index=(
archived_search_result_snippet_index
),
archived_raw_search_result_index=(
archived_raw_search_result_index
),
# archived_parsed_search_result_index=(
# archived_parsed_search_result_index
# ),
archived_id=archived_id,
)
if query_documents is None:
continue
query, documents = query_documents
queries_file_offset = queries_file.tell()
with GzipFile(
fileobj=queries_file,
mode="w",
) as queries_gzip_file:
queries_gzip_file.write(
f"{query_schema.dumps(query)}\n".encode("utf8")
)
queries_offsets_writer.writerow(
[str(query.id), str(queries_file_offset)]
)
for document in documents:
documents_file_offset = documents_file.tell()
with GzipFile(
fileobj=documents_file,
mode="w",
) as documents_gzip_file:
documents_gzip_file.write(
f"{document_schema.dumps(document)}\n".encode(
"utf8")
)
documents_offsets_writer.writerow(
[str(document.id), str(documents_file_offset)]
)
def _build_query_url(
archived_url_loc: LocatedRecord[
CorpusJsonlLocation, ArchivedUrl
],
archived_query_url_loc: LocatedRecord[
CorpusJsonlLocation, ArchivedQueryUrl
] | None,
archived_raw_serp_loc: LocatedRecord[
CorpusWarcLocation, ArchivedRawSerp
] | None,
archived_parsed_serp_loc: LocatedRecord[
CorpusJsonlLocation, ArchivedParsedSerp
] | None,
) -> CorpusQueryUrl | None:
archived_url = archived_url_loc.record
return CorpusQueryUrl(
id=archived_url.id,
url=archived_url.url,
timestamp=archived_url.timestamp,
wayback_url=archived_url.archive_url,
wayback_raw_url=archived_url.raw_archive_url,
url_query=(
archived_query_url_loc.record.query
if archived_query_url_loc is not None else None
),
url_page=(
archived_query_url_loc.record.page
if archived_query_url_loc is not None else None
),
url_offset=(
archived_query_url_loc.record.offset
if archived_query_url_loc is not None else None
),
serp_query=(
archived_parsed_serp_loc.record.interpreted_query
if archived_parsed_serp_loc is not None else None
),
archived_url_location=(
archived_url_loc.location
if archived_url_loc is not None else None
),
archived_query_url_location=(
archived_query_url_loc.location
if archived_query_url_loc is not None else None
),
archived_raw_serp_location=(
archived_raw_serp_loc.location
if archived_raw_serp_loc is not None else None
),
archived_parsed_serp_location=(
archived_parsed_serp_loc.location
if archived_parsed_serp_loc is not None else None
),
)
def _build_search_result(
archived_search_result_snippet_index: ArchivedSearchResultSnippetIndex,
archived_raw_search_result_index: ArchivedRawSearchResultIndex,
# archived_parsed_search_result_index: ArchivedParsedSearchResultIndex,
archived_search_result_snippet: ArchivedSearchResultSnippet,
) -> CorpusSearchResult:
archived_snippet_loc = archived_search_result_snippet_index \
.get(archived_search_result_snippet.id)
archived_raw_search_result_loc = archived_raw_search_result_index \
.get(archived_search_result_snippet.id)
# archived_parsed_search_result_loc = archived_parsed_search_result_index \
# .get(archived_search_result_snippet.id)
# archived_parsed_search_result_loc = None
return CorpusSearchResult(
id=archived_search_result_snippet.id,
url=archived_search_result_snippet.url,
timestamp=archived_search_result_snippet.timestamp,
wayback_url=archived_search_result_snippet.archive_url,
wayback_raw_url=archived_search_result_snippet.raw_archive_url,
snippet_rank=archived_search_result_snippet.rank,
snippet_title=archived_search_result_snippet.title,
snippet_text=archived_search_result_snippet.snippet,
archived_snippet_location=archived_snippet_loc.location,
archived_raw_search_result_location=(
archived_raw_search_result_loc.location
if archived_raw_search_result_loc is not None else None
),
archived_parsed_search_result_location=None,
# archived_parsed_search_result_location=(
# archived_parsed_search_result_loc.location
# if archived_parsed_search_result_loc is not None else None
# ),
)
def _build_query_documents(
archived_url_index: ArchivedUrlIndex,
archived_query_url_index: ArchivedQueryUrlIndex,
archived_raw_serp_index: ArchivedRawSerpIndex,
archived_parsed_serp_index: ArchivedParsedSerpIndex,
archived_search_result_snippet_index: ArchivedSearchResultSnippetIndex,
archived_raw_search_result_index: ArchivedRawSearchResultIndex,
# archived_parsed_search_result_index: ArchivedParsedSearchResultIndex,
archived_id: UUID,
) -> tuple[CorpusQuery, list[CorpusDocument]] | None:
archived_url_loc = archived_url_index.get(archived_id)
archived_query_url_loc = archived_query_url_index.get(archived_id)
archived_raw_serp_loc = archived_raw_serp_index.get(archived_id)
archived_parsed_serp_loc = archived_parsed_serp_index.get(archived_id)
if archived_url_loc is None:
return None
query_url = _build_query_url(
archived_url_loc,
archived_query_url_loc,
archived_raw_serp_loc,
archived_parsed_serp_loc,
)
snippets = archived_parsed_serp_loc.record.results \
if archived_parsed_serp_loc is not None else []
path_components = archived_url_loc.location.relative_path.parts
service = path_components[2] \
if path_components[0] == "focused" else path_components[1]
def convert_search_result(
archived_search_result_snippet: ArchivedSearchResultSnippet
) -> CorpusSearchResult:
return _build_search_result(
archived_search_result_snippet_index,
archived_raw_search_result_index,
# archived_parsed_search_result_index,
archived_search_result_snippet,
)
with ThreadPoolExecutor() as executor:
results = list(executor.map(convert_search_result, snippets))
query = CorpusQuery(
service=service,
id=query_url.id,
url=query_url.url,
timestamp=query_url.timestamp,
wayback_url=query_url.wayback_url,
wayback_raw_url=query_url.wayback_raw_url,
url_query=query_url.url_query,
url_page=query_url.url_page,
url_offset=query_url.url_offset,
serp_query=query_url.serp_query,
archived_url_location=query_url.archived_url_location,
archived_query_url_location=query_url.archived_query_url_location,
archived_raw_serp_location=query_url.archived_raw_serp_location,
archived_parsed_serp_location=query_url.archived_parsed_serp_location,
results=results,
)
documents = [
CorpusDocument(
service=service,
id=result.id,
url=result.url,
timestamp=result.timestamp,
wayback_url=result.wayback_url,
wayback_raw_url=result.wayback_raw_url,
query=query,
snippet_rank=result.snippet_rank,
snippet_title=result.snippet_title,
snippet_text=result.snippet_text,
archived_snippet_location=result.archived_snippet_location,
archived_raw_search_result_location=(
result.archived_raw_search_result_location
),
archived_parsed_search_result_location=None,
# archived_parsed_search_result_location=(
# result.archived_parsed_search_result_location
# ),
)
for result in results
]
return query, documents
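# Hedged lookup sketch (not part of the original module; it reuses GzipFile and Path
# imported above). The corpus writer stores each query as its own gzip member, so a
# single record can be decompressed by seeking to the byte offset recorded in the
# accompanying queries-*.jsonl.offsets CSV.
def _example_read_query_at_offset(queries_path: Path, offset: int) -> CorpusQuery:
    with queries_path.open("rb") as raw_file:
        raw_file.seek(offset)
        with GzipFile(fileobj=raw_file, mode="r") as gzip_file:
            line = gzip_file.readline().decode("utf8")
    return CorpusQuery.schema().loads(line)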
| 14,579
| 36.96875
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/stats.py
|
from asyncio import run
from gzip import open as gzip_open
from json import loads
from math import inf
from pathlib import Path
from click import option, BOOL, IntRange
from pandas import DataFrame
from tqdm.auto import tqdm
from archive_query_log import DATA_DIRECTORY_PATH, CDX_API_URL, LOGGER
from archive_query_log.cli import main
from archive_query_log.cli.util import PathParam
from archive_query_log.config import SERVICES
# See:
# https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md#pagination-api
_URLS_PER_BLOCK = 3000
_BLOCKS_PER_PAGE = 50
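# Worked example of the upper-bound estimate computed in _all_archived_urls() below
# (an illustration, not measured data): a service whose CDX listing spans 4 pages is
# counted as at most 4 pages * 50 blocks/page * 3000 URLs/block = 600,000 archived URLs.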
def _all_archived_urls(
data_directory: Path,
focused: bool,
service: str,
) -> int:
from archive_query_log.config import SERVICES
from archive_query_log.urls.fetch import ArchivedUrlsFetcher, \
UrlMatchScope
service_config = SERVICES[service]
match_scope = UrlMatchScope.PREFIX if focused else UrlMatchScope.DOMAIN
fetcher = ArchivedUrlsFetcher(
match_scope=match_scope,
include_status_codes={200},
exclude_status_codes=set(),
include_mime_types={"text/html"},
exclude_mime_types=set(),
cdx_api_url=CDX_API_URL,
)
if focused:
if len(service_config.focused_url_prefixes) == 0:
LOGGER.warning(
f"No focused URL prefixes configured for service {service}."
)
num_pages = run(fetcher.num_service_pages(
data_directory=data_directory,
focused=focused,
service=service_config,
))
return num_pages * _BLOCKS_PER_PAGE * _URLS_PER_BLOCK
@main.command(
"stats",
help="Get stats for the most recent exported corpus.",
)
@option(
"-d", "--data-directory", "--data-directory-path",
type=PathParam(
exists=True,
file_okay=False,
dir_okay=True,
writable=True,
readable=False,
resolve_path=True,
path_type=Path,
),
default=DATA_DIRECTORY_PATH
)
@option(
"-f", "--focused",
type=BOOL,
default=False,
is_flag=True,
)
@option(
"--min-rank", "--min-alexa-rank",
type=IntRange(min=1),
required=False,
)
@option(
"--max-rank", "--max-alexa-rank",
type=IntRange(min=1),
required=False,
)
@option(
"-c", "--corpus-directory", "--corpus-directory-path",
type=PathParam(
exists=False,
file_okay=False,
dir_okay=True,
writable=True,
readable=True,
resolve_path=True,
path_type=Path,
),
required=False,
)
@option(
"-o", "--output", "--output-path",
type=PathParam(
exists=False,
file_okay=True,
dir_okay=False,
writable=True,
readable=False,
resolve_path=True,
path_type=Path,
),
required=False,
)
def stats_command(
data_directory: Path,
focused: bool,
min_rank: int | None,
max_rank: int | None,
corpus_directory: Path | None,
output: Path | None,
) -> None:
services = SERVICES.values()
if min_rank is not None:
services = (
service
for service in services
if (service.alexa_rank is not None and
service.alexa_rank >= min_rank)
)
if max_rank is not None:
services = (
service
for service in services
if (service.alexa_rank is not None and
service.alexa_rank <= max_rank)
)
services = sorted(services, key=lambda service: service.alexa_rank or inf)
results: dict[str, dict[str, int]] = {
service.name: {
"all-archived-urls": 0,
"archived-urls": 0,
"archived-query-urls": 0,
"archived-raw-serps": 0,
"archived-parsed-serps": 0,
"archived-snippets": 0,
"archived-raw-search-results": 0,
"archived-parsed-search-results": 0,
}
for service in services
}
for service in services:
results[service.name]["all-archived-urls"] = _all_archived_urls(
data_directory,
focused,
service.name,
)
corpus_path: Path
if corpus_directory is not None:
corpus_path = corpus_directory
elif focused:
corpus_path = data_directory / "focused" / "corpus"
else:
corpus_path = data_directory / "corpus"
if corpus_path.exists():
queries_paths = sorted(
corpus_path.glob("queries-*.jsonl.gz"),
reverse=True,
)
documents_paths = sorted(
corpus_path.glob("documents-*.jsonl.gz"),
reverse=True,
)
if len(queries_paths) > 0 and len(documents_paths) > 0:
queries_path = queries_paths[0]
documents_path = documents_paths[0]
with gzip_open(queries_path, "rt") as queries_file:
lines = tqdm(
queries_file,
desc="Read queries corpus"
)
for line in lines:
query = loads(line)
service_name = query["service"]
if query["archived_url_location"] is not None:
results[service_name]["archived-urls"] += 1
if query["archived_query_url_location"] is not None:
results[service_name]["archived-query-urls"] += 1
if query["archived_raw_serp_location"] is not None:
results[service_name]["archived-raw-serps"] += 1
if query["archived_parsed_serp_location"] is not None:
results[service_name]["archived-parsed-serps"] += 1
with gzip_open(documents_path, "rt") as documents_file:
lines = tqdm(
documents_file,
desc="Read documents corpus"
)
for line in lines:
document = loads(line)
service_name = document["service"]
if document["archived_snippet_location"] is not None:
results[service_name]["archived-snippets"] += 1
if document[
"archived_raw_search_result_location"
] is not None:
results[service_name][
"archived-raw-search-results"] += 1
if document[
"archived_parsed_search_result_location"
] is not None:
results[service_name][
"archived-parsed-search-results"] += 1
output_path: Path
if output is not None:
output_path = output
elif focused:
output_path = data_directory / "focused" / "stats.csv"
else:
output_path = data_directory / "stats.csv"
df = DataFrame([
{
"service": service_name,
**service_results,
}
for service_name, service_results in results.items()
])
df.to_csv(output_path, index=False)
| 7,159
| 29.729614
| 100
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/external.py
|
from json import loads
from math import nan
from pathlib import Path
from re import compile, escape
from urllib.parse import quote
from click import argument
from pandas import DataFrame, read_csv, Series, concat
from yaml import dump
from archive_query_log import DATA_DIRECTORY_PATH
from archive_query_log.cli import main
from archive_query_log.cli.util import PathParam
sheets_id = "1LnIJYFBYQtZ32rxnT6RPGMOvuRIUQMoEx7tOS0z7Mi8"
sheet_services = "Services"
sheet_domains = "Domains"
sheet_url_prefixes = "URL Prefixes"
sheet_query_parsers = "Query Parsers"
sheet_page_parsers = "Page Parsers"
@main.group("external")
def external():
pass
def from_sheets(sheet_name: str, transpose: bool = False) -> DataFrame:
url = f"https://docs.google.com/spreadsheets/d/{sheets_id}/" \
f"gviz/tq?tqx=out:csv&sheet={quote(sheet_name)}"
if transpose:
df = read_csv(
url, low_memory=False, na_values=[""], keep_default_na=False,
)
return DataFrame([
{
"name": column,
"value": value,
}
for column in df.columns
for value in df[column].dropna()
])
else:
return read_csv(url)
def load_services() -> DataFrame:
df = from_sheets(sheet_services)
df = df[~df["service"].str.contains(".", regex=False)]
df["name"] = df["service"]
df["public_suffix"] = df["tld"]
df["alexa_domain"] = df["name"] + "." + df["public_suffix"]
df["alexa_rank"] = df["rank"]
df["notes"] = df["Notes"]
for col in ["has_input_field", "has_search_form", "has_search_div"]:
df[col.removeprefix("has_")] = df[col].replace("FALSCH", False)
df[col].replace("False", False, inplace=True)
df[col].replace("True", True, inplace=True)
df["alexa_rank"].astype(int, copy=False)
df["alexa_rank"].replace(99999, nan, inplace=True)
return df[["name", "public_suffix", "alexa_domain", "alexa_rank",
"category", "notes", "input_field",
"search_form", "search_div"]]
def load_domains() -> DataFrame:
df = from_sheets(sheet_domains, transpose=True)
df["domain"] = df["value"]
return df[["name", "domain"]]
def url_prefix_pattern(url_prefix: str) -> str | None:
if url_prefix == "":
return None
return f"^https?://[^/]+/{escape(url_prefix)}"
compile(r"[^/]+/images/search\?")
def load_url_prefixes() -> DataFrame:
df = from_sheets(sheet_url_prefixes, transpose=True)
df["value"].replace("NULL", "", inplace=True)
df["pattern"] = df["value"].map(url_prefix_pattern)
return df[["name", "pattern"]]
def load_query_parsers() -> DataFrame:
df = from_sheets(sheet_query_parsers, transpose=True)
df["query_parser"] = df["value"]
return df[["name", "query_parser"]]
def load_page_offset_parsers() -> DataFrame:
df = from_sheets(sheet_page_parsers, transpose=True)
df["value"].replace("NULL", "{}", inplace=True)
df["page_offset_parser"] = df["value"]
return df[["name", "page_offset_parser"]]
def service_domains(domains: DataFrame, service: Series) -> list[str]:
return sorted(
set(list(domains[domains["name"] == service["name"]]["domain"])) | {
service["alexa_domain"]})
def query_parser(row: Series) -> dict:
row = row.to_dict()
row.update(loads(row["query_parser"]))
url_pattern = "" if row["pattern"] is None else row["pattern"]
if row["type"] == "qp":
return {
"url_pattern": url_pattern,
"type": "query_parameter",
"parameter": row["key"]
}
elif row["type"] == "fp":
return {
"url_pattern": url_pattern,
"type": "fragment_parameter",
"parameter": row["key"]
}
elif row["type"] == "ps":
return {
"url_pattern": url_pattern,
"type": "path_suffix",
"path_prefix": row["key"]
}
else:
raise NotImplementedError()
page_offset_parser_map = {"parameter": "query_parameter",
"suffix": "path_suffix",
"fragment": "fragment_parameter"}
def page_offset_parser(row: Series, count="results") -> dict:
row = row.to_dict()
row.update(loads(row["page_offset_parser"]))
if row["count"] == count:
url_pattern = "" if row["pattern"] is None else row["pattern"]
return {
"url_pattern": url_pattern,
"type": page_offset_parser_map[row["type"]],
"parameter": row["key"]
}
else:
raise NotImplementedError()
def page_offset_parser_series(page_offset_parsers, services, count):
return [
sorted((
page_offset_parser(row, count=count)
for _, row in
page_offset_parsers[
(page_offset_parsers["name"].str.fullmatch(service["name"])) &
(page_offset_parsers["page_offset_parser"].str.contains(
f'"count": "{count}"'
))
].iterrows()
), key=lambda pp: str(pp["url_pattern"]))
for _, service in services.iterrows()
]
@external.command("import-services")
@argument(
"services-file",
type=PathParam(
exists=False,
file_okay=True,
dir_okay=False,
writable=True,
readable=False,
resolve_path=True,
path_type=Path,
),
default=DATA_DIRECTORY_PATH / "services.yaml",
)
def import_services(services_file: Path):
services = load_services()
domains = load_domains()
services["domains"] = [
service_domains(domains, row)
for _, row in services.iterrows()
]
query_parsers = concat(
[
load_url_prefixes(),
load_query_parsers()[["query_parser"]]
],
axis="columns")
services["query_parsers"] = [
sorted((
query_parser(row)
for _, row in
query_parsers[
query_parsers["name"].str.endswith(service["name"])
].iterrows()
), key=lambda qp: str(qp["url_pattern"]))
for _, service in services.iterrows()
]
page_offset_parsers = concat(
[
load_url_prefixes(),
load_page_offset_parsers()[["page_offset_parser"]]
],
axis="columns")
services["page_parsers"] = page_offset_parser_series(
page_offset_parsers, services, count="pages"
)
services["offset_parsers"] = page_offset_parser_series(
page_offset_parsers, services, count="results"
)
services["interpreted_query_parsers"] = [
[]
for _, service in services.iterrows()
]
services["results_parsers"] = [
[]
for _, service in services.iterrows()
]
services.replace({nan: None}, inplace=True)
services_dict = services.to_dict(orient="records")
with services_file.open("wt") as file:
dump(services_dict, stream=file, sort_keys=False)
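# Hedged usage note (not part of the original module; the exact invocation is an
# assumption): the importer defined above is exposed through the CLI group, e.g.
#   python -m archive_query_log external import-services data/services.yaml
# The SERVICES-FILE argument is optional and defaults to
# DATA_DIRECTORY_PATH / "services.yaml".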
| 7,019
| 29.655022
| 78
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/alexa.py
|
from pathlib import Path
from click import option, Path as PathParam, argument, IntRange
from archive_query_log import DATA_DIRECTORY_PATH, CDX_API_URL
from archive_query_log.cli.main import main
from archive_query_log.cli.util import URL
@main.group("alexa")
def alexa():
pass
@alexa.command("fetch-archived-urls")
@option(
"-u", "--api-url", "--cdx-api-url",
type=URL,
default=CDX_API_URL,
)
@argument(
"output-path",
type=PathParam(
exists=False,
file_okay=True,
dir_okay=False,
writable=True,
readable=False,
resolve_path=True,
path_type=Path,
),
default=DATA_DIRECTORY_PATH / "alexa-top-1m-archived-urls.jsonl"
)
def archived_urls(api_url: str, output_path: Path) -> None:
from archive_query_log.services.alexa import AlexaTop1MArchivedUrls
AlexaTop1MArchivedUrls(
output_path=output_path,
cdx_api_url=api_url,
).fetch()
@alexa.command("fuse-domains")
@option(
"-d", "--data-dir",
type=PathParam(
exists=True,
file_okay=False,
dir_okay=True,
writable=True,
readable=True,
resolve_path=True,
path_type=Path,
),
default=DATA_DIRECTORY_PATH
)
@option(
"-u", "--api-url", "--cdx-api-url",
type=URL,
default=CDX_API_URL,
)
@option(
"-k", "--depth",
type=IntRange(min=1),
default=1000,
)
def domains(data_dir: Path, api_url: str, depth: int) -> None:
from archive_query_log.services.alexa import AlexaTop1MFusedDomains
AlexaTop1MFusedDomains(
data_directory_path=data_dir,
cdx_api_url=api_url,
max_domains_per_ranking=depth,
).fetch()
| 1,690
| 22.164384
| 71
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/util.py
|
from typing import Dict, Any, List
from urllib.parse import urlparse
from click import Parameter, Context
from click.shell_completion import CompletionItem
from click.types import StringParamType, Path, Choice
class UrlParam(StringParamType):
name = "url"
def convert(self, value, param, ctx):
value = super().convert(value, param, ctx)
if value is None:
return None
tokens = urlparse(value)
if not tokens.scheme or not tokens.netloc:
self.fail(f"{value} is not a valid URL", param, ctx)
return value
URL = UrlParam()
PathParam = Path
class ServiceChoice(Choice):
def __init__(self) -> None:
super().__init__(choices=[], case_sensitive=False)
def _ensure_choices(self):
if len(self.choices) == 0:
from archive_query_log.config import SERVICES
self.choices = sorted(SERVICES.keys())
def to_info_dict(self) -> Dict[str, Any]:
self._ensure_choices()
return super().to_info_dict()
def get_metavar(self, param: Parameter) -> str:
self._ensure_choices()
return super().get_metavar(param)
def get_missing_message(self, param: Parameter) -> str:
self._ensure_choices()
return super().get_missing_message(param)
def convert(
self,
value: Any,
param: Parameter | None,
ctx: Context | None,
) -> Any:
self._ensure_choices()
return super().convert(value, param, ctx)
def __repr__(self) -> str:
self._ensure_choices()
return super().__repr__()
def shell_complete(
self,
ctx: Context,
param: Parameter,
incomplete: str,
) -> List[CompletionItem]:
self._ensure_choices()
return super().shell_complete(ctx, param, incomplete)
| 1,871
| 25.742857
| 64
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/__init__.py
|
# flake8: noqa
from archive_query_log.cli.main import main
from archive_query_log.cli.alexa import alexa
from archive_query_log.cli.external import external
from archive_query_log.cli.make import make_group
from archive_query_log.cli.stats import stats_command
from archive_query_log.cli.corpus import corpus_command
from archive_query_log.cli.index import index_command
| 371
| 40.333333
| 55
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/make.py
|
from asyncio import run
from pathlib import Path
from click import option, argument, STRING, IntRange, BOOL
from archive_query_log import DATA_DIRECTORY_PATH, CDX_API_URL, LOGGER
from archive_query_log.cli import main
from archive_query_log.cli.util import PathParam, ServiceChoice
@main.group("make")
def make_group():
pass
def _data_directory_option():
return option(
"-d", "--data-directory", "--data-directory-path",
type=PathParam(
exists=True,
file_okay=False,
dir_okay=True,
writable=True,
readable=False,
resolve_path=True,
path_type=Path,
),
default=DATA_DIRECTORY_PATH
)
def _focused_argument():
return option(
"-f", "--focused",
type=BOOL,
default=False,
is_flag=True,
)
def _service_name_argument():
return argument(
"service",
type=ServiceChoice(),
required=True,
)
def _domain_argument():
return argument(
"domain",
type=STRING,
required=False,
)
def _cdx_page_argument():
return argument(
"cdx_page",
type=IntRange(min=0),
required=False,
)
@make_group.command(
"archived-urls",
help="Fetch archived URLs from the Wayback Machine's CDX API.",
)
@_data_directory_option()
@_focused_argument()
@_service_name_argument()
@_domain_argument()
@_cdx_page_argument()
def archived_urls_command(
data_directory: Path,
focused: bool,
service: str,
domain: str | None,
cdx_page: int | None,
) -> None:
from archive_query_log.config import SERVICES
from archive_query_log.urls.fetch import ArchivedUrlsFetcher, \
UrlMatchScope
service_config = SERVICES[service]
match_scope = UrlMatchScope.PREFIX if focused else UrlMatchScope.DOMAIN
fetcher = ArchivedUrlsFetcher(
match_scope=match_scope,
include_status_codes={200},
exclude_status_codes=set(),
include_mime_types={"text/html"},
exclude_mime_types=set(),
cdx_api_url=CDX_API_URL
)
if focused:
if len(service_config.focused_url_prefixes) == 0:
LOGGER.warning(
f"No focused URL prefixes configured for service {service}."
)
data_directory = data_directory / "focused"
run(fetcher.fetch_service(
data_directory=data_directory,
focused=focused,
service=service_config,
domain=domain,
cdx_page=cdx_page,
))
@make_group.command(
"archived-query-urls",
help="Parse queries from fetched archived URLs.",
)
@_data_directory_option()
@_focused_argument()
@_service_name_argument()
@_domain_argument()
@_cdx_page_argument()
def archived_query_urls_command(
data_directory: Path,
focused: bool,
service: str,
domain: str | None,
cdx_page: int | None,
) -> None:
from archive_query_log.config import SERVICES
from archive_query_log.queries.parse import ArchivedQueryUrlParser
service_config = SERVICES[service]
if len(service_config.query_parsers) == 0:
LOGGER.warning(
f"No query parsers configured for service {service}."
)
if len(service_config.page_parsers) == 0 \
and len(service_config.offset_parsers) == 0:
LOGGER.warning(
f"No page or offset parsers configured for service {service}."
)
parser = ArchivedQueryUrlParser(
query_parsers=service_config.query_parsers,
page_parsers=service_config.page_parsers,
offset_parsers=service_config.offset_parsers,
)
if focused:
data_directory = data_directory / "focused"
parser.parse_service(
data_directory=data_directory,
focused=focused,
service=service_config,
domain=domain,
cdx_page=cdx_page,
)
@make_group.command(
"archived-raw-serps",
help="Download raw SERP contents (as WARC files) for parsed queries.",
)
@_data_directory_option()
@_focused_argument()
@_service_name_argument()
@_domain_argument()
@_cdx_page_argument()
def archived_raw_serps_command(
data_directory: Path,
focused: bool,
service: str,
domain: str | None,
cdx_page: int | None,
) -> None:
from archive_query_log.config import SERVICES
from archive_query_log.download.warc import WebArchiveWarcDownloader
service_config = SERVICES[service]
downloader = WebArchiveWarcDownloader(verbose=True)
if focused:
data_directory = data_directory / "focused"
run(downloader.download_service(
data_directory=data_directory,
focused=focused,
service=service_config,
domain=domain,
cdx_page=cdx_page,
))
@make_group.command(
"archived-parsed-serps",
help="Parse SERP results from raw SERPs.",
)
@_data_directory_option()
@_focused_argument()
@_service_name_argument()
@_domain_argument()
@_cdx_page_argument()
def archived_parsed_serps_command(
data_directory: Path,
focused: bool,
service: str,
domain: str | None,
cdx_page: int | None,
) -> None:
from archive_query_log.config import SERVICES
from archive_query_log.results.parse import ArchivedParsedSerpParser
service_config = SERVICES[service]
if len(service_config.results_parsers) == 0:
LOGGER.warning(
f"No result parsers configured for service {service}."
)
if len(service_config.interpreted_query_parsers) == 0:
LOGGER.warning(
f"No interpreted query parsers configured for service {service}."
)
parser = ArchivedParsedSerpParser(
results_parsers=service_config.results_parsers,
interpreted_query_parsers=service_config.interpreted_query_parsers,
)
if focused:
data_directory = data_directory / "focused"
parser.parse_service(
data_directory=data_directory,
focused=focused,
service=service_config,
domain=domain,
cdx_page=cdx_page,
)
@make_group.command(
"archived-raw-search-results",
help="Download raw search result contents (as WARC files) "
"for parsed SERPs.",
)
@_data_directory_option()
@_focused_argument()
@_service_name_argument()
@_domain_argument()
@_cdx_page_argument()
def archived_raw_search_results_command(
data_directory: Path,
focused: bool,
service: str,
domain: str | None,
cdx_page: int | None,
) -> None:
from archive_query_log.config import SERVICES
from archive_query_log.download.warc import WebArchiveWarcDownloader
service_config = SERVICES[service]
downloader = WebArchiveWarcDownloader(verbose=True)
if focused:
data_directory = data_directory / "focused"
run(downloader.download_service(
data_directory=data_directory,
focused=focused,
service=service_config,
domain=domain,
cdx_page=cdx_page,
snippets=True,
))
@make_group.command(
"archived-parsed-search-results",
help="Parse search results from raw search result contents.",
)
@_data_directory_option()
@_focused_argument()
@_service_name_argument()
@_domain_argument()
@_cdx_page_argument()
def archived_parsed_search_results_command(
data_directory: Path,
focused: bool,
service: str,
domain: str | None,
cdx_page: int | None,
) -> None:
raise NotImplementedError()
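# Hedged usage note (not part of the original module; the service name "google" is an
# assumption): each stage above is a subcommand of the "make" group, e.g.
#   python -m archive_query_log make archived-urls google
#   python -m archive_query_log make archived-query-urls --focused google
# The optional positional DOMAIN and CDX_PAGE arguments narrow a run to a single
# domain or CDX result page.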
| 7,546
| 26.344203
| 77
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/cli/index.py
|
from concurrent.futures import ThreadPoolExecutor
from contextlib import ExitStack
from pathlib import Path
from click import option, BOOL
from tqdm.auto import tqdm
from archive_query_log import DATA_DIRECTORY_PATH
from archive_query_log.cli import main
from archive_query_log.cli.util import PathParam
from archive_query_log.index import ArchivedRawSerpIndex, \
ArchivedUrlIndex, ArchivedQueryUrlIndex, ArchivedParsedSerpIndex, \
ArchivedSearchResultSnippetIndex, ArchivedRawSearchResultIndex
@main.command(
"index",
help="Generate helper indices.",
)
@option(
"-d", "--data-directory", "--data-directory-path",
type=PathParam(
exists=True,
file_okay=False,
dir_okay=True,
writable=True,
readable=False,
resolve_path=True,
path_type=Path,
),
default=DATA_DIRECTORY_PATH
)
@option(
"-f", "--focused",
type=BOOL,
default=False,
is_flag=True,
)
def index_command(
data_directory: Path,
focused: bool,
) -> None:
with ExitStack() as stack:
archived_url_index = stack.enter_context(
ArchivedUrlIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_query_url_index = stack.enter_context(
ArchivedQueryUrlIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_raw_serp_index = stack.enter_context(
ArchivedRawSerpIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_parsed_serp_index = stack.enter_context(
ArchivedParsedSerpIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_search_result_snippet_index = stack.enter_context(
ArchivedSearchResultSnippetIndex(
data_directory=data_directory,
focused=focused,
)
)
archived_raw_search_result_index = stack.enter_context(
ArchivedRawSearchResultIndex(
data_directory=data_directory,
focused=focused,
)
)
# archived_parsed_search_result_index = stack.enter_context(
# ArchivedParsedSearchResultIndex(
# data_directory=data_directory,
# focused=focused,
# )
# )
indexes = [
archived_url_index,
archived_query_url_index,
archived_raw_serp_index,
archived_parsed_serp_index,
archived_search_result_snippet_index,
archived_raw_search_result_index,
# archived_parsed_search_result_index,
]
pool = ThreadPoolExecutor()
progress = tqdm(
total=len(indexes),
desc="Build indices",
unit="index",
)
def run_index(index) -> None:
index.index()
progress.update()
for _ in pool.map(run_index, indexes):
pass
| 3,141
| 27.825688
| 71
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/index/__init__.py
|
from abc import ABC, abstractmethod
from csv import writer
from dataclasses import dataclass
from functools import cached_property
from gzip import GzipFile
from io import TextIOWrapper
from json import loads, JSONDecodeError
from pathlib import Path
from shelve import open as shelf_open, Shelf
from shutil import copyfileobj
from typing import Iterator, TypeVar, Generic, Type, IO, final, ContextManager
from uuid import UUID, uuid5, NAMESPACE_URL
from dataclasses_json import DataClassJsonMixin
from fastwarc import ArchiveIterator, FileStream, WarcRecord, \
WarcRecordType
# pylint: disable=no-name-in-module
from fastwarc.stream_io import PythonIOStreamAdapter
from marshmallow import Schema
from tqdm.auto import tqdm
from archive_query_log import DATA_DIRECTORY_PATH, LOGGER
from archive_query_log.model import ArchivedUrl, ArchivedQueryUrl, \
ArchivedParsedSerp, ArchivedSearchResultSnippet, ArchivedRawSerp, \
ArchivedRawSearchResult, CorpusJsonlLocation, CorpusJsonlSnippetLocation, \
CorpusWarcLocation
from archive_query_log.util.text import count_lines
@dataclass(frozen=True)
class _MetaIndex:
base_type: str
data_directory: Path = DATA_DIRECTORY_PATH
focused: bool = False
@cached_property
def base_path(self) -> Path:
base_path = self.data_directory
if self.focused:
base_path /= "focused"
if self.base_type == "archived-search-result-snippets":
base_path /= "archived-parsed-serps"
else:
base_path /= self.base_type
return base_path
def _is_indexable_path(self, path: Path) -> bool:
if self.base_type == "archived-urls":
return path.is_file() and path.name.endswith(".jsonl.gz")
elif self.base_type == "archived-query-urls":
return path.is_file() and path.name.endswith(".jsonl.gz")
elif self.base_type == "archived-raw-serps":
return path.is_dir()
elif self.base_type == "archived-parsed-serps":
return path.is_file() and path.name.endswith(".jsonl.gz")
elif self.base_type == "archived-search-result-snippets":
return path.is_file() and path.name.endswith(".jsonl.gz")
elif self.base_type == "archived-raw-search-results":
return path.is_dir()
elif self.base_type == "archived-parsed-search-results":
return path.is_file() and path.name.endswith(".jsonl.gz")
else:
raise ValueError(f"Unknown base type: {self.base_type}")
def _indexable_paths(self) -> Iterator[Path]:
base_path = self.base_path
if not base_path.exists():
return
for service_path in base_path.iterdir():
if not service_path.is_dir():
continue
if service_path.name.startswith("."):
continue
for pattern_path in service_path.iterdir():
if (not pattern_path.is_dir() or
pattern_path.name.startswith(".")):
continue
for path in pattern_path.iterdir():
if self._is_indexable_path(path):
yield path
def _index_path(self, path: Path) -> Path:
if self.base_type == "archived-urls":
return path.with_name(
f"{path.name.removesuffix('.jsonl.gz')}.index"
)
elif self.base_type == "archived-query-urls":
return path.with_name(
f"{path.name.removesuffix('.jsonl.gz')}.index"
)
elif self.base_type == "archived-raw-serps":
return path.with_name(
f"{path.name}.index"
)
elif self.base_type == "archived-parsed-serps":
return path.with_name(
f"{path.name.removesuffix('.jsonl.gz')}.index"
)
elif self.base_type == "archived-search-result-snippets":
return path.with_name(
f"{path.name.removesuffix('.jsonl.gz')}.snippets.index"
)
elif self.base_type == "archived-raw-search-results":
return path.with_name(
f"{path.name}.index"
)
elif self.base_type == "archived-parsed-search-results":
return path.with_name(
f"{path.name.removesuffix('.jsonl.gz')}.index"
)
else:
raise ValueError(f"Unknown base type: {self.base_type}")
def _index_jsonl(self, path: Path) -> None:
if not path.exists():
return
index_path = self._index_path(path)
if index_path.exists():
if (index_path.stat().st_size == 0 or
index_path.stat().st_mtime < path.stat().st_mtime):
# Remove empty or stale index.
index_path.unlink()
else:
# Index is up-to-date.
return
offset = 0
index: list[tuple[str, str, str]] = []
with GzipFile(path, mode="r") as gzip_file:
gzip_file: IO[str]
for line in gzip_file:
try:
record = loads(line)
except JSONDecodeError:
LOGGER.error(f"Could not index {line} at {path}.")
return
record_id = uuid5(
NAMESPACE_URL,
f"{record['timestamp']}:{record['url']}",
)
index.append((
str(record_id),
str(path.relative_to(self.data_directory)),
str(offset),
))
offset = gzip_file.tell()
try:
with index_path.open("wt") as index_file:
index_writer = writer(index_file)
index_writer.writerows(index)
except Exception as e:
LOGGER.error(e)
def _index_warc(self, dir_path: Path) -> None:
if not dir_path.exists():
return
index_path = self._index_path(dir_path)
if index_path.exists():
if (index_path.stat().st_size == 0 or
index_path.stat().st_mtime < dir_path.stat().st_mtime):
# Remove empty or stale index.
index_path.unlink()
else:
# Index is up-to-date.
return
index: list[tuple[str, str, str]] = []
for path in dir_path.iterdir():
if path.name.startswith("."):
continue
records = ArchiveIterator(
FileStream(str(path), "rb"),
record_types=WarcRecordType.response,
parse_http=False,
)
for record in records:
record: WarcRecord
offset = record.stream_pos
try:
record_url = loads(record.headers["Archived-URL"])
except JSONDecodeError:
LOGGER.error(
f"Could not index "
f"{record.headers['Archived-URL']} "
f"at {path}."
)
return
record_id = uuid5(
NAMESPACE_URL,
f"{record_url['timestamp']}:{record_url['url']}",
)
index.append((
str(record_id),
str(path.relative_to(self.data_directory)),
str(offset),
))
try:
with index_path.open("wt") as index_file:
index_writer = writer(index_file)
index_writer.writerows(index)
except Exception as e:
LOGGER.error(e)
def _index_jsonl_snippets(self, path: Path) -> None:
if not path.exists():
return
index_path = self._index_path(path)
if index_path.exists():
if (index_path.stat().st_size == 0 or
index_path.stat().st_mtime < path.stat().st_mtime):
# Remove empty or stale index.
index_path.unlink()
else:
# Index is up-to-date.
return
offset = 0
index: list[tuple[str, str, str, str]] = []
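# Snippet rows carry a fourth column: the snippet's position within the
# parsed SERP's results list, so one JSONL line is indexed once per
# contained snippet.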
with GzipFile(path, mode="r") as gzip_file:
gzip_file: IO[bytes]
for line in gzip_file:
try:
record = loads(line)
except JSONDecodeError:
LOGGER.error(f"Could not index {line} at {path}.")
return
for snippet_index, snippet in enumerate(record["results"]):
record_id = uuid5(
NAMESPACE_URL,
f"{record['timestamp']}:{snippet['url']}",
)
index.append((
str(record_id),
str(path.relative_to(self.data_directory)),
str(offset),
str(snippet_index)
))
offset = gzip_file.tell()
try:
with index_path.open("wt") as index_file:
index_writer = writer(index_file)
index_writer.writerows(index)
except Exception as e:
LOGGER.error(e)
def _index(self, path: Path) -> None:
if self.base_type == "archived-urls":
self._index_jsonl(path)
elif self.base_type == "archived-query-urls":
self._index_jsonl(path)
elif self.base_type == "archived-raw-serps":
self._index_warc(path)
elif self.base_type == "archived-parsed-serps":
self._index_jsonl(path)
elif self.base_type == "archived-search-result-snippets":
self._index_jsonl_snippets(path)
elif self.base_type == "archived-raw-search-results":
self._index_warc(path)
elif self.base_type == "archived-parsed-search-results":
self._index_jsonl(path)
else:
raise ValueError(f"Unknown base type: {self.base_type}")
@cached_property
def path(self) -> Path:
if self.base_type == "archived-urls":
return self.base_path / ".index"
elif self.base_type == "archived-query-urls":
return self.base_path / ".index"
elif self.base_type == "archived-raw-serps":
return self.base_path / ".index"
elif self.base_type == "archived-parsed-serps":
return self.base_path / ".index"
elif self.base_type == "archived-search-result-snippets":
return self.base_path / ".snippets.index"
elif self.base_type == "archived-raw-search-results":
return self.base_path / ".index"
elif self.base_type == "archived-parsed-search-results":
return self.base_path / ".index"
else:
raise ValueError(f"Unknown base type: {self.base_type}")
@cached_property
def shelf_path(self) -> Path:
return self.path.with_name(f"{self.path.name}.shelf")
def index(self) -> None:
# Index each path individually.
indexed_paths = []
indexable_paths = tqdm(
self._indexable_paths(),
total=sum(1 for _ in self._indexable_paths()),
desc="Index paths",
unit="path",
)
for indexable_path in indexable_paths:
self._index(indexable_path)
indexed_paths.append(indexable_path)
# Merge all indexes into a single index.
path = self.path
num_lines = 0
with path.open("wb") as aggregated_index_file:
indexed_paths = tqdm(
indexed_paths,
desc="Merge indices",
unit="path",
)
for indexed_path in indexed_paths:
index_path = self._index_path(indexed_path)
if not index_path.exists():
continue
with index_path.open("rb") as index_file:
copyfileobj(index_file, aggregated_index_file)
with index_path.open("rb") as index_file:
num_lines += count_lines(index_file)
# TODO: This is too slow.
# # Create index shelf for efficient lookups.
# shelf_path = self.shelf_path
# with shelf_open(str(shelf_path), "c") as shelf:
# with path.open("rt") as file:
# lines = tqdm(
# file,
# total=num_lines,
# desc="Create index shelf",
# unit="line",
# )
# for line in lines:
# uuid, line = line.split(",", maxsplit=1)
# shelf[uuid] = line
_CorpusLocationType = TypeVar(
"_CorpusLocationType",
CorpusJsonlLocation, CorpusJsonlSnippetLocation, CorpusWarcLocation
)
_RecordType = TypeVar("_RecordType", bound=DataClassJsonMixin)
@dataclass(frozen=True)
class LocatedRecord(Generic[_CorpusLocationType, _RecordType]):
location: _CorpusLocationType
record: _RecordType
_T = TypeVar("_T")
@dataclass(frozen=True)
class _Index(
Generic[_CorpusLocationType, _RecordType],
ContextManager,
ABC,
):
data_directory: Path
focused: bool
@property
@abstractmethod
def base_type(self) -> str:
pass
@cached_property
def _meta_index(self) -> _MetaIndex:
return _MetaIndex(self.base_type, self.data_directory, self.focused)
@abstractmethod
def _to_corpus_location(self, csv_line: list) -> _CorpusLocationType:
pass
@cached_property
def _index_shelf_path(self) -> Path:
index_path = self.data_directory
if self.focused:
index_path /= "focused"
if self.base_type == "archived-search-result-snippets":
return index_path / "archived-parsed-serps" / \
".snippets.index.shelf"
else:
return index_path / self.base_type / ".index.shelf"
@cached_property
def _index_shelve(self) -> Shelf:
return shelf_open(str(self._index_shelf_path), "r")
def index(self) -> None:
self._meta_index.index()
@abstractmethod
def _read_record(self, location: _CorpusLocationType) -> _RecordType:
pass
def __getitem__(
self,
item: UUID
) -> LocatedRecord[_CorpusLocationType, _RecordType]:
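# Resolve the UUID via the index shelf, decode the CSV location fields,
# and only then read the record from the referenced file.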
csv_line = self._index_shelve[str(item)].split(",")
location = self._to_corpus_location(csv_line)
record = self._read_record(location)
return LocatedRecord(location, record)
def get(
self,
item: UUID
) -> LocatedRecord[_CorpusLocationType, _RecordType] | None:
if str(item) not in self._index_shelve:
return None
return self[item]
def __iter__(self) -> Iterator[UUID]:
for uuid in self._index_shelve:
yield UUID(uuid)
def __exit__(self, *args) -> None:
self._index_shelve.close()
@dataclass(frozen=True)
class _JsonLineIndex(_Index[CorpusJsonlLocation, _RecordType]):
@property
@abstractmethod
def record_type(self) -> Type[_RecordType]:
pass
@cached_property
def _schema(self) -> Schema:
return self.record_type.schema()
def _to_corpus_location(self, csv_line: list) -> CorpusJsonlLocation:
return CorpusJsonlLocation(
relative_path=Path(csv_line[1]),
byte_offset=int(csv_line[2]),
)
def _read_record(self, location: CorpusJsonlLocation) -> _RecordType:
path = self.data_directory / location.relative_path
with GzipFile(path, "rb") as gzip_file:
gzip_file: IO[bytes]
gzip_file.seek(location.byte_offset)
with TextIOWrapper(gzip_file) as text_file:
line = text_file.readline()
return self._schema.loads(line)
@dataclass(frozen=True)
class _WarcIndex(_Index[CorpusWarcLocation, _RecordType]):
def _to_corpus_location(self, csv_line: list) -> CorpusWarcLocation:
return CorpusWarcLocation(
relative_path=Path(csv_line[1]),
byte_offset=int(csv_line[2]),
)
def _read_record(self, location: CorpusWarcLocation) -> _RecordType:
path = self.data_directory / location.relative_path
with path.open("rb") as file:
file.seek(location.byte_offset)
stream = PythonIOStreamAdapter(file)
record: WarcRecord = next(ArchiveIterator(stream))
return self._read_warc_record(record)
@abstractmethod
def _read_warc_record(self, record: WarcRecord) -> _RecordType:
pass
@dataclass(frozen=True)
class ArchivedUrlIndex(_JsonLineIndex[ArchivedUrl]):
base_type = final("archived-urls")
record_type = ArchivedUrl
data_directory: Path = DATA_DIRECTORY_PATH
focused: bool = False
@dataclass(frozen=True)
class ArchivedQueryUrlIndex(_JsonLineIndex[ArchivedQueryUrl]):
base_type = "archived-query-urls"
record_type = ArchivedQueryUrl
data_directory: Path = DATA_DIRECTORY_PATH
focused: bool = False
@dataclass(frozen=True)
class ArchivedRawSerpIndex(_WarcIndex[ArchivedRawSerp]):
base_type = "archived-raw-serps"
schema = ArchivedQueryUrl.schema()
data_directory: Path = DATA_DIRECTORY_PATH
focused: bool = False
def _read_warc_record(self, record: WarcRecord) -> ArchivedRawSerp:
archived_url: ArchivedQueryUrl = self.schema.loads(
record.headers["Archived-URL"]
)
content_type = record.http_charset
if content_type is None:
content_type = "utf8"
return ArchivedRawSerp(
url=archived_url.url,
timestamp=archived_url.timestamp,
query=archived_url.query,
page=archived_url.page,
offset=archived_url.offset,
content=record.reader.read(),
encoding=content_type,
)
@dataclass(frozen=True)
class ArchivedParsedSerpIndex(_JsonLineIndex[ArchivedParsedSerp]):
base_type = "archived-parsed-serps"
record_type = ArchivedParsedSerp
data_directory: Path = DATA_DIRECTORY_PATH
focused: bool = False
@dataclass(frozen=True)
class ArchivedSearchResultSnippetIndex(
_Index[CorpusJsonlSnippetLocation, ArchivedSearchResultSnippet]
):
base_type = "archived-search-result-snippets"
schema = ArchivedParsedSerp.schema()
data_directory: Path = DATA_DIRECTORY_PATH
focused: bool = False
def _to_corpus_location(
self,
csv_line: list
) -> CorpusJsonlSnippetLocation:
return CorpusJsonlSnippetLocation(
relative_path=Path(csv_line[1]),
byte_offset=int(csv_line[2]),
index=int(csv_line[3]),
)
def _read_record(
self,
location: CorpusJsonlSnippetLocation
) -> ArchivedSearchResultSnippet:
path = self.data_directory / location.relative_path
with GzipFile(path, "rb") as gzip_file:
gzip_file: IO[bytes]
gzip_file.seek(location.byte_offset)
with TextIOWrapper(gzip_file) as text_file:
line = text_file.readline()
record: ArchivedParsedSerp = self.schema.loads(line)
return record.results[location.index]
@dataclass(frozen=True)
class ArchivedRawSearchResultIndex(_WarcIndex[ArchivedRawSearchResult]):
base_type = "archived-raw-search-results"
schema = ArchivedSearchResultSnippet.schema()
data_directory: Path = DATA_DIRECTORY_PATH
focused: bool = False
def _read_warc_record(self, record: WarcRecord) -> ArchivedRawSearchResult:
archived_url: ArchivedSearchResultSnippet = self.schema.loads(
record.headers["Archived-URL"]
)
content_type = record.http_charset
if content_type is None:
content_type = "utf8"
return ArchivedRawSearchResult(
url=archived_url.url,
timestamp=archived_url.timestamp,
rank=archived_url.rank,
title=archived_url.title,
snippet=archived_url.snippet,
content=record.reader.read(),
encoding=content_type,
)
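# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration, under assumed paths, of how the index classes above
# are meant to be used: build the on-disk index for one record type, then look
# records up by UUID. Note that UUID lookups additionally require the
# ".index.shelf" file, whose creation is currently commented out in the meta
# index above, so only the indexing step is exercised here.
if __name__ == "__main__":
    example_data_directory = Path("data")  # hypothetical placeholder
    queries = ArchivedQueryUrlIndex(data_directory=example_data_directory)
    queries.index()
    # Once a shelf exists, records could be fetched like this:
    # located = queries.get(UUID("00000000-0000-0000-0000-000000000000"))
    # if located is not None:
    #     print(located.location.relative_path, located.record.url)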
| 20,377
| 34.013746
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/chatnoir.py
|
from dataclasses import dataclass
from typing import Pattern, Iterator
from urllib.parse import urljoin
from bs4 import Tag
from archive_query_log.model import ArchivedSearchResultSnippet, \
HighlightedText
from archive_query_log.results.parse import HtmlResultsParser
from archive_query_log.util.html import clean_html
@dataclass(frozen=True)
class ChatNoirResultsParser(HtmlResultsParser):
url_pattern: Pattern[str]
def parse_html(
self,
html: Tag,
timestamp: int,
serp_url: str,
) -> Iterator[ArchivedSearchResultSnippet]:
results = html.find("section", id="SearchResults")
if results is None:
return
results_iter = results.find_all("article", class_="search-result")
for index, result in enumerate(results_iter):
header: Tag = result.find("header")
url = header.find("a", class_="link")["href"]
url = urljoin(serp_url, url)
title = clean_html(header.find("h2"))
# Remove header. Only the snippet will be left.
header.decompose()
snippet = clean_html(result)
if len(snippet) == 0:
snippet = None
yield ArchivedSearchResultSnippet(
rank=index + 1,
url=url,
timestamp=timestamp,
title=HighlightedText(title),
snippet=HighlightedText(snippet),
)
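# --- Hedged usage sketch (not part of the original module) ---
# Shows, with a hypothetical URL pattern and a hand-built stub ArchivedRawSerp,
# how this parser is driven through HtmlResultsParser.parse(); in the pipeline,
# the raw SERPs come from archived WARC records instead.
if __name__ == "__main__":
    from re import compile as re_compile
    from archive_query_log.model import ArchivedRawSerp
    parser = ChatNoirResultsParser(
        url_pattern=re_compile(r"https?://(www\.)?chatnoir\.eu/"),
    )
    stub_serp = ArchivedRawSerp(
        url="https://www.chatnoir.eu/?q=web+archives",
        timestamp=1600000000,
        query="web archives",
        page=None,
        offset=None,
        content=b"<html><body></body></html>",
        encoding="utf-8",
    )
    # The stub has no "#SearchResults" section, so this prints None.
    print(parser.parse(stub_serp))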
| 1,475
| 32.545455
| 74
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/__init__.py
| 0
| 0
| 0
|
py
|
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/parse.py
|
from abc import abstractmethod, ABC
from dataclasses import dataclass
from gzip import GzipFile
from io import TextIOWrapper
from pathlib import Path
from typing import Sequence, NamedTuple, Iterator, Pattern
from urllib.parse import quote, urljoin
from bs4 import Tag, BeautifulSoup
from tqdm.auto import tqdm
from archive_query_log.download.iterable import ArchivedRawSerps
from archive_query_log.model import ArchivedRawSerp, \
ArchivedSearchResultSnippet, ResultsParser, InterpretedQueryParser, \
ArchivedParsedSerp, Service, HighlightedText
from archive_query_log.util.html import clean_html
class HtmlResultsParser(ResultsParser, ABC):
url_pattern: Pattern[str]
@abstractmethod
def parse_html(
self,
html: Tag,
timestamp: int,
serp_url: str,
) -> Iterator[ArchivedSearchResultSnippet]:
...
def parse(
self,
raw_serp: ArchivedRawSerp,
) -> Sequence[ArchivedSearchResultSnippet] | None:
if self.url_pattern.search(raw_serp.url) is None:
return None
html = BeautifulSoup(
raw_serp.content,
"html.parser",
from_encoding=raw_serp.encoding
)
results = tuple(self.parse_html(
html,
raw_serp.timestamp,
raw_serp.url,
))
return results if len(results) > 0 else None
@dataclass(frozen=True)
class HtmlSelectorResultsParser(HtmlResultsParser):
url_pattern: Pattern[str]
results_selector: str
url_selector: str
url_attribute: str
title_selector: str
snippet_selector: str | None
def parse_html(
self,
html: Tag,
timestamp: int,
serp_url: str,
) -> Iterator[ArchivedSearchResultSnippet]:
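# The sentinel selector ":--self" means "use the result element itself"
# rather than selecting a nested tag for the URL, title, or snippet.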
for index, result in enumerate(html.select(self.results_selector)):
if self.url_selector == ":--self":
url_tag = result
else:
url_tag = result.select_one(self.url_selector)
if url_tag is None:
continue
if self.url_attribute not in url_tag.attrs:
continue
url = url_tag.attrs[self.url_attribute]
if url is None:
continue
url = urljoin(serp_url, url)
if self.title_selector == ":--self":
title_tag = result
else:
title_tag = result.select_one(self.title_selector)
if title_tag is None:
continue
title = clean_html(title_tag)
if len(title) == 0:
continue
snippet = None
if self.snippet_selector is not None:
if self.snippet_selector == ":--self":
snippet_tags = [result]
else:
snippet_tags = result.select(self.snippet_selector)
if snippet_tags is not None and snippet_tags:
for snippet_candidate in snippet_tags:
snippet_candidate = clean_html(snippet_candidate)
if (snippet_candidate and
len(snippet_candidate) > 0 and
(not snippet or
len(snippet_candidate) > len(snippet))):
snippet = snippet_candidate
yield ArchivedSearchResultSnippet(
rank=index + 1,
url=url,
timestamp=timestamp,
title=HighlightedText(title),
snippet=HighlightedText(snippet),
)
class HtmlInterpretedQueryParser(InterpretedQueryParser, ABC):
url_pattern: Pattern[str]
@abstractmethod
def parse_html(self, html: Tag) -> str | None:
...
def parse(
self,
raw_serp: ArchivedRawSerp,
) -> str | None:
if self.url_pattern.search(raw_serp.url) is None:
return None
html = BeautifulSoup(
raw_serp.content,
"html.parser",
from_encoding=raw_serp.encoding
)
return self.parse_html(html)
@dataclass(frozen=True)
class HtmlSelectorInterpretedQueryParser(HtmlInterpretedQueryParser):
url_pattern: Pattern[str]
query_selector: str
query_attribute: str
query_text: bool = False
def parse_html(self, html: Tag) -> str | None:
search_field = html.select_one(self.query_selector)
if search_field is None:
return None
if self.query_text:
interpreted_query = search_field.text
if interpreted_query is not None and len(interpreted_query) > 0:
return interpreted_query
if (self.query_attribute is not None and
self.query_attribute in search_field.attrs):
interpreted_query = search_field.attrs[self.query_attribute]
if interpreted_query is not None and len(interpreted_query) > 0:
return interpreted_query
return None
class _CdxPage(NamedTuple):
input_path: Path
output_path: Path
@dataclass(frozen=True)
class ArchivedParsedSerpParser:
results_parsers: Sequence[ResultsParser]
interpreted_query_parsers: Sequence[InterpretedQueryParser]
overwrite: bool = False
verbose: bool = False
def parse(self, input_path: Path, output_path: Path) -> None:
if output_path.exists() and not self.overwrite:
return
output_path.parent.mkdir(parents=True, exist_ok=True)
archived_serp_contents = ArchivedRawSerps(input_path)
if self.verbose:
archived_serp_contents = tqdm(
archived_serp_contents,
desc="Parse SERP WARC records",
unit="record",
)
archived_parsed_serps = (
self.parse_single(archived_serp_content)
for archived_serp_content in archived_serp_contents
)
archived_parsed_serps = (
archived_serp
for archived_serp in archived_parsed_serps
if archived_serp is not None
)
output_schema = ArchivedParsedSerp.schema()
# noinspection PyTypeChecker
with output_path.open("wb") as file, \
GzipFile(fileobj=file, mode="wb") as gzip_file, \
TextIOWrapper(gzip_file) as text_file:
for archived_parsed_serp in archived_parsed_serps:
text_file.write(output_schema.dumps(archived_parsed_serp))
text_file.write("\n")
def parse_single(
self,
archived_serp_content: ArchivedRawSerp
) -> ArchivedParsedSerp | None:
results: Sequence[ArchivedSearchResultSnippet] | None = None
for parser in self.results_parsers:
results = parser.parse(archived_serp_content)
if results is not None:
break
interpreted_query: str | None = None
for parser in self.interpreted_query_parsers:
interpreted_query = parser.parse(archived_serp_content)
if interpreted_query is not None:
break
if results is None and interpreted_query is None:
return None
return ArchivedParsedSerp(
url=archived_serp_content.url,
timestamp=archived_serp_content.timestamp,
query=archived_serp_content.query,
page=archived_serp_content.page,
offset=archived_serp_content.offset,
interpreted_query=interpreted_query,
results=results if results is not None else [],
)
def _service_pages(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None,
cdx_page: int | None,
) -> Sequence[_CdxPage]:
"""
List all CDX pages of raw SERPs that need to be parsed.
"""
input_format_path = data_directory / "archived-raw-serps"
output_format_path = data_directory / "archived-parsed-serps"
service_path = input_format_path / service.name
if not service_path.exists():
return []
if domain is not None:
domain_paths = [service_path / domain]
else:
domain_paths = [
path
for path in service_path.iterdir()
if path.is_dir()
]
if focused:
domain_paths = [
path
for path in domain_paths
if any(
path.name.endswith(quote(prefix, safe=""))
for prefix in service.focused_url_prefixes
)
]
if cdx_page is not None:
assert domain is not None
assert len(domain_paths) == 1
cdx_page_paths = [domain_paths[0] / f"{cdx_page:010}"]
else:
cdx_page_paths = [
path
for domain_path in domain_paths
for path in domain_path.iterdir()
if (
path.is_dir() and
len(path.name) == 10 and
path.name.isdigit()
)
]
return [
_CdxPage(
input_path=cdx_page_path,
output_path=output_format_path / cdx_page_path.relative_to(
input_format_path
).with_suffix(".jsonl.gz"),
)
for cdx_page_path in cdx_page_paths
]
def parse_service(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None = None,
cdx_page: int | None = None,
):
pages = self._service_pages(
data_directory=data_directory,
focused=focused,
service=service,
domain=domain,
cdx_page=cdx_page,
)
if len(pages) == 0:
return
if len(pages) > 1:
pages = tqdm(
pages,
desc="Parse archived SERP URLs",
unit="page",
)
for page in pages:
self.parse(page.input_path, page.output_path)
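# --- Hedged usage sketch (not part of the original module) ---
# Illustrates wiring a selector-based results parser and interpreted-query
# parser into an ArchivedParsedSerpParser. The URL pattern, CSS selectors, and
# paths below are hypothetical placeholders, not the real service configuration.
if __name__ == "__main__":
    from re import compile as re_compile
    example_pattern = re_compile(r"https?://example\.org/search")
    example_parser = ArchivedParsedSerpParser(
        results_parsers=[
            HtmlSelectorResultsParser(
                url_pattern=example_pattern,
                results_selector="div.result",
                url_selector="a",
                url_attribute="href",
                title_selector="h2",
                snippet_selector="p.snippet",
            ),
        ],
        interpreted_query_parsers=[
            HtmlSelectorInterpretedQueryParser(
                url_pattern=example_pattern,
                query_selector="input[name=q]",
                query_attribute="value",
            ),
        ],
        verbose=True,
    )
    example_parser.parse(
        input_path=Path("archived-raw-serps/example/example.org/0000000000"),
        output_path=Path(
            "archived-parsed-serps/example/example.org/0000000000.jsonl.gz"
        ),
    )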
| 10,392
| 31.68239
| 76
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_ebay_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_ebay_se_xing_mei_jia_wei_vxin_dun35358_1544323503():
verify_serp_parsing(
"https://web.archive.org/web/20181209024503id_/https://www.ebay.ca/sch/i.html?_nkw=%E8%89%B2%E6%80%A7%E5%A6%B9%E3%80%90%E5%8A%A0%E5%BE%AEv%E4%BF%A1%3Adun35358%E3%80%91%E2%8A%99",
"ebay",
)
def test_parse_query_ebay_shan_dong_11xuan_5_360cai_piao_wei_xin_weiwei776699_1538125942():
verify_serp_parsing(
"https://web.archive.org/web/20180928091222id_/https://www.ebay.ph/sch/i.html?_nkw=%E5%B1%B1%E4%B8%9C11%E9%80%895+360%E5%BD%A9%E7%A5%A8%2B%E3%80%96%E5%BE%AE%E4%BF%A1%3Aweiwei776699%E3%80%97%E2%94%A0",
"ebay",
)
def test_parse_query_ebay_smartphones_1334931269():
verify_serp_parsing(
"https://web.archive.org/web/20120420141429id_/http://www.ebay.de/sch/i.html?_nkw=Smartphones&_sacat=132286&rt=nc",
"ebay",
)
def test_parse_query_ebay_srch_str_1641320481():
verify_serp_parsing(
"https://web.archive.org/web/20220104182121id_/https://www.ebay.com.sg/sch/i.html?_nkw={srch_str}&ssPageName=GSTL",
"ebay",
)
def test_parse_query_ebay_liu_he_cai_gua_pai_wan_zheng_ban_wei_vxin_weiwei776699_1565563054():
verify_serp_parsing(
"https://web.archive.org/web/20190811223734id_/https://www.ebay.com.sg/sch/i.html?_nkw=%e5%85%ad%e5%90%88%e5%bd%a9%e6%8c%82%e7%89%8c%e5%ae%8c%e6%95%b4%e6%9d%bf%c2%b1%e3%80%90%e5%be%aev%e4%bf%a1weiwei776699%e3%80%91%e2%97%86",
"ebay",
)
def test_parse_query_ebay_1_4_1566151598():
verify_serp_parsing(
"https://web.archive.org/web/20190818180638id_/https://www.ebay.com/sch/i.html?rt=nc&_fsrp=1&_nkw=1+%3A+4+&_sacat=14964&Connector%2520A=RCA%252FPhono%2520Male",
"ebay",
)
def test_parse_query_ebay_hu_bei_kuai_san_zhong_jiang_zhu_shou_jia_wei_vxin_weiwei776699_1548361753():
verify_serp_parsing(
"https://web.archive.org/web/20190124202913id_/https://www.ebay.ph/sch/i.html?_nkw=%e6%b9%96%e5%8c%97%e5%bf%ab%e4%b8%89%e4%b8%ad%e5%a5%96%e5%8a%a9%e6%89%8b%e5%8a%a0%e3%80%8e%e5%be%aev%e4%bf%a1weiwei776699%e3%80%8f%e2%88%9a",
"ebay",
)
def test_parse_query_ebay_imaginext_batman_motorcycle_1597143554():
verify_serp_parsing(
"https://web.archive.org/web/20200811105914id_/https://www.ebay.com/sch/i.html?_nkw=imaginext+batman+motorcycle&rt=nc&_dcat=50305&_sacat=50305&vbn_id=7023320428&Featured%2520Refinements=Imaginext%2520Figure&_fsrp=1",
"ebay",
)
def test_parse_query_ebay_victoria_s_secret_1559541146():
verify_serp_parsing(
"https://web.archive.org/web/20190603055226id_/https://www.ebay.co.uk/sch/i.html?_nkw=Victoria%27s+Secret&rt=nc&_dcat=11514&_sacat=-1&cc_bn_id=21837960&_aspectname=aspect-Features&_fsrp=1",
"ebay",
)
def test_parse_query_ebay_liu_he_cai_2o2526_wei_xin_weiwei776699_1562672411():
verify_serp_parsing(
"https://web.archive.org/web/20190709114011id_/https://www.ebay.nl/sch/i.html?_nkw=%e5%85%ad%e5%90%88%e5%bd%a92o2526%2b%e3%80%8e%e5%be%ae%e4%bf%a1%3aweiwei776699%e3%80%8f%e2%96%b3",
"ebay",
)
| 3,238
| 42.77027
| 233
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_jd_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_jd_yu_diao_jiu_ning_meng_1601078153():
verify_serp_parsing(
"https://web.archive.org/web/20200925235553id_/https://search.jd.com/Search?keyword=%E9%A2%84%E8%B0%83%E9%85%92%E6%9F%A0%E6%AA%AC&enc=utf-8",
"jd",
)
def test_parse_query_jd_qing_shang_1510505248():
verify_serp_parsing(
"https://web.archive.org/web/20171112164728id_/http://search.jd.com/search?keyword=%E6%83%85%E5%95%86&enc=utf-8&qrst=1&rt=1&stop=1&book=y&vt=2&wq=%E6%83%85%E5%95%86&ev=publishers_%E5%90%89%E6%9E%97%E5%87%BA%E7%89%88%E9%9B%86%E5%9B%A2%40packstate_no%40&uc=0",
"jd",
)
def test_parse_query_jd_tai_kong_zhen_1508948206():
verify_serp_parsing(
"https://web.archive.org/web/20171025161646id_/http://search.jd.com/search?keyword=%E5%A4%AA%E7%A9%BA%E6%9E%95&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&stock=1&cid3=12198",
"jd",
)
def test_parse_query_jd_xian_xing_dai_shu_1497236213():
verify_serp_parsing(
"https://web.archive.org/web/20170612025653id_/http://search.jd.com/search?keyword=%E7%BA%BF%E6%80%A7%E4%BB%A3%E6%95%B0&enc=utf-8&qrst=1&rt=1&stop=1&spm=2.1.1&vt=2&cid3=3672",
"jd",
)
def test_parse_query_jd_iu_vdun35358z_1537907650():
verify_serp_parsing(
"https://web.archive.org/web/20180925203410id_/http://search.jd.com/search?keyword=%BC%D2%CD%A5%C2%D2%C2%D7%B5%E7%D3%B0%CD%F8%A1%BA%2B%CE%A2v%D0%C5dun35358%A1%BB%A8z&qr=&qrst=UNEXPAND&et=&rt=1&ev=exprice_0-19%40&uc=0",
"jd",
)
def test_parse_query_jd_tuan_dui_guan_li_1429813603():
verify_serp_parsing(
"https://web.archive.org/web/20150423182643id_/http://search.jd.com/search?keyword=%E5%9B%A2%E9%98%9F%E7%AE%A1%E7%90%86&enc=utf-8&qrst=1&rt=1&stop=1&book=y&499055734",
"jd",
)
def test_parse_query_jd_che_zai_cd_1507664971():
verify_serp_parsing(
"https://web.archive.org/web/20171010194931id_/http://search.jd.com/search?keyword=%E8%BD%A6%E8%BD%BDCD&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E8%BD%A6%E8%BD%BDCD&ev=exstarlevel_2%40128_2382%40&uc=0",
"jd",
)
def test_parse_query_jd_ch_1382528052():
verify_serp_parsing(
"https://web.archive.org/web/20131023113412id_/http://search.jd.com/search?keyword=%C4%DA%BF%E3%20%C3%DE&qr=%C4%D0%CA%BF%C4%DA%BF%E3&qrst=expand&et=4&rt=1&sttr=1&area=1&ev=1373_71335%40&uc=0",
"jd",
)
def test_parse_query_jd_1368578723():
verify_serp_parsing(
"https://web.archive.org/web/20130515004523id_/http://search.jd.com/search?keyword=%B4%B4%D0%C2&qr=&qrst=UNEXPAND&et=&rt=1&area=1&ev=&uc=0&bs=no",
"jd",
)
def test_parse_query_jd_guan_yin_liao_1501752634():
verify_serp_parsing(
"https://web.archive.org/web/20170803093034id_/http://search.jd.com/search?keyword=%E9%A6%86%E9%A5%AE%E6%96%99&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E9%A6%86%E9%A5%AE%E6%96%99&cid2=12202&stock=1&ev=2762_32941%40&uc=0",
"jd",
)
def test_parse_query_jd_yang_hong_wei_1429640037():
verify_serp_parsing(
"https://web.archive.org/web/20150421181357id_/http://search.jd.com/search?keyword=%E6%9D%A8%E5%AE%8F%E4%BC%9F&ev=&psort=2&book=y",
"jd",
)
def test_parse_query_jd_weiwei776699e_1537978473():
verify_serp_parsing(
"https://web.archive.org/web/20180926161433id_/http://search.jd.com/search?keyword=%BA%FE%B1%B1%BF%EC%C8%FD%CD%F8%D2%D7%BF%AA%BD%B1%BD%E1%B9%FB%CE%A2%D0%C5weiwei776699%85e&qr=&qrst=UNEXPAND&et=&rt=1&ev=exprice_0-19%40&uc=0",
"jd",
)
def test_parse_query_jd_shlrwanh_uq_2179706936_1473350033():
verify_serp_parsing(
"https://web.archive.org/web/20160908155353id_/http://search.jd.com/Search?keyword=%D5%E6%D5%FD%B1%ED%D1%DD%CA%D5%B7%D1%C5%E3%C1%C4%BA%C5%C2%EB%A1%BE%2B%F6%A6%5F%C5%AE%71%3A%32%31%37%39%37%30%36%39%33%36%A1%BF",
"jd",
)
def test_parse_query_jd_sha_tan_ku_nan_1446537277():
verify_serp_parsing(
"https://web.archive.org/web/20151103075437id_/http://search.jd.com/search?keyword=%E6%B2%99%E6%BB%A9%E8%A3%A4%20%E7%94%B7&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&sttr=1&ev=exbrand_Maro%20Homme%40&uc=0",
"jd",
)
def test_parse_query_jd_ji_guang_da_yin_1516054404():
verify_serp_parsing(
"https://web.archive.org/web/20180115221324id_/http://search.jd.com/search?keyword=%E6%BF%80%E5%85%89%E6%89%93%E5%8D%B0&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E6%BF%80%E5%85%89%E6%89%93%E5%8D%B0&ev=5345_88976%40268435461_268466585%40&uc=0",
"jd",
)
def test_parse_query_jd_dao_tian_1442736754():
verify_serp_parsing(
"https://web.archive.org/web/20150920081234id_/http://search.jd.com/search?keyword=%E7%A8%BB%E7%94%B0&enc=utf-8&qrst=1&et=5&rt=1&stop=1&vt=2&ev=exbrand_%E7%A8%BB%E7%94%B0%EF%BC%88DENTSTAR%EF%BC%89%40&uc=0",
"jd",
)
def test_parse_query_jd_song_yan_1434320372():
verify_serp_parsing(
"https://web.archive.org/web/20150614221932id_/http://search.jd.com/search?keyword=%E5%AE%8B%E5%BD%A6&cid=3267&book=y",
"jd",
)
def test_parse_query_jd_yxingaiujliu_xwcp198_1537926187():
verify_serp_parsing(
"https://web.archive.org/web/20180926014307id_/http://search.jd.com/search?keyword=%C5%B7%C3%C0xingai%D3%B0%D2%F4%CF%C8%B7%E6%A1%BA%2B%CE%A2%D0%C5xwcp198%A1%BB%A1%D1&qr=&qrst=UNEXPAND&et=&rt=1&ev=exprice_0-19%40&uc=0",
"jd",
)
def test_parse_query_jd_mo_yan_1516163665():
verify_serp_parsing(
"https://web.archive.org/web/20180117043425id_/http://search.jd.com/search?keyword=%E8%8E%AB%E8%A8%80&enc=utf-8&qrst=1&rt=1&stop=1&book=y&vt=2&ev=publishers_%E4%B8%AD%E5%9B%BD%E5%8D%8E%E4%BE%A8%E5%87%BA%E7%89%88%E7%A4%BE%40package_%E5%B9%B3%E8%A3%85%40&uc=0",
"jd",
)
def test_parse_query_jd_you_xi_1511265243():
verify_serp_parsing(
"https://web.archive.org/web/20171121115403id_/http://search.jd.com/search?keyword=%E6%B8%B8%E6%88%8F&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&offset=3&wq=%E6%B8%B8%E6%88%8F&cid2=3287&ev=packstate_no%402562_22800%40&uc=0",
"jd",
)
| 6,178
| 41.909722
| 267
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_reddit_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_reddit_teleperformance_1260472045():
verify_serp_parsing(
"https://web.archive.org/web/20091210190725id_/http://www.reddit.com:80/search?q=%22teleperformance%22&KeepThis=true&TB_iframe=true&height=550&width=800",
"reddit",
)
def test_parse_query_reddit_3ds_1364668924():
verify_serp_parsing(
"https://web.archive.org/web/20130330184204id_/http://en.reddit.com/search?q=3ds",
"reddit",
)
def test_parse_query_reddit_jailbait_1376244913():
verify_serp_parsing(
"https://web.archive.org/web/20130811181513id_/http://www.reddit.com/search?q=jailbait",
"reddit",
)
def test_parse_query_reddit_german_nebelmittelwurfanlage_1664557458():
verify_serp_parsing(
"https://web.archive.org/web/20220930170418id_/https://www.reddit.com/search/?q=german+nebelmittelwurfanlage&sort=comments&t=all",
"reddit",
)
def test_parse_query_reddit_supermoon_1403887175():
verify_serp_parsing(
"https://web.archive.org/web/20140627163935id_/http://fr.reddit.com/search?q=supermoon",
"reddit",
)
def test_parse_query_reddit_touch_meme_1663170731():
verify_serp_parsing(
"https://web.archive.org/web/20220914155211id_/https://www.reddit.com/search/?q=touch%20meme&include_over_18=1&type=link",
"reddit",
)
def test_parse_query_reddit_flair_already_posted_https_redd_it_xamfzl_1667030236():
verify_serp_parsing(
"https://web.archive.org/web/20221029075716id_/https://old.reddit.com/search/?q=flair%3A%22already+posted+https%3A%2F%2Fredd.it%2Fxamfzl%22&sort=comments&type=comment",
"reddit",
)
def test_parse_query_reddit_how_can_i_access_nature_com_articles_1666041985():
verify_serp_parsing(
"https://web.archive.org/web/20221017212625id_/https://www.reddit.com/search/?q=How%20Can%20I%20Access%20Nature.com%20Articles%3F&sort=top&type=comment",
"reddit",
)
def test_parse_query_reddit_b7_1650853917():
verify_serp_parsing(
"https://web.archive.org/web/20220425023157id_/https://www.reddit.com/search/?q=b7",
"reddit",
)
def test_parse_query_reddit_typetest_10fastfingers_1663797626():
verify_serp_parsing(
"https://web.archive.org/web/20220921220026id_/https://old.reddit.com/search?q=typetest+10fastfingers&restrict_sr=&sort=relevance&t=all",
"reddit",
)
| 2,551
| 33.486486
| 176
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_pornhub_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_pornhub_kardashian_1232246712():
verify_serp_parsing(
"https://web.archive.org/web/20090118024512id_/http://www.pornhub.com:80/video/search?search=kardashian&p=&c=32",
"pornhub",
)
def test_parse_query_pornhub_org_1219567054():
verify_serp_parsing(
"https://web.archive.org/web/20080824083734id_/http://www.pornhub.com:80/video/search?search=org",
"pornhub",
)
def test_parse_query_pornhub_mini_skirt_1257166127():
verify_serp_parsing(
"https://web.archive.org/web/20091102124847id_/http://www.pornhub.com:80/video/search?search=mini+skirt&c=37",
"pornhub",
)
def test_parse_query_pornhub_full_porno_films_1633148146():
verify_serp_parsing(
"https://web.archive.org/web/20211002041546id_/https://www.pornhub.com/video/search?search=full+porno+films",
"pornhub",
)
def test_parse_query_pornhub_brazilian_carnival_1378864891():
verify_serp_parsing(
"https://web.archive.org/web/20130911020131id_/http://www.pornhub.com:80/video/search?search=brazilian+carnival",
"pornhub",
)
def test_parse_query_pornhub_cfnm2_1271707129():
verify_serp_parsing(
"https://web.archive.org/web/20100419195849id_/http://www.pornhub.com:80/video/search?search=CFNM2&x=0&y=0",
"pornhub",
)
def test_parse_query_pornhub_12_1565436858():
verify_serp_parsing(
"https://web.archive.org/web/20190810113418id_/https://www.pornhub.com/video/search?search=12&o=mv",
"pornhub",
)
def test_parse_query_pornhub_roxy_reynolds_1222496976():
verify_serp_parsing(
"https://web.archive.org/web/20080927062936id_/http://www.pornhub.com:80/video/search?search=Roxy+Reynolds&c=1",
"pornhub",
)
def test_parse_query_pornhub_teen_facial_1237526884():
verify_serp_parsing(
"https://web.archive.org/web/20090320052804id_/http://www.pornhub.com:80/video/search?search=teen+facial&p=&c=15",
"pornhub",
)
def test_parse_query_pornhub_stripping_1627962626():
verify_serp_parsing(
"https://web.archive.org/web/20210803035026id_/https://www.pornhub.com/video/search?search=stripping%5D",
"pornhub",
)
| 2,370
| 31.040541
| 122
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_bongacams_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_bongacams_facial_1578106424():
verify_serp_parsing(
"https://web.archive.org/web/20200104025344id_/https://en.bongacams.com/male/tags/facial",
"bongacams",
)
def test_parse_query_bongacams_hd_plus_1576443879():
verify_serp_parsing(
"https://web.archive.org/web/20191215210439id_/https://bongacams.com/male/tags/hd-plus",
"bongacams",
)
def test_parse_query_bongacams_camshow_1653633300():
verify_serp_parsing(
"https://web.archive.org/web/20220527063500id_/https://bongacams.com/female/tags/camshow",
"bongacams",
)
def test_parse_query_bongacams_stripping_1623091287():
verify_serp_parsing(
"https://web.archive.org/web/20210607184127id_/https://bongacams.com/female/tags/stripping?page=3",
"bongacams",
)
def test_parse_query_bongacams_teasing_1635992326():
verify_serp_parsing(
"https://web.archive.org/web/20211104021846id_/https://bongacams.com/couples/tags/teasing",
"bongacams",
)
def test_parse_query_bongacams_asslicking_1623701105():
verify_serp_parsing(
"https://web.archive.org/web/20210614200505id_/https://bongacams.com/couples/tags/asslicking",
"bongacams",
)
def test_parse_query_bongacams_fucking_1577494848():
verify_serp_parsing(
"https://web.archive.org/web/20191228010048id_/https://en.bongacams.com/male/tags/fucking",
"bongacams",
)
def test_parse_query_bongacams_cock_sucking_1617355851():
verify_serp_parsing(
"https://web.archive.org/web/20210402093051id_/https://bongacams.com/male/tags/cock-sucking?page=2",
"bongacams",
)
def test_parse_query_bongacams_massage_1604010948():
verify_serp_parsing(
"https://web.archive.org/web/20201029223548id_/https://en.bongacams.com/male/tags/massage",
"bongacams",
)
def test_parse_query_bongacams_stripping_1648087966():
verify_serp_parsing(
"https://web.archive.org/web/20220324021246id_/https://en.bongacams.com/couples/tags/stripping",
"bongacams",
)
| 2,242
| 29.310811
| 108
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_vk_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_vk_technology_1383988940():
verify_serp_parsing(
"https://web.archive.org/web/20131109092220id_/http://vk.com/search?c%5Bq%5D=technology&c%5Bsection%5D=people&offset=0",
"vk",
)
def test_parse_query_vk_000_space_marine_1363614912():
verify_serp_parsing(
"https://web.archive.org/web/20130318135512id_/http://vk.com/search?c%5Bq%5D=000%3A%20Space%20Marine&c%5Bsection%5D=people&offset=10640",
"vk",
)
def test_parse_query_vk_gta_1389323282():
verify_serp_parsing(
"https://web.archive.org/web/20140110030802id_/http://vk.com/search?c%5Bq%5D=GTA&c%5Bsection%5D=people&offset=20",
"vk",
)
def test_parse_query_vk_radioelektronnye_sistemy_1361402773():
verify_serp_parsing(
"https://web.archive.org/web/20130220232613id_/http://vk.com/search?c[name]=0&c[section]=people&c[q]=%D1%80%D0%B0%D0%B4%D0%B8%D0%BE%D1%8D%D0%BB%D0%B5%D0%BA%D1%82%D1%80%D0%BE%D0%BD%D0%BD%D1%8B%D0%B5%20%D1%81%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D1%8B",
"vk",
)
def test_parse_query_vk_rabotaiu_na_sebia_1361533546():
verify_serp_parsing(
"https://web.archive.org/web/20130222114546id_/http://vk.com/search?c%5Bq%5D=%D1%80%D0%B0%D0%B1%D0%BE%D1%82%D0%B0%D1%8E%20%D0%BD%D0%B0%20%D1%81%D0%B5%D0%B1%D1%8F&c%5Bsection%5D=people&offset=20",
"vk",
)
def test_parse_query_vk_imperatory_illiuzii_1387125239():
verify_serp_parsing(
"https://web.archive.org/web/20131215163359id_/http://vk.com/search?c[name]=0&c[section]=people&c[q]=%D0%98%D0%BC%D0%BF%D0%B5%D1%80%D0%B0%D1%82%D0%BE%D1%80%D1%8B%20%D0%B8%D0%BB%D0%BB%D1%8E%D0%B7%D0%B8%D0%B9",
"vk",
)
def test_parse_query_vk_loco_roco_1377325114():
verify_serp_parsing(
"https://web.archive.org/web/20130824061834id_/http://vk.com/search?c[name]=0&c[section]=people&c[q]=Loco%20Roco",
"vk",
)
def test_parse_query_vk_sportivnye_mototsikly_1387080107():
verify_serp_parsing(
"https://web.archive.org/web/20131215040147id_/http://vk.com/search?c[name]=0&c[section]=people&c[q]=%D1%81%D0%BF%D0%BE%D1%80%D1%82%D0%B8%D0%B2%D0%BD%D1%8B%D0%B5%20%D0%BC%D0%BE%D1%82%D0%BE%D1%86%D0%B8%D0%BA%D0%BB%D1%8B",
"vk",
)
def test_parse_query_vk_grot_1353834377():
verify_serp_parsing(
"https://web.archive.org/web/20121125090617id_/http://vk.com:80/search?c%5Bq%5D=%D0%B3%D1%80%D0%BE%D1%82&c%5Bsection%5D=auto",
"vk",
)
def test_parse_query_vk_muzyka_1372304839():
verify_serp_parsing(
"https://web.archive.org/web/20130627034719id_/http://vk.com/search?c%5Bq%5D=%D0%9C%D0%A3%D0%97%D0%AB%D0%9A%D0%90&c%5Bsection%5D=people&offset=0",
"vk",
)
| 2,844
| 37.445946
| 252
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_manual_facebook_serp_parsing.py
|
# flake8: noqa
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_jam_of_the_day_search():
verify_serp_parsing(
'https://web.archive.org/web/20140917021101id_/https://www.facebook.com/search.php?q=%22Jam+of+the+Day&init=quick&tas=0.8517628074453497&search_first_focus=1302687872720',
'facebook'
)
def test_victoria_pynchon_search():
verify_serp_parsing(
'https://web.archive.org/web/20110110152620id_/http://www.facebook.com/search.php?q=%22Victoria+Pynchon%22&init=q',
'facebook'
)
def test_anthony_search():
verify_serp_parsing(
'https://web.archive.org/web/20130817124019id_/http://www.facebook.com/search.php?init=srp&sfxp&q=ANTHONY',
'facebook'
)
def test_7_search():
verify_serp_parsing(
'https://web.archive.org/web/20210506042113id_/http://www.facebook.com/search.php?q=7',
'facebook'
)
def test_aj_duca_search():
verify_serp_parsing(
'https://web.archive.org/web/20131226205922id_/http://www.facebook.com/search.php?q=AJ+Duca',
'facebook'
)
def test_noam_chomsky_search():
verify_serp_parsing(
'https://web.archive.org/web/20210125134751id_/http://www.facebook.com/search.php?q=3DNoam%20Chomsky&init=3Dquick=',
'facebook'
)
def test_taylor_search():
verify_serp_parsing(
'https://web.archive.org/web/20210126125305id_/http://www.facebook.com/search.php?q=3Dtaylor+company&am=',
'facebook'
)
def test_5_orsz_search():
verify_serp_parsing(
'https://web.archive.org/web/20210410211649id_/http://www.facebook.com/search.php?q=5+orsz%C3%A1gos',
'facebook'
)
def test_1_million_cards_orsz_search():
verify_serp_parsing(
'https://web.archive.org/web/20210304074906id_/http://www.facebook.com/search.php?q=1+million+cards&init=quick&tas=0.7974472279549098',
'facebook'
)
def test_abvie_search():
verify_serp_parsing(
'https://web.archive.org/web/20210610122030id_/http://www.facebook.com/search.php?q=Abbvie&type=users&init=srp',
'facebook'
)
| 2,136
| 28.273973
| 179
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_360_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_360_an_jian_diao_cha_bi_lu_1576497545():
verify_serp_parsing(
"https://web.archive.org/web/20191216115905id_/https://www.so.com/s?q=%E6%A1%88%E4%BB%B6%E8%B0%83%E6%9F%A5%E7%AC%94%E5%BD%95&src=related_2.6&psid=836a2b7f45bb7fe2b7bcbe6bbe47b867&nlpv=zzzc_base_2",
"360",
)
def test_parse_query_360_chen_teng_tan_han_han_mei_ren_jie_ji_1577810108():
verify_serp_parsing(
"https://web.archive.org/web/20191231163508id_/https://www.so.com/s?ie=utf-8&src=know_side_nlp_sohot_online&q=%E6%B2%88%E8%85%BE%E8%B0%88%E9%9F%A9%E5%AF%92%E6%B2%A1%E4%BA%BA%E6%8E%A5%E6%9C%BA&ob_ext=%7B%22rsv_cq%22%3A%22%5Cu514d%5Cu8d39vpn%22%2C%22rt%22%3A%22%5Cu5b9e%5Cu65f6%5Cu70ed%5Cu641c%22%2C%22rclken%22%3Anull%7D",
"360",
)
def test_parse_query_360_jin_ji_che_hui_1576637402():
verify_serp_parsing(
"https://web.archive.org/web/20191218025002id_/https://www.so.com/s?q=%E7%B4%A7%E6%80%A5%E6%92%A4%E5%9B%9E&src=webcache",
"360",
)
def test_parse_query_360_shi_jie_zui_ai_de_ren_qu_shi_1579483438():
verify_serp_parsing(
"https://web.archive.org/web/20200120012358id_/https://www.so.com/s?ie=utf-8&src=know_side_nlp_sohot_online&q=%E4%B8%96%E7%95%8C%E6%9C%80%E7%9F%AE%E7%9A%84%E4%BA%BA%E5%8E%BB%E4%B8%96&ob_ext=%7B%22rsv_cq%22%3A%22openeuler%22%2C%22rt%22%3A%22%5Cu5b9e%5Cu65f6%5Cu70ed%5Cu641c%22%2C%22rclken%22%3Anull%7D",
"360",
)
def test_parse_query_360_21tian_jian_fei_fa_1653652529():
verify_serp_parsing(
"https://web.archive.org/web/20220527115529id_/https://www.so.com/s?src=lm&ls=s4b0c48ca9b&q=21%E5%A4%A9%E5%87%8F%E8%82%A5%E6%B3%95&lmsid=af298c70a1d01987&lm_extend=ctype%3A20%7Clmbid%3A200%7Cjt%3A2%7Cmaxbid%3A%7Csadspace%3A%7Cadid%3A3444654665",
"360",
)
def test_parse_query_360_site_chaxun_biz_1551742698():
verify_serp_parsing(
"https://web.archive.org/web/20190304233818id_/https://www.so.com/s?q=site:chaxun.biz",
"360",
)
def test_parse_query_360_si_chuan_fa_sheng_6_8ji_di_zhen_1662384982():
verify_serp_parsing(
"https://web.archive.org/web/20220905133622id_/https://www.so.com/s?q=%E5%9B%9B%E5%B7%9D%E5%8F%91%E7%94%9F6.8%E7%BA%A7%E5%9C%B0%E9%9C%87&src=bkrc&srcg=&stat_source=sshot",
"360",
)
def test_parse_query_360_hui_ji_pei_xun_1579668897():
verify_serp_parsing(
"https://web.archive.org/web/20200122045457id_/https://www.so.com/s?src=lm&ls=s521c48db9a&q=%E4%BC%9A%E8%AE%A1%E5%9F%B9%E8%AE%AD&lmsid=59a53f2a01747c88&lm_extend=ctype%3A4%7Clmbid%3A20%2C2%2C37%2C30%2C64%2C74%2C7%2C93%2C101%2C110%2C500%7Cjt%3A2%7Cmaxbid%3A%7Csadspace%3A",
"360",
)
def test_parse_query_360_site_chaxun_biz_1550161171():
verify_serp_parsing(
"https://web.archive.org/web/20190214161931id_/https://www.so.com/s?q=site:chaxun.biz",
"360",
)
def test_parse_query_360_mao_bu_yi_xiao_chou_chao_xi_1576791478():
verify_serp_parsing(
"https://web.archive.org/web/20191219213758id_/https://www.so.com/s?q=%E6%AF%9B%E4%B8%8D%E6%98%93%E6%B6%88%E6%84%81%E6%8A%84%E8%A2%AD&src=related_2.6&psid=ce3a7e94b02d7f56b85d23614a2be0ae&nlpv=mobile_cooccur_v1",
"360",
)
def test_parse_query_360_mei_guo_qian_zheng_1579954288():
verify_serp_parsing(
"https://web.archive.org/web/20200125121128id_/https://www.so.com/s?src=lm&ls=s521c48db9a&q=%E7%BE%8E%E5%9B%BD%E7%AD%BE%E8%AF%81&lmsid=30e88c9397a90e32&lm_extend=ctype%3A4%7Clmbid%3A203%7Cjt%3A2%7Cmaxbid%3A%7Csadspace%3A",
"360",
)
def test_parse_query_360_duo_jia_ju_chang_qu_xiao_yan_chu_1579663841():
verify_serp_parsing(
"https://web.archive.org/web/20200122033041id_/https://www.so.com/s?ie=utf-8&src=know_side_nlp_sohot_online&q=%E5%A4%9A%E5%AE%B6%E5%89%A7%E5%9C%BA%E5%8F%96%E6%B6%88%E6%BC%94%E5%87%BA&ob_ext=%7B%22rsv_cq%22%3A%22%5Cu8003%5Cu7814%5Cu4f5c%5Cu5f0a%22%2C%22rt%22%3A%22%5Cu5b9e%5Cu65f6%5Cu70ed%5Cu641c%22%2C%22rclken%22%3Anull%7D",
"360",
)
def test_parse_query_360_wu_xiu_bo_yang_kun_ju_hui_hai_ge_1577858222():
verify_serp_parsing(
"https://web.archive.org/web/20200101055702id_/https://www.so.com/s?ie=utf-8&src=know_side_nlp_sohot_online&q=%E5%90%B4%E7%A7%80%E6%B3%A2%E6%9D%A8%E5%9D%A4%E8%81%9A%E4%BC%9A%E5%97%A8%E6%AD%8C&ob_ext=%7B%22rsv_cq%22%3A%22%5Cu4e2a%5Cu4eba%5Cu6570%5Cu636e%5Cu6cc4%5Cu9732%22%2C%22rt%22%3A%22%5Cu5b9e%5Cu65f6%5Cu70ed%5Cu641c%22%2C%22rclken%22%3Anull%7D",
"360",
)
def test_parse_query_360_tao_bao_ru_he_che_hui_ping_jie_1576604187():
verify_serp_parsing(
"https://web.archive.org/web/20191217173627id_/https://www.so.com/s?q=%E6%B7%98%E5%AE%9D%E5%A6%82%E4%BD%95%E6%92%A4%E5%9B%9E%E8%AF%84%E4%BB%B7&psid=ddf2765c1c51375b28acdb33c3767871&nlpv=zzzc_base_1&src=pdr_guide_2.6",
"360",
)
def test_parse_query_360_site_chaxun_biz_1550145271():
verify_serp_parsing(
"https://web.archive.org/web/20190214115431id_/https://www.so.com/s?q=site:chaxun.biz",
"360",
)
def test_parse_query_360_ju_min_jia_zhong_fei_jin_bian_fu_1585788499():
verify_serp_parsing(
"https://web.archive.org/web/20200402004819id_/https://www.so.com/s?q=%E5%B1%85%E6%B0%91%E5%AE%B6%E4%B8%AD%E9%A3%9E%E8%BF%9B%E8%9D%99%E8%9D%A0&src=know_side_nlp_sohot&tn=news",
"360",
)
def test_parse_query_360_site_eng_kaz_enacademic_com_1581787441():
verify_serp_parsing(
"https://web.archive.org/web/20200215172401id_/https://www.so.com/s?q=site%3Aeng_kaz.enacademic.com",
"360",
)
def test_parse_query_360_niu_jie_5hao_1576819311():
verify_serp_parsing(
"https://web.archive.org/web/20191220052151id_/https://www.so.com/s?q=%E7%89%9B%E6%9D%B05%E5%8F%B7&src=related_2.6&psid=7023274fd3a925bfe6b5bc6c27debf13&nlpv=zzzc_base_2",
"360",
)
def test_parse_query_360_zhang_bo_zhi_bei_gou_yao_shang_1661090641():
verify_serp_parsing(
"https://web.archive.org/web/20220821140401id_/https://www.so.com/s?ie=utf-8&src=know_side_nlp_sohot_online_clk&q=%E5%BC%A0%E6%9F%8F%E8%8A%9D%E8%A2%AB%E7%8B%97%E5%92%AC%E4%BC%A4&ob_ext=%7B%22rsv_cq%22%3A%22%5Cu6bdb%5Cu4e0d%5Cu6613%22%2C%22rt%22%3A%22%5Cu5b9e%5Cu65f6%5Cu70ed%5Cu641c%22%2C%22rclken%22%3Anull%7D",
"360",
)
def test_parse_query_360_lu_xing_she_tuan_dui_you_zan_ting_1580014331():
verify_serp_parsing(
"https://web.archive.org/web/20200126045211id_/https://www.so.com/s?ie=utf-8&q=%E6%97%85%E8%A1%8C%E7%A4%BE%E5%9B%A2%E9%98%9F%E6%B8%B8%E6%9A%82%E5%81%9C&src=no_result_hot",
"360",
)
| 6,709
| 45.597222
| 358
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/generate_tests.py
|
from collections import defaultdict
from gzip import GzipFile
from io import TextIOWrapper, BytesIO
from json import loads
from math import inf
from pathlib import Path
from random import Random
from re import compile
from textwrap import dedent
from typing import Iterable
from requests import get
from slugify import slugify
from tqdm import tqdm
from warcio import WARCWriter, StatusAndHeaders
from archive_query_log import PROJECT_DIRECTORY_PATH
from archive_query_log.config import SERVICES
from archive_query_log.model import Service, ArchivedQueryUrl
NUM_SERVICES = 11
# Set SERVICE_NAMES to None to instead select the top NUM_SERVICES services by Alexa rank.
SERVICE_NAMES = ["google", "yahoo", "bing", "duckduckgo", "ask", "ecosia"]
NUM_QUERIES_PER_SERVICE = 50
DATA_PATH = Path(
"/mnt/ceph/storage/data-in-progress/data-research/"
"web-search/web-archive-query-log/focused/"
)
SAMPLE_QUERIES_PATH = DATA_PATH / "corpus" / "medium" / "2023-05-22" / "serps"
WARCS_PATH = PROJECT_DIRECTORY_PATH / \
"data/manual-annotations/archived-raw-serps/warcs/"
TESTS_PATH = PROJECT_DIRECTORY_PATH / \
"archive_query_log/results/test/"
PATTERN_SPECIAL_CHARS = compile(r"[^0-9a-z]+")
def main():
if SERVICE_NAMES is None:
services: Iterable[Service] = SERVICES.values()
services = sorted(
services,
key=lambda s: s.alexa_rank if s.alexa_rank is not None else inf,
)
services = services[:NUM_SERVICES]
service_names = [s.name for s in services]
else:
service_names = SERVICE_NAMES
query_urls = defaultdict(list)
for path in tqdm(
list(SAMPLE_QUERIES_PATH.glob("part*.gz")),
desc="Load service queries"
):
# noinspection PyTypeChecker
with GzipFile(path, "rb") as gf, TextIOWrapper(gf) as f:
for line in f:
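# Cheap substring checks to skip lines that lack the required
# fields before paying for a full JSON parse.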
if "\"serp_query_text_url\": \"" not in line:
continue
if "\"serp_warc_relative_path\": \"" not in line:
continue
query_url = loads(line)
if query_url["search_provider_name"] not in service_names:
continue
query_urls[query_url["search_provider_name"]].append(query_url)
print(f"Found {sum(len(urls) for urls in query_urls.values())} SERPs.")
query_urls = {
service_name: Random(0).sample(
query_urls[service_name], min(
NUM_QUERIES_PER_SERVICE,
len(query_urls[service_name]),
)
)
for service_name in service_names
}
print(f"Sampled {sum(len(urls) for urls in query_urls.values())} SERPs.")
print("Generate WARC files.")
schema = ArchivedQueryUrl.schema()
for service_name in service_names:
for query_url in tqdm(
query_urls[service_name], desc=service_name
):
query = query_url["serp_query_text_url"]
query = slugify(query)
query = query[:100]
name = slugify(
f"{service_name}-"
f"{query}-{query_url['serp_timestamp']}"
)
warc_path = WARCS_PATH / f"{name}.warc.gz"
if warc_path.exists():
continue
with warc_path.open("wb") as o:
writer = WARCWriter(o, gzip=True)
archived_query_url = ArchivedQueryUrl(
url=query_url["serp_url"],
timestamp=int(query_url["serp_timestamp"]),
query=query_url["serp_query_text_url"],
page=query_url["serp_page"],
offset=query_url["serp_offset"],
)
url_headers = {
"Archived-URL": schema.dumps(archived_query_url),
}
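# The "Archived-URL" WARC header carries the archived query URL
# metadata; downstream readers (e.g. the WARC index) recover it
# from this header rather than from the record's target URI.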
wayback_raw_url = query_url["serp_wayback_raw_url"]
response = get(
wayback_raw_url,
)
response.raise_for_status()
# noinspection PyProtectedMember
version = str(response.raw.version)
protocol = f"HTTP/{version[0]}.{version[1]}"
request_record = writer.create_warc_record(
uri=str(response.request.url),
record_type="request",
http_headers=StatusAndHeaders(
statusline=" ".join((
response.request.method,
response.request.path_url,
protocol,
)),
headers=response.request.headers,
protocol=protocol,
),
warc_headers_dict={**url_headers},
)
writer.write_record(request_record)
response_record = writer.create_warc_record(
uri=str(response.url),
record_type="response",
http_headers=StatusAndHeaders(
statusline=" ".join((
protocol,
str(response.status_code),
response.reason,
)),
headers=response.headers,
protocol=protocol
),
payload=BytesIO(response.content),
length=len(response.content),
warc_headers_dict={**url_headers},
)
writer.write_record(response_record)
print("Generate test files.")
for service_name in service_names:
if len(query_urls[service_name]) == 0:
continue
test_path = TESTS_PATH / f"test_{service_name}_serp_parsing.py"
if not test_path.exists():
with test_path.open("wt") as o:
o.write(dedent("""
# flake8: noqa
# This file is auto-generated by generate_tests.py.
""").lstrip())
o.write(dedent("""
from archive_query_log.results.test.test_utils import verify_serp_parsing
""").lstrip())
with test_path.open("rt") as f:
existing_tests = {
line.removeprefix("def test_parse_query_").removesuffix("():")
for line in f
if line.startswith("def test_parse_query_")
}
with test_path.open("at") as o:
for query_url in query_urls[service_name]:
wayback_raw_url = query_url["serp_wayback_raw_url"]
query = query_url["serp_query_text_url"]
query = slugify(query)
query = query[:100]
name = slugify(
f"{service_name}_{query}_{query_url['serp_timestamp']}",
separator="_",
)
wayback_raw_url_safe = wayback_raw_url.replace('"', '\\"')
if name in existing_tests:
continue
o.write(dedent(f"""
def test_parse_query_{name}():
verify_serp_parsing(
"{wayback_raw_url_safe}",
"{service_name}",
)
"""))
if __name__ == '__main__':
main()
| 7,349
| 35.934673
| 89
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_google_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_google_taikoo_hui_mandarin_oriental_hotel_guangzhou_1652086766():
verify_serp_parsing(
"https://web.archive.org/web/20220509085926id_/https://www.google.com/search?q=Taikoo+Hui+Mandarin+Oriental+Hotel+Guangzhou",
"google",
)
def test_parse_query_google_finance_1540158323():
verify_serp_parsing(
"https://web.archive.org/web/20181021214523id_/http://www.google.co.uk/search?q=finance&gbv=1&sei=jw_NW8voLJWv0PEPld2DuA8",
"google",
)
def test_parse_query_google_hawaii_part_ii_lyrics_1633318830():
verify_serp_parsing(
"https://web.archive.org/web/20211004034030id_/https://www.google.com/search?q=Hawaii+part+ii+lyrics&sa=X&ved=2ahUKEwjA74_v6arzAhVTwZ4KHfAcBCAQ1QJ6BAgFEAw",
"google",
)
def test_parse_query_google_attack_on_titan_season_3_part_2_1556408967():
verify_serp_parsing(
"https://web.archive.org/web/20190427234927id_/https://www.google.com/search?q=attack+on+titan+season+3+part+2&rlz=1C1CHBF_en-GBGB760GB760&oq=attac&aqs=chrome.0.69i59j0l2j69i65l3.1486j0j4&sourceid=chrome&ie=UTF-8",
"google",
)
def test_parse_query_google_win10_iso_xia_zai_1577717811():
verify_serp_parsing(
"https://web.archive.org/web/20191230145651id_/https://www.google.com/search?q=win10+iso+%E4%B8%8B%E8%BD%BD&tbm=nws&source=lnt&tbs=qdr%3Ay&sa=X&ved=0ahUKEwjurrCb3t3mAhXPjp4KHT3MDxg4FBCnBQgf",
"google",
)
def test_parse_query_google_243_1628713922():
verify_serp_parsing(
"https://web.archive.org/web/20210811203202id_/https://www.google.com/search?q=243",
"google",
)
def test_parse_query_google_does_steve_has_a_beard_1601705030():
verify_serp_parsing(
"https://web.archive.org/web/20201003060350id_/https://www.google.com/search?q=does+steve+has+a+beard&oq=does+steve+has+a+beard&aqs=chrome..69i57j0i22i30l3.6767j0j4&sourceid=chrome&ie=UTF-8",
"google",
)
def test_parse_query_google_why_is_one_foot_slightly_larger_than_the_other_one_1605140430():
verify_serp_parsing(
"https://web.archive.org/web/20201112002030id_/https://www.google.com/search?q=Why%20is%20one%20foot%20slightly%20larger%20than%20the%20other%20one%3F",
"google",
)
def test_parse_query_google_zombie_apocalypse_1565114134():
verify_serp_parsing(
"https://web.archive.org/web/20190806175534id_/https://www.google.com/search?q=zombie+apocalypse&rlz=1C1GCEA_enGB797GB797&source=lnms&tbm=vid&sa=X&ved=0ahUKEwjo4aahg-_jAhUPTcAKHa1NCZ4Q_AUIEigC",
"google",
)
def test_parse_query_google_w_fan_j_li_s_ma_n_tang_and_w_yu_april_2012_towards_certain_fixes_with_editing_rules_and_master_data_the_vldb_journal_21_2_213_238_297_10_1007_s00778_011_0253_7_1614165399():
verify_serp_parsing(
"https://web.archive.org/web/20210224111639id_/https://scholar.google.com/scholar?hl=en&q=W.+Fan%2C+J.+Li%2C+S.+Ma%2C+N.+Tang%2C+and+W.+Yu.+April+2012.+Towards+certain+fixes+with+editing+rules+and+master+data.+The+VLDB+Journal%2C+21(2)%3A+213--238.+297+10.1007%2Fs00778-011-0253-7+",
"google",
)
def test_parse_query_google_susan_boyle_make_me_a_channel_of_your_peace_1607791072():
verify_serp_parsing(
"https://web.archive.org/web/20201212163752id_/http://www.google.ca/search?sa=X&q=Susan+Boyle+Make+Me+a+Channel+of+Your+Peace&stick=H4sIAAAAAAAAAIVUPWjUYBjOnbZe0xZ6qaDcIPF0qBW9Sy4_PZcWi0KhpdLe0ikkX_Ill3z58vOld0lEBOdOgjoKTg4dnRRECg4WcXAQXKy4OSm6dNHaWvOzmSHkyfu8z_t87_vy1UaaEy2zxZG2SAIIZX7cN1RgsMTDJtmtZLFIGRD_HwxEW9NEIhcQKAArOVmS-MDVdyuTR5ADHXdIBibIsJMocafLOxmGURpwsaJkWI_dQISWmWFbsNwUDt2imk3ECGZhLbHjMLLNwipydDvM1QU_4FwgFGHX9rtcFragrQ1DG5SyzVTUSmxFCkvarm5CkpceSG2lnfo5ngNSYA7S0sn9FEVR7oWXdEBwuzhJalkcKtQHWBasPJvHlmaRoi8waQucmxbZsQbNuCyGO7hoC4qDLhDzsBXITsqVBipBvhjZ8ewLGMaWGJR8CxiLYo6tyA263FyGTc4M7O4QlfghAFJpe0iSup2ScRzBbj5fTg-QFJS7JGNRlsrLFiOrEBuKtpLPACqQxwF2CrbAoaPtyNk8GQqlJoFYz5dDk2V-btDh85bLStJ1dLdkXI9BqXTqwJB8qbyujk993f803XhRvb_95kPlWZWeWvY8YqBkzUBqZOg9j7lIj97AUT9KmAY90ai13FZbEm1_hmrQdI6YrQo9vm5EPW_F0_swYe4wt-mxFcPVjJCsQqZH04seQgaI-h5mbtLNxrkWyH-0QgN4od7HpgLQJokOcw7lWfo_nObptb8GhBDB4y9ZgNG16ky16fDK8_cP341u1AdvDw4m_aWFxqVZhj61RJY9oKL6o89XXn38_W1-dpqu9dTYw56b1Km_z_f55vmxCz-Sx1vbe_P1n7MUtdc_s3D2BEvNUNzq_q-7Ow-uLsxS914-3Rk9WatMURNPRi6vbxIVs9e9BBnsiuocvgxWZRctFWMDsR5kN7zNkL11dCP9AYhSBnypBAAA&ved=2ahUKEwixvarn35TtAhXC3J4KHZIqBmYQ-BZ6BAgSEEA",
"google",
)
def test_parse_query_google_muhammed_rashid_1656890873():
verify_serp_parsing(
"https://web.archive.org/web/20220703232753id_/https://scholar.google.com/scholar?q=Muhammed+Rashid",
"google",
)
def test_parse_query_google_a_tumeo_m_branca_l_camerini_a_dual_priority_realtime_multiprocessor_system_on_fpga_for_automotive_ap_1614181186():
verify_serp_parsing(
"https://web.archive.org/web/20210224153946id_/https://scholar.google.com/scholar?hl=en&q=A.+Tumeo%2C+M.+Branca%2C+L.+Camerini%2C+%22A+Dual-Priority+RealTime+Multiprocessor+System+on+FPGA+for+Automotive+Applications%22%2C+Design%2C+Automation+and+Test+in+Europe+(DATE)+2008%2C+pages+1039--1044.+10.1145%2F1403375.1403625+",
"google",
)
def test_parse_query_google_hola_games_1552338270():
verify_serp_parsing(
"https://web.archive.org/web/20190311210430id_/https://www.google.com/search?q=Hola+Games&stick=H4sIAAAAAAAAAONgFuLSz9U3KE7PKCk2UOLVT9c3NEw3M801Kykr1lLOTrbSTy5L10_Ozy0oLUktii_LTEnNT0_MTbUqBrLKEyuLHzHGcQu8_HFPWCp00pqT1xj9uYjRJaTBxeaaV5JZUikkx8UnheQGDQYpHi4kPs8iVmGQ1mKFnMzsVIWSjFSFovz8XACNNlkjuQAAAA&sa=X&ved=0ahUKEwi839n-i_vgAhUGMt8KHUS9AjgQ-BYIUA",
"google",
)
def test_parse_query_google_chi_zhen_nzhong_rong_keteru_1608917828():
verify_serp_parsing(
"https://web.archive.org/web/20201225173708id_/https://www.google.co.jp/search?q=%E6%AD%AF+%E7%9C%9F%E3%82%93%E4%B8%AD%E6%BA%B6%E3%81%91%E3%81%A6%E3%82%8B&ie=UTF-8&oe=UTF-8&hl=ja-jp&client=safari",
"google",
)
def test_parse_query_google_bill_easley_sophisticated_prelude_1626498182():
verify_serp_parsing(
"https://web.archive.org/web/20210717050302id_/https://www.google.com/search?q=bill+easley+sophisticated+prelude&stick=H4sIAAAAAAAAAONgFuLSz9U3MM4wNSspUeLVT9c3NMwyM8jJMCvI0xLLTrbSzy0tzkzWTywqySwusSrOz0svXsSqmJSZk6OQmlick1qpUJxfkAGUy0xOLElNUSgoSs0pTUkFAB_nXk9ZAAAA&sa=X&ved=2ahUKEwjr3cn27-PxAhUTHzQIHXLBBTAQri4wCnoECAoQVA",
"google",
)
def test_parse_query_google_atalanta_bergamasca_calcio_wikipedia_1643703998():
verify_serp_parsing(
"https://web.archive.org/web/20220201082638id_/https://www.google.com/search?q=%22Atalanta+Bergamasca+Calcio%22+-wikipedia&source=lnms&tbm=vid&sa=X&ved=0ahUKEwirydi73J_1AhVCl2oFHdPMDeUQ_AUICygE",
"google",
)
def test_parse_query_google_orlando_bedoya_site_wikipedia_org_site_wikimedia_org_1629326434():
verify_serp_parsing(
"https://web.archive.org/web/20210818224034id_/https://www.google.com/search?safe=off&tbs=sur:fmc&tbm=isch&q=%22Orlando+bedoya%22+-site:wikipedia.org+-site:wikimedia.org",
"google",
)
def test_parse_query_google_flop_thumbs_up_1568839345():
verify_serp_parsing(
"https://web.archive.org/web/20190918204225id_/https://www.google.com/search?q=flop+thumbs+up&oq=flop+thumbs+up&aqs=chrome..69i57j69i60l2j69i61j69i65l2.3098j0j7&sourceid=chrome&ie=UTF-8",
"google",
)
def test_parse_query_google_ocolc_826746_1240020639():
verify_serp_parsing(
"https://web.archive.org/web/20090418021039id_/http://www.google.com/search?q=(OCoLC)826746&safe=vss&vss=1&sa=Google+Search",
"google",
)
| 7,748
| 52.8125
| 1,159
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_github_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
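# NOTE (editorial, assumption): verify_serp_parsing is taken here to parse the archived
# SERP URL passed as the first argument and assert that the parser registered for the
# named service ("github") recovers the search query encoded in that URL.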
def test_parse_query_github_topic_deprecated_org_bandwidth_fork_true_1634361552():
verify_serp_parsing(
"https://web.archive.org/web/20211016051912id_/https://github.com/search?q=topic%3Adeprecated+org%3ABandwidth+fork%3Atrue",
"github",
)
def test_parse_query_github_topic_docker_stack_org_issuu_1549097482():
verify_serp_parsing(
"https://web.archive.org/web/20190202085122id_/https://github.com/search?q=topic%3Adocker-stack+org%3Aissuu&type=Users",
"github",
)
def test_parse_query_github_topic_web_components_org_github_fork_true_1650012538():
verify_serp_parsing(
"https://web.archive.org/web/20220415084858id_/https://github.com/search?q=topic%3Aweb-components+org%3Agithub+fork%3Atrue",
"github",
)
def test_parse_query_github_topic_id_org_codercom_1553274306():
verify_serp_parsing(
"https://web.archive.org/web/20190322170506id_/https://github.com/search?q=topic%3Aid+org%3Acodercom&type=Repositories",
"github",
)
def test_parse_query_github_mastodon_1657854340():
verify_serp_parsing(
"https://web.archive.org/web/20220715030540id_/https://github.com/search?q=mastodon&type=registrypackages",
"github",
)
def test_parse_query_github_licensing_gethhwid_1662581846():
verify_serp_parsing(
"https://web.archive.org/web/20220907201726id_/https://github.com/search?q=licensing_gethhwid&type=repositories",
"github",
)
def test_parse_query_github_subrock_1653099784():
verify_serp_parsing(
"https://web.archive.org/web/20220521022304id_/https://github.com/search?q=subrock",
"github",
)
def test_parse_query_github_jie_shi_de_jing_mi_gong_ye_you_xian_gong_si_1597737683():
verify_serp_parsing(
"https://web.archive.org/web/20200818080123id_/https://github.com/search?q=%E6%9D%B0%E5%A3%AB%E5%BE%B7%E7%B2%BE%E5%AF%86%E5%B7%A5%E4%B8%9A%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8",
"github",
)
def test_parse_query_github_org_zettlr_1591675518():
verify_serp_parsing(
"https://web.archive.org/web/20200609040518id_/https://github.com/search?q=org%3AZettlr+",
"github",
)
def test_parse_query_github_topic_bootswatch_org_sslcom_1552777180():
verify_serp_parsing(
"https://web.archive.org/web/20190316225940id_/https://github.com/search?q=topic%3Abootswatch+org%3ASSLcom&type=Wikis",
"github",
)
| 2,590
| 34.013514
| 184
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_csdn_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_csdn_fifo_1663204137():
verify_serp_parsing(
"https://web.archive.org/web/20220915010857id_/https://so.csdn.net/so/search?q=FIFO&spm=1001.2101.3001.7020",
"csdn",
)
def test_parse_query_csdn_rows_1665572662():
verify_serp_parsing(
"https://web.archive.org/web/20221012110422id_/https://so.csdn.net/so/search?q=rows&spm=1001.2101.3001.7020",
"csdn",
)
def test_parse_query_csdn_android_studio_1660487187():
verify_serp_parsing(
"https://web.archive.org/web/20220814142627id_/https://so.csdn.net/so/search?q=android%20studio",
"csdn",
)
| 775
| 30.04
| 117
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_manual_google_serp_parsing.py
|
# flake8: noqa
from archive_query_log.results.test.test_utils import verify_serp_parsing
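# Hand-written regression cases for Google SERP URLs captured by the Wayback Machine;
# unlike the neighbouring auto-generated test modules, this file is maintained manually.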
def test_9_11_search():
verify_serp_parsing(
'https://web.archive.org/web/20210411015549id_/http://www.google.com/search?hl=en&lr=&ie=ISO-8859-1&q=%22+9/11+revisited%22',
'google'
)
def test_lenin_search():
verify_serp_parsing(
'https://web.archive.org/web/20210227002632id_/http://www.google.com/search?hl=en&lr=&ie=ISO-8859-1&q=%22+Lenin%22',
'google'
)
def test_august_search():
verify_serp_parsing(
'https://web.archive.org/web/20210225064449id_/http://www.google.com/search?hl=en&q=%22%22+%22Agustín',
'google'
)
def test_cortisol_search():
verify_serp_parsing(
'https://web.archive.org/web/20210609022746id_/http://www.google.com/search?hl=en&lr=&ie=ISO-8859-1&safe=off&q=++++%22+Cortisol+test%22',
'google'
)
def test_coxsackie_search():
verify_serp_parsing(
'https://web.archive.org/web/20210330123639id_/http://www.google.com/search?hl=en&ie=ISO-8859-1&q=%22+coxsackie+virus%22_',
'google'
)
def test_homemade_l_search():
verify_serp_parsing(
'https://web.archive.org/web/20210330123719id_/http://www.google.com/search?hl=en&source=hp&ie=ISO-8859-1&q=%22+homemade+l',
'google'
)
def test_homemade_dove_search():
verify_serp_parsing(
'https://web.archive.org/web/20210330123611id_/http://www.google.com/search?hl=en&ie=ISO-8859-1&q=%22+dove%22+%22soap%22+%22',
'google'
)
def test_dead_search():
verify_serp_parsing(
'https://web.archive.org/web/20210224214643id_/http://www.google.com/search?hl=en&lr=&ie=ISO-8859-1&q=%22+dead+cock%22+mortuary',
'google'
)
def test_boston_hotels_search():
verify_serp_parsing(
'https://web.archive.org/web/20030829021921id_/http://www.google.com:80/search?q=%22+www.boston-hotels-cheap.net%22',
'google'
)
def test_extractresult_search():
verify_serp_parsing(
'https://web.archive.org/web/20030831234927id_/http://www.google.com:80/search?q=%22+www.exactresult.+com/%22',
'google'
)
def test_wallstquotes_search():
verify_serp_parsing(
'https://web.archive.org/web/20030829070453id_/http://www.google.com:80/search?q=%22+www.wallstquotes.+com%22',
'google'
)
| 2,384
| 28.8125
| 145
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_baidu_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_baidu_lian_xi_qu_dian_nao_pei_xun_1643390077():
verify_serp_parsing(
"https://web.archive.org/web/20220128171437id_/http://zhannei.baidu.com/cse/site?q=%E6%BF%82%E6%BA%AA%E5%8C%BA%E7%94%B5%E8%84%91%E5%9F%B9%E8%AE%AD&p=0&cc=actwhite.cn",
"baidu",
)
def test_parse_query_baidu_mao_kui_lu_mao_cong_cong_mao_ning_kui_mao_dan_dan_mang_lu_1549835769():
verify_serp_parsing(
"https://web.archive.org/web/20190210215609id_/https://www.baidu.com/s?wd=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&oq=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&ie=utf-8&rsv_pq=87216f54001538da&rsv_t=19e3cg%2FMtz92Yq09CCIJaki1%2BUL6aGQ6QJQBgVzprgVnaNRLdUeIL6teIzY&rqlang=cn&nojs=1&bqid=87216f54001538da",
"baidu",
)
def test_parse_query_baidu_e_eeeae_1569971117():
verify_serp_parsing(
"https://web.archive.org/web/20191001230517id_/https://www.baidu.com/s?wd=%C3%A9%C2%98%C2%BF%C3%A9%C2%87%C2%8C%C3%A8%C2%9C%C2%98%C3%A8%C2%9B%C2%9B%C3%A6%C2%B1",
"baidu",
)
def test_parse_query_baidu_e_eeeae_1544443888():
verify_serp_parsing(
"https://web.archive.org/web/20181210121128id_/https://www.baidu.com/s?wd=%C3%A9%C2%98%C2%BF%C3%A9%C2%87%C2%8C%C3%A8%C2%9C%C2%98%C3%A8%C2%9B%C2%9B%C3%A6%C2%B1",
"baidu",
)
def test_parse_query_baidu_mao_kui_lu_mao_cong_cong_mao_ning_kui_mao_dan_dan_mang_lu_1564025418():
verify_serp_parsing(
"https://web.archive.org/web/20190725033018id_/https://www.baidu.com/s?wd=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&oq=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&ie=utf-8&rsv_pq=d25fbc8d000b4e82&rsv_t=fc63O4O%2FKyZwE5v5jDt8lv18HT8dHQWj2Go%2BcrYpMDHze7HivVjCvAF%2F6R0&rqlang=cn&nojs=1&bqid=d25fbc8d000b4e82",
"baidu",
)
def test_parse_query_baidu_e_eeeae_1547510808():
verify_serp_parsing(
"https://web.archive.org/web/20190115000648id_/https://www.baidu.com/s?wd=%C3%A9%C2%98%C2%BF%C3%A9%C2%87%C2%8C%C3%A8%C2%9C%C2%98%C3%A8%C2%9B%C2%9B%C3%A6%C2%B1",
"baidu",
)
def test_parse_query_baidu_e_eeeae_1567878230():
verify_serp_parsing(
"https://web.archive.org/web/20190907174350id_/https://www.baidu.com/s?wd=%C3%A9%C2%98%C2%BF%C3%A9%C2%87%C2%8C%C3%A8%C2%9C%C2%98%C3%A8%C2%9B%C2%9B%C3%A6%C2%B1",
"baidu",
)
def test_parse_query_baidu_mao_kui_lu_mao_cong_cong_mao_ning_kui_mao_dan_dan_mang_lu_1553838201():
verify_serp_parsing(
"https://web.archive.org/web/20190329054321id_/https://www.baidu.com/s?wd=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&oq=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&ie=utf-8&rsv_pq=ce502b7c00017f4e&rsv_t=429fdL0VhHNPGYvy2nJwvedkWEHrVvYOK1YdtSoHeKxoh77YPXsd%2FA%2FeSOo&rqlang=cn&nojs=1&bqid=ce502b7c00017f4e",
"baidu",
)
def test_parse_query_baidu_e_eeeae_1558963051():
verify_serp_parsing(
"https://web.archive.org/web/20190527131731id_/https://www.baidu.com/s?wd=%C3%A9%C2%98%C2%BF%C3%A9%C2%87%C2%8C%C3%A8%C2%9C%C2%98%C3%A8%C2%9B%C2%9B%C3%A6%C2%B1",
"baidu",
)
def test_parse_query_baidu_zhong_guo_lian_tong_smsc_1372160086():
verify_serp_parsing(
"https://web.archive.org/web/20130625113446id_/http://www.baidu.com/s?wd=%E4%B8%AD%E5%9B%BD%E8%81%94%E9%80%9A%20smsc&tn=baidufir",
"baidu",
)
def test_parse_query_baidu_sexinsex_1_2_1213858525():
verify_serp_parsing(
"https://web.archive.org/web/20080619065525id_/http://www.baidu.com:80/s?wd=sexinsex%B4%F3%C2%BD%C3%E2%B7%D1%C8%EB%BF%DA&lm=0&si=&rn=10&ie=gb2312&ct=0&cl=3&f=1&rsp=9",
"baidu",
)
def test_parse_query_baidu_mao_ning_kui_mao_dan_dan_mang_long_lou_mang_lu_shikato_1537858258():
verify_serp_parsing(
"https://web.archive.org/web/20180925065058id_/https://www.baidu.com/s?wd=%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E6%B0%93%E9%99%87%E6%90%82%E6%B0%93%E8%B5%82%E8%81%A2&oq=%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E6%B0%93%E9%99%87%E6%90%82%E6%B0%93%E8%B5%82%E8%81%A2&ie=utf-8&rsv_pq=9f20a69b00016e59&rsv_t=4e92hBffxcH%2BZ67tApMjf2jcV67jPo6GzU42zK0wG0%2Ff6aF%2F68rl5AeYfy8&rqlang=cn&nojs=1&bqid=9f20a69b00016e59",
"baidu",
)
def test_parse_query_baidu_long_shi_liang_1639014949():
verify_serp_parsing(
"https://web.archive.org/web/20211209015549id_/https://wenku.baidu.com/search?lm=0&od=0&ie=utf-8&dyTabStr=MCwzLDQsMSw2LDUsNyw4LDIsOQ%3D%3D&word=%22%E9%BE%8D%E4%B8%96%E6%A8%91%22",
"baidu",
)
def test_parse_query_baidu_mao_kui_lu_mao_cong_cong_mao_ning_kui_mao_dan_dan_mang_lu_1558979495():
verify_serp_parsing(
"https://web.archive.org/web/20190527175135id_/https://www.baidu.com/s?wd=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&oq=%E8%8C%85%E8%81%B5%E9%A9%B4%E8%8C%85%E8%81%A1%E8%81%A6%E7%8C%AB%E8%81%B9%E8%81%B5%E7%8C%AB%E8%81%B8%E8%81%B8%E5%BF%99%E5%8D%A4&ie=utf-8&rsv_pq=8904574900082530&rsv_t=9bbeaAnORF%2BIrKzJVAMdIO%2B5XlN26aMHDddm0oqJPVHpCwMWT1aVQIDgsHA&rqlang=cn&nojs=1&bqid=8904574900082530",
"baidu",
)
def test_parse_query_baidu_eru_1525597474():
verify_serp_parsing(
"https://web.archive.org/web/20180506090434id_/http://tieba.baidu.com:80/f?kw=eru&ie=utf-8",
"baidu",
)
def test_parse_query_baidu_e_eeeae_1554431691():
verify_serp_parsing(
"https://web.archive.org/web/20190405023451id_/https://www.baidu.com/s?wd=%C3%A9%C2%98%C2%BF%C3%A9%C2%87%C2%8C%C3%A8%C2%9C%C2%98%C3%A8%C2%9B%C2%9B%C3%A6%C2%B1",
"baidu",
)
def test_parse_query_baidu_lian_yun_gang_qi_yang_yue_zi_hui_suo_zhong_xin_jing_zhun_ke_hu_ying_xiao_key668_cn_1537259978():
verify_serp_parsing(
"https://web.archive.org/web/20180918083938id_/https://www.baidu.com/s?wd=%E8%BF%9E%E4%BA%91%E6%B8%AF%E9%AA%90%E6%89%AC%E6%9C%88%E5%AD%90%E4%BC%9A%E6%89%80%E4%B8%AD%E5%BF%83%E7%B2%BE%E5%87%86%E5%AE%A2%E6%88%B7%E8%90%A5%E9%94%80key668.cn",
"baidu",
)
def test_parse_query_baidu_long_feng_qu_qi_quan_qi_huo_pei_xun_1642853767():
verify_serp_parsing(
"https://web.archive.org/web/20220122121607id_/http://zhannei.baidu.com/cse/site?q=%E9%BE%99%E5%87%A4%E5%8C%BA%E6%9C%9F%E6%9D%83%E6%9C%9F%E8%B4%A7%E5%9F%B9%E8%AE%AD&p=0&cc=actunderstand.cn",
"baidu",
)
def test_parse_query_baidu_e_eeeae_1522165852():
verify_serp_parsing(
"https://web.archive.org/web/20180327155052id_/https://www.baidu.com/s?wd=%C3%A9%C2%98%C2%BF%C3%A9%C2%87%C2%8C%C3%A8%C2%9C%C2%98%C3%A8%C2%9B%C2%9B%C3%A6%C2%B1",
"baidu",
)
def test_parse_query_baidu_shang_hai_11xuan_5ding_dan_wei_xin_weiwei776699_1545685930():
verify_serp_parsing(
"https://web.archive.org/web/20181224211210id_/https://wenku.baidu.com/search?word=%E4%B8%8A%E6%B5%B711%E9%80%895%E5%AE%9A%E8%83%86%2B%E3%80%96%E5%BE%AE%E4%BF%A1weiwei776699%E3%80%97%E2%94%A0&ie=utf-8&lm=0&od=0",
"baidu",
)
| 7,569
| 51.569444
| 484
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_yandex_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_yandex_speed_force_1535973684():
verify_serp_parsing(
"https://web.archive.org/web/20180903112124id_/https://yandex.ru/images/search?p=21&text=speed%20force&img_url=https%3A%2F%2Fi.ytimg.com%2Fvi%2Fy2J5Bq71emM%2Fmqdefault.jpg&pos=648&rpt=simage",
"yandex",
)
def test_parse_query_yandex_speed_force_1535970436():
verify_serp_parsing(
"https://web.archive.org/web/20180903102716id_/https://yandex.ru/images/search?p=12&text=speed%20force&img_url=https%3A%2F%2Fmedia.dayoftheshirt.com%2Fimages%2Fshirts%2F2rR4SxCzB2Yv%2Fonceuponatee_speed-force-ii_1459188765.full.png&pos=374&rpt=simage",
"yandex",
)
def test_parse_query_yandex_speed_force_1536100368():
verify_serp_parsing(
"https://web.archive.org/web/20180904223248id_/https://yandex.ru/images/search?p=16&text=speed%20force&img_url=http%3A%2F%2Fs3.birthmoviesdeath.com%2Fimages%2Fmade%2FFFSF4_1200_1220_81_s.jpg&pos=502&rpt=simage",
"yandex",
)
def test_parse_query_yandex_spirited_away_animated_film_2001_1524478207():
verify_serp_parsing(
"https://web.archive.org/web/20180423101007id_/https://yandex.ru/video/search?text=spirited%20away%20animated%20film%202001&source=recommendations&path=main&personal=false&autoplay=1",
"yandex",
)
def test_parse_query_yandex_speed_force_1536042339():
verify_serp_parsing(
"https://web.archive.org/web/20180904062539id_/https://yandex.ru/images/search?p=2&text=speed%20force&img_url=https%3A%2F%2Fandroidapplications.ru%2Fuploads%2Fposts%2F2017-04%2F1492531591_speed_force_race_-_gonki-6.jpg&pos=82&rpt=simage",
"yandex",
)
def test_parse_query_yandex_danses_tv_periodic_1534841786():
verify_serp_parsing(
"https://web.archive.org/web/20180821085626id_/https://yandex.ru/video/search?text=danses%20tv%20periodic&source=recommendations&path=main&within=9&personal=false",
"yandex",
)
def test_parse_query_yandex_virat_swaroop_1523992633():
verify_serp_parsing(
"https://web.archive.org/web/20180417191713id_/https://yandex.ru/images/search?text=virat%20swaroop",
"yandex",
)
def test_parse_query_yandex_niusha_1515577404():
verify_serp_parsing(
"https://web.archive.org/web/20180110094324id_/https://yandex.ru/video/search?text=%D0%9D%D1%8E%D1%88%D0%B0&source=oo&entref=0oCgpydXcyNDk5MjQzEg9ydXczOTUwNDctYXNzb2MYAt5s7-k&parent-reqid=1515552693812954-1072433413594552848811520-vla1-2341-V",
"yandex",
)
def test_parse_query_yandex_speed_force_1535955246():
verify_serp_parsing(
"https://web.archive.org/web/20180903061406id_/https://yandex.ru/images/search?p=4&text=speed%20force&img_url=https%3A%2F%2Fwww.old-games.ru%2Fforum%2Fattachments%2Faveg-by_z_2016_01_04_20_06_43_66ae17e6-jpg.98982%2F&pos=132&uinfo=sw-1280-sh-800-ww-1280-wh-649-pd-1-wp-16x10_1280x800&rpt=simage",
"yandex",
)
def test_parse_query_yandex_speed_force_1535895408():
verify_serp_parsing(
"https://web.archive.org/web/20180902133648id_/https://yandex.ru/images/search?p=15&text=speed%20force&img_url=https%3A%2F%2Fget.wallhere.com%2Fphoto%2Fillustration-anime-death-God-Flash-comics-ART-color-comic-book-fiction-5714.jpg&pos=454&rpt=simage",
"yandex",
)
| 3,440
| 45.5
| 304
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_naver_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_naver_11548566_973003263():
verify_serp_parsing(
"https://web.archive.org/web/20001031144103id_/http://search.naver.com:80/search.naver?where=web&type=3&query=11548566",
"naver",
)
def test_parse_query_naver_monteria_montheria_bunyupoteu_jeongitipoteu_jeongijujeonja_bunyupoteugi_1200ml_1200mlpingkeu_hugi_1640713881():
verify_serp_parsing(
"https://web.archive.org/web/20211228175121id_/https://search.naver.com/search.naver?query=%EB%AA%AC%ED%85%8C%EB%A6%AC%EC%95%84%20Montheria%20%EB%B6%84%EC%9C%A0%ED%8F%AC%ED%8A%B8%20%EC%A0%84%EA%B8%B0%ED%8B%B0%ED%8F%AC%ED%8A%B8%20%EC%A0%84%EA%B8%B0%EC%A3%BC%EC%A0%84%EC%9E%90%20%EB%B6%84%EC%9C%A0%ED%8F%AC%ED%8A%B8%EA%B8%B0%201200ML,%201200ml%ED%95%91%ED%81%AC+%ED%9B%84%EA%B8%B0",
"naver",
)
def test_parse_query_naver_heolkeu_dari_gajin_namja_1397449417():
verify_serp_parsing(
"https://web.archive.org/web/20140414042337id_/http://search.naver.com/search.naver?where=nexearch&query=%ED%97%90%ED%81%AC+%EB%8B%A4%EB%A6%AC+%EA%B0%80%EC%A7%84+%EB%82%A8%EC%9E%90&ie=utf8&sm=nws_htk",
"naver",
)
def test_parse_query_naver_10559239_974530320():
verify_serp_parsing(
"https://web.archive.org/web/20001118065200id_/http://search.naver.com:80/search.naver?where=web&type=3&query=10559239",
"naver",
)
def test_parse_query_naver_junggangosagongbu_1627424756():
verify_serp_parsing(
"https://web.archive.org/web/20210727222556id_/https://search.naver.com/search.naver?where=view&sm=tab_viw.all&query=%EC%A4%91%EA%B0%84%EA%B3%A0%EC%82%AC%EA%B3%B5%EB%B6%80",
"naver",
)
def test_parse_query_naver_dpsxmfl_1632292908():
verify_serp_parsing(
"https://web.archive.org/web/20210922064148id_/https://search.naver.com/search.naver?where=nexearch&query=dpsxmfl&x=31&y=5&frm=t1",
"naver",
)
def test_parse_query_naver_sejongmunhwahoegwan_daegeugjang_1652993423():
verify_serp_parsing(
"https://web.archive.org/web/20220519205023id_/https://search.naver.com/search.naver?where=image&sm=tab_jum&query=%EC%84%B8%EC%A2%85%EB%AC%B8%ED%99%94%ED%9A%8C%EA%B4%80+%EB%8C%80%EA%B7%B9%EC%9E%A5",
"naver",
)
def test_parse_query_naver_geobugseon_1439284748():
verify_serp_parsing(
"https://web.archive.org/web/20150811091908id_/http://search.naver.com/search.naver?where=nexearch&query=%EA%B1%B0%EB%B6%81%EC%84%A0&sm=top_lve&ie=utf8",
"naver",
)
def test_parse_query_naver_z_974687640():
verify_serp_parsing(
"https://web.archive.org/web/20001120023400id_/http://search.naver.com:80/search.naver?where=nexearch&query=%C0%E5%BF%DC+%B9%DF%B8%C5%BC%D2",
"naver",
)
def test_parse_query_naver_t_1047137339():
verify_serp_parsing(
"https://web.archive.org/web/20030308152859id_/http://search.naver.com:80/search.naver?where=nexearch&query=%bd%ba%c6%ae%b8%b3%bc%ee%ba%b8%bf%a9%c1%e0",
"naver",
)
| 3,121
| 41.189189
| 388
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_aliexpress_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_aliexpress_barefoot_accessories_1508385234():
verify_serp_parsing(
"https://web.archive.org/web/20171019035354id_/https://www.aliexpress.com/w/wholesale-barefoot-accessories.html?site=glo&g=y&SearchText=barefoot+accessories&pvId=200001034-361187&attrRel=or",
"aliexpress",
)
def test_parse_query_aliexpress_sterling_silver_gemstone_1384701071():
verify_serp_parsing(
"https://web.archive.org/web/20131117151111id_/http://www.aliexpress.com/wholesale?SearchText=sterling+silver+gemstone&initiative_id=RS_20131116203418",
"aliexpress",
)
def test_parse_query_aliexpress_women_messenger_bags_1397198302():
verify_serp_parsing(
"https://web.archive.org/web/20140411063822id_/http://www.aliexpress.com/w/wholesale-Women-Messenger-Bags.html?g=y&SearchText=Women%2BMessenger%2BBags&CatId=100002612&pvId=200000329-200004094&shipCountry=us&initiative_id=SB_20130813182932",
"aliexpress",
)
def test_parse_query_aliexpress_q5_phone_1374387535():
verify_serp_parsing(
"https://web.archive.org/web/20130721061855id_/http://www.aliexpress.com/wholesale/wholesale-q5-phone.html?SearchText=q5%2Bphone&CatId=0&SortType=total_weight_score_desc",
"aliexpress",
)
def test_parse_query_aliexpress_sterling_silver_pandora_charms_1457090417():
verify_serp_parsing(
"https://web.archive.org/web/20160304112017id_/http://www.aliexpress.com:80/w/wholesale-sterling-silver-pandora-charms.html?site=glo&shipCountry=us&g=y&SearchText=sterling+silver+pandora+charms&pvId=14-496&CatId=200160001",
"aliexpress",
)
def test_parse_query_aliexpress_children_school_bag_1388612710():
verify_serp_parsing(
"https://web.archive.org/web/20140101214510id_/http://www.aliexpress.com/w/wholesale-CHILDREN-SCHOOL-BAG.html?SearchText=CHILDREN%2BSCHOOL%2BBAG&CatId=380520&initiative_id=SB_20130923225029&SortType=total_weight_score_desc",
"aliexpress",
)
def test_parse_query_aliexpress_transmission_shaft_1435605057():
verify_serp_parsing(
"https://web.archive.org/web/20150629191057id_/http://www.aliexpress.com/w/wholesale-transmission-shaft.html?SearchText=transmission%2Bshaft&CatId=0&pvId=200001211-2277&shipCountry=us",
"aliexpress",
)
def test_parse_query_aliexpress_jewelry_sets_1390214039():
verify_serp_parsing(
"https://web.archive.org/web/20140120103359id_/http://www.aliexpress.com/w/wholesale-Jewelry-Sets.html?g=y&SearchText=Jewelry%2BSets&CatId=100006750&pvId=200000785-200003778%2C20252-1551&initiative_id=SB_20130731213711",
"aliexpress",
)
def test_parse_query_aliexpress_kids_wear_1293514157():
verify_serp_parsing(
"https://web.archive.org/web/20101228052917id_/http://www.aliexpress.com:80/wholesale?SearchText=kids+wear&CatId=0",
"aliexpress",
)
def test_parse_query_aliexpress_waterproof_winter_boots_women_1500418630():
verify_serp_parsing(
"https://web.archive.org/web/20170718225710id_/https://www.aliexpress.com/w/wholesale-waterproof-winter-boots-women.html?site=glo&g=y&SearchText=waterproof+winter+boots+women&pvId=200009123-1886&attrRel=or",
"aliexpress",
)
| 3,369
| 44.540541
| 248
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_xvideos_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_xvideos_drunk_1325431509():
verify_serp_parsing(
"https://web.archive.org/web/20120101152509id_/http://www.xvideos.com:80/?k=drunk",
"xvideos",
)
def test_parse_query_xvideos_escola_sao_luis_1657929903():
verify_serp_parsing(
"https://web.archive.org/web/20220716000503id_/https://www.xvideos.com/?k=escola+s%C3%A3o+lu%C3%ADs&sort=relevance&datef=all",
"xvideos",
)
def test_parse_query_xvideos_free_hardcore_porn_videos_japanese_video_1428541087():
verify_serp_parsing(
"https://web.archive.org/web/20150409005807id_/http://www.xvideos.com/?k=free-hardcore-porn-videos+japanese+video",
"xvideos",
)
def test_parse_query_xvideos_gay_whitezilla_go_to_www_lovesbo_ru_1643416941():
verify_serp_parsing(
"https://web.archive.org/web/20220129004221id_/https://www.xvideos.com/?k=gay+whitezilla+%E2%8F%A9+go+to+%E2%9D%A4%EF%B8%8F%EF%B8%8F+www.lovesbo.ru&sort=relevance&datef=week&typef=gay",
"xvideos",
)
def test_parse_query_xvideos_robber_1248193170():
verify_serp_parsing(
"https://web.archive.org/web/20090721161930id_/http://www.xvideos.com:80/?k=robber",
"xvideos",
)
def test_parse_query_xvideos_casey_cavert_go_to_www_lovesbo_ru_1642784433():
verify_serp_parsing(
"https://web.archive.org/web/20220121170033id_/https://www.xvideos.com/?k=casey+cavert+%E2%8F%A9+go+to+%E2%9D%A4%EF%B8%8F%EF%B8%8F+www.lovesbo.ru&sort=relevance&datef=week",
"xvideos",
)
def test_parse_query_xvideos_gay_peeing_porn_go_to_www_lovesbo_ru_1643357466():
verify_serp_parsing(
"https://web.archive.org/web/20220128081106id_/https://www.xvideos.com/?k=gay+peeing+porn+%E2%8F%A9+go+to+%E2%9D%A4%EF%B8%8F%EF%B8%8F+www.lovesbo.ru&sort=rating&typef=gay",
"xvideos",
)
def test_parse_query_xvideos_evander_marius_go_to_www_lovesbo_ru_1642716634():
verify_serp_parsing(
"https://web.archive.org/web/20220120221034id_/https://www.xvideos.com/?k=evander+marius+%E2%8F%A9+go+to+%E2%9D%A4%EF%B8%8F%EF%B8%8F+www.lovesbo.ru&sort=relevance&datef=3month",
"xvideos",
)
def test_parse_query_xvideos_claire_dames_1325320932():
verify_serp_parsing(
"https://web.archive.org/web/20111231084212id_/http://www.xvideos.com:80/?k=claire+dames",
"xvideos",
)
def test_parse_query_xvideos_solo_1341894522():
verify_serp_parsing(
"https://web.archive.org/web/20120710042842id_/http://www.xvideos.com:80/?k=solo&sort=relevance&durf=1-3min&datef=all",
"xvideos",
)
| 2,736
| 35.986486
| 193
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_yahoo_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_yahoo_diver_lg_u8180_1620023310():
verify_serp_parsing(
"https://web.archive.org/web/20210503062830id_/https://it.search.yahoo.com/search?p=+diver+LG+U8180&fr=yfp-t-501&ei=UTF-8&rd=r1",
"yahoo",
)
def test_parse_query_yahoo_civil_code_1658358754():
verify_serp_parsing(
"https://web.archive.org/web/20220720231234id_/https://search.yahoo.com/search?p=Civil%20Code",
"yahoo",
)
def test_parse_query_yahoo_world_greek_kata_periokhe_boreia_amerike_kanadas_1647085990():
verify_serp_parsing(
"https://web.archive.org/web/20220312115310id_/https://search.yahoo.com/search?p=World+Greek+%CE%9A%CE%B1%CF%84%CE%AC+%CE%A0%CE%B5%CF%81%CE%B9%CE%BF%CF%87%CE%AE+%CE%92%CF%8C%CF%81%CE%B5%CE%B9%CE%B1+%CE%91%CE%BC%CE%B5%CF%81%CE%B9%CE%BA%CE%AE+%CE%9A%CE%B1%CE%BD%CE%B1%CE%B4%CE%AC%CF%82&ei=UTF-8&nojs=1",
"yahoo",
)
def test_parse_query_yahoo_lm_1376048795():
verify_serp_parsing(
"https://web.archive.org/web/20130809114635id_/http://search.yahoo.com/search?p=%3Blm",
"yahoo",
)
def test_parse_query_yahoo_ashland_oregon_1015423512():
verify_serp_parsing(
"https://web.archive.org/web/20020306140512id_/http://search.yahoo.com:80/search?p=ashland+oregon+",
"yahoo",
)
def test_parse_query_yahoo_lawyer_career_information_970996615():
verify_serp_parsing(
"https://web.archive.org/web/20001008091655id_/http://search.yahoo.com:80/search?p=lawyer+career+information",
"yahoo",
)
def test_parse_query_yahoo_world_hebrew_hbrh_shlvm_1489427128():
verify_serp_parsing(
"https://web.archive.org/web/20170313174528id_/https://search.yahoo.com/search?p=World+Hebrew+%D7%97%D7%91%D7%A8%D7%94+%D7%A9%D7%9C%D7%95%D7%9D",
"yahoo",
)
def test_parse_query_yahoo_john_mccain_956454509():
verify_serp_parsing(
"https://web.archive.org/web/20000423014829id_/http://search.yahoo.com:80/search?p=John+McCain&hn=48",
"yahoo",
)
def test_parse_query_yahoo_bc_gov_971492707():
verify_serp_parsing(
"https://web.archive.org/web/20001014030507id_/http://search.yahoo.com:80/search?p=bc+gov",
"yahoo",
)
def test_parse_query_yahoo_region_de_coquimbo_970960857():
verify_serp_parsing(
"https://web.archive.org/web/20001007232057id_/http://search.yahoo.com:80/search?p=region+de+coquimbo",
"yahoo",
)
def test_parse_query_yahoo_learn_to_focus_1619373855():
verify_serp_parsing(
"https://web.archive.org/web/20210425180415id_/https://search.yahoo.com/search?p=learn+to+focus&fr2=sb-top&sao=1",
"yahoo",
)
def test_parse_query_yahoo_castle_heights_angeles_1611631735():
verify_serp_parsing(
"https://web.archive.org/web/20210126032855id_/http://search.yahoo.com/search?fr=sbcfp&p=%22castle+heights%22+angeles",
"yahoo",
)
def test_parse_query_yahoo_dunwells_army_of_friends_1617218301():
verify_serp_parsing(
"https://web.archive.org/web/20210331191821id_/https://search.yahoo.com/search?p=dunwells+army+of+friends&fr=iphone&.tsrc=apple&pcarrier=AT&T&pmcc=310&pmnc=410",
"yahoo",
)
def test_parse_query_yahoo_sam_bush_1016625678():
verify_serp_parsing(
"https://web.archive.org/web/20020320120118id_/http://search.yahoo.com:80/search?p=Sam+Bush",
"yahoo",
)
def test_parse_query_yahoo_telekwiaciarnia_pl_1436841282():
verify_serp_parsing(
"https://web.archive.org/web/20150714023442id_/http://nl.search.yahoo.com/search?fr=yhs-invalid&p=telekwiaciarnia.pl",
"yahoo",
)
def test_parse_query_yahoo_jello_gelatin_971145484():
verify_serp_parsing(
"https://web.archive.org/web/20001010023804id_/http://search.yahoo.com:80/search?p=jello+gelatin",
"yahoo",
)
def test_parse_query_yahoo_molly_shannon_1640079691():
verify_serp_parsing(
"https://web.archive.org/web/20211221094131id_/https://search.yahoo.com/search?p=Molly%20Shannon",
"yahoo",
)
def test_parse_query_yahoo_dating_sites_1647656764():
verify_serp_parsing(
"https://web.archive.org/web/20220319022604id_/https://search.yahoo.com/search?ei=UTF-8&p=dating+sites&fr2=p%3As%2Cv%3Asfp%2Cm%3Aovncp&fr=sfp",
"yahoo",
)
def test_parse_query_yahoo_metallurgi_1042532635():
verify_serp_parsing(
"https://web.archive.org/web/20030114082355id_/http://search.yahoo.com:80/search?p=metallurgi",
"yahoo",
)
def test_parse_query_yahoo_futurama_1314932107():
verify_serp_parsing(
"https://web.archive.org/web/20110902025507id_/http://tv.yahoo.com:80/search?cs=bz&p=Futurama&fr=tv-tts",
"yahoo",
)
| 4,884
| 32.923611
| 309
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_weibo_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_weibo_sheng_fu_lang_xi_si_1603546157():
verify_serp_parsing(
"https://web.archive.org/web/20201024132917id_/https://s.weibo.com/weibo?q=%23%E5%9C%A3%C2%B7%E5%BC%97%E6%9C%97%E8%A5%BF%E6%96%AF%23&xsort=hot&Refer=hotmore",
"weibo",
)
def test_parse_query_weibo_bu_xiang_jiao_hun_li_de_fen_zi_qian_refer_focus_lx_stopic_box_1527256388():
verify_serp_parsing(
"https://web.archive.org/web/20180525135308id_/http://s.weibo.com:80/weibo/%E4%B8%8D%E6%83%B3%E4%BA%A4%E5%A9%9A%E7%A4%BC%E7%9A%84%E4%BB%BD%E5%AD%90%E9%92%B1&Refer=focus_lx_STopic_box",
"weibo",
)
def test_parse_query_weibo_ji_huo_ma_b_1_page_2_1433297692():
verify_serp_parsing(
"https://web.archive.org/web/20150603021452id_/http://s.weibo.com:80/weibo/%E6%BF%80%E6%B4%BB%E7%A0%81&b=1&page=2",
"weibo",
)
def test_parse_query_weibo_li_wei_yi_refer_user_weibo_1517733046():
verify_serp_parsing(
"https://web.archive.org/web/20180204083046id_/http://s.weibo.com:80/weibo/%E6%9D%8E%E5%BE%AE%E6%BC%AA&Refer=user_weibo",
"weibo",
)
def test_parse_query_weibo_hun_li_li_jie_refer_stopic_box_1505917853():
verify_serp_parsing(
"https://web.archive.org/web/20170920143053id_/http://s.weibo.com:80/weibo/%E5%A9%9A%E7%A4%BC%E7%A4%BC%E8%8A%82&Refer=STopic_box",
"weibo",
)
def test_parse_query_weibo_kan_bu_jian_de_yan_pi_1409920649():
verify_serp_parsing(
"https://web.archive.org/web/20140905123729id_/http://s.weibo.com:80/weibo/%E7%9C%8B%E4%B8%8D%E8%A7%81%E5%BE%97%E7%9C%BC%E7%9A%AE?",
"weibo",
)
def test_parse_query_weibo_xiao_xiao_bin_refer_stopic_box_1518290095():
verify_serp_parsing(
"https://web.archive.org/web/20180210191455id_/http://s.weibo.com:80/weibo/%E5%B0%8F%E5%B0%8F%E5%BD%AC&Refer=STopic_box",
"weibo",
)
def test_parse_query_weibo_meng_lin_dexiao_wu_1437753277():
verify_serp_parsing(
"https://web.archive.org/web/20150724155437id_/http://s.weibo.com:80/weibo/%E5%AD%9F%E6%9E%97de%E5%B0%8F%E5%B1%8B?",
"weibo",
)
def test_parse_query_weibo_fu_cai_3dshu_ju_jia_wei_xin_xw639_2018_1555261848():
verify_serp_parsing(
"https://web.archive.org/web/20190414171048id_/https://s.weibo.com/weibo/%E7%A6%8F%E5%BD%A93d%E6%95%B0%E6%8D%AE%E5%8A%A0%E5%BE%AE%E2%98%85%E4%BF%A1xw639-2018%E2%80%BB",
"weibo",
)
def test_parse_query_weibo_quan_ye_cha_page_13_1517932436():
verify_serp_parsing(
"https://web.archive.org/web/20180206155356id_/http://s.weibo.com:80/weibo/%E7%8A%AC%E5%A4%9C%E5%8F%89&page=13",
"weibo",
)
| 2,790
| 36.716216
| 192
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_linkedin_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_linkedin_vizthink_1229875736():
verify_serp_parsing(
"https://web.archive.org/web/20081221160856id_/http://www.linkedin.com:80/search?search=&sortCriteria=3&keywords=VizThink&goback=%2Esrp_1_1211730310191_in",
"linkedin",
)
def test_parse_query_linkedin_parent_information_center_of_nj_https_www_linkedin_com_feed_1537815970():
verify_serp_parsing(
"https://web.archive.org/web/20180924190610id_/https://www.linkedin.com/search/results/all/?keywords=Parent%20Information%20Center%20of%20NJ/../https://www.linkedin.com/feed/",
"linkedin",
)
def test_parse_query_linkedin_g_d_goenka_international_school_surat_wisdom_valley_campus_nr_anuvrat_dwar_new_city_light_road_rcc_canal_road_bar_surat_gujrat_1568400864():
verify_serp_parsing(
"https://web.archive.org/web/20190913185424id_/https://www.linkedin.com/search/results/all/?keywords=G.%20D.%20Goenka%20International%20School%20Surat%20Wisdom%20Valley%20campus,%20Nr.%20Anuvrat%20Dwar,%20New%20City%20Light%20Road,%20RCC%20Canal%20Road,%20Bar%20Surat%20GUJRAT",
"linkedin",
)
def test_parse_query_linkedin_test_1563279735():
verify_serp_parsing(
"https://web.archive.org/web/20190716122215id_/https://www.linkedin.com/search/results/content/?keywords=test&origin=test%20UNION%20ALL%20SELECT%20NULL%2CNULL%2CNULL--%20FPED&",
"linkedin",
)
def test_parse_query_linkedin_cryptocurrencies_1556702372():
verify_serp_parsing(
"https://web.archive.org/web/20190501091932id_/https://www.linkedin.com/search/results/content/?keywords=%23cryptocurrencies&origin=GLOBAL_SEARCH_HEADER",
"linkedin",
)
def test_parse_query_linkedin_james_margolin_fbi_1571382052():
verify_serp_parsing(
"https://web.archive.org/web/20191018070052id_/https://www.linkedin.com/search/results/all/?keywords=james%20margolin%20fbi&origin=GLOBAL_SEARCH_HEADER",
"linkedin",
)
def test_parse_query_linkedin_parent_information_center_of_nj_https_www_linkedin_com_search_results_all_keywords_parent_information_center_of_nj_1537815969():
verify_serp_parsing(
"https://web.archive.org/web/20180924190609id_/https://www.linkedin.com/search/results/all/?keywords=Parent%20Information%20Center%20of%20NJ/../https://www.linkedin.com/search/results/all/?keywords=Parent%20Information%20Center%20of%20NJ",
"linkedin",
)
| 2,568
| 47.471698
| 286
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_twitch_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_twitch_juliversal_1629750531():
verify_serp_parsing(
"https://web.archive.org/web/20210823202851id_/https://www.twitch.tv/search?term=juliversal",
"twitch",
)
def test_parse_query_twitch_simplevar_1662753027():
verify_serp_parsing(
"https://web.archive.org/web/20220909195027id_/https://www.twitch.tv/search?term=%09%20SimpleVar",
"twitch",
)
def test_parse_query_twitch_cpentagon_1640060311():
verify_serp_parsing(
"https://web.archive.org/web/20211221041831id_/https://www.twitch.tv/search?term=CPentagon",
"twitch",
)
def test_parse_query_twitch_rubberboy2001_1661349876():
verify_serp_parsing(
"https://web.archive.org/web/20220824140436id_/https://www.twitch.tv/search?term=rubberboy2001",
"twitch",
)
def test_parse_query_twitch_central_fluminense_1630540704():
verify_serp_parsing(
"https://web.archive.org/web/20210901235824id_/https://www.twitch.tv/search?term=central%20fluminense",
"twitch",
)
def test_parse_query_twitch_xxlillythefallenangelneko_1638019769():
verify_serp_parsing(
"https://web.archive.org/web/20211127132929id_/https://www.twitch.tv/search?term=xxlillythefallenangelneko",
"twitch",
)
def test_parse_query_twitch_a_plague_tale_innocence_1638358324():
verify_serp_parsing(
"https://web.archive.org/web/20211201113204id_/https://www.twitch.tv/search?term=A%20Plague%20Tale%3A%20Innocence&type=categories&ref=NEXARDA",
"twitch",
)
| 1,702
| 31.132075
| 151
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_qq_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_qq_danil_kozlovsky_1360453772():
verify_serp_parsing(
"https://web.archive.org/web/20130209234932id_/http://v.qq.com/search.html?pagetype=3&stag=word.tag&ms_key=danil%20kozlovsky",
"qq",
)
def test_parse_query_qq_feng_mi_lian_1380895166():
verify_serp_parsing(
"https://web.archive.org/web/20131004135926id_/http://v.qq.com/search.html?pagetype=3&stag=word.tag&ms_key=%E5%B3%B0%E5%B9%82%E6%81%8B",
"qq",
)
def test_parse_query_qq_ji_lin_yan_ji_1360789033():
verify_serp_parsing(
"https://web.archive.org/web/20130213205713id_/http://v.qq.com:80/search.html?pagetype=3&ms_key=%E5%90%89%E6%9E%97%E5%BB%B6%E5%90%89",
"qq",
)
def test_parse_query_qq_zhong_nian_wei_ji_1408289827():
verify_serp_parsing(
"https://web.archive.org/web/20140817153707id_/http://v.qq.com/search.html?pagetype=3&stag=word.tag&ms_key=%E4%B8%AD%E5%B9%B4%E5%8D%B1%E6%9C%BA",
"qq",
)
def test_parse_query_qq_jie_ke_luo_de_wei_er_1445892389():
verify_serp_parsing(
"https://web.archive.org/web/20151026204629id_/http://v.qq.com/search.html?pagetype=3&stag=txt.playpage.sports&ms_key=%E6%9D%B0%E5%85%8B.%E7%BD%97%E5%BE%B7%E7%BB%B4%E5%B0%94",
"qq",
)
def test_parse_query_qq_lin_xi_tong_1507483207():
verify_serp_parsing(
"https://web.archive.org/web/20171008172007id_/http://v.qq.com/search.html?pagetype=3&stj2=tiket&ms_key=%E6%9E%97%E7%86%B9%E7%9E%B3",
"qq",
)
def test_parse_query_qq_shen_shan_zheng_er_lang_1405119212():
verify_serp_parsing(
"https://web.archive.org/web/20140711225332id_/http://v.qq.com//search.html?pagetype=3&ms_key=%E7%A5%9E%E5%B1%B1%E5%BE%81%E4%BA%8C%E9%83%8E",
"qq",
)
def test_parse_query_qq_james_boshier_1449178049():
verify_serp_parsing(
"https://web.archive.org/web/20151203212729id_/http://v.qq.com//search.html?pagetype=3&ms_key=James+Boshier",
"qq",
)
def test_parse_query_qq_ji_zhou_dao_ttglao_hu_ji_ping_tai_guan_wang_x_fa33n_comx_1488991663():
verify_serp_parsing(
"https://web.archive.org/web/20170308164743id_/http://v.qq.com/search.html?pagetype=3&stj2=search.search&stag=txt.index&ms_key=%E6%B5%8E%E5%B7%9E%E5%B2%9Bttg%E8%80%81%E8%99%8E%E6%9C%BA%E5%B9%B3%E5%8F%B0%E5%AE%98%E7%BD%91%E3%80%93FA33N.COM%E3%80%93",
"qq",
)
def test_parse_query_qq_hua_ze_xiang_cai_1408309927():
verify_serp_parsing(
"https://web.archive.org/web/20140817211207id_/http://v.qq.com//search.html?pagetype=3&ms_key=%E8%8A%B1%E6%B3%BD%E9%A6%99%E8%8F%9C",
"qq",
)
def test_parse_query_qq_zhang_dong_jian_1446133153():
verify_serp_parsing(
"https://web.archive.org/web/20151029153913id_/http://v.qq.com//search.html?pagetype=3&ms_key=%E5%BC%A0%E4%B8%9C%E5%81%A5",
"qq",
)
def test_parse_query_qq_jin_xiu_long_1320298924():
verify_serp_parsing(
"https://web.archive.org/web/20111103054204id_/http://v.qq.com/search.html?pagetype=3&ms_key=%E9%87%91%E7%A7%80%E9%BE%99",
"qq",
)
def test_parse_query_qq_xing_xing_di_qiu_2_1582812539():
verify_serp_parsing(
"https://web.archive.org/web/20200227140859id_/http://v.qq.com:80/search.html?ms_key=%E8%A1%8C%E6%98%9F%E5%9C%B0%E7%90%832",
"qq",
)
def test_parse_query_qq_jin_cheng_wu_1319745059():
verify_serp_parsing(
"https://web.archive.org/web/20111027195059id_/http://v.qq.com/search.html?pagetype=3&ms_key=%E9%87%91%E5%9F%8E%E6%AD%A6",
"qq",
)
def test_parse_query_qq_turn_that_finger_around_1324266860():
verify_serp_parsing(
"https://web.archive.org/web/20111219035420id_/http://v.qq.com:80/search.html?pagetype=3&ms_key=Turn+That+Finger+Around",
"qq",
)
def test_parse_query_qq_statue_1536122094():
verify_serp_parsing(
"https://web.archive.org/web/20180905043454id_/http://v.qq.com:80/search.html?pagetype=3&stj2=search.search&stag=txt.index&ms_key=statue",
"qq",
)
def test_parse_query_qq_erin_1359911760():
verify_serp_parsing(
"https://web.archive.org/web/20130203171600id_/http://v.qq.com//search.html?pagetype=3&ms_key=Erin",
"qq",
)
def test_parse_query_qq_jin_cheng_wu_2_1319745059():
verify_serp_parsing(
"https://web.archive.org/web/20111027195059id_/http://v.qq.com/search.html?pagetype=3&ms_key=%E9%87%91%E5%9F%8E%E6%AD%A6",
"qq",
)
def test_parse_query_qq_tag_encode_1341871817():
verify_serp_parsing(
"https://web.archive.org/web/20120709221017id_/http://v.qq.com/search.html?pagetype=3&ms_key=${tag|encode}",
"qq",
)
def test_parse_query_qq_niamh_cusack_1319741306():
verify_serp_parsing(
"https://web.archive.org/web/20111027213506id_/http://v.qq.com/search.html?pagetype=3&ms_key=Niamh+Cusack",
"qq",
)
| 5,030
| 33.9375
| 257
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_indeed_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_indeed_origins_macy_s_1353165507():
verify_serp_parsing(
"https://web.archive.org/web/20121117151827id_/http://www.indeed.com/jobs?q=Origins%2Fmacy%27s",
"indeed",
)
def test_parse_query_indeed_hca_corporate_1334425152():
verify_serp_parsing(
"https://web.archive.org/web/20120414173912id_/http://www.indeed.com/jobs?q=HCA-Corporate&l=Orlando,+FL",
"indeed",
)
def test_parse_query_indeed_60000_1450529560():
verify_serp_parsing(
"https://web.archive.org/web/20151219125240id_/http://www.indeed.com/jobs?q=$60,000&l=Saratoga,+CA&sort=date",
"indeed",
)
def test_parse_query_indeed_concept_development_integration_1647187061():
verify_serp_parsing(
"https://web.archive.org/web/20220313155741id_/https://de.indeed.com/jobs?q=Concept+Development+Integration&l=M%C3%BCnchen",
"indeed",
)
def test_parse_query_indeed_jvm_lending_1586272513():
verify_serp_parsing(
"https://web.archive.org/web/20200407151513id_/https://www.indeed.com/jobs?q=JVM+Lending&rbl=Salt+Lake+City,+UT&jlid=029db43e55e0cfca",
"indeed",
)
def test_parse_query_indeed_customer_service_1447983624():
verify_serp_parsing(
"https://web.archive.org/web/20151120014024id_/http://www.indeed.com/jobs?q=Customer+Service&rbl=Phoenix,+AZ&jlid=b4ff2ac66b411fd8",
"indeed",
)
def test_parse_query_indeed_higher_education_freelance_network_1585036169():
verify_serp_parsing(
"https://web.archive.org/web/20200324074929id_/https://www.indeed.com/jobs?q=Higher+Education+Freelance+Network&rbc=The+University+of+Chicago&jcid=5cb727313a823dd6",
"indeed",
)
def test_parse_query_indeed_steiner_business_solutions_1577184429():
verify_serp_parsing(
"https://web.archive.org/web/20191224104709id_/https://www.indeed.com/jobs?q=Steiner+Business+Solutions&explvl=mid_level",
"indeed",
)
def test_parse_query_indeed_international_english_prep_academy_iepa_1586237411():
verify_serp_parsing(
"https://web.archive.org/web/20200407053011id_/https://www.indeed.com/jobs?q=International+English+Prep+Academy+(iepa)&l=Stafford,+VA&nc=jasx",
"indeed",
)
def test_parse_query_indeed_pepsico_1487893385():
verify_serp_parsing(
"https://web.archive.org/web/20170223234305id_/http://www.indeed.com/jobs?q=PepsiCo&jt=contract",
"indeed",
)
| 2,596
| 34.094595
| 173
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_amazon_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_amazon_under_armour_socks_1553107494():
verify_serp_parsing(
"https://web.archive.org/web/20190320184454id_/https://www.amazon.ca/s?k=Under+armour+socks&rh=n%253A2242989011%252Cn%253A2439137011&s=price-asc-rank&dc&qid=1553111078&rnid=5264023011&ref=sr_nr_n_14",
"amazon",
)
def test_parse_query_amazon_joseph_menn_1605789427():
verify_serp_parsing(
"https://web.archive.org/web/20201119123707id_/https://www.amazon.com/s?k=Joseph+Menn",
"amazon",
)
def test_parse_query_amazon_lovense_1618431049():
verify_serp_parsing(
"https://web.archive.org/web/20210414201049id_/https://www.amazon.com/s?k=LOVENSE&i=hpc&ref=nb_sb_noss_2",
"amazon",
)
def test_parse_query_amazon_notebook_dell_1613232661():
verify_serp_parsing(
"https://web.archive.org/web/20210213161101id_/https://www.amazon.com.br/s?k=notebook+dell&__mk_pt_BR=%C3%85M%C3%85%C5%BD%C3%95%C3%91&linkCode=sl2&tag=katlivros-20&linkId=e69cdc65575812a23eb13b5de77ffae8&language=pt_BR&ref_=as_li_ss_tl",
"amazon",
)
def test_parse_query_amazon_deng_shi_jia_meng_www_baidu_com_aaaa_oq1_2018nian_8yue_19ri_19shi_32fen_18miao_1564357198():
verify_serp_parsing(
"https://web.archive.org/web/20190728233958id_/https://www.amazon.cn/s?k=%E7%81%AF%E9%A5%B0%E5%8A%A0%E7%9B%9F+www.baidu.com%2Faaaa+oq1+2018%E5%B9%B48%E6%9C%8819%E6%97%A519%E6%97%B632%E5%88%8618%E7%A7%92",
"amazon",
)
def test_parse_query_amazon_tazas_te_transparente_1613035337():
verify_serp_parsing(
"https://web.archive.org/web/20210211092217id_/https://www.amazon.es/s?k=tazas+te+transparente&__mk_es_ES=%C3%85M%C3%85%C5%BD%C3%95%C3%91&crid=12A9DD0FYHVU2&sprefix=tazas+te+tra,aps,244&linkCode=sl2&tag=loxlo3-21&linkId=3e3ee226a50fb1466af6122597a0551c&language=es_ES&ref_=as_li_ss_tl",
"amazon",
)
def test_parse_query_amazon_pillow_fsa_1615964514():
verify_serp_parsing(
"https://web.archive.org/web/20210317070154id_/https://www.amazon.com/s?i=aps&k=pillow+fsa&ref=nb_sb_noss_1&url=search-alias%3Daps&pldnSite=1",
"amazon",
)
def test_parse_query_amazon_yangumagazin_yanmagasado_1625727694():
verify_serp_parsing(
"https://web.archive.org/web/20210708070134id_/https://www.amazon.co.jp/s?k=%22%E3%83%A4%E3%83%B3%E3%82%B0%E3%83%9E%E3%82%AC%E3%82%B8%E3%83%B3%22%7C%22%E3%83%A4%E3%83%B3%E3%83%9E%E3%82%AC%E3%82%B5%E3%83%BC%E3%83%89%22&x=0&y=0&linkCode=ll2&tag=rapfoodnewe0c-22&linkId=76c8bdecef23a7536b9c5ec384e267da&language=ja_JP&ref_=as_li_ss_tl",
"amazon",
)
def test_parse_query_amazon_monitor_1625069899():
verify_serp_parsing(
"https://web.archive.org/web/20210630161819id_/https://www.amazon.de/s?k=monitor&rh=p_n_condition-type%3A776950031&dc&__mk_de_DE=AMAZON&qid=1625076577&rnid=776942031",
"amazon",
)
def test_parse_query_amazon_gaming_monitor_1639065822():
verify_serp_parsing(
"https://web.archive.org/web/20211209160342id_/https://www.amazon.ca/s?k=Gaming+Monitor&_encoding=UTF8&camp=15121&creative=330641&linkCode=ur2&linkId=769e0014053b086f96c5e9f93948f632&tag=liquipedia07-20",
"amazon",
)
def test_parse_query_amazon_the_risks_of_prescription_drugs_1657820625():
verify_serp_parsing(
"https://web.archive.org/web/20220714174345id_/https://www.amazon.com/s?k=the+risks+of+prescription+drugs&x=15&y=19&ref=nb_sb_noss",
"amazon",
)
def test_parse_query_amazon_eksa_1662208504():
verify_serp_parsing(
"https://web.archive.org/web/20220903123504id_/https://www.amazon.in/s?k=eksa&ref=nb_sb_noss&tag=prosenjit0c-21",
"amazon",
)
def test_parse_query_amazon_qian_lian_mo_hua_orizinarusaundotoratsuku_1657685343():
verify_serp_parsing(
"https://web.archive.org/web/20220713040903id_/https://www.amazon.co.jp/s?k=%E5%8D%83%E6%81%8B%EF%BC%8A%E4%B8%87%E8%8A%B1+%E3%82%AA%E3%83%AA%E3%82%B8%E3%83%8A%E3%83%AB%E3%82%B5%E3%82%A6%E3%83%B3%E3%83%89%E3%83%88%E3%83%A9%E3%83%83%E3%82%AF&__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&crid=BFBUJUC89AS1&sprefix=%E5%8D%83%E6%81%8B+%E4%B8%87%E8%8A%B1+original+soundtrack%2Caps%2C546&ref=nb_sb_noss",
"amazon",
)
def test_parse_query_amazon_zao_dian_jia_meng_pin_pai_www_baidu_com_aaaa_zv9_2018nian_8yue_21ri_17shi_49fen_4miao_1564430721():
verify_serp_parsing(
"https://web.archive.org/web/20190729200521id_/https://www.amazon.cn/s?k=%E6%97%A9%E7%82%B9%E5%8A%A0%E7%9B%9F%E5%93%81%E7%89%8C+www.baidu.com%2Faaaa+zv9+2018%E5%B9%B48%E6%9C%8821%E6%97%A517%E6%97%B649%E5%88%864%E7%A7%92",
"amazon",
)
def test_parse_query_amazon_gym_mat_tile_1634791656():
verify_serp_parsing(
"https://web.archive.org/web/20211021044736id_/https://www.amazon.com/s?k=gym+mat+tile&ref=nb_sb_noss_1",
"amazon",
)
def test_parse_query_amazon_guang_huaiba_pawameta_1632443655():
verify_serp_parsing(
"https://web.archive.org/web/20210924003415id_/https://www.amazon.co.jp/s?k=%E5%85%89%E3%83%95%E3%82%A1%E3%82%A4%E3%83%90%E3%83%BC+%E3%83%91%E3%83%AF%E3%83%BC%E3%83%A1%E3%83%BC%E3%82%BF%E3%83%BC&ref=nb_sb_noss_2",
"amazon",
)
def test_parse_query_amazon_omega_labyrinth_z_1616510370():
verify_serp_parsing(
"https://web.archive.org/web/20210323143930id_/https://www.amazon.com/s?k=omega+labyrinth+z",
"amazon",
)
def test_parse_query_amazon_solefit_1663015835():
verify_serp_parsing(
"https://web.archive.org/web/20220912205035id_/https://www.amazon.in/s?k=Solefit&bbn=1968024031&rh=n:1571271031,n:1968024031,n:1968120031,p_36:12700-&dc=&qid=1626028382&rnid=4595083031&linkCode=sl2&tag=couponguruj0b-21&linkId=16c64e11c927eb2d1115416374b80d53&language=en_IN&ref_=as_li_ss_tl",
"amazon",
)
def test_parse_query_amazon_the_montells_you_can_t_make_me_1617199271():
verify_serp_parsing(
"https://web.archive.org/web/20210331140111id_/https://music.amazon.com/search/The+Montells+You+Can't+Make+Me",
"amazon",
)
def test_parse_query_amazon_dnd_flash_mosquito_1633240377():
verify_serp_parsing(
"https://web.archive.org/web/20211003055257id_/https://www.amazon.in/s?k=DND+Flash+Mosquito&i=lawngarden&rh=p_6:AT95IG9ONZD7S&s=price-asc-rank&dc=&linkCode=sl2&-21=&language=en_IN&ref_=as_li_ss_tl&tag=dealsnalert-21",
"amazon",
)
| 6,543
| 44.444444
| 417
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_canva_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_canva_instagram_reels_video_1607594697():
verify_serp_parsing(
"https://web.archive.org/web/20201210100457id_/https://www.canva.com/design/play?category=tAEEZ_gIPxM&template=EAD_mTZpt2k&type=TAD9VKDO5tI&layoutQuery=Instagram%20Reels%20Video&analyticsCorrelationId=f6767fc3-7f33-445e-9740-a004311e5afe",
"canva",
)
| 502
| 44.727273
| 247
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_wikimedia_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_wikimedia_figures_in_theatrical_costumes_claude_gillot_1673_1722_class_photo_description_french_painter_drawer_1632572254():
verify_serp_parsing(
"https://web.archive.org/web/20210925121734id_/https://www.wikidata.org/w/index.php?title=Special:Search&search=Figures+in+Theatrical+Costumes+%0A%0A%0AClaude+Gillot%0A%26nbsp%3B%281673%26ndash%3B1722%29%26nbsp%3B%26nbsp%3B%0A%0A%0A%7Cclass%3Dphoto%0A%0A%0ADescription%0AFrench+painter%2C+drawer+and+graphic+artist%0A%0ADate+of+birth%2Fdeath%0A%0A28+April+1673+%2F+27+April+1673%26nbsp%3B%0A4+May+1722%26nbsp%3B%0A%0ALocation+of+birth%2Fdeath%0A%0ALangres%0AParis%0A%0AWork+location%0A%0AParis%26nbsp%3B%0A%0AAuthority+control%0A%0A%2A%3A+Q559969%0A%2AVIAF%3A%26thinsp%3B27335846%0A%2AISNI%3A%26thinsp%3B0000+0001+2277+7433%0A%2AULAN%3A%26thinsp%3B500115197%0A%2ALCCN%3A%26thinsp%3Bnr93039721%0A%2AWGA%3A%26thinsp%3BGILLOT%2C+Claude%0A%2AWorldCat%0A%0A%0A%0A%0A%0A%0A%0A%0A%0AAntoine+Watteau%0A%26nbsp%3B%281684%26ndash%3B1721%29%26nbsp%3B%26nbsp%3B%26nbsp%3B%0A%0A%0A%7Cclass%3Dphoto%0A%0A%0AAlternative+names%0A%0AJean-Antoine+Watteau%0A%0ADescription%0AFrench+painter%2C+graphic+artist%2C+drawer%2C+artist%2C+printmaker+and+draftsperson%0A%0ADate+of+birth%2Fdeath%0A%0A10+October+1684%26nbsp%3B%0A18+July+1721%26nbsp%3B%0A%0ALocation+of+birth%2Fdeath%0A%0AValenciennes%0ANogent-sur-Marne%0A%0AWork+location%0A%0AParis+%28after+1702+%29%2C+Valenciennes+%281709-1710%29%2C+London+%281719-1720%29%0A%0AAuthority+control%0A%0A%2A%3A+Q183221%0A%2AVIAF%3A%26thinsp%3B46882028%0A%2AISNI%3A%26thinsp%3B0000+0001+2131+1877%0A%2AULAN%3A%26thinsp%3B500032644%0A%2ALCCN%3A%26thinsp%3Bn50043693%0A%2ANLA%3A%26thinsp%3B36213816%0A%2AWorldCat%0A%0A%0A%0A%0A%0A+10.45.15+haswbstatement%3AP195%3DQ160236&profile=advanced&fulltext=1&ns0=1",
"wikimedia",
)
def test_parse_query_wikimedia_group_portrait_on_doorstep_collins_tudor_washington_1898_1970_photographer_75426_object_number_haswb_1629837781():
verify_serp_parsing(
"https://web.archive.org/web/20210824204301id_/https://www.wikidata.org/w/index.php?search=(Group+portrait+on+doorstep)+Collins,+Tudor+Washington,+1898-1970,+photographer+75426+(object+number)+haswbstatement:P195%3DQ758657&title=Special:Search&profile=advanced&fulltext=1&ns0=1",
"wikimedia",
)
def test_parse_query_wikimedia_mou_lin_han_site_pku_edu_cn_1576868395():
verify_serp_parsing(
"https://web.archive.org/web/20191220185955id_/https://en.wikipedia.org/w/index.php?title=Special%3ASearch&profile=images&search=%E7%89%9F%E6%9E%97%E7%BF%B0+site%3Apku.edu.cn&fulltext=1&ns0=1",
"wikimedia",
)
def test_parse_query_wikimedia_nito_1655733503():
verify_serp_parsing(
"https://web.archive.org/web/20220620135823id_/https://commons.wikimedia.org/w/index.php?title=Special%3AMediaSearch&search=Nit%C6%A1",
"wikimedia",
)
def test_parse_query_wikimedia_prob_1543207125():
verify_serp_parsing(
"https://web.archive.org/web/20181126043845id_/https://zh.wikipedia.org/w/index.php?search=prob&title=Special%3A%E6%90%9C%E7%B4%A2&profile=default&fulltext=1",
"wikimedia",
)
def test_parse_query_wikimedia_ao_xian_yu_le_du_bo_ping_tai_1430758639():
verify_serp_parsing(
"https://web.archive.org/web/20150504165719id_/http://zh.wikipedia.org/w/index.php?search=%E6%BE%B3%E7%BA%BF%E5%A8%B1%E4%B9%90%E8%B5%8C%E5%8D%9A%E5%B9%B3%E5%8F%B0",
"wikimedia",
)
def test_parse_query_wikimedia_zao_can_bao_zi_jia_meng_www_baidu_com_aaaa_4ws_2018nian_8yue_4ri_21shi_19fen_39miao_1537939782():
verify_serp_parsing(
"https://web.archive.org/web/20180926052942id_/https://zh.wikipedia.org/w/index.php?search=%E6%97%A9%E9%A4%90%E5%8C%85%E5%AD%90%E5%8A%A0%E7%9B%9F%20www.baidu.com/aaaa%204ws%202018%E5%B9%B48%E6%9C%884%E6%97%A521%E6%97%B619%E5%88%8639%E7%A7%92",
"wikimedia",
)
def test_parse_query_wikimedia_oxygen_1596274448():
verify_serp_parsing(
"https://web.archive.org/web/20200801093408id_/https://commons.wikimedia.org/w/index.php?title=Special:Search&search=Oxygen&ns0=1&ns6=1&ns12=1&ns14=1&ns100=1&ns106=1",
"wikimedia",
)
def test_parse_query_wikimedia_recaptchalogo_svg_1509131182():
verify_serp_parsing(
"https://web.archive.org/web/20171027190622id_/https://www.wikidata.org/w/index.php?title=Special:Search&profile=images&search=RecaptchaLogo.svg&fulltext=1",
"wikimedia",
)
def test_parse_query_wikimedia_la_dian_zi_ji_1517046182():
verify_serp_parsing(
"https://web.archive.org/web/20180127094302id_/https://zh.wikipedia.org/w/index.php?title=Special:%E6%90%9C%E7%B4%A2&search=%E6%8B%89%E9%9B%BB%E5%AD%90%E5%9F%BA",
"wikimedia",
)
| 4,897
| 65.189189
| 1,633
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_youtube_serp_parsing.py
|
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_youtube_pudding_1563068696():
verify_serp_parsing(
"https://web.archive.org/web/20190714014456id_/https://www.youtube.com/results?search_query=%23pudding&sp=EgQYASABSGTqAwA%253D",
"youtube",
)
def test_parse_query_youtube_q2_2017_arizona_west_regional_1547871680():
verify_serp_parsing(
"https://web.archive.org/web/20190119042120id_/https://www.youtube.com/results?search_query=Q2+2017+Arizona%20West%20Regional",
"youtube",
)
def test_parse_query_youtube_lataji_1563086980():
verify_serp_parsing(
"https://web.archive.org/web/20190714064940id_/https://www.youtube.com/results?search_query=%23LataJi&sp=CAESBhABIAEoAQ%253D%253D",
"youtube",
)
def test_parse_query_youtube_robloks_mip_siti_2019_1583715307():
verify_serp_parsing(
"https://web.archive.org/web/20200309005507id_/https://www.youtube.com/results?search_query=%D1%80%D0%BE%D0%B1%D0%BB%D0%BE%D0%BA%D1%81+%D0%BC%D0%B8%D0%BF+%D1%81%D0%B8%D1%82%D0%B8+2019&sp=SDzqAwA%253D",
"youtube",
)
def test_parse_query_youtube_ampatuanmassacre_1583309425():
verify_serp_parsing(
"https://web.archive.org/web/20200304081025id_/https://www.youtube.com/results?search_query=%23AmpatuanMassacre&app=desktop",
"youtube",
)
def test_parse_query_youtube_hp_probook_640_g1_razborka_1579996098():
verify_serp_parsing(
"https://web.archive.org/web/20200125234818id_/https://www.youtube.com/results?search_query=hp+probook+640+g1+%D1%80%D0%B0%D0%B7%D0%B1%D0%BE%D1%80%D0%BA%D0%B0",
"youtube",
)
def test_parse_query_youtube_razbor_shchetki_pylesosa_samsung_1584184489():
verify_serp_parsing(
"https://web.archive.org/web/20200314111449id_/https://www.youtube.com/results?search_query=%D1%80%D0%B0%D0%B7%D0%B1%D0%BE%D1%80+%D1%89%D0%B5%D1%82%D0%BA%D0%B8+%D0%BF%D1%8B%D0%BB%D0%B5%D1%81%D0%BE%D1%81%D0%B0+samsung",
"youtube",
)
def test_parse_query_youtube_literatura_6_klass_biografiia_pushkina_1595705363():
verify_serp_parsing(
"https://web.archive.org/web/20200725192923id_/https://www.youtube.com/results?search_query=%D0%BB%D0%B8%D1%82%D0%B5%D1%80%D0%B0%D1%82%D1%83%D1%80%D0%B0+6+%D0%BA%D0%BB%D0%B0%D1%81%D1%81+%D0%B1%D0%B8%D0%BE%D0%B3%D1%80%D0%B0%D1%84%D0%B8%D1%8F+%D0%BF%D1%83%D1%88%D0%BA%D0%B8%D0%BD%D0%B0",
"youtube",
)
def test_parse_query_youtube_ne_prosto_bekkhen_reaktsiia_1582514079():
verify_serp_parsing(
"https://web.archive.org/web/20200224031439id_/https://www.youtube.com/results?search_query=%D0%BD%D0%B5+%D0%BF%D1%80%D0%BE%D1%81%D1%82%D0%BE+%D0%B1%D1%8D%D0%BA%D1%85%D0%B5%D0%BD+%D1%80%D0%B5%D0%B0%D0%BA%D1%86%D0%B8%D1%8F",
"youtube",
)
def test_parse_query_youtube_prokhozhdenie_igry_madagaskar_2_chast_4_1562167916():
verify_serp_parsing(
"https://web.archive.org/web/20190703153156id_/https://www.youtube.com/results?search_query=%D0%BF%D1%80%D0%BE%D1%85%D0%BE%D0%B6%D0%B4%D0%B5%D0%BD%D0%B8%D0%B5+%D0%B8%D0%B3%D1%80%D1%8B+%D0%BC%D0%B0%D0%B4%D0%B0%D0%B3%D0%B0%D1%81%D0%BA%D0%B0%D1%80+2+%D1%87%D0%B0%D1%81%D1%82%D1%8C+4&sp=CAESCwgFEAQYAiAByAEB",
"youtube",
)
def test_parse_query_youtube_zadnii_most_moskvich_412_ustroistvo_1584150216():
verify_serp_parsing(
"https://web.archive.org/web/20200314014336id_/https://www.youtube.com/results?search_query=%D0%B7%D0%B0%D0%B4%D0%BD%D0%B8%D0%B9+%D0%BC%D0%BE%D1%81%D1%82+%D0%BC%D0%BE%D1%81%D0%BA%D0%B2%D0%B8%D1%87+412+%D1%83%D1%81%D1%82%D1%80%D0%BE%D0%B9%D1%81%D1%82%D0%B2%D0%BE",
"youtube",
)
def test_parse_query_youtube_vasilii_emelianenko_tefteli_1578285143():
verify_serp_parsing(
"https://web.archive.org/web/20200106043223id_/https://www.youtube.com/results?search_query=%D0%B2%D0%B0%D1%81%D0%B8%D0%BB%D0%B8%D0%B9+%D0%B5%D0%BC%D0%B5%D0%BB%D1%8C%D1%8F%D0%BD%D0%B5%D0%BD%D0%BA%D0%BE+%D1%82%D0%B5%D1%84%D1%82%D0%B5%D0%BB%D0%B8",
"youtube",
)
def test_parse_query_youtube_kutyaplya_1561171748():
verify_serp_parsing(
"https://web.archive.org/web/20190622024908id_/https://www.youtube.com/results?search_query=kutyaplya",
"youtube",
)
def test_parse_query_youtube_kak_sdelat_vzryv_v_cinema_4d_1578732009():
verify_serp_parsing(
"https://web.archive.org/web/20200111084009id_/https://www.youtube.com/results?search_query=%D0%BA%D0%B0%D0%BA+%D1%81%D0%B4%D0%B5%D0%BB%D0%B0%D1%82%D1%8C+%D0%B2%D0%B7%D1%80%D1%8B%D0%B2+%D0%B2+cinema+4d",
"youtube",
)
def test_parse_query_youtube_shang_yue_xian_rupu_1582390054():
verify_serp_parsing(
"https://web.archive.org/web/20200222164734id_/https://www.youtube.com/results?search_query=%E4%B8%8A%E8%B6%8A%E7%B7%9A+%E3%83%AB%E3%83%BC%E3%83%97",
"youtube",
)
def test_parse_query_youtube_pora_tiulpanov_aktery_1574991226():
verify_serp_parsing(
"https://web.archive.org/web/20191129013346id_/https://www.youtube.com/results?search_query=%D0%BF%D0%BE%D1%80%D0%B0+%D1%82%D1%8E%D0%BB%D1%8C%D0%BF%D0%B0%D0%BD%D0%BE%D0%B2+%D0%B0%D0%BA%D1%82%D0%B5%D1%80%D1%8B",
"youtube",
)
def test_parse_query_youtube_uzbekskaia_svadba_v_shymkente_1574145605():
verify_serp_parsing(
"https://web.archive.org/web/20191119064005id_/https://www.youtube.com/results?search_query=%D1%83%D0%B7%D0%B1%D0%B5%D0%BA%D1%81%D0%BA%D0%B0%D1%8F+%D1%81%D0%B2%D0%B0%D0%B4%D1%8C%D0%B1%D0%B0+%D0%B2+%D1%88%D1%8B%D0%BC%D0%BA%D0%B5%D0%BD%D1%82%D0%B5",
"youtube",
)
def test_parse_query_youtube_chto_budet_esli_sobrat_vse_rezinki_v_bully_1599555287():
verify_serp_parsing(
"https://web.archive.org/web/20200908085447id_/https://www.youtube.com/results?search_query=%D1%87%D1%82%D0%BE+%D0%B1%D1%83%D0%B4%D0%B5%D1%82+%D0%B5%D1%81%D0%BB%D0%B8+%D1%81%D0%BE%D0%B1%D1%80%D0%B0%D1%82%D1%8C+%D0%B2%D1%81%D0%B5+%D1%80%D0%B5%D0%B7%D0%B8%D0%BD%D0%BA%D0%B8+%D0%B2+bully",
"youtube",
)
def test_parse_query_youtube_kak_vybit_dushu_moba_1599057231():
verify_serp_parsing(
"https://web.archive.org/web/20200902143351id_/https://www.youtube.com/results?search_query=%D0%BA%D0%B0%D0%BA+%D0%B2%D1%8B%D0%B1%D0%B8%D1%82%D1%8C+%D0%B4%D1%83%D1%88%D1%83+%D0%BC%D0%BE%D0%B1%D0%B0",
"youtube",
)
def test_parse_query_youtube_cardistry_asmr_1577113546():
verify_serp_parsing(
"https://web.archive.org/web/20191223150546id_/https://www.youtube.com/results?search_query=cardistry+asmr",
"youtube",
)
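
All of the generated tests in these modules reduce to one pattern: verify_serp_parsing is called with an archived SERP URL and the name of the search provider whose parser should handle it. The real helper lives in archive_query_log/results/test/test_utils.py and is not shown in this excerpt, so the sketch below is only a rough illustration of that pattern under the assumed (url, service) signature; its body and every other name in it are hypothetical, not the project's actual implementation.

from urllib.parse import parse_qs, urlparse


def sketch_verify_serp_parsing(archived_serp_url: str, service: str) -> None:
    # Hypothetical sketch, not the real test_utils.verify_serp_parsing.
    # The Wayback Machine URL wraps the original SERP URL after the "id_/" marker.
    original_url = archived_serp_url.split("id_/", 1)[1]
    # A real implementation would dispatch to the parser registered for `service`;
    # this sketch only checks that a query-like parameter can be recovered from the
    # query string, which holds for the engines above that pass the query as a parameter.
    params = parse_qs(urlparse(original_url).query)
    query_keys = ("q", "query", "search_query", "search", "Keyword")
    assert any(key in params for key in query_keys), (service, original_url)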
file_length: 6,643
avg_line_length: 45.138889
max_line_length: 313
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_sogou_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_sogou_lofke_yi_kua_shi_chang_mai_mai_1333092705():
verify_serp_parsing(
"https://web.archive.org/web/20120330073145id_/http://www.sogou.com/web?query=lof%E5%8F%AF%E4%BB%A5%E8%B7%A8%E5%B8%82%E5%9C%BA%E4%B9%B0%E5%8D%96&p=02210102&fhintidx=7",
"sogou",
)
def test_parse_query_sogou_xing_ai_ji_qiao_1576522689():
verify_serp_parsing(
"https://web.archive.org/web/20191216185809id_/https://www.sogou.com/sogou?query=%E6%80%A7%E7%88%B1%E6%8A%80%E5%B7%A7&ie=utf8&insite=baike.sogou.com&pid=sogou-wsse-17737832ac17be52",
"sogou",
)
def test_parse_query_sogou_j_1341637724():
verify_serp_parsing(
"https://web.archive.org/web/20120707050844id_/http://www.sogou.com:80/web?query=%D7%F6%BF%D5%BB%FA%B9%B9%CF%E3%E9%DA&p=31210100&fhintidx=3",
"sogou",
)
def test_parse_query_sogou_ju_min_shen_fen_zheng_hao_ma_he_xing_ming_1508756794():
verify_serp_parsing(
"https://web.archive.org/web/20171023110634id_/http://www.sogou.com/sogou?user_ip=207.241.229.48&sourceid=hint&bh=1&hintidx=11&query=%E5%B1%85%E6%B0%91%E8%BA%AB%E4%BB%BD%E8%AF%81%E5%8F%B7%E7%A0%81%E5%92%8C%E5%A7%93%E5%90%8D&pid=sogou-site-d54ce9de9df77c57&duppid=1&w=01020600&interation=&interV=kKIOkrELjboLmLkEkLoTkKIMkrELjboImLkEk74TkKILmrELjb8TkKIKmrELjbkI_-1099283536&htdbg=idc%3Ebjdjt%7CdbgID%3E01%7Cabt%3E0%7Cmth%3E1&ie=utf8&s_from=hint_last",
"sogou",
)
def test_parse_query_sogou_yuan_dai_ma_xie_lu_1578201651():
verify_serp_parsing(
"https://web.archive.org/web/20200105052051id_/https://www.sogou.com/sogou?pid=sogou-wsse-ff111e4a5406ed40&insite=zhihu.com&p=42351201&query=%E6%BA%90%E4%BB%A3%E7%A0%81%E6%B3%84%E9%9C%B2&ie=utf8&p=42351201&query=%E6%BA%90%E4%BB%A3%E7%A0%81%E6%B3%84%E9%9C%B2&ie=utf8",
"sogou",
)
def test_parse_query_sogou_cesuk_163163163_cn_1493482889():
verify_serp_parsing(
"https://web.archive.org/web/20170429162129id_/http://www.sogou.com/sogou?pid=sogou-wsse-3f7bcd0b3ea82268&ie=utf-8&query=cesuk.163163163.cn",
"sogou",
)
def test_parse_query_sogou_l_1346233371():
verify_serp_parsing(
"https://web.archive.org/web/20120829094251id_/http://www.sogou.com:80/web?query=%BF%F7%CB%F0%B9%FD%B5%C4%B9%CC%B6%A8%CA%D5%D2%E6%D0%C5%CD%D0&p=02210102&fhintidx=7",
"sogou",
)
def test_parse_query_sogou_eed_1_4a1_a_1332923188():
verify_serp_parsing(
"https://web.archive.org/web/20120328082628id_/http://www.sogou.com/web?query=%C2%B1%C3%88%C3%88%C3%B0%C2%BC%C2%AA%C2%B9%C2%B7%C3%81%C2%B8&p=03210100",
"sogou",
)
def test_parse_query_sogou_ti_gong_jin_kou_qi_qiang_1647324224():
verify_serp_parsing(
"https://web.archive.org/web/20220315060344id_/https://www.sogou.com/web?query=%E6%8F%90%E4%BE%9B%E8%BF%9B%E5%8F%A3%E6%B0%94%E6%9E%AA&ie=utf8",
"sogou",
)
def test_parse_query_sogou_tu_guan_xin_gai_kuan_1332227339():
verify_serp_parsing(
"https://web.archive.org/web/20120320070859id_/http://www.sogou.com/web?query=%E9%80%94%E8%A7%82%E6%96%B0%E6%94%B9%E6%AC%BE&p=02210102&fhintidx=3",
"sogou",
)
file_length: 3,284
avg_line_length: 43.391892
max_line_length: 457
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_cnn_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_cnn_kindly_check_1642508434():
verify_serp_parsing(
"https://web.archive.org/web/20220118122034id_/https://edition.cnn.com/search?q=kindly%20check",
"cnn",
)
def test_parse_query_cnn_achievable_possibility_1642416655():
verify_serp_parsing(
"https://web.archive.org/web/20220117105055id_/https://edition.cnn.com/search?q=achievable%20possibility",
"cnn",
)
def test_parse_query_cnn_apple_watch_2016_1662575890():
verify_serp_parsing(
"https://web.archive.org/web/20220907183810id_/https://www.cnn.com/search?q=apple%20watch%202016",
"cnn",
)
def test_parse_query_cnn_march_4_1633908556():
verify_serp_parsing(
"https://web.archive.org/web/20211010232916id_/https://www.cnn.com/search?q=March+4",
"cnn",
)
def test_parse_query_cnn_dxa_1634498874():
verify_serp_parsing(
"https://web.archive.org/web/20211017192754id_/https://www.cnn.com/search?q=dxa",
"cnn",
)
def test_parse_query_cnn_trigger_1638271567():
verify_serp_parsing(
"https://web.archive.org/web/20211130112607id_/https://edition.cnn.com/search?q=trigger",
"cnn",
)
def test_parse_query_cnn_wright_1616229078():
verify_serp_parsing(
"https://web.archive.org/web/20210320083118id_/https://www.cnn.com/search?size=10&q=Wright",
"cnn",
)
def test_parse_query_cnn_skilled_1643388398():
verify_serp_parsing(
"https://web.archive.org/web/20220128164638id_/https://edition.cnn.com/search?q=skilled",
"cnn",
)
def test_parse_query_cnn_biteukoinsaryeomyeon_www_99m_kr_bei_biteukoinsaryelie_biteukoinsaeobbbiteukoinsaeobjadeungrog_biteukoinsayongbangbeob8reduplicate_1647153716():
verify_serp_parsing(
"https://web.archive.org/web/20220313064156id_/https://edition.cnn.com/search?q=%EB%B9%84%ED%8A%B8%EC%BD%94%EC%9D%B8%EC%82%AC%EB%A0%A4%EB%A9%B4%28WWW%C2%B899M%C2%B8KR%29%E5%AD%9B%EB%B9%84%ED%8A%B8%EC%BD%94%EC%9D%B8%EC%82%AC%EB%A1%80%E5%8A%BD%EB%B9%84%ED%8A%B8%EC%BD%94%EC%9D%B8%EC%82%AC%EC%97%85%E0%AA%AC%EB%B9%84%ED%8A%B8%EC%BD%94%EC%9D%B8%EC%82%AC%EC%97%85%EC%9E%90%EB%93%B1%EB%A1%9D%E2%97%81%EB%B9%84%ED%8A%B8%EC%BD%94%EC%9D%B8%EC%82%AC%EC%9A%A9%EB%B0%A9%EB%B2%958%E2%83%A3reduplicate/",
"cnn",
)
def test_parse_query_cnn_north_dakota_coronavirus_1632793271():
verify_serp_parsing(
"https://web.archive.org/web/20210928014111id_/https://www.cnn.com/search?q=North+Dakota+coronavirus",
"cnn",
)
file_length: 2,672
avg_line_length: 35.121622
max_line_length: 498
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_etsy_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_etsy_embroidery_kit_1375805089():
verify_serp_parsing(
"https://web.archive.org/web/20130806160449id_/http://www.etsy.com/search?includes[]=tags&q=embroidery+kit",
"etsy",
)
def test_parse_query_etsy_sundress_1374472455():
verify_serp_parsing(
"https://web.archive.org/web/20130722055415id_/http://www.etsy.com:80/search?includes[]=tags&q=Sundress",
"etsy",
)
def test_parse_query_etsy_storage_and_organization_1632881953():
verify_serp_parsing(
"https://web.archive.org/web/20210929021913id_/https://www.etsy.com/search?q=storage+and+organization&mosv=sese&moci=1040853552759&mosi=1027452046072&ref=hp_bubbles_Fall21_US_GB_CA_FR_DE&anchor_listing_id=830217119",
"etsy",
)
def test_parse_query_etsy_andrew_kim_1566153959():
verify_serp_parsing(
"https://web.archive.org/web/20190818184559id_/https://www.etsy.com/search?q=Andrew+Kim&explicit=1&order=date_desc",
"etsy",
)
def test_parse_query_etsy_pendants_1667648711():
verify_serp_parsing(
"https://web.archive.org/web/20221105114511id_/https://www.etsy.com/search?locale_override=USD%7Cen-US%7CUS&explicit=1&q=pendants&anchor_listing_id=200491601&gf_type=interest",
"etsy",
)
def test_parse_query_etsy_pacifier_1336437253():
verify_serp_parsing(
"https://web.archive.org/web/20120508003413id_/http://www.etsy.com:80/search?includes[]=tags&q=pacifier",
"etsy",
)
def test_parse_query_etsy_invitation_1367234841():
verify_serp_parsing(
"https://web.archive.org/web/20130429112721id_/http://www.etsy.com/search?q=invitation&ref=exp_listing",
"etsy",
)
def test_parse_query_etsy_yan_dong_kuai_le_shi_fen_shou_ji_ban_jia_wei_xin_xw639_2018_1537911471():
verify_serp_parsing(
"https://web.archive.org/web/20180925213751id_/https://www.etsy.com/search?q=%E5%B9%BF%E4%B8%9C%E5%BF%AB%E4%B9%90%E5%8D%81%E5%88%86%E6%89%8B%E6%9C%BA%E7%89%88%E5%8A%A0%E5%BE%AE%E3%80%85%E4%BF%A1xw639-2018%E2%99%80",
"etsy",
)
def test_parse_query_etsy_hard_plastic_1333819223():
verify_serp_parsing(
"https://web.archive.org/web/20120407172023id_/http://www.etsy.com:80/search?includes[]=materials&q=hard+plastic",
"etsy",
)
def test_parse_query_etsy_small_animal_1378528595():
verify_serp_parsing(
"https://web.archive.org/web/20130907043635id_/http://www.etsy.com/search?includes[]=tags&q=Small+Animal",
"etsy",
)
file_length: 2,661
avg_line_length: 34.972973
max_line_length: 224
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_imgur_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_imgur_search_term_string_1565643838():
verify_serp_parsing(
"https://web.archive.org/web/20190812210358id_/https://imgur.com/search?q={search_term_string}",
"imgur",
)
def test_parse_query_imgur_search_term_string_1547858079():
verify_serp_parsing(
"https://web.archive.org/web/20190119003439id_/https://imgur.com/search?q={search_term_string}",
"imgur",
)
file_length: 571
avg_line_length: 30.777778
max_line_length: 104
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_roblox_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_roblox_survival_1656759229():
verify_serp_parsing(
"https://web.archive.org/web/20220702105349id_/https://www.roblox.com/discover/?Keyword=survival",
"roblox",
)
def test_parse_query_roblox_survive_the_killer_1640549700():
verify_serp_parsing(
"https://web.archive.org/web/20211226201500id_/https://www.roblox.com/discover/?Keyword=survive+the+killer",
"roblox",
)
def test_parse_query_roblox_chernobyl_rp_1666555966():
verify_serp_parsing(
"https://web.archive.org/web/20221023201246id_/https://www.roblox.com/discover/?Keyword=chernobyl%20rp",
"roblox",
)
def test_parse_query_roblox_shorts_1357668619():
verify_serp_parsing(
"https://web.archive.org/web/20130108181019id_/http://www.roblox.com:80/catalog/browse.aspx?Subcategory=13&Keyword=shorts&LegendExpanded=true&Category=3",
"roblox",
)
def test_parse_query_roblox_xo_so_88_gg8_run_1659417607():
verify_serp_parsing(
"https://web.archive.org/web/20220802052007id_/https://www.roblox.com/discover/?Keyword=x%E1%BB%95%20s%E1%BB%91%2088%20GG8.Run",
"roblox",
)
def test_parse_query_roblox_welcome_to_the_town_of_robloxia_uncopylocked_1660007534():
verify_serp_parsing(
"https://web.archive.org/web/20220809011214id_/https://www.roblox.com/discover/?Keyword=welcome%20to%20the%20town%20of%20robloxia%20uncopylocked",
"roblox",
)
def test_parse_query_roblox_build_to_survive_black_people_1663785067():
verify_serp_parsing(
"https://web.archive.org/web/20220921183107id_/https://www.roblox.com/discover/?Keyword=build%20to%20survive%20black%20people",
"roblox",
)
def test_parse_query_roblox_cut_1633682743():
verify_serp_parsing(
"https://web.archive.org/web/20211008084543id_/https://www.roblox.com/discover/?Keyword=cut",
"roblox",
)
def test_parse_query_roblox_znation8000_1650350926():
verify_serp_parsing(
"https://web.archive.org/web/20220419064846id_/https://www.roblox.com/discover/?Keyword=znation8000",
"roblox",
)
def test_parse_query_roblox_soviet_union_1657601847():
verify_serp_parsing(
"https://web.archive.org/web/20220712045727id_/https://www.roblox.com/discover/?Keyword=soviet+union",
"roblox",
)
file_length: 2,491
avg_line_length: 32.675676
max_line_length: 162
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_imdb_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_imdb_pulse_1283006912():
verify_serp_parsing(
"https://web.archive.org/web/20100828144832id_/http://www.imdb.com:80/find?s=all&q=Pulse",
"imdb",
)
def test_parse_query_imdb_0609265_s_nm_1329020836():
verify_serp_parsing(
"https://web.archive.org/web/20120212042716id_/http://www.imdb.com:80/find?q=0609265;s=nm",
"imdb",
)
def test_parse_query_imdb_angelina_jolie_1452700725():
verify_serp_parsing(
"https://web.archive.org/web/20160113155845id_/http://www.imdb.com:80/find?ref_=nv_sr_fn&q=Angelina+Jolie&s=all",
"imdb",
)
def test_parse_query_imdb_marcela_gomez_montoya_1614546944():
verify_serp_parsing(
"https://web.archive.org/web/20210228211544id_/http://www.imdb.com/find?q=Marcela%20G%C3%B3mez%20Montoya&exact=true",
"imdb",
)
def test_parse_query_imdb_murder_world_1268209692():
verify_serp_parsing(
"https://web.archive.org/web/20100310082812id_/http://www.imdb.com:80/find?s=all&q=murder+world",
"imdb",
)
def test_parse_query_imdb_gundula_rapsch_1628094679():
verify_serp_parsing(
"https://web.archive.org/web/20210804163119id_/https://www.imdb.com/find?q=Gundula+Rapsch&s=nm",
"imdb",
)
def test_parse_query_imdb_sam_claflin_1472223834():
verify_serp_parsing(
"https://web.archive.org/web/20160826150354id_/http://www.imdb.com:80/find?q=Sam%20Claflin&s=nm",
"imdb",
)
def test_parse_query_imdb_the_expanse_1521743964():
verify_serp_parsing(
"https://web.archive.org/web/20180322183924id_/http://www.imdb.com:80/find?s=all&q=the+expanse",
"imdb",
)
def test_parse_query_imdb_dogville_1187303706():
verify_serp_parsing(
"https://web.archive.org/web/20070816223506id_/http://www.imdb.com:80/find?s=all&q=dogville",
"imdb",
)
def test_parse_query_imdb_hunger_games_1518585690():
verify_serp_parsing(
"https://web.archive.org/web/20180214052130id_/http://www.imdb.com:80/find?q=hunger+games&s=tt&ref_=fn_tt",
"imdb",
)
file_length: 2,248
avg_line_length: 29.391892
max_line_length: 125
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/__init__.py
(empty file)
file_length: 0
avg_line_length: 0
max_line_length: 0
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_manual_youtube_serp_parsing.py
# flake8: noqa
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_chaoz_time_search():
verify_serp_parsing(
'https://web.archive.org/web/20220510040811id_/https://www.youtube.com/results?search_query=%21%21%21Chaoz+time%21%21%21',
'youtube'
)
file_length: 301
avg_line_length: 29.2
max_line_length: 130
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_twitter_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_twitter_rabble_ca_lang_ar_1442999332():
verify_serp_parsing(
"https://web.archive.org/web/20150923090852id_/https://mobile.twitter.com/search?vertical=default&near=me&q=rabble.ca%3Flang%3Dar&locale=ro",
"twitter",
)
def test_parse_query_twitter_rabble_ca_lang_fr_lang_id_1443046297():
verify_serp_parsing(
"https://web.archive.org/web/20150923221137id_/https://twitter.com/search?q=rabble.ca%3Flang%3Dfr%3Flang%3Did&f=videos&vertical=default&lang=sv",
"twitter",
)
def test_parse_query_twitter_lauramajor_1652839406():
verify_serp_parsing(
"https://web.archive.org/web/20220518020326id_/https://twitter.com/search?q=lauramajor&lang=no",
"twitter",
)
def test_parse_query_twitter_ecotourism_1557315321():
verify_serp_parsing(
"https://web.archive.org/web/20190508113521id_/https://twitter.com/search?q=%23Ecotourism",
"twitter",
)
def test_parse_query_twitter_freemariabutina_1563062689():
verify_serp_parsing(
"https://web.archive.org/web/20190714000449id_/https://twitter.com/search?q=%23FreeMariaButina&l=is&vertical=default&lang=mr",
"twitter",
)
def test_parse_query_twitter_http_shop_pre_com_corona02_p_409033_1648579256():
verify_serp_parsing(
"https://web.archive.org/web/20220329184056id_/https://mobile.twitter.com/search?q=http%3A%2F%2Fshop-pre.com%2Fcorona02%2F%3Fp%3D409033&lang=kn",
"twitter",
)
def test_parse_query_twitter_clubs_lang_sr_1591413117():
verify_serp_parsing(
"https://web.archive.org/web/20200606031157id_/https://twitter.com/search?q=clubs%3Flang%3Dsr&lang=en",
"twitter",
)
def test_parse_query_twitter_freemariabutina_1555124003():
verify_serp_parsing(
"https://web.archive.org/web/20190413025323id_/https://mobile.twitter.com/search?vertical=default&locale=es&q=%23FreeMariaButina&l=ja",
"twitter",
)
def test_parse_query_twitter_freemariabutina_1562498575():
verify_serp_parsing(
"https://web.archive.org/web/20190707112255id_/https://mobile.twitter.com/search?vertical=default&locale=id&q=%23FreeMariaButina&f=tweets",
"twitter",
)
def test_parse_query_twitter_campaigns_1481768285():
verify_serp_parsing(
"https://web.archive.org/web/20161215021805id_/https://twitter.com/search?q=campaigns&lang=ko",
"twitter",
)
file_length: 2,571
avg_line_length: 33.756757
max_line_length: 153
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_stackoverflow_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_stackoverflow_objective_c_1354546520():
verify_serp_parsing(
"https://web.archive.org/web/20121203145520id_/http://stackoverflow.com/questions/tagged/objective-c",
"stackoverflow",
)
def test_parse_query_stackoverflow_scala_1355718931():
verify_serp_parsing(
"https://web.archive.org/web/20121217043531id_/http://stackoverflow.com/questions/tagged/scala",
"stackoverflow",
)
def test_parse_query_stackoverflow_remote_execution_1645839151():
verify_serp_parsing(
"https://web.archive.org/web/20220226013231id_/https://stackoverflow.com/questions/tagged/remote-execution",
"stackoverflow",
)
def test_parse_query_stackoverflow_text_mining_1522322354():
verify_serp_parsing(
"https://web.archive.org/web/20180329111914id_/https://stackoverflow.com/questions/tagged/text-mining",
"stackoverflow",
)
def test_parse_query_stackoverflow_vue_js_php_1647710017():
verify_serp_parsing(
"https://web.archive.org/web/20220319171337id_/https://stackoverflow.com/questions/tagged/vue.js+php",
"stackoverflow",
)
def test_parse_query_stackoverflow_ruby_on_rails_plugins_1613971498():
verify_serp_parsing(
"https://web.archive.org/web/20210222052458id_/https://stackoverflow.com/questions/tagged/ruby-on-rails-plugins",
"stackoverflow",
)
def test_parse_query_stackoverflow_ruby_1_9_3_heroku_1398351399():
verify_serp_parsing(
"https://web.archive.org/web/20140424145639id_/http://stackoverflow.com/questions/tagged/ruby-1.9.3+heroku",
"stackoverflow",
)
def test_parse_query_stackoverflow_fonts_swing_jtextpane_1412916125():
verify_serp_parsing(
"https://web.archive.org/web/20141010044205id_/http://stackoverflow.com/questions/tagged/fonts+swing+jtextpane",
"stackoverflow",
)
def test_parse_query_stackoverflow_xampp_java_1547581001():
verify_serp_parsing(
"https://web.archive.org/web/20190115193641id_/https://stackoverflow.com/questions/tagged/xampp+java",
"stackoverflow",
)
def test_parse_query_stackoverflow_numpy_einsum_1516773912():
verify_serp_parsing(
"https://web.archive.org/web/20180124060512id_/https://stackoverflow.com/questions/tagged/numpy-einsum",
"stackoverflow",
)
file_length: 2,495
avg_line_length: 32.72973
max_line_length: 121
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_espn_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_espn_ball_state_1619440827():
verify_serp_parsing(
"https://web.archive.org/web/20210426124027id_/https://www.espn.com/search/_/q/Ball%20State/o/watch",
"espn",
)
def test_parse_query_espn_ball_state_1615730301():
verify_serp_parsing(
"https://web.archive.org/web/20210314135821id_/https://www.espn.com/search/_/q/Ball%20State/o/watch",
"espn",
)
file_length: 561
avg_line_length: 30.222222
max_line_length: 109
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_tribunnews_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_tribunnews_peringatan_dini_bmkg_kamis_18_februari_2021_1632679908():
verify_serp_parsing(
"https://web.archive.org/web/20210926181148id_/https://www.tribunnews.com/search?q=peringatan+dini+bmkg+kamis+18+februari+2021",
"tribunnews",
)
def test_parse_query_tribunnews_search_term_string_1607106977():
verify_serp_parsing(
"https://web.archive.org/web/20201204183617id_/https://www.tribunnews.com/search?q={search_term_string}",
"tribunnews",
)
def test_parse_query_tribunnews_search_term_string_1607225592():
verify_serp_parsing(
"https://web.archive.org/web/20201206033312id_/https://www.tribunnews.com/search?q={search_term_string}",
"tribunnews",
)
def test_parse_query_tribunnews_search_term_string_1607224881():
verify_serp_parsing(
"https://web.archive.org/web/20201206032121id_/https://www.tribunnews.com/search?q={search_term_string}",
"tribunnews",
)
def test_parse_query_tribunnews_search_term_string_1607130238():
verify_serp_parsing(
"https://web.archive.org/web/20201205010358id_/https://www.tribunnews.com/search?q={search_term_string}",
"tribunnews",
)
def test_parse_query_tribunnews_polsek_tallo_1663047668():
verify_serp_parsing(
"https://web.archive.org/web/20220913054108id_/http://www.tribunnews.com/search?q=polsek+tallo",
"tribunnews",
)
def test_parse_query_tribunnews_ricky_natapradja_1663049121():
verify_serp_parsing(
"https://web.archive.org/web/20220913060521id_/http://www.tribunnews.com/search?q=ricky+natapradja",
"tribunnews",
)
def test_parse_query_tribunnews_pilgub_dki_jakarta_2017_1491952670():
verify_serp_parsing(
"https://web.archive.org/web/20170411231750id_/http://www.tribunnews.com:80/search?q=pilgub+dki+jakarta+2017",
"tribunnews",
)
def test_parse_query_tribunnews_search_term_string_1607120098():
verify_serp_parsing(
"https://web.archive.org/web/20201204221458id_/https://www.tribunnews.com/search?q={search_term_string}",
"tribunnews",
)
def test_parse_query_tribunnews_linkid_js_1533168227():
verify_serp_parsing(
"https://web.archive.org/web/20180802000347id_/http://www.tribunnews.com/search?q=linkid.js",
"tribunnews",
)
file_length: 2,496
avg_line_length: 32.743243
max_line_length: 136
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_facebook_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_facebook_vanilla_1481832838():
verify_serp_parsing(
"https://web.archive.org/web/20161215201358id_/https://www.facebook.com/search/photos/?q=%23vanilla&ref=top_filter",
"facebook",
)
def test_parse_query_facebook_virpi_soikkeli_1623257178():
verify_serp_parsing(
"https://web.archive.org/web/20210609164618id_/http://www.facebook.com/search/?q=virpi+soikkeli&o=2048&init=ffs",
"facebook",
)
def test_parse_query_facebook_deanna_sanchez_1629215596():
verify_serp_parsing(
"https://web.archive.org/web/20210817155316id_/https://www.facebook.com/search/web/direct_search.php?q=deanna+sanchez&dpr=1&ajaxpipe=1&ajaxpipe_token=AXivh35bR9s2xXXV&quickling%5Bversion%5D=3128950%3B0%3B&__user=100004323191030&__a=1&__dyn=5V4cjEzUGByC5A9UrEwlg94qbxqbAKGiyEyfirYw8ovyui9zob4q2i5UK3u2CEaUZ1ebkwy6UnGieKcVrDG4XzEa8iGt0gKum4UpKqqbAWCDxi5UWfz8gAxu1iyECQum2m4oqyU9omUmC-Wx2vgqx-Eth8gUKElCUmyE8XDh45EgAwzCwYyrK4rGUohES-9yaBy8CEO784afxK9yUvy8lUGaHCG2C&__af=j0&__req=jsonp_4&__be=0&__pc=PHASED%3ADEFAULT&__rev=3128950&__spin_r=3128950&__spin_b=trunk&__spin_t=1498867474&__adt=4",
"facebook",
)
def test_parse_query_facebook_mr_robot_1469187052():
verify_serp_parsing(
"https://web.archive.org/web/20160722113052id_/https://www.facebook.com/search/top/?q=mr%20robot",
"facebook",
)
def test_parse_query_facebook_https_peelarchivesblog_com_about_peel_1599241783():
verify_serp_parsing(
"https://web.archive.org/web/20200904174943id_/https://www.facebook.com/search/top?q=https%3A%2F%2Fpeelarchivesblog.com%2Fabout-peel%2F",
"facebook",
)
def test_parse_query_facebook_trumptrain_1461904486():
verify_serp_parsing(
"https://web.archive.org/web/20160429043446id_/https://www.facebook.com/search/top/?q=%23TrumpTrain&ref=top_filter&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_bernieorbust_1467812085():
verify_serp_parsing(
"https://web.archive.org/web/20160706133445id_/https://www.facebook.com/search/latest/?q=%23BernieOrBust&ref=top_filter",
"facebook",
)
def test_parse_query_facebook_wisconsin_1463064570():
verify_serp_parsing(
"https://web.archive.org/web/20160512144930id_/https://www.facebook.com/search/top/?q=wisconsin&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_alda_lesbiennes_refugiees_1615284371():
verify_serp_parsing(
"https://web.archive.org/web/20210309100611id_/https://www.facebook.com/search/top/?q=alda%20-%20lesbiennes%20r%C3%A9fugi%C3%A9es&epa=SEARCH_BOX",
"facebook",
)
def test_parse_query_facebook_mens_health_survival_of_the_fittest_1619473718():
verify_serp_parsing(
"https://web.archive.org/web/20210426214838id_/http://www.facebook.com/search/?q=mens+health+survival+of+the+fittest&init=quick",
"facebook",
)
def test_parse_query_facebook_www_9xcb_biz_webex_setup_was_unsuccessful_error_23_1404412853():
verify_serp_parsing(
"https://web.archive.org/web/20140703184053id_/http://www.facebook.com/search.php?q=www.9xcb.biz/?WebEx+Setup+Was+Unsuccessful+Error+23",
"facebook",
)
def test_parse_query_facebook_tag_someone_who_needs_this_1587554575():
verify_serp_parsing(
"https://web.archive.org/web/20200422112255id_/https://www.facebook.com/search/videos/?q=tag%20someone%20who%20needs%20this&epa=FILTERS&filters=eyJycF9jcmVhdGlvbl90aW1lIjoie1wibmFtZVwiOlwiY3JlYXRpb25fdGltZVwiLFwiYXJnc1wiOlwie1xcXCJzdGFydF95ZWFyXFxcIjpcXFwiMjAyMFxcXCIsXFxcInN0YXJ0X21vbnRoXFxcIjpcXFwiMjAyMC0wNFxcXCIsXFxcImVuZF95ZWFyXFxcIjpcXFwiMjAyMFxcXCIsXFxcImVuZF9tb250aFxcXCI6XFxcIjIwMjAtMDRcXFwiLFxcXCJzdGFydF9kYXlcXFwiOlxcXCIyMDIwLTA0LTIwXFxcIixcXFwiZW5kX2RheVxcXCI6XFxcIjIwMjAtMDQtMjZcXFwifVwifSJ9",
"facebook",
)
def test_parse_query_facebook_blog_post_334_bootload_1567494170():
verify_serp_parsing(
"https://web.archive.org/web/20190903070250id_/https://developers.facebook.com/search/?q=blog+post+334+bootload¬found=0&search_filter_option=docs",
"facebook",
)
def test_parse_query_facebook_rosy_20gupta_1494524363():
verify_serp_parsing(
"https://web.archive.org/web/20170511173923id_/https://www.facebook.com/search/top/?q=rosy%2520gupta",
"facebook",
)
def test_parse_query_facebook_social_plugins_boutons_jaime_envoyer_partager_et_citations_js_exec_je31_1567485463():
verify_serp_parsing(
"https://web.archive.org/web/20190903043743id_/https://developers.facebook.com/search/?q=social+plugins+boutons+jaime+envoyer+partager+et+citations+js+exec+Je31¬found=1&search_filter_option=docs",
"facebook",
)
def test_parse_query_facebook_greet_1623235952():
verify_serp_parsing(
"https://web.archive.org/web/20210609105232id_/http://www.facebook.com/search/?q=Greet&o=2048&init=ffs",
"facebook",
)
def test_parse_query_facebook_cruzcrew_1459272010():
verify_serp_parsing(
"https://web.archive.org/web/20160329172010id_/https://www.facebook.com/search/photos/?q=%23CruzCrew&ref=top_filter&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_ineligible_1466870871():
verify_serp_parsing(
"https://web.archive.org/web/20160625160751id_/https://www.facebook.com/search/people/?q=%23ineligible&ref=top_filter",
"facebook",
)
def test_parse_query_facebook_solcellespecialisten_1389488036():
verify_serp_parsing(
"https://web.archive.org/web/20140112005356id_/http://da-dk.facebook.com/search.php?q=Solcellespecialisten&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_blog_post_319_je31_1567459151():
verify_serp_parsing(
"https://web.archive.org/web/20190902211911id_/https://developers.facebook.com/search/?q=blog+post+319+Je31¬found=1&search_filter_option=news",
"facebook",
)
file_length: 6,107
avg_line_length: 41.416667
max_line_length: 596
extension_type: py

repo: archive-query-log
file: archive-query-log-main/archive_query_log/results/test/test_bing_serp_parsing.py
# flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_bing_uscis_forms_400_1486690408():
verify_serp_parsing(
"https://web.archive.org/web/20170210013328id_/http://www.bing.com/images/search?q=USCIS+Forms+400&FORM=RESTAB",
"bing",
)
def test_parse_query_bing_regional_asia_taiwan_localities_taichung_city_1580952978():
verify_serp_parsing(
"https://web.archive.org/web/20200206013618id_/https://www.bing.com/search?q=Regional+Asia+Taiwan+Localities+Taichung+City",
"bing",
)
def test_parse_query_bing_intel_i7_chip_1656625238():
verify_serp_parsing(
"https://web.archive.org/web/20220630214038id_/https://www.bing.com/search?q=intel+i7+chip&lvl=1&FORM=PMETIS&filters=ans%3A%22cvns%22+level%3A%220%22+mcid%3A%229363152adf5f441aa0dd1d28d3cf21f2%22&idx=0",
"bing",
)
def test_parse_query_bing_rpg_title_screen_1656104004():
verify_serp_parsing(
"https://web.archive.org/web/20220624205324id_/https://www.bing.com/images/search?q=RPG+Title+Screen&FORM=IRMHRS",
"bing",
)
def test_parse_query_bing_win10_iso_xia_zai_1580349240():
verify_serp_parsing(
"https://web.archive.org/web/20200130015400id_/https://www.bing.com/search?q=win10+iso+%E4%B8%8B%E8%BD%BD&filters=ex1%3A%22ez1%22&qpvt=win10+iso+%E4%B8%8B%E8%BD%BD",
"bing",
)
def test_parse_query_bing_great_blue_heron_sound_1660552923():
verify_serp_parsing(
"https://web.archive.org/web/20220815084203id_/https://www.bing.com/search?q=great+blue+heron+sound&FORM=QSRE1",
"bing",
)
def test_parse_query_bing_wonder_kids_beginning_1652132287():
verify_serp_parsing(
"https://web.archive.org/web/20220509213807id_/https://www.bing.com/search?q=wonder+kids+beginning&pc=0TRO&ptag=C1N6612A70619013F5&form=CONBNT&conlogo=CT3210127",
"bing",
)
def test_parse_query_bing_mesozoic_wikipedia_1643229373():
verify_serp_parsing(
"https://web.archive.org/web/20220126203613id_/https://www.bing.com/search?q=Mesozoic+wikipedia&form=WIKIRE",
"bing",
)
def test_parse_query_bing_kumolife_1388896361():
verify_serp_parsing(
"https://web.archive.org/web/20140105043241id_/http://www.bing.com/search?q=kumolife&form=MSSRPD",
"bing",
)
def test_parse_query_bing_florence_caillon_bandcamp_1647897680():
verify_serp_parsing(
"https://web.archive.org/web/20220321212120id_/https://www.bing.com/search?q=Florence+Caillon+bandcamp&qs=n&form=QBRE&=Search+%7B0%7D+for+%7B1%7D&=Search+work+for+%7B0%7D&=%25eManage+Your+Search+History%25E&sp=-1&pq=florence+caillon+bandcamp&sc=1-25&sk=&cvid=E5A3CCFC449440CABF3E217E535C5DD6",
"bing",
)
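
Each generated module above starts with the header "# This file is auto-generated by generate_tests.py." That generator script is not part of this excerpt, so the snippet below is only a guess at its general shape: render one test function per (archived URL, service) pair from a template. All names in it are hypothetical.

TEST_TEMPLATE = '''\
def test_parse_query_{service}_{slug}():
    verify_serp_parsing(
        "{url}",
        "{service}",
    )
'''


def render_test_function(url: str, service: str, slug: str) -> str:
    # Hypothetical sketch of the generator's core step: fill the template for one pair.
    # A real generator would also derive `slug` from the decoded query and append a
    # timestamp to keep the generated function names unique.
    return TEST_TEMPLATE.format(url=url, service=service, slug=slug)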
file_length: 2,823
avg_line_length: 37.162162
max_line_length: 301
extension_type: py