|
|
import torch |
|
|
import seaborn as sns |
|
|
import matplotlib.pyplot as plt |
|
|
from matplotlib.colors import ListedColormap |
|
|
import torch |
|
|
|
|
|
|
|
|
def quantization_error(tensor, dequantized_tensor):
    """Return the mean squared error between a tensor and its dequantized reconstruction."""
    diff = dequantized_tensor - tensor
    return (diff ** 2).mean()
|
|
|
|
|
|
|
|
def linear_q_with_scale_and_zero_point(
        r_tensor, scale, zero_point, dtype=torch.int8):
    """
    Linearly quantize a floating-point tensor with precomputed parameters.

    Computes q = clamp(round(r / scale + zero_point), q_min, q_max) and
    casts the result to `dtype`, where [q_min, q_max] is the representable
    range of `dtype`.
    """
    info = torch.iinfo(dtype)

    # Map to the quantized grid, round to the nearest integer level,
    # then clamp into the target dtype's range before casting.
    shifted = torch.round(r_tensor / scale + zero_point)
    return shifted.clamp(info.min, info.max).to(dtype)
|
|
|
|
|
|
|
|
def linear_dequantization(quantized_tensor, scale, zero_point):
    """
    Map integer quantized values back to floats: r = scale * (q - zero_point).
    """
    # Cast to float first so the subtraction happens in floating point.
    centered = quantized_tensor.float() - zero_point
    return centered * scale
|
|
|
|
|
def plot_matrix(tensor, ax, title, vmin=0, vmax=1, cmap=None):
    """
    Draw `tensor` as an annotated seaborn heatmap on the given axes.

    Each cell shows its value with two decimal places; the colorbar and
    tick labels are suppressed so panels stay compact.
    """
    data = tensor.cpu().numpy()
    sns.heatmap(
        data,
        ax=ax,
        vmin=vmin,
        vmax=vmax,
        cmap=cmap,
        annot=True,
        fmt=".2f",
        cbar=False,
    )
    ax.set_title(title)
    # Hide axis tick labels on both axes.
    for setter in (ax.set_yticklabels, ax.set_xticklabels):
        setter([])
|
|
|
|
|
|
|
|
def plot_quantization_errors(original_tensor, quantized_tensor, dequantized_tensor, dtype = torch.int8, n_bits = 8):
    """
    Show four heatmaps side by side: the original tensor, its quantized
    form, the dequantized reconstruction, and the absolute error.
    """
    fig, axes = plt.subplots(1, 4, figsize=(15, 4))

    plot_matrix(original_tensor, axes[0], 'Original Tensor',
                cmap=ListedColormap(['white']))

    # Scale the quantized panel's color range to the dtype's full range.
    info = torch.iinfo(dtype)
    plot_matrix(quantized_tensor, axes[1],
                f'{n_bits}-bit Linear Quantized Tensor',
                vmin=info.min, vmax=info.max, cmap='coolwarm')

    plot_matrix(dequantized_tensor, axes[2], 'Dequantized Tensor',
                cmap='coolwarm')

    q_error_tensor = abs(original_tensor - dequantized_tensor)
    plot_matrix(q_error_tensor, axes[3], 'Quantization Error Tensor',
                cmap=ListedColormap(['white']))

    fig.tight_layout()
    plt.show()
|
|
|
|
|
|
|
|
def get_q_scale_and_zero_point(r_tensor, dtype=torch.int8):
    """
    Compute per-tensor asymmetric quantization parameters for `r_tensor`.

    Args:
        r_tensor: floating-point tensor to be quantized.
        dtype: target integer dtype; its iinfo range defines [q_min, q_max].

    Returns:
        (scale, zero_point): `scale` is a positive Python float mapping one
        quantized step to real units; `zero_point` is an int clamped into
        the representable range of `dtype`.
    """
    q_min, q_max = torch.iinfo(dtype).min, torch.iinfo(dtype).max

    r_min, r_max = r_tensor.min().item(), r_tensor.max().item()

    # Degenerate case: a constant tensor has r_max == r_min, which would
    # give scale == 0 and divide by zero below. Any positive scale encodes
    # the tensor exactly; use 1.0 with a zero point of 0 clamped into range.
    if r_max == r_min:
        return 1.0, int(min(max(0, q_min), q_max))

    scale = (r_max - r_min) / (q_max - q_min)

    zero_point = q_min - (r_min / scale)

    # Clamp the zero point to the nearest end of the quantized range.
    # (Previously an overshoot past q_max was wrongly clamped to q_min,
    # putting the zero point at the opposite end of the range.)
    if zero_point < q_min:
        zero_point = q_min
    elif zero_point > q_max:
        zero_point = q_max
    else:
        zero_point = int(round(zero_point))
    return scale, zero_point
|
|
|
|
|
def linear_quantization(r_tensor, n_bits, dtype=torch.int8):
    """
    Quantize a floating-point tensor with per-tensor asymmetric linear
    quantization, deriving scale and zero point from the tensor's range.

    Args:
        r_tensor: floating-point tensor to quantize.
        n_bits: bit width; kept for backward compatibility with existing
            callers — the effective range is determined by `dtype`, so this
            argument is not used in the computation.
        dtype: target integer dtype (default torch.int8).

    Returns:
        (quantized_tensor, scale, zero_point)
    """
    # Bug fix: forward `dtype` so the scale/zero point are computed for the
    # same integer range the values are clamped to (previously the int8
    # range was always assumed here, regardless of `dtype`).
    scale, zero_point = get_q_scale_and_zero_point(r_tensor, dtype=dtype)

    quantized_tensor = linear_q_with_scale_and_zero_point(
        r_tensor, scale=scale, zero_point=zero_point, dtype=dtype)

    return quantized_tensor, scale, zero_point
|
|
|