import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
# colors for visualization
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
          [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]
def plot_results(model, pil_img, results):
    """Draw predicted boxes and class labels from an object-detection model on a PIL image."""
    plt.figure(figsize=(16, 10))
    plt.imshow(pil_img)
    ax = plt.gca()
    scores, labels, boxes = results["scores"], results["labels"], results["boxes"]
    colors = COLORS * 100
    for score, label, (xmin, ymin, xmax, ymax), c in zip(scores.tolist(), labels.tolist(), boxes.tolist(), colors):
        ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                                   fill=False, color=c, linewidth=3))
        text = f'{model.config.id2label[label]}: {score:0.2f}'
        ax.text(xmin, ymin, text, fontsize=15,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.axis('off')
    plt.show()
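# ---- Hedged usage sketch (not part of the original file) ----
# A minimal example of how plot_results() could be driven, assuming a recent
# `transformers` version, the DETR checkpoint "facebook/detr-resnet-50", and a
# local image file "cats.png" -- all of these are illustrative assumptions.
def _demo_plot_results(image_path="cats.png"):
    from PIL import Image
    from transformers import DetrImageProcessor, DetrForObjectDetection
    image = Image.open(image_path)
    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
    inputs = processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    # Convert raw outputs to the {"scores", "labels", "boxes"} dict that
    # plot_results() expects (boxes in absolute xyxy pixel coordinates).
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=0.9)[0]
    plot_results(model, image, results)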
############# From the previous lesson(s) of "Building your own Quantizer"
def w8_a16_forward(weight, input, scales, bias=None):
    """W8A16 forward pass: int8 weights are cast to the activation dtype,
    multiplied via F.linear, then rescaled per output channel."""
    casted_weights = weight.to(input.dtype)
    output = F.linear(input, casted_weights) * scales
    if bias is not None:
        output = output + bias
    return output
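# ---- Hedged sanity check (not part of the original file) ----
# A quick sketch verifying that w8_a16_forward() approximates a plain float
# F.linear() once int8 weights and per-channel scales are in place; all
# tensor shapes below are illustrative assumptions.
def _demo_w8_a16_forward():
    torch.manual_seed(0)
    fp_weight = torch.randn(8, 16)                     # (out_features, in_features)
    scales = fp_weight.abs().max(dim=-1).values / 127  # one scale per output row
    int8_w = torch.round(fp_weight / scales.unsqueeze(1)).to(torch.int8)
    x = torch.randn(2, 16)
    approx = w8_a16_forward(int8_w, x, scales)
    exact = F.linear(x, fp_weight)
    print("max abs error:", (approx - exact).abs().max().item())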
class W8A16LinearLayer(nn.Module):
    """Linear layer storing int8 weights plus per-channel scales (W8A16 quantization)."""
    def __init__(self, in_features, out_features,
                 bias=True, dtype=torch.float32):
        super().__init__()
        # Buffers, not Parameters: quantized weights are not trained via autograd.
        self.register_buffer(
            "int8_weights",
            torch.randint(
                -128, 127, (out_features, in_features), dtype=torch.int8
            )
        )
        self.register_buffer("scales",
                             torch.randn((out_features), dtype=dtype))
        if bias:
            self.register_buffer("bias",
                                 torch.randn((1, out_features),
                                             dtype=dtype))
        else:
            self.bias = None

    def quantize(self, weights):
        # Symmetric per-channel quantization: one scale per output row,
        # chosen so the row's max absolute value maps to 127.
        w_fp32 = weights.clone().to(torch.float32)
        scales = w_fp32.abs().max(dim=-1).values / 127
        scales = scales.to(weights.dtype)
        int8_weights = torch.round(weights
                                   / scales.unsqueeze(1)).to(torch.int8)
        self.int8_weights = int8_weights
        self.scales = scales

    def forward(self, input):
        return w8_a16_forward(self.int8_weights,
                              input, self.scales, self.bias)
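# ---- Hedged usage sketch (not part of the original file) ----
# Quantizing a single nn.Linear into a W8A16LinearLayer and measuring the
# round-trip error; the layer sizes are illustrative assumptions.
def _demo_w8a16_layer():
    torch.manual_seed(0)
    fp_linear = nn.Linear(16, 8, bias=False)
    q_linear = W8A16LinearLayer(16, 8, bias=False)
    q_linear.quantize(fp_linear.weight)
    x = torch.randn(2, 16)
    print("quantization error:",
          (q_linear(x) - fp_linear(x)).abs().max().item())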
def replace_linear_with_target_and_quantize(module,
                                            target_class, module_name_to_exclude):
    """Recursively swap every nn.Linear (except excluded names) for target_class
    and quantize the original float weights into the new module."""
    for name, child in module.named_children():
        if isinstance(child, nn.Linear) and name not in module_name_to_exclude:
            old_bias = child.bias
            old_weight = child.weight
            new_module = target_class(child.in_features,
                                      child.out_features,
                                      old_bias is not None,
                                      child.weight.dtype)
            setattr(module, name, new_module)
            getattr(module, name).quantize(old_weight)
            if old_bias is not None:
                getattr(module, name).bias = old_bias
        else:
            # Recursively call the function for nested modules
            replace_linear_with_target_and_quantize(child,
                                                    target_class, module_name_to_exclude)
###################################
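# ---- Hedged usage sketch (not part of the original file) ----
# Swapping the Linear layers of a toy model while keeping one layer in float;
# the model structure and the excluded name "lm_head" are assumptions chosen
# to mirror how language-model heads are typically left unquantized.
def _demo_replace_and_quantize():
    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.embed = nn.Embedding(10, 16)
            self.fc = nn.Linear(16, 16)
            self.lm_head = nn.Linear(16, 10)

        def forward(self, ids):
            return self.lm_head(self.fc(self.embed(ids)))

    model = ToyModel()
    replace_linear_with_target_and_quantize(model, W8A16LinearLayer,
                                            ["lm_head"])
    print(model)  # fc is now W8A16LinearLayer; lm_head stays nn.Linear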