from transformers.models.perception_lm.modeling_perception_lm import PerceptionLMForConditionalGeneration
from .int4_quant import quantize_model_to_int4
class PerceptionLMForConditionalGenerationInt4(PerceptionLMForConditionalGeneration):
    """PerceptionLM conditional-generation model quantized to INT4 at construction.

    The full-precision model is built first via the parent constructor, then
    ``quantize_model_to_int4`` rewrites it in place. Module-name patterns listed
    in ``config.int4_exclude_patterns`` (when the attribute exists) are skipped
    by the quantizer; with no such attribute, nothing is excluded.
    """

    def __init__(self, config):
        # Parent builds the full-precision weights before quantization runs.
        super().__init__(config)
        exclude_patterns = getattr(config, "int4_exclude_patterns", [])
        # Passed as a tuple — presumably the quantizer expects an immutable
        # sequence of name patterns; confirm against int4_quant.
        quantize_model_to_int4(self, name_exclude_patterns=tuple(exclude_patterns))