from transformers import (
    AutoModelForCausalLM,
    DataCollatorForSeq2Seq,  # NOTE(review): unused here — kept in case another module imports it from this file
    LogitsProcessor,
    LogitsProcessorList,
)
import torch  # NOTE(review): unused here — kept for the same reason


class ConstrainedQwenModel(AutoModelForCausalLM):
    """Causal-LM wrapper that injects a constraining logits processor into generation.

    NOTE(review): ``AutoModelForCausalLM`` is a factory class that cannot be
    instantiated directly (``from_pretrained`` returns a concrete model class),
    so subclassing it like this is unusual — confirm how instances of this
    class are actually created before relying on it.

    To enable constrained decoding, attach a *concrete* ``LogitsProcessor``
    subclass instance to the model as ``constrained_processor`` before calling
    :meth:`generate`.
    """

    def generate(self, *args, **kwargs):
        """Run generation, appending the model's constraining processor if present.

        Accepts the same arguments as ``GenerationMixin.generate``; any
        ``logits_processor`` the caller supplies is preserved and extended.

        BUG FIX: the original code appended ``LogitsProcessor(self.tokenizer)``.
        ``LogitsProcessor`` is the abstract base class — its constructor takes
        no tokenizer argument (this raised ``TypeError``) and its ``__call__``
        raises ``NotImplementedError``, so generation could never succeed.
        A concrete processor must be supplied instead; we look for one on the
        instance and skip the step when none is attached (best-effort, rather
        than crashing every call).
        """
        # Reuse the caller's processor list if given, otherwise start a fresh one.
        processors = kwargs.setdefault("logits_processor", LogitsProcessorList())

        # Opt-in hook: a concrete LogitsProcessor attached by the user/subclass.
        processor = getattr(self, "constrained_processor", None)
        if processor is not None:
            processors.append(processor)

        return super().generate(*args, **kwargs)


if __name__ == '__main__':
    pass