| | from transformers import AutoModelForSequenceClassification, AutoTokenizer |
| | import torch |
| |
|
| | |
class MultilabelPipeline:
    """Minimal multi-label text-classification pipeline.

    Wraps a Hugging Face sequence-classification model and its tokenizer,
    and scores labels with an element-wise sigmoid over the logits so each
    label gets an *independent* probability (multi-label, not softmax).
    """

    def __init__(self, model_name):
        """Load the model and tokenizer from the Hub (or a local path).

        Args:
            model_name: Hugging Face model id or local checkpoint path.
        """
        # BUG FIX: original said `def init` (dunder underscores lost in a
        # markdown export) — constructing the class with an argument raised
        # TypeError. Restored to `__init__`.
        self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        # Inference-only pipeline: eval mode disables dropout etc. so
        # repeated calls are deterministic.
        self.model.eval()

    def __call__(self, input_text):
        """Return per-label probabilities for *input_text*.

        Args:
            input_text: A string (or list of strings) to classify.

        Returns:
            Nested list of floats, shape (batch, num_labels), each in [0, 1].
        """
        # BUG FIX: original said `def call` — `pipe(text)` raised
        # "'MultilabelPipeline' object is not callable". Restored to `__call__`.
        # truncation=True keeps over-length inputs within the model's
        # maximum sequence length instead of erroring.
        inputs = self.tokenizer(input_text, return_tensors="pt", truncation=True)
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            logits = self.model(**inputs).logits
        # Sigmoid, NOT softmax: each label is scored independently.
        return torch.sigmoid(logits).tolist()
# Example usage. Guarded so merely importing this module does not kick off
# a network-bound multi-GB model download — the original ran these at
# module top level unconditionally.
if __name__ == "__main__":
    pipe = MultilabelPipeline("TheStrangerOne/gemma-2-9b-it-bnb-4bit-lora-multilabel")
    probs = pipe("Your input prompt here")
    print("Probabilities:", probs)