File size: 1,459 Bytes
50af094
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

class VibeCheckInference:
    """Thin inference wrapper around a fine-tuned HF sequence classifier.

    Loads a tokenizer + model from *model_path* once at construction,
    moves the model to GPU when available, and exposes `analyze` for
    single-text sentiment prediction.
    """

    def __init__(self, model_path):
        # Prefer CUDA when a GPU is visible; fall back to CPU otherwise.
        has_gpu = torch.cuda.is_available()
        self.device = torch.device("cuda" if has_gpu else "cpu")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForSequenceClassification.from_pretrained(model_path)
        self.model = model.to(self.device)
        # Inference only: disable dropout / use eval-mode batchnorm stats.
        self.model.eval()

    def analyze(self, text):
        """Classify *text*; return dict with original text, label, confidence.

        Label mapping assumes index 1 == POSITIVE for this checkpoint —
        inherited from the original implementation.
        """
        encoded = self.tokenizer(
            text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=128,
        )
        encoded = encoded.to(self.device)

        # No gradients needed for a forward-only pass.
        with torch.no_grad():
            logits = self.model(**encoded).logits
            probabilities = torch.nn.functional.softmax(logits, dim=-1)
            confidence, predicted = torch.max(probabilities, dim=-1)

        label = "POSITIVE" if predicted.item() == 1 else "NEGATIVE"
        return {
            "text": text,
            "label": label,
            "confidence": f"{confidence.item() * 100:.2f}%",
        }

# Usage
if __name__ == "__main__":
    # Point this to your unzipped folder
    classifier = VibeCheckInference("./VibeCheck_v1_HF")

    dialogue = "Did you see the new movie?' B: 'Yeah, it was okay, but the ending felt a bit rushed.' A: 'I totally agree, it could have been better.'"
    outcome = classifier.analyze(dialogue)
    print("Result: {} | Confidence: {}".format(outcome["label"], outcome["confidence"]))