mishrabp committed on
Commit
f506d9b
·
verified ·
1 Parent(s): 1f34088

Upload README.md with huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +68 -9
README.md CHANGED
@@ -44,16 +44,75 @@ The base model is **`FacebookAI/roberta-base`**, and the LoRA adapters have been
44
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
45
  import torch
46
 
47
- model = AutoModelForSequenceClassification.from_pretrained("mishrabp/roberta-sentiment-analysis-merged")
48
- tokenizer = AutoTokenizer.from_pretrained("mishrabp/roberta-sentiment-analysis-merged")
49
-
50
- text = "I really loved the new Batman movie!"
51
- inputs = tokenizer(text, return_tensors="pt")
52
- outputs = model(**inputs)
53
- label_id = torch.argmax(outputs.logits).item()
54
-
 
 
 
 
 
 
 
55
  id2label = {0: "NEGATIVE", 1: "POSITIVE"}
56
- print(id2label[label_id])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  ```
58
 
59
  ---
 
44
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
45
  import torch
46
 
47
+ # -----------------------------
48
+ # Load merged model and tokenizer
49
+ # -----------------------------
50
+ model_name = "mishrabp/roberta-sentiment-analysis-merged"
51
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
52
+ model = AutoModelForSequenceClassification.from_pretrained(model_name)
53
+
54
+ # Make sure model is on the correct device and in evaluation mode
55
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
56
+ model.to(device)
57
+ model.eval()
58
+
59
+ # -----------------------------
60
+ # Label mapping (same as training)
61
+ # -----------------------------
62
  id2label = {0: "NEGATIVE", 1: "POSITIVE"}
63
+
64
+ # -----------------------------
65
+ # Texts for validation
66
+ # -----------------------------
67
+ text_list = [
68
+ # Positive
69
+ "I loved the new Batman movie!",
70
+ "What an amazing experience!",
71
+ "The service was surprisingly good, even though the restaurant was packed.",
72
+ "Absolutely fantastic performance, though a bit too long for my taste.",
73
+ "The concert had incredible energy, yet the sound quality was pleasing.",
74
+ "The dessert was delightful.",
75
+ "I loved the artwork.",
76
+ "The new phone works well.",
77
+ "The flight was smooth.",
78
+ "I was thrilled by the surprise party.",
79
+
80
+ # Negative
81
+ "The food at that restaurant was terrible.",
82
+ "I will never go back to that place again.",
83
+ "I was disappointed that my favorite dish was sold out.",
84
+ "The book was thrilling at first, but the ending left me mindblowing.", # could be positive/negative, marking negative
85
+ "The hotel room looked nothing like the photos online, but the staff were friendly.", # neutral → marking negative
86
+ "The movie had stunning visuals, but the plot was overly predictable.",
87
+ "The customer support solved my issue quickly, though I had to wait on hold for a long time.",
88
+ "I appreciated the thoughtful gift, but the packaging was damaged upon delivery."
89
+ ]
90
+
91
+ # -----------------------------
92
+ # Inference
93
+ # -----------------------------
94
+ # Tokenize all texts as a batch (avoids inconsistencies and is faster)
95
+ inputs = tokenizer(
96
+ text_list,
97
+ return_tensors="pt",
98
+ truncation=True,
99
+ padding=True,
100
+ max_length=512
101
+ )
102
+
103
+ # Move all inputs to device
104
+ inputs = {k: v.to(device) for k, v in inputs.items()}
105
+
106
+ # Run inference
107
+ with torch.no_grad():
108
+ outputs = model(**inputs)
109
+ predictions = torch.argmax(outputs.logits, dim=-1)
110
+
111
+ # Print results
112
+ for text, pred in zip(text_list, predictions):
113
+ print(f"Text: {text}")
114
+ print(f"Predicted Sentiment: {id2label[pred.item()]}")
115
+ print("-" * 50)
116
  ```
117
 
118
  ---