opinder2906 committed on
Commit
6afd016
·
verified ·
1 Parent(s): 488d680

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -102
app.py CHANGED
@@ -1,104 +1,18 @@
1
  # app.py
2
 
3
- import streamlit as st
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
- import json
8
- import pickle
9
-
10
- # ─── Model definitions ─────────────────────────────────────────────────────────
11
-
12
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal positional encodings to token embeddings.

    Precomputes the standard sin/cos table (Vaswani et al., "Attention Is
    All You Need") for up to ``max_len`` positions.
    """

    def __init__(self, d_model, max_len=32):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-torch.log(torch.tensor(10000.0)) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sin
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cos
        # Register as a non-persistent buffer: it follows .to(device)/.cuda()
        # automatically (the old code carried a CPU tensor and converted on
        # every forward), and persistent=False keeps it out of the state_dict
        # so checkpoints saved by the original code still load cleanly.
        self.register_buffer("pe", pe.unsqueeze(0), persistent=False)

    def forward(self, x):
        # x: (batch, seq, d_model); add the first seq positions of the table.
        return x + self.pe[:, : x.size(1)]


class EmotionTransformer(nn.Module):
    """Transformer-encoder classifier mapping token-id sequences to emotion logits."""

    def __init__(self, vocab_size, embed_dim, num_heads, num_classes, pad_idx, max_len=32):
        super().__init__()
        # BUG FIX: forward() previously read `pad_idx` as a bare name, which is
        # undefined at module scope and raised NameError at inference time.
        # Persist it on the module instead.
        self.pad_idx = pad_idx
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=pad_idx)
        self.pos_enc = PositionalEncoding(embed_dim, max_len)
        layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=num_heads, batch_first=True)
        self.trans_enc = nn.TransformerEncoder(layer, num_layers=2)
        self.dropout = nn.Dropout(0.3)
        self.fc = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        # True where x is a padding token; those positions are excluded from
        # self-attention via src_key_padding_mask.
        mask = x == self.pad_idx
        x = self.embedding(x)
        x = self.pos_enc(x)
        x = self.trans_enc(x, src_key_padding_mask=mask)
        # Mean-pool over the sequence (padding positions included), then classify.
        x = self.dropout(x.mean(dim=1))
        return self.fc(x)
40
-
41
- # ─── Load artifacts ────────────────────────────────────────────────────────────
42
-
43
# NOTE(review): st.cache(allow_output_mutation=True) was deprecated in
# Streamlit 1.18 and later removed; st.cache_resource is the modern
# replacement — confirm the deployed Streamlit version before changing it.
@st.cache(allow_output_mutation=True)
def load_model_and_artifacts():
    """Load the vocabulary, label encoder, and trained model (cached per session).

    Returns:
        (model, vocab, le): the eval-mode EmotionTransformer, the
        token -> id vocabulary dict, and the unpickled label encoder
        (presumably a fitted sklearn LabelEncoder — it exposes .classes_
        and .inverse_transform; verify against the training script).
    """
    # load vocab and label encoder
    with open("vocab.json", "r") as vf:
        vocab = json.load(vf)
    # SECURITY NOTE: pickle.load executes arbitrary code if the file is
    # untrusted; acceptable only because this artifact ships with the app.
    with open("label_encoder.pkl", "rb") as lf:
        le = pickle.load(lf)
    # instantiate & load model
    pad_idx = vocab.get("<PAD>", 0)
    num_classes = len(le.classes_)
    # Hyperparameters (embed_dim=64, num_heads=4, max_len=32) must match the
    # values used at training time or load_state_dict below will fail.
    model = EmotionTransformer(
        vocab_size=len(vocab),
        embed_dim=64,
        num_heads=4,
        num_classes=num_classes,
        pad_idx=pad_idx,
        max_len=32,
    )
    state = torch.load("emotion_transformer_model.pth", map_location="cpu")
    model.load_state_dict(state)
    model.eval()
    return model, vocab, le

# Executed at import; the cache decorator makes reruns reuse the same objects.
model, vocab, le = load_model_and_artifacts()
67
-
68
- # ─── Preprocessing & Inference ─────────────────────────────────────────────────
69
-
70
def predict_emotion(text: str, max_len: int = 32) -> str:
    """Classify the emotion of ``text`` using the cached global model.

    Args:
        text: free-form user input; lowercased and whitespace-tokenized.
        max_len: fixed sequence length to pad/truncate to (must match the
            model's positional-encoding table; default 32, as at training).

    Returns:
        The predicted emotion label (string) from the label encoder.
    """
    # simple clean + tokenize
    tokens = text.lower().split()
    # BUG FIX: an empty/whitespace-only input used to produce an all-<PAD>
    # sequence, so src_key_padding_mask masked every position and the encoder
    # returned NaNs. Feed a single <UNK> token instead for a defined result.
    if not tokens:
        tokens = ["<UNK>"]
    ids = [vocab.get(w, vocab.get("<UNK>", 1)) for w in tokens]
    # pad/truncate to the fixed model length
    if len(ids) < max_len:
        ids = ids + [vocab.get("<PAD>", 0)] * (max_len - len(ids))
    else:
        ids = ids[:max_len]
    x = torch.tensor([ids])
    with torch.no_grad():
        logits = model(x)
    probs = F.softmax(logits, dim=1)[0]
    idx = torch.argmax(probs).item()
    # Map the class index back to its human-readable label.
    return le.inverse_transform([idx])[0]
85
-
86
- # ─── UI ────────────────────────────────────────────────────────────────────────
87
-
88
# Page chrome: title bar + centered layout.
st.set_page_config(page_title="EmotiBot Lite", layout="centered")
st.title("🌿 EmotiBot (Lite Inference)")

user_input = st.text_input("How are you feeling?")

# Canned follow-up per predicted emotion; anything unrecognized falls back
# to a generic supportive line.
advice_by_emotion = {
    "sadness": "It’s okay to feel down sometimes. I’m here to support you.",
    "anger": "That must have been frustrating. Want to vent about it?",
    "love": "That’s beautiful to hear! What made you feel that way?",
    "happiness": "That's awesome! What’s bringing you joy today?",
    "neutral": "Got it. I’m here if you want to share more.",
}

if user_input:
    detected = predict_emotion(user_input)
    st.write(f"**Predicted emotion:** {detected}")
    st.write("**Advice:**", advice_by_emotion.get(detected, "I’m here for you."))
 
1
  # app.py
2
 
3
+ from src.inference import predict
4
+ from src.responses import get_response
5
+
6
def main() -> None:
    """Run the interactive EmotiBot console loop: read, classify, reply."""
    print("EmotiBot 🌿: Hi! How are you feeling today? (Type 'exit' to quit)")
    while True:
        try:
            user_raw = input("You: ").strip()
        except (EOFError, KeyboardInterrupt):
            # ROBUSTNESS: Ctrl-D / Ctrl-C previously crashed with a traceback;
            # end the chat with the same farewell as a typed 'exit' instead.
            print("EmotiBot 🌿: Take care! I’m here whenever you want to talk.")
            break
        if user_raw.lower() in ["exit", "quit"]:
            print("EmotiBot 🌿: Take care! I’m here whenever you want to talk.")
            break

        # Classify the message, then map (emotion, text) to a reply;
        # get_response also signals via `done` when the chat should end.
        emotion = predict(user_raw)
        reply, done = get_response(emotion, user_raw)
        print(f"EmotiBot 🌿: {reply}")
        if done:
            break


# Entry-point guard so importing this module no longer starts the chat loop.
if __name__ == "__main__":
    main()