gowdaman committed on
Commit 1757213 · 1 Parent(s): 03e3f3a
Files changed (8)
  1. .env +14 -0
  2. .gitignore +28 -0
  3. ai/emotion.py +114 -0
  4. ai/sentiment.py +27 -0
  5. app.py +83 -3
  6. requirements.txt +54 -0
  7. routes/feedback.py +23 -0
  8. routes/index.py +8 -0
.env ADDED
@@ -0,0 +1,14 @@
+ # Project name
+ PROJECT_NAME="Sentiment Analysis for Lms"
+ # Project version
+ PROJECT_VERSION="0.1.0"
+ # MongoDB connection string
+ DATABASE_URL=mongodb://localhost:27017
+ # Database name
+ DATABASE_NAME="lms-saas"
+ # JWT algorithm
+ ALGORITHM="HS256"
+ # Access token expiration time in minutes
+ ACCESS_TOKEN_EXPIRE_MINUTES=60
+ # Secret key for JWT
+ SECRET_KEY="sdaskiupo9865r392hri97t9trbw86trp97wxwjyt9r876acp9eirba7t9aw"
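
None of the committed code reads this .env yet (app.py hardcodes its own connection string). A minimal sketch of loading these values, assuming the python-dotenv package, which is not pinned in requirements.txt:

    # Sketch only: python-dotenv is an assumed dependency, not yet in requirements.txt
    import os
    from dotenv import load_dotenv

    load_dotenv()  # reads key=value pairs from .env into the process environment

    DATABASE_URL = os.getenv("DATABASE_URL", "mongodb://localhost:27017")
    SECRET_KEY = os.getenv("SECRET_KEY")
    ACCESS_TOKEN_EXPIRE_MINUTES = int(os.getenv("ACCESS_TOKEN_EXPIRE_MINUTES", "60"))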
.gitignore ADDED
@@ -0,0 +1,28 @@
+ # fastapi gitignore
+
+ # Byte-compiled / optimized / DLL files
+ /**/__pycache__/
+ *.py[cod]
+ *.pyo
+ *.pyd
+ *.pdb
+ *.egg-info/
+ *.egg
+ *.whl
+
+ /.venv
+ /ai/models/*
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ wheelfile
+ .installed.cfg
ai/emotion.py ADDED
@@ -0,0 +1,114 @@
+ import torch
+ from transformers import AutoModelForImageClassification
+ from PIL import Image
+ import cv2
+ import torchvision.transforms as transforms
+ import os
+ import numpy as np
+
+ class Emotion:
+     def __init__(self, model_path):
+         self.model = AutoModelForImageClassification.from_pretrained(model_path)
+         self.model.eval()
+         self.id2label = self.model.config.id2label
+         self.transform = transforms.Compose([
+             transforms.Resize((224, 224)),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.5]*3, std=[0.5]*3)
+         ])
+         # Haar cascade for face detection, shipped with OpenCV
+         self.face_cascade = cv2.CascadeClassifier(
+             cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
+         )
+
+     def predict_from_frame(self, frame):
+         # Detect the first face in the BGR frame and classify its expression
+         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+         faces = self.face_cascade.detectMultiScale(
+             gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
+         )
+         if len(faces) == 0:
+             return None, None
+         x, y, w, h = faces[0]
+         face_roi = frame[y:y+h, x:x+w]
+         img = Image.fromarray(cv2.cvtColor(face_roi, cv2.COLOR_BGR2RGB))
+         input_tensor = self.transform(img).unsqueeze(0)
+         with torch.no_grad():
+             outputs = self.model(input_tensor)
+         predicted_class = torch.argmax(outputs.logits, dim=1).item()
+         return self.id2label[predicted_class], faces[0]
+
+     def predict_from_frame_bytes(self, frame_bytes):
+         # Decode raw JPEG/PNG bytes, then reuse the frame-based pipeline
+         nparr = np.frombuffer(frame_bytes, np.uint8)
+         img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+         if img is None:
+             raise ValueError("Could not decode image from bytes")
+         return self.predict_from_frame(img)
+
+ model_path = os.path.join(os.path.dirname(__file__), "models", "emotion")
+ emotion_model = Emotion(model_path)
+
+ def webcam_demo(emotion_model):
+     cap = cv2.VideoCapture(0)
+     print("Press 'q' to quit")
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+         label, face = emotion_model.predict_from_frame(frame)
+         if label:
+             cv2.putText(frame, f"Prediction: {label}", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+         else:
+             cv2.putText(frame, "Not attending the class", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+         cv2.imshow("Webcam - Expression Classification", frame)
+         if cv2.waitKey(1) & 0xFF == ord('q'):
+             break
+     cap.release()
+     cv2.destroyAllWindows()
+
+ def analyze_local_video(emotion_model, video_path, output_path):
+     cap = cv2.VideoCapture(video_path)
+     fourcc = cv2.VideoWriter_fourcc(*'XVID')
+     fps = int(cap.get(cv2.CAP_PROP_FPS))
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         label, face = emotion_model.predict_from_frame(frame)
+         if label:
+             cv2.putText(frame, f"Prediction: {label}", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+         else:
+             cv2.putText(frame, "Not attending the class", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+         out.write(frame)
+         cv2.imshow("Video - Expression Classification", frame)
+         if cv2.waitKey(1) & 0xFF == ord('q'):
+             break
+     cap.release()
+     out.release()
+     cv2.destroyAllWindows()
+
+
+ # if __name__ == "__main__":
+ #     model_path = os.path.join(os.path.dirname(__file__), "models", "emotion")
+ #     emotion_model = Emotion(model_path)
+ #     webcam_demo(emotion_model)
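
A quick smoke test of the Emotion wrapper (a sketch: face.jpg is a placeholder path, and the checkpoint must already exist under the gitignored ai/models/emotion directory):

    # Hypothetical smoke test; "face.jpg" is a placeholder image path
    from ai.emotion import emotion_model

    with open("face.jpg", "rb") as f:
        label, face_box = emotion_model.predict_from_frame_bytes(f.read())
    print(label, face_box)  # a label string and (x, y, w, h), or (None, None) if no face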
ai/sentiment.py ADDED
@@ -0,0 +1,27 @@
+ import torch
+ import os
+ from transformers import AutoModelForSequenceClassification, BertTokenizerFast
+
+ model_path = os.path.join(os.path.dirname(__file__), "models", "sentiment")
+ tokenizer = BertTokenizerFast.from_pretrained(model_path)
+ model = AutoModelForSequenceClassification.from_pretrained(model_path, return_dict=True)
+ model.eval()
+
+
+ class Sentiment:
+     def predict(self, text: str) -> dict:
+         inputs = tokenizer(
+             text, max_length=512, padding=True, truncation=True, return_tensors="pt"
+         )
+         # Inference only: no gradients needed
+         with torch.no_grad():
+             outputs = model(**inputs)
+         probs = torch.nn.functional.softmax(outputs.logits, dim=1)
+         # Indices 1 and 2 follow the fine-tuned checkpoint's label order
+         predicted = {
+             "feedback_positive_score": int(probs[0][1].item() * 100),
+             "feedback_negative_score": int(probs[0][2].item() * 100),
+         }
+         return predicted
+
+
+ sentiment_model = Sentiment()
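
Calling the singleton directly (assumes the fine-tuned checkpoint is present under the gitignored ai/models/sentiment):

    from ai.sentiment import sentiment_model

    # Returns integer percentages derived from the softmax output
    scores = sentiment_model.predict("Great course, clear explanations.")
    print(scores)  # {"feedback_positive_score": ..., "feedback_negative_score": ...}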
app.py CHANGED
@@ -1,7 +1,87 @@
- from fastapi import FastAPI
+ from fastapi import FastAPI, WebSocket, WebSocketDisconnect
+ from routes.index import router
+ from ai.emotion import emotion_model
 
  app = FastAPI()
 
+
  @app.get("/")
- def greet_json():
-     return {"Hello": "World!"}
+ def read_root():
+     return {"message": "Hello, FastAPI!"}
+
+
+ # To run: uvicorn app:app --reload
+
+
+ # pymongo
+ from pymongo.mongo_client import MongoClient
+
+ uri = "mongodb+srv://gowdaman:gowdaman@cluster0.z5dooqf.mongodb.net/?retryWrites=true&w=majority&appName=Cluster0"
+ client = MongoClient(uri)
+ db = client["ai-saas"]
+ emotion = db["emotion"]
+ sentiment = db["sentiments"]
+
+
+ # emotion detection
+ import base64
+ import json
+ from bson import ObjectId
+
+
+ @app.websocket("/emotion")
+ async def websocket_emotion(websocket: WebSocket):
+     await websocket.accept()
+     course_id = None
+     try:
+         while True:
+             data = await websocket.receive_text()
+             msg = json.loads(data)
+             image_b64 = msg.get("image")
+             course_id = msg.get("course_id")  # courseId sent with each frame
+             if not image_b64:
+                 await websocket.send_text("No image data received")
+                 continue
+             image_bytes = base64.b64decode(image_b64)
+             label, face = emotion_model.predict_from_frame_bytes(image_bytes)
+             await websocket.send_text(str(label))
+     except WebSocketDisconnect:
+         # Do not call await websocket.close() here; the client is already gone
+         pass
+     finally:
+         print(f"WebSocket connection closed for user with course ID {course_id}")
+         try:
+             # Convert course_id to ObjectId if it's a string
+             course_obj_id = ObjectId(course_id)
+             # course_obj_id = course_id  # fallback if already an ObjectId or invalid
+             emotion_data = emotion.find_one({"course_id": course_obj_id})
+             total_len = len(emotion_data['emotion'])
+             print(emotion_data['emotion'].count(0))  # negative
+             print(emotion_data['emotion'].count(1))  # positive
+             # Positive/negative percentage across all recorded frames
+             positive_percentage = round((emotion_data['emotion'].count(1) / total_len) * 100, 2)
+             negative_percentage = round((emotion_data['emotion'].count(0) / total_len) * 100, 2)
+             print(positive_percentage)
+             print(negative_percentage)
+             sentiment.update_one(
+                 {"course_id": course_obj_id},
+                 {
+                     "$set": {
+                         "expression_positive_score": positive_percentage,
+                         "expression_negetive_score": negative_percentage,
+                     }
+                 },
+             )
+         except Exception:
+             pass
+
+
+ app.include_router(router)
+
+ # if __name__ == "__main__":
+ #     import uvicorn
+ #     uvicorn.run("app:app", host="0.0.0.0", port=7000, reload=True)
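
A minimal client sketch for the /emotion socket, using the websockets package already pinned in requirements.txt (frame.jpg and the course id are placeholders; the port assumes uvicorn's default of 8000):

    # Hypothetical client for the /emotion websocket
    import asyncio
    import base64
    import json
    import websockets

    async def send_frame():
        async with websockets.connect("ws://localhost:8000/emotion") as ws:
            with open("frame.jpg", "rb") as f:  # placeholder image
                payload = {"image": base64.b64encode(f.read()).decode(),
                           "course_id": "<course-object-id>"}
            await ws.send(json.dumps(payload))
            print(await ws.recv())  # predicted label, or "None" when no face is detected

    asyncio.run(send_frame())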
requirements.txt CHANGED
@@ -1,2 +1,56 @@
  fastapi
  uvicorn[standard]
+ annotated-types==0.7.0
+ anyio==4.9.0
+ certifi==2025.4.26
+ charset-normalizer==3.4.1
+ click==8.1.8
+ dnspython==2.7.0
+ filelock==3.18.0
+ fsspec==2025.3.2
+ h11==0.16.0
+ huggingface-hub==0.30.2
+ idna==3.10
+ Jinja2==3.1.6
+ MarkupSafe==3.0.2
+ mpmath==1.3.0
+ networkx==3.4.2
+ numpy==2.2.5
+ nvidia-cublas-cu12==12.6.4.1
+ nvidia-cuda-cupti-cu12==12.6.80
+ nvidia-cuda-nvrtc-cu12==12.6.77
+ nvidia-cuda-runtime-cu12==12.6.77
+ nvidia-cudnn-cu12==9.5.1.17
+ nvidia-cufft-cu12==11.3.0.4
+ nvidia-cufile-cu12==1.11.1.6
+ nvidia-curand-cu12==10.3.7.77
+ nvidia-cusolver-cu12==11.7.1.2
+ nvidia-cusparse-cu12==12.5.4.2
+ nvidia-cusparselt-cu12==0.6.3
+ nvidia-nccl-cu12==2.26.2
+ nvidia-nvjitlink-cu12==12.6.85
+ nvidia-nvtx-cu12==12.6.77
+ opencv-python==4.11.0.86
+ packaging==25.0
+ pillow==11.2.1
+ pydantic==2.11.4
+ pydantic_core==2.33.2
+ pymongo==4.12.1
+ PyYAML==6.0.2
+ regex==2024.11.6
+ requests==2.32.3
+ safetensors==0.5.3
+ setuptools==80.1.0
+ sniffio==1.3.1
+ starlette==0.46.2
+ sympy==1.14.0
+ tokenizers==0.21.1
+ torch==2.7.0
+ torchvision==0.22.0
+ tqdm==4.67.1
+ transformers==4.51.3
+ triton==3.3.0
+ typing-inspection==0.4.0
+ typing_extensions==4.13.2
+ urllib3==2.4.0
+ websockets==15.0.1
routes/feedback.py ADDED
@@ -0,0 +1,23 @@
+ from fastapi import APIRouter
+ from ai.sentiment import sentiment_model
+ from pydantic import BaseModel
+
+ feedback_router = APIRouter(
+     prefix="/feedback",
+     tags=["Feedback"],
+     responses={404: {"description": "Not found"}},
+ )
+
+
+ @feedback_router.get("/")
+ async def root():
+     return {"message": "Hello World"}
+
+
+ class FeedbackModel(BaseModel):
+     text: str
+
+
+ @feedback_router.post("/")
+ def get_feedback_sentiment(data: FeedbackModel):
+     return sentiment_model.predict(text=data.text)
routes/index.py ADDED
@@ -0,0 +1,8 @@
+ from fastapi import APIRouter
+ from .feedback import feedback_router
+
+ router = APIRouter(
+     prefix="/api",
+ )
+
+ router.include_router(feedback_router)
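
With feedback_router nested under the /api prefix, the sentiment endpoint resolves to POST /api/feedback/. A sketch of calling it with requests (already pinned in requirements.txt; host and port assume a default local uvicorn run):

    import requests

    resp = requests.post(
        "http://localhost:8000/api/feedback/",
        json={"text": "Great explanations, but the pacing was too fast."},
    )
    print(resp.json())  # {"feedback_positive_score": ..., "feedback_negative_score": ...}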