Upload 5 files
- README.md +6 -6
- app.py +603 -0
- requirements.txt +6 -0
- runtime.txt +1 -0
- yolo11m_affectnet_best.pt +3 -0
README.md CHANGED

@@ -1,14 +1,14 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: SentimentDemo
+emoji: π
+colorFrom: gray
+colorTo: pink
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.17.1
 app_file: app.py
 pinned: false
 license: apache-2.0
-short_description:
+short_description: Demo application for Video Sentiment Analysis
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED

@@ -0,0 +1,603 @@
# -*- coding: utf-8 -*-
"""ITI110_Final.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1wAe1__d6108Sb-qIL2rOlwhLXhE3B_Yo
"""

# Install and import necessary libraries to access Groq.

import subprocess
import sys

# Install required packages at startup.
# ("datetime" is omitted from the list: it is part of the Python standard library.)
def install_packages():
    packages = ["groq", "gradio", "ultralytics", "moviepy", "requests", "soundfile", "pandas", "openai",
                "pydub", "matplotlib", "numpy", "fpdf", "azure-cognitiveservices-speech", "azure-ai-textanalytics", "azure-core", "azure-identity"]
    subprocess.check_call([sys.executable, "-m", "pip", "install"] + packages)

install_packages()  # Call the function to install packages

import os
os.system("pip uninstall -y moviepy && pip install --no-cache-dir moviepy")

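# Note: requirements.txt in this commit lists only six packages; the pip install
# above pulls in the remaining dependencies when the Space starts.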

# FOR SENTIMENT ANALYSIS - SETYANI
# Install and import necessary libraries to access Groq
#!pip install groq gradio opencv-python moviepy requests soundfile pydub matplotlib numpy fpdf
import os
import groq
from groq import Groq
import gradio as gr
import numpy as np
import tempfile
import requests
from moviepy import VideoFileClip
from pydub import AudioSegment
import matplotlib.pyplot as plt
import time
import seaborn as sns
from collections import Counter
from fpdf import FPDF

# Global Variables
sentiment_scores = {"positive": 1, "neutral": 0, "negative": -1}
sentiment_history = []
transcribed_text = "Listening..."

report_path = "sentiment_report.pdf"
sentiment_trend_path = "sentiment_trend.png"
sentiment_heatmap_path = "sentiment_heatmap.png"
sentiment_pie_chart_path = "sentiment_pie_chart.png"
emotion_trend_path = "emotion_trend.png"
emotion_heatmap_path = "emotion_heatmap.png"
emotion_pie_chart_path = "emotion_pie_chart.png"

# Define the Groq API key. It is hardcoded here for this demo; in practice it
# should be loaded from an environment variable or a Space secret.
api_key = "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"
# Initialize the Groq client
groq_client = groq.Groq(api_key=api_key)

# MAIN function to convert audio into text using the Groq Whisper speech-to-text service
def transcribe_audio(audio_file_path):
    # Open the audio file
    with open(audio_file_path, "rb") as file:
        # Create an audio transcription using the Groq client
        transcription = groq_client.audio.transcriptions.create(
            file=(audio_file_path, file.read()),   # Read the audio file from the specified path and send it as input
            model="whisper-large-v3",              # chosen Whisper model to be used for transcription
            #model="whisper-large-v3-turbo",       # tested another Whisper model
            #model="distil-whisper-large-v3-en",   # tested another Whisper model
            prompt="Specify context or spelling",  # Optional prompt to provide context or spelling preferences
            response_format="json",                # Specify the format of the response (JSON in this case)
            language="en",                         # Specify the language of the audio (English)
            temperature=0.0                        # 0.0 means deterministic output
        )
    return transcription.text

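# Illustrative usage (hypothetical file name):
#   text = transcribe_audio("sample_segment.wav")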
# MAIN function to do sentiment analysis using the Groq LLM model llama3-8b-8192
def analyze_sentiment(text):
    # Create a chat completion using the Groq client
    response = groq_client.chat.completions.create(
        model="llama3-8b-8192",  # Specify the model to be used for generating the completion
        messages=[
            {"role": "system", "content": "Analyze the sentiment of this text and return only 'Positive', 'Negative', or 'Neutral'."},
            {"role": "user", "content": text}
        ],
        temperature=0.0,  # 0.0 means deterministic output
        max_tokens=10     # Limit the response to 10 tokens; a single sentiment word is enough
    )

    sentiment = response.choices[0].message.content
    print(sentiment)
    sentiment_history.append(sentiment_scores.get(sentiment.lower(), 0))
    print(sentiment_history)
    return sentiment

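# Note: sentiment_scores maps the model's one-word reply ('positive'/'neutral'/'negative',
# lowercased) to +1/0/-1; any unexpected reply falls back to 0 (neutral).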
# Integrated and tested AZURE services: speech-to-text using Whisper, plus
# Azure sentiment analysis using gpt-35-turbo-16k vs the Azure LANGUAGE service for text analytics

#!pip install azure-cognitiveservices-speech azure-ai-textanalytics azure-core azure-identity

from openai import AzureOpenAI

AZURE_ENDPOINT = 'https://78382-m7ewtltu-eastus2.cognitiveservices.azure.com/'
OPENAI_KEY = '9hRh4afsE668lu7Qgv1HzcDDFIutOFjVseeH8H74GO8BTB5YxVyWJQQJ99BBACHYHv6XJ3w3AAAAACOGupoL'
DEPLOYMENT_NAME = 'whisper'
OPENAI_VERSION = '2024-06-01'

# Separate clients for the Whisper and GPT deployments (they use different
# endpoints and API versions)
whisper_client = AzureOpenAI(
    api_key=OPENAI_KEY,
    api_version=OPENAI_VERSION,
    azure_endpoint=AZURE_ENDPOINT)

# Use Azure Whisper to convert audio into text
def transcribe_audio_azure(audio_file_path):
    # Open the audio file
    with open(audio_file_path, "rb") as file:
        # Create an audio transcription using the Azure OpenAI client
        transcription = whisper_client.audio.transcriptions.create(
            file=(audio_file_path, file.read()),   # Read the audio file from the specified path and send it as input
            model=DEPLOYMENT_NAME,                 # Whisper deployment to be used for transcription
            prompt="Specify context or spelling",  # Optional prompt to provide context or spelling preferences
            response_format="json",                # Specify the format of the response (JSON in this case)
            language="en",                         # Specify the language of the audio (English)
            temperature=0.0                        # 0.0 means deterministic output
        )
    print('Transcription done using Azure Whisper')
    return transcription.text

AZURE_GPT_ENDPOINT = 'https://78382-m7ewtltu-eastus2.openai.azure.com/'
OPENAI_GPT_KEY = '9hRh4afsE668lu7Qgv1HzcDDFIutOFjVseeH8H74GO8BTB5YxVyWJQQJ99BBACHYHv6XJ3w3AAAAACOGupoL'
DEPLOYMENT_NAME_GPT = 'gpt-35-turbo-16k'
OPENAI_GPT_VERSION = '2024-08-01-preview'

gpt_client = AzureOpenAI(
    api_key=OPENAI_GPT_KEY,
    api_version=OPENAI_GPT_VERSION,
    azure_endpoint=AZURE_GPT_ENDPOINT)

# Use Azure GPT for sentiment analysis
def analyze_sentiment_gpt(text):
    msg = [
        {"role": "system", "content": "You are an expert in sentiment analysis. Analyze the sentiment of this text and return only 'Positive', 'Negative', or 'Neutral'."},
        {"role": "user", "content": text}
    ]
    # Create a chat completion for sentiment analysis
    response = gpt_client.chat.completions.create(
        model=DEPLOYMENT_NAME_GPT,  # Specify the deployment to be used for generating the completion
        messages=msg,
        temperature=0.0,  # 0.0 means deterministic output
        max_tokens=10     # Limit the response to 10 tokens; a single sentiment word is enough
    )
    sentiment = response.choices[0].message.content
    print(sentiment)
    sentiment_history.append(sentiment_scores.get(sentiment.lower(), 0))
    print(sentiment_history)
    return sentiment


from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential

# Set up your Azure Cognitive Services credentials
text_analytics_key = "35Xbh3QVy7lkr36Z7GR8I8qdB6Xj3gjn8uMtct4onf82rVNPwmqvJQQJ99BBACYeBjFXJ3w3AAAaACOGdqeM"
text_analytics_endpoint = "https://projectlanguage110.cognitiveservices.azure.com/"

# Function to authenticate a Text Analytics client
def authenticate_text_analytics_client():
    credential = AzureKeyCredential(text_analytics_key)
    client = TextAnalyticsClient(endpoint=text_analytics_endpoint, credential=credential)
    return client

# Use the Azure Language Service for sentiment analysis
def analyze_sentiment_azure(client, text):
    response = client.analyze_sentiment(documents=[text])[0]
    #print(response)
    sentiment = response.sentiment
    #print(sentiment)
    sentiment_history.append(sentiment_scores.get(sentiment.lower(), 0))
    #print(sentiment_history)
    return sentiment

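# Illustrative usage of the Language-service path:
#   ta_client = authenticate_text_analytics_client()
#   analyze_sentiment_azure(ta_client, "The demo works great!")  # -> 'positive'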
# CLEANUP transcribed text before doing sentiment analysis
import re  # used for regular expressions

# Helper function to remove ordinal suffixes from numbers in the input text
def remove_suffixes(text):
    # Regular expression to find numbers followed by common ordinal suffixes
    pattern = r'(\d+)(st|nd|rd|th)'
    # Replace the matched pattern with just the number (capture group 1)
    cleaned_text = re.sub(pattern, r'\1', text)
    return cleaned_text  # Return the cleaned text without suffixes

# Helper function to remove phrases repeated due to transcription errors.
# A phrase of up to 3 words repeated three or more times in a row is collapsed
# to two instances, so genuine repeats (e.g. "bye. bye.") are preserved.
def remove_repeated_phrases(text):
    pattern = r'\b(\w+(?:\s+\w+){0,2})(?:\s+\1){2,}\b'
    prev_text = ''

    while prev_text != text:
        prev_text = text  # Store the previous version for comparison
        text = re.sub(pattern, r'\1 \1', text, flags=re.IGNORECASE)

    return text  # Return the cleaned text without excess repeated phrases

# Example Usage
#text = "hello world hello world hello world test test test again again again"
#cleaned_text = remove_repeated_phrases(text)
#print(cleaned_text)  # Output: "hello world hello world test test again again"

# Helper function for text preprocessing before calculating WER
def preprocess_text(text):
    text = remove_repeated_phrases(text)        # remove phrases repeated due to transcription errors
    text = text.replace('\n', ' ')              # replace newlines with spaces
    text = text.lower()                         # convert text to lower case
    text = text.replace('-', '')                # drop hyphens
    text = re.sub(r'[^a-z\s0-9!?]', ' ', text)  # replace everything except lowercase letters, whitespace, digits, ! and ? with a space
    text = re.sub(r'\b(okay)\b', 'ok', text)    # standardize "okay" to "ok"
    text = re.sub(r'\b(yeah)\b', 'yes', text)   # standardize "yeah" to "yes"
    text = re.sub(r'\b(um)\b', '', text)        # remove the filler word "um"
    text = re.sub(r'\b(uh)\b', '', text)        # remove the filler word "uh"
    text = remove_suffixes(text)                # remove ordinal suffixes (st, nd, rd, th) behind numbers
    text = re.sub(r'\s+', ' ', text).strip()    # collapse leading, trailing, and repeated whitespace
    return text  # Return the cleaned text after preprocessing

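# Illustrative example: preprocess_text("Um, YEAH - the 1st one!") returns "yes the 1 one!"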
# HELPER functions for displaying the sentiment analysis output
# Update the "Sentiment Trend Over Time" real-time graph
def update_plot():
    plt.clf()
    # Generate timestamps
    timestamps = list(range(len(sentiment_history)))

    # Define color mapping for sentiment scores
    colors = ["red" if s < -0.3 else "yellow" if -0.3 <= s <= 0.3 else "green" for s in sentiment_history]

    plt.figure(figsize=(8, 4))
    # Plot sentiment scores with colored markers
    for i in range(len(sentiment_history)):
        plt.plot(timestamps[i], sentiment_history[i], marker="o", color=colors[i], markersize=8)

    # Plot line segments with the color of the next point
    for i in range(len(sentiment_history) - 1):
        plt.plot(timestamps[i:i+2], sentiment_history[i:i+2], linestyle="-", color=colors[i+1], linewidth=2)

    plt.title("Sentiment Trend Over Time")
    plt.xlabel("Time (Speech Segments)")
    plt.ylabel("Sentiment Score")
    plt.ylim([-1, 1])
    plt.yticks([-1, 0, 1], ["Negative", "Neutral", "Positive"])
    plt.savefig(sentiment_trend_path)  # Save the plot as an image
    plt.close()

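# Note: with scores restricted to -1, 0, and 1, the +/-0.3 thresholds simply map each
# score back to its own class; they would also handle fractional scores if added later.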
# Generate the sentiment heatmap using red, yellow, and green colors
def generate_sentiment_heatmap():
    plt.clf()
    #if not sentiment_history:
    #    return
    # Reshape the sentiment scores into a single-row matrix for the heatmap
    heatmap_data = np.array(sentiment_history).reshape(1, -1)
    print(heatmap_data)
    # Define color mapping for sentiment scores
    color_mapping = ["red", "yellow", "green"]
    plt.figure(figsize=(6, 3))
    ax = sns.heatmap(heatmap_data, annot=True, cmap=color_mapping, xticklabels=False,
                     yticklabels=["Sentiment"], cbar=True, vmin=-1, vmax=1)

    # Customize color bar labels
    colorbar = ax.collections[0].colorbar
    colorbar.set_ticks([-1, 0, 1])
    colorbar.set_ticklabels(["Negative", "Neutral", "Positive"])

    plt.title("Sentiment Heatmap")  # Red = Negative, Yellow = Neutral, Green = Positive
    plt.savefig(sentiment_heatmap_path)
    plt.close()

# Generate a pie chart for the sentiment distribution
def generate_sentiment_pie_chart():
    plt.clf()
    #if not sentiment_history:
    #    return
    # Count occurrences of each sentiment category
    sentiment_labels = ["Negative", "Neutral", "Positive"]
    sentiment_counts = Counter(["Negative" if s < -0.3 else "Neutral" if -0.3 <= s <= 0.3 else "Positive" for s in sentiment_history])

    # Extract count values
    counts = [sentiment_counts[label] for label in sentiment_labels]

    # Define colors
    colors = ["red", "yellow", "green"]

    # Plot pie chart
    plt.figure(figsize=(4, 4))
    plt.pie(counts, labels=sentiment_labels, autopct="%1.1f%%", colors=colors, startangle=140)
    plt.title("Sentiment Distribution")
    plt.savefig(sentiment_pie_chart_path)
    plt.close()

# Create and save a PDF report with the transcription and all analysis graphs
def generate_pdf_report(text):
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=15)
    pdf.add_page()

    # Title
    pdf.set_font("Arial", style='B', size=16)
    pdf.cell(200, 10, "Sentiment Analysis Report", ln=True, align="C")
    pdf.ln(10)

    # Transcribed text
    pdf.set_font("Arial", size=12)
    pdf.multi_cell(0, 10, f"Transcribed Text:\n\n{text}")
    pdf.ln(10)

    # Add the chart images, one per page
    for img_path in [sentiment_trend_path, sentiment_heatmap_path, sentiment_pie_chart_path, emotion_trend_path, emotion_heatmap_path, emotion_pie_chart_path]:
        if os.path.exists(img_path):
            pdf.add_page()
            pdf.image(img_path, x=10, w=180)

    pdf.output(report_path)
    return report_path

# FOR FACE EMOTION ANALYSIS - SONG MING
#!pip install gradio ultralytics pandas matplotlib datetime
import gradio as gr
from ultralytics import YOLO
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import logging
import cv2
from datetime import datetime
import os

# Configure logging (optional)
logging.basicConfig(filename='emotion_analysis.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Load the model once, outside the analysis functions
try:
    model = YOLO('yolo11m_affectnet_best.pt')  # Replace with your model path. Download this model first!
except Exception as e:
    logging.error(f"Error loading YOLO model: {e}. Make sure the path is correct.")
    print(f"Error loading YOLO model: {e}. Make sure the path is correct.")
    model = None

emotion_labels = ["neutral", "happy", "sad", "angry", "fearful", "disgusted", "surprised", "not_detected"]
# Initialize an empty global DataFrame (analyze_video below builds its own local copy)
combined_df = pd.DataFrame(columns=['Emotion', 'Confidence', 'Frame', 'Class', 'Timestamp'])


def analyze_video(video_file, interval_seconds=5, confidence=30, iou=30):
    if model is None: return "<p>YOLO model failed to load. Check the logs.</p>"

    model.conf = confidence / 100.0
    model.iou = iou / 100.0

    cap = cv2.VideoCapture(video_file)
    if not cap.isOpened():
        print(f"Error opening video file: {video_file}")
        return "<p>Error opening video file.</p>"

    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    all_emotions_data = []
    current_frame = 0
    interval_frames = int(fps * interval_seconds)

    # Sample one frame every interval_seconds to keep inference cost low
    while current_frame < total_frame_count:
        cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame)
        ret, frame = cap.read()
        if not ret:
            break  # stop on a failed read to avoid looping forever on the same frame index
        analyze_emotion(frame, current_frame, all_emotions_data)

        current_frame += interval_frames  # Move to the first frame of the next interval
        print(f"Finished Processing : {current_frame}")

    cap.release()
    print("Finished Processing all frames")

    all_emotions_df = pd.DataFrame(all_emotions_data)

    if all_emotions_df.empty:
        return "No emotions detected in the video."

    combined_df = all_emotions_df.groupby(['Frame', 'Emotion'], as_index=False).agg({'Confidence': 'mean', 'Class': 'first', 'Timestamp': 'first'})

    # Line plot
    plt.figure(figsize=(10, 6))
    sns.lineplot(data=combined_df, x='Frame', y='Confidence', hue='Emotion', marker='o')
    plt.title('Emotion Detections Over Time')
    plt.xlabel('Frame')
    plt.ylabel('Confidence')
    #line_plot_path = os.path.abspath('line_plot.png')
    plt.savefig(emotion_trend_path)
    plt.close()

    # Pie chart
    pie_data = combined_df['Emotion'].value_counts()
    plt.figure(figsize=(20, 12))
    plt.pie(pie_data, labels=pie_data.index, autopct='%1.1f%%', startangle=90)
    plt.title('Emotion Distribution')
    #pie_chart_path = os.path.abspath('pie_chart.png')
    plt.savefig(emotion_pie_chart_path)
    plt.close()

    # Heatmap
    plt.figure(figsize=(10, 6))
    heatmap_data = pd.pivot_table(combined_df, values='Confidence', index='Frame', columns='Emotion', fill_value=0)
    sns.heatmap(heatmap_data, cmap='YlGnBu', cbar_kws={'label': 'Confidence'})
    plt.title('Emotion Heatmap')
    plt.xlabel('Emotion')
    plt.ylabel('Frame')
    #heatmap_path = os.path.abspath('heatmap.png')
    plt.savefig(emotion_heatmap_path)
    plt.close()

def analyze_emotion(frame, frame_index, all_emotions_data):
    if model is None:
        return

    results = model(frame)
    for result in results:
        boxes = result.boxes
        for box in boxes:
            conf = float(box.conf)
            cls = int(box.cls.item())

            if cls < len(emotion_labels):
                predicted_emotion = emotion_labels[cls]
            else:
                predicted_emotion = 'not_detected'
                logging.warning(f"Predicted class {cls} out of range. Setting to 'not_detected'.")
                conf = 0.0

            if conf > model.conf:
                timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')

                all_emotions_data.append({
                    'Emotion': predicted_emotion,
                    'Confidence': conf,
                    'Frame': frame_index,
                    'Class': cls,
                    'Timestamp': timestamp
                })

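# Each record appended above becomes one row of the per-frame DataFrame that
# analyze_video() aggregates into the emotion trend, pie chart, and heatmap.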
# MAIN FUNCTIONS FOR THE GRADIO APPLICATION - SETYANI
# 17/2 video file sentiment analysis working
# 21/2 fixed heatmap display, added button click handlers for clear and download report
# 23/2 integrated the Azure Whisper, GPT and Language services created by Thim Wai; their performance was too slow, so switched back to Groq
# 25/2 integrated face emotion analysis from SongMing
#==========================================================================================================================================
# MAIN function to process a video uploaded from the Gradio user interface
def process_video_gradio(video_path):
    global sentiment_history
    sentiment_history = []  # Reset sentiment history

    if not os.path.exists(video_path):
        raise ValueError("File not found.")

    clear_function()  # clear the previous analysis files if they exist
    video_clip = VideoFileClip(video_path)  # load the video
    audio_clip = video_clip.audio           # extract the audio track

    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
        audio_clip.write_audiofile(temp_audio.name)
        full_audio_path = temp_audio.name

    audio = AudioSegment.from_wav(full_audio_path)
    segment_length = 5000  # 5 seconds per segment
    num_segments = len(audio) // segment_length
    transcribed_text = ""

    for i in range(num_segments):
        segment = audio[i * segment_length: (i + 1) * segment_length]  # split the audio into 5-second segments to be analysed
        segment_path = f"temp_segment_{i}.wav"
        segment.export(segment_path, format="wav")

        segment_text = transcribe_audio(segment_path)         # CALL transcribe audio using Groq Whisper
        #segment_text = transcribe_audio_azure(segment_path)  # CALL transcribe audio using Azure Whisper
        transcribed_text += segment_text + "\n"               # newline added for display purposes
        segment_text = preprocess_text(segment_text)          # clean the segment text before sentiment analysis

        sentiment = analyze_sentiment(segment_text)           # CALL analyze sentiment using Groq Llama
        #sentiment = analyze_sentiment_gpt(segment_text)      # CALL analyze sentiment using Azure GPT
        #text_analytics_client = authenticate_text_analytics_client()              # CALL analyze sentiment using the Azure Language Service
        #sentiment = analyze_sentiment_azure(text_analytics_client, segment_text)  # CALL analyze sentiment using the Azure Language Service

        os.remove(segment_path)  # Clean up the segment file

        update_plot()  # Update the trend plot after processing each segment
        yield transcribed_text, sentiment_trend_path, sentiment_heatmap_path, sentiment_pie_chart_path, emotion_trend_path, emotion_heatmap_path, emotion_pie_chart_path

    os.remove(full_audio_path)  # Clean up the full audio file

    generate_sentiment_heatmap()
    generate_sentiment_pie_chart()
    yield transcribed_text, sentiment_trend_path, sentiment_heatmap_path, sentiment_pie_chart_path, emotion_trend_path, emotion_heatmap_path, emotion_pie_chart_path

    analyze_video(video_path)
    report_path = generate_pdf_report(transcribed_text)
    # yield the final heatmap and pie chart before returning
    yield transcribed_text, sentiment_trend_path, sentiment_heatmap_path, sentiment_pie_chart_path, emotion_trend_path, emotion_heatmap_path, emotion_pie_chart_path
    return transcribed_text, sentiment_trend_path, sentiment_heatmap_path, sentiment_pie_chart_path, emotion_trend_path, emotion_heatmap_path, emotion_pie_chart_path

# Function to handle the 'Download Report' button
def download_report_function():
    if not os.path.exists(report_path):
        raise ValueError("Please upload a video file for report analysis.")
    return report_path

# Function to handle the 'Clear' button
def clear_function():
    # Delete any charts left over from a previous run
    for path in (sentiment_trend_path, sentiment_heatmap_path, sentiment_pie_chart_path,
                 emotion_trend_path, emotion_heatmap_path, emotion_pie_chart_path):
        if os.path.isfile(path):  # Ensure it is a file before attempting to delete
            os.remove(path)
    #if os.path.isfile(report_path):
    #    os.remove(report_path)
    #return gr.update(value=None, interactive=True), gr.update(value="", interactive=False), gr.update(value=""), gr.update(value=""), gr.update(value=""), gr.update(value=""), gr.update(value=""), gr.update(value="")
    # Reset the video input, transcript box, and the six chart images
    return None, None, None, None, None, None, None, None

# Earlier single-call UI; superseded by the gr.Blocks layout defined below,
# which rebinds `iface` before launch.
iface = gr.Interface(
    fn=process_video_gradio,
    inputs=gr.Video(label="Video"),
    outputs=[
        gr.Textbox(label="Transcribed Text"),
        gr.Image(label="Sentiment Trend Over Time"),
        gr.Image(label="Sentiment Heatmap"),
        gr.Image(label="Sentiment Distribution Pie Chart"),
        gr.Image(label="Emotion Trend Over Time"),
        gr.Image(label="Emotion Heatmap"),
        gr.Image(label="Emotion Distribution Pie Chart")
    ],
    allow_flagging="never",  # Disable the flag button
    title="Real-Time Video Sentiment Analysis",
    description="Upload a video file or use your webcam for live video streaming to analyze speech sentiment dynamically.",
    live=True  # Enable live updates for streaming
)

with gr.Blocks() as iface:
    with gr.Row():
        video_input = gr.Video(label="Video", scale=1, interactive=True)  # video and transcript share the row equally
        transcribed_text = gr.Textbox(label="Transcribed Text", lines=15, max_lines=15, interactive=False, scale=1)

    with gr.Row():
        sentiment_trend = gr.Image(label="Sentiment Trend Over Time", scale=2)
        sentiment_heatmap = gr.Image(label="Sentiment Heatmap", scale=1)
        sentiment_pie_chart = gr.Image(label="Sentiment Distribution Pie Chart", scale=1)

    with gr.Row():
        emotion_trend = gr.Image(label="Emotion Trend Over Time", scale=2)
        emotion_heatmap = gr.Image(label="Emotion Heatmap", scale=1)
        emotion_pie_chart = gr.Image(label="Emotion Distribution Pie Chart", scale=1)

    with gr.Row():
        # Buttons for manual control
        download_button = gr.Button("Download Report")
        clear_button = gr.Button("Clear")

    video_input.change(fn=process_video_gradio, inputs=video_input, outputs=[transcribed_text,
                       sentiment_trend, sentiment_heatmap, sentiment_pie_chart,
                       emotion_trend, emotion_heatmap, emotion_pie_chart
                       ])

    # Link the button clicks to the functions that handle them
    download_button.click(fn=download_report_function, inputs=[], outputs=gr.File())
    clear_button.click(
        fn=clear_function,
        inputs=[],
        outputs=[video_input, transcribed_text, sentiment_trend, sentiment_heatmap, sentiment_pie_chart, emotion_trend, emotion_heatmap, emotion_pie_chart])

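    # Note: because process_video_gradio is a generator, each `yield` streams an
    # updated transcript and refreshed charts to the outputs bound above.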
    # Add custom JavaScript to trigger the play button after uploading.
    # (Recent Gradio versions sanitize <script> tags injected via gr.HTML,
    # so this autoplay hook may not run on every deployment.)
    instructions = gr.HTML("""
    <script>
    document.querySelector('input[type="file"]').addEventListener('change', function() {
        var intervalId = setInterval(function() {
            var videoPlayer = document.querySelector('video');
            if (videoPlayer) {
                videoPlayer.play();
                clearInterval(intervalId);
            }
        }, 500);
    });
    </script>
    """)

iface.launch(inline=False, share=True)
requirements.txt ADDED

@@ -0,0 +1,6 @@
moviepy
gradio
numpy
transformers
ultralytics
huggingface_hub
runtime.txt ADDED

@@ -0,0 +1 @@
python-3.10
yolo11m_affectnet_best.pt ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d9103ead25cfa9307e1580c95d19d744251b3cbc03b8945a7c74b239309d105
size 40481573