Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,28 +1,50 @@
|
|
| 1 |
-
# Step 1: Install
|
| 2 |
-
# (Only needed locally; Hugging Face Spaces handles dependencies via 'requirements.txt')
|
| 3 |
-
# !pip install streamlit spacy numpy
|
| 4 |
-
|
| 5 |
import streamlit as st
|
| 6 |
import spacy
|
|
|
|
| 7 |
import numpy as np
|
| 8 |
-
import json
|
| 9 |
from numpy.linalg import norm
|
| 10 |
|
| 11 |
-
# Step 2:
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
-
# Step 3:
|
| 15 |
-
|
| 16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
|
| 18 |
-
# Step 4:
|
| 19 |
faq_docs = []
|
| 20 |
for category, faq_list in faqs.items():
|
| 21 |
for faq in faq_list:
|
| 22 |
question = faq['question']
|
| 23 |
answer = faq['answer']
|
| 24 |
-
faq_vector = nlp(question).vector # Precompute
|
| 25 |
-
faq_docs.append((question, answer, faq_vector))
|
| 26 |
|
| 27 |
# Step 5: Define the function to find the most relevant FAQs
|
| 28 |
def find_most_relevant_faq_optimized(query, faq_docs):
|
|
@@ -59,4 +81,3 @@ if query:
|
|
| 59 |
st.write(f"**Similarity Score:** {score:.2f}")
|
| 60 |
else:
|
| 61 |
st.write("Please enter a query to search for relevant FAQs.")
|
| 62 |
-
|
|
|
|
| 1 |
+
# Step 1: Install necessary libraries (Handled by Hugging Face via 'requirements.txt')
|
|
|
|
|
|
|
|
|
|
| 2 |
import streamlit as st
|
| 3 |
import spacy
|
| 4 |
+
from spacy.cli import download # To download the model programmatically
|
| 5 |
import numpy as np
|
|
|
|
| 6 |
from numpy.linalg import norm
|
| 7 |
|
| 8 |
+
# Step 2: Load the spaCy model, downloading it on first use.
# Cached with st.cache_resource so the (slow) spacy.load / download runs once
# per server process instead of on every Streamlit rerun.
@st.cache_resource
def _load_nlp():
    """Return the 'en_core_web_md' pipeline, downloading it if not installed."""
    try:
        return spacy.load("en_core_web_md")
    except OSError:
        # Model not present in the environment (e.g. fresh Space container).
        st.warning("Downloading spaCy model 'en_core_web_md'. This may take a few minutes...")
        download("en_core_web_md")
        return spacy.load("en_core_web_md")

nlp = _load_nlp()
|
| 15 |
|
| 16 |
+
# Step 3: FAQ knowledge base, hardcoded in the source.
# Shape: {category_name: [{"question": str, "answer": str}, ...], ...}
faqs = {
    "Admissions": [
        {
            "question": "What is the process for admission into Saras AI Institute?",
            "answer": "The admission process involves submitting the online application form, followed by a pre-enrollment assessment.",
        },
        {
            "question": "Is there an application fee for applying to Saras AI Institute?",
            "answer": "There is no application fee for applying to any program at Saras.",
        },
    ],
    "Curriculum and Faculty": [
        {
            "question": "What is the curriculum like at Saras AI Institute?",
            "answer": "The curriculum prepares students for roles like AI/ML Engineer, Data Scientist, and Gen AI Engineer.",
        },
        {
            "question": "Do you also conduct LIVE sessions?",
            "answer": "Yes, live sessions are conducted regularly to provide interactive learning and Q&A.",
        },
    ],
}
|
| 39 |
|
| 40 |
+
# Step 4: Precompute a vector for every FAQ question once at startup, so
# query-time lookup only has to vectorize the user's query.
# Each entry is (question, answer, vector) with vector = nlp(question).vector.
faq_docs = []
for faq_list in faqs.values():  # category labels are not needed downstream
    for faq in faq_list:
        question = faq['question']
        answer = faq['answer']
        faq_vector = nlp(question).vector  # precompute the spaCy doc vector
        faq_docs.append((question, answer, faq_vector))
|
| 48 |
|
| 49 |
# Step 5: Define the function to find the most relevant FAQs
|
| 50 |
def find_most_relevant_faq_optimized(query, faq_docs):
|
|
|
|
| 81 |
st.write(f"**Similarity Score:** {score:.2f}")
|
| 82 |
else:
|
| 83 |
st.write("Please enter a query to search for relevant FAQs.")
|
|
|