# Smart FAQ Search — Streamlit app for Hugging Face Spaces
# Step 1: Imports (dependencies installed by Hugging Face via 'requirements.txt')
import streamlit as st
import spacy
from spacy.cli import download  # to download the model programmatically
import numpy as np
from numpy.linalg import norm


# Step 2: Load the spaCy model, downloading it on first use.
@st.cache_resource(show_spinner=False)
def _load_model(model_name: str = "en_core_web_md"):
    """Return the spaCy pipeline for *model_name*, downloading it if missing.

    Cached with st.cache_resource because Streamlit re-executes the whole
    script on every widget interaction; without the cache the model would
    be reloaded from disk on each rerun.
    """
    try:
        return spacy.load(model_name)
    except OSError:
        # Model not installed yet — fetch it once, then load normally.
        st.warning("Downloading spaCy model 'en_core_web_md'. This may take a few minutes...")
        download(model_name)
        return spacy.load(model_name)


nlp = _load_model()
# Step 3: FAQ knowledge base, hardcoded in the source and keyed by category.
faqs = {
    'Admissions': [
        dict(
            question='What is the process for admission into Saras AI Institute?',
            answer='The admission process involves submitting the online application form, followed by a pre-enrollment assessment.',
        ),
        dict(
            question='Is there an application fee for applying to Saras AI Institute?',
            answer='There is no application fee for applying to any program at Saras.',
        ),
    ],
    'Curriculum and Faculty': [
        dict(
            question='What is the curriculum like at Saras AI Institute?',
            answer='The curriculum prepares students for roles like AI/ML Engineer, Data Scientist, and Gen AI Engineer.',
        ),
        dict(
            question='Do you also conduct LIVE sessions?',
            answer='Yes, live sessions are conducted regularly to provide interactive learning and Q&A.',
        ),
    ],
}
# Step 4: Precompute an embedding for every FAQ question so each search
# only needs to vectorize the user's query, not the whole FAQ set.
faq_docs = []
for faq_list in faqs.values():  # category labels are not needed for matching
    for faq in faq_list:
        question = faq['question']
        # Vectorize once at startup; the vector is reused for every query.
        faq_docs.append((question, faq['answer'], nlp(question).vector))
# Step 5: Define the function to find the most relevant FAQs
def find_most_relevant_faq_optimized(query, faq_docs, top_k=3, vectorize=None):
    """Return the *top_k* most relevant FAQs for *query* by cosine similarity.

    Args:
        query: Free-text user question.
        faq_docs: Iterable of ``(question, answer, vector)`` triples with
            precomputed question embeddings.
        top_k: Number of results to return (default 3, as before).
        vectorize: Optional callable mapping text -> embedding vector;
            defaults to the module-level spaCy pipeline. Injectable for tests.

    Returns:
        List of ``(question, answer, score)`` tuples, highest score first.
    """
    if vectorize is None:
        vectorize = lambda text: nlp(text).vector
    query_vector = np.asarray(vectorize(query), dtype=float)
    query_norm = norm(query_vector)  # hoisted: identical for every FAQ

    similarities = []
    for question, answer, faq_vector in faq_docs:
        denom = query_norm * norm(faq_vector)
        # Guard against zero-norm vectors (e.g. fully out-of-vocabulary
        # queries): the original divided by zero, producing NaN scores
        # that corrupted the sort order. Score such pairs as 0.0.
        score = float(np.dot(query_vector, faq_vector) / denom) if denom else 0.0
        similarities.append((question, answer, score))

    # Sort by similarity score (highest first) and keep the best top_k.
    similarities.sort(key=lambda item: item[2], reverse=True)
    return similarities[:top_k]
# Step 6: Streamlit user interface.
st.title("Smart FAQ Search - SARAS AI Institute")
st.markdown("### Find Answers to Your Questions Instantly")

# Free-text search box for the visitor's question.
query = st.text_input("Enter your question here:")

if not query:
    # Nothing typed yet — prompt the visitor.
    st.write("Please enter a query to search for relevant FAQs.")
else:
    # Rank the FAQ set against the query and show the best matches.
    matches = find_most_relevant_faq_optimized(query, faq_docs)
    st.markdown("### Top Relevant FAQs:")
    for rank, (question, answer, score) in enumerate(matches, 1):
        st.write(f"**{rank}. {question}**")
        st.write(f"*Answer:* {answer}")
        st.write(f"**Similarity Score:** {score:.2f}")