Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,23 +1,35 @@
|
|
| 1 |
import pandas as pd
|
| 2 |
from sentence_transformers import SentenceTransformer
|
| 3 |
-
from sklearn.metrics.pairwise import cosine_similarity
|
| 4 |
import gradio as gr
|
| 5 |
|
| 6 |
-
#
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
# Load a pre-trained sentence transformer model
|
|
|
|
| 10 |
model = SentenceTransformer('all-MiniLM-L6-v2')
|
|
|
|
| 11 |
|
| 12 |
# Create a combined column for embedding (e.g., title + description + keywords)
|
|
|
|
| 13 |
df['combined_text'] = df['title'] + " " + df['description'] + " " + df['keywords']
|
| 14 |
course_embeddings = model.encode(df['combined_text'].tolist(), convert_to_tensor=True)
|
|
|
|
| 15 |
|
| 16 |
def search_courses(user_query):
|
|
|
|
|
|
|
| 17 |
# Encode the user query
|
| 18 |
query_embedding = model.encode(user_query, convert_to_tensor=True)
|
| 19 |
|
| 20 |
# Compute cosine similarities between the query and each course embedding
|
|
|
|
| 21 |
similarities = cosine_similarity(
|
| 22 |
query_embedding.cpu().detach().numpy().reshape(1, -1),
|
| 23 |
course_embeddings.cpu().detach().numpy()
|
|
@@ -25,9 +37,10 @@ def search_courses(user_query):
|
|
| 25 |
|
| 26 |
# Get indices of top matching courses (top 5 results)
|
| 27 |
top_matches = similarities.argsort()[0][-5:][::-1]
|
| 28 |
-
|
| 29 |
# Retrieve top matching courses
|
| 30 |
results = [{"title": df.iloc[i]["title"], "description": df.iloc[i]["description"]} for i in top_matches]
|
|
|
|
| 31 |
return results
|
| 32 |
|
| 33 |
# Define Gradio function for user interaction
|
|
@@ -36,6 +49,7 @@ def gradio_search(query):
|
|
| 36 |
return results
|
| 37 |
|
| 38 |
# Set up Gradio interface
|
|
|
|
| 39 |
iface = gr.Interface(
|
| 40 |
fn=gradio_search,
|
| 41 |
inputs="text",
|
|
@@ -45,4 +59,8 @@ iface = gr.Interface(
|
|
| 45 |
)
|
| 46 |
|
| 47 |
# Launch the app (for local testing or deploying in Hugging Face Spaces)
|
|
|
|
| 48 |
iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import pandas as pd
|
| 2 |
from sentence_transformers import SentenceTransformer
|
| 3 |
+
from sklearn.metrics.pairwise import cosine_similarity
|
| 4 |
import gradio as gr
|
| 5 |
|
| 6 |
+
# Debug: Print start of application
print("Starting the application...")

# Load the dataset from the same directory
print("Loading dataset...")
df = pd.read_csv('courses.csv')  # Assuming courses.csv is in the same directory as app.py
print(f"Dataset loaded. Number of rows: {df.shape[0]}")

# Load a pre-trained sentence transformer model
print("Loading Sentence Transformer model...")
model = SentenceTransformer('all-MiniLM-L6-v2')
print("Model loaded successfully.")

# Create a combined column for embedding (e.g., title + description + keywords)
# NOTE(review): assumes the CSV provides non-null 'title', 'description' and
# 'keywords' columns — string concatenation yields NaN rows otherwise; confirm
# against the dataset before deploying.
print("Generating embeddings for courses...")
df['combined_text'] = df['title'] + " " + df['description'] + " " + df['keywords']
course_embeddings = model.encode(df['combined_text'].tolist(), convert_to_tensor=True)
print("Embeddings generated successfully.")
| 25 |
def search_courses(user_query):
    """Return the top-5 courses most similar to *user_query*.

    Encodes the query with the module-level SentenceTransformer ``model``,
    scores it against the precomputed ``course_embeddings`` with cosine
    similarity, and returns a list of dicts with the "title" and
    "description" of the best-matching rows of ``df``.
    """
    print(f"Received query: {user_query}")

    # Encode the user query
    query_embedding = model.encode(user_query, convert_to_tensor=True)

    # Compute cosine similarities between the query and each course embedding
    print("Calculating cosine similarities...")
    similarities = cosine_similarity(
        query_embedding.cpu().detach().numpy().reshape(1, -1),
        course_embeddings.cpu().detach().numpy()
    )  # NOTE(review): closing paren reconstructed — this line was omitted from the diff view

    # Get indices of top matching courses (top 5 results); similarities is
    # shape (1, n_courses), so [0] selects the single query row
    top_matches = similarities.argsort()[0][-5:][::-1]

    # Retrieve top matching courses
    results = [{"title": df.iloc[i]["title"], "description": df.iloc[i]["description"]} for i in top_matches]
    print(f"Found {len(results)} results.")
    return results
|
| 45 |
|
| 46 |
# Define Gradio function for user interaction
|
|
|
|
| 49 |
return results
|
| 50 |
|
| 51 |
# Set up Gradio interface
|
| 52 |
+
print("Setting up Gradio interface...")
|
| 53 |
iface = gr.Interface(
|
| 54 |
fn=gradio_search,
|
| 55 |
inputs="text",
|
|
|
|
| 59 |
)
|
| 60 |
|
| 61 |
# Launch the app (for local testing or deploying in Hugging Face Spaces)
print("Launching the app...")
iface.launch()

# Debug: Print end of application
# NOTE(review): gr.Interface.launch() blocks by default, so this message only
# prints once the server shuts down — confirm that is the intent.
print("Application launched successfully.")
|