Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from sentence_transformers import SentenceTransformer, util
|
| 3 |
import pandas as pd
|
| 4 |
-
from rapidfuzz import process
|
| 5 |
|
| 6 |
# Pre-download the model
|
| 7 |
model_name = "sentence-transformers/all-MiniLM-L6-v2"
|
|
@@ -20,18 +20,28 @@ data = df['text'].dropna().tolist()
|
|
| 20 |
def autocomplete(query):
|
| 21 |
if not query.strip():
|
| 22 |
return [] # Return empty if query is blank
|
| 23 |
-
|
|
|
|
|
|
|
| 24 |
return "\n".join([match[0] for match in matches]) # Return matches as a multi-line string
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
# Gradio interface
|
| 27 |
with gr.Blocks() as demo:
|
| 28 |
-
gr.Markdown("### Typo-Tolerant Autocomplete")
|
| 29 |
|
| 30 |
-
#
|
| 31 |
-
query = gr.Textbox(label="Start typing for autocomplete")
|
| 32 |
autocomplete_output = gr.Textbox(label="Autocomplete Suggestions", lines=5)
|
| 33 |
|
| 34 |
-
#
|
| 35 |
query.change(fn=autocomplete, inputs=query, outputs=autocomplete_output)
|
| 36 |
|
| 37 |
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from sentence_transformers import SentenceTransformer, util
|
| 3 |
import pandas as pd
|
| 4 |
+
from rapidfuzz import fuzz, process # Importing valid functions for fuzzy matching
|
| 5 |
|
| 6 |
# Pre-download the model
|
| 7 |
model_name = "sentence-transformers/all-MiniLM-L6-v2"
|
|
|
|
| 20 |
def autocomplete(query):
    """Return up to five typo-tolerant suggestions for *query*.

    Scores *query* against the module-level ``data`` list with
    rapidfuzz's partial-ratio scorer and joins the best matches into a
    newline-separated string, which is what the Gradio ``Textbox``
    output expects.

    Args:
        query: Raw text typed by the user; may be empty or whitespace.

    Returns:
        A newline-separated ``str`` of up to five suggestions, or an
        empty string for a blank query. (Previously the blank path
        returned ``[]``, an inconsistent type that is not a valid
        ``Textbox`` value.)
    """
    if not query.strip():
        return ""  # keep the return type consistent: Textbox wants a str
    # process.extract yields (match_text, score, index) tuples; keep the text.
    matches = process.extract(query, data, scorer=fuzz.partial_ratio, limit=5)
    return "\n".join(match[0] for match in matches)
|
| 27 |
|
| 28 |
+
# Semantic search over the corpus using sentence-transformer embeddings.
def semantic_search(query):
    """Return the top-5 corpus entries semantically closest to *query*.

    Encodes the query with the module-level ``model`` and ranks it
    against the precomputed ``embeddings``; a blank query yields an
    empty list.
    """
    if not query.strip():
        return []  # Return empty if query is blank
    # Embed the query once, then rank it against every corpus embedding.
    encoded = model.encode(query, convert_to_tensor=True)
    hits = util.semantic_search(encoded, embeddings, top_k=5)
    best = hits[0]  # util.semantic_search returns one hit list per query
    return [data[hit['corpus_id']] for hit in best]
|
| 35 |
+
|
| 36 |
# Gradio interface: a live input box wired to the autocomplete function.
# NOTE: `demo` is kept as the app name — Hugging Face Spaces looks for it.
with gr.Blocks() as demo:
    gr.Markdown("### Typo-Tolerant Autocomplete & Semantic Search")

    # Input re-renders suggestions on every keystroke (live=True).
    search_box = gr.Textbox(label="Start typing for autocomplete", live=True)
    suggestions_box = gr.Textbox(label="Autocomplete Suggestions", lines=5)

    # Re-run autocomplete whenever the input text changes.
    search_box.change(fn=autocomplete, inputs=search_box, outputs=suggestions_box)

demo.launch()
|