Create app.py
app.py ADDED
@@ -0,0 +1,52 @@
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+from sentence_transformers import SentenceTransformer
+from sklearn.metrics.pairwise import cosine_similarity
+import numpy as np
+
+# FAQ data
+faq_data = [
+    {"question": "What is AI?", "answer": "AI stands for Artificial Intelligence."},
+    {"question": "What is IBM Granite?", "answer": "Granite is IBM’s family of foundation models."},
+    {"question": "What is Streamlit?", "answer": "Streamlit is a Python library to build interactive web apps."},
+    {"question": "What is machine learning?", "answer": "Machine learning is a subset of AI involving data-driven models."}
+]
+
+# Load the embedding model and the generation model
+embedder = SentenceTransformer('all-MiniLM-L6-v2')
+tokenizer = AutoTokenizer.from_pretrained("ibm/granite-13b-instruct")
+model = AutoModelForCausalLM.from_pretrained("ibm/granite-13b-instruct", torch_dtype="auto", device_map="auto")
+qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+# Semantic search: pick the FAQ entry closest to the user question by cosine similarity
+def find_most_similar_question(user_question, faq_data):
+    user_vec = embedder.encode([user_question])
+    faq_vecs = embedder.encode([faq["question"] for faq in faq_data])
+    similarities = cosine_similarity(user_vec, faq_vecs)[0]
+    best_idx = int(np.argmax(similarities))
+    return faq_data[best_idx]["question"], faq_data[best_idx]["answer"]
+
+# Chatbot logic
+def chatbot(user_question):
+    matched_q, matched_ans = find_most_similar_question(user_question, faq_data)
+
+    prompt = f"""You are a helpful assistant. A user asked: "{user_question}"
+Here is the most relevant question and answer from the FAQ:
+Q: {matched_q}
+A: {matched_ans}
+
+Now generate a helpful and clear answer based on the above."""
+
+    output = qa_pipeline(prompt, max_new_tokens=150, do_sample=False)[0]["generated_text"]
+    final_response = output[len(prompt):].strip()  # the pipeline echoes the prompt, so slice it off
+    return final_response
+
+# Gradio UI
+iface = gr.Interface(fn=chatbot,
+                     inputs=gr.Textbox(lines=2, placeholder="Ask a question..."),
+                     outputs="text",
+                     title="🤖 FAQ Chatbot with IBM Granite",
+                     description="Ask any question related to the FAQ and get an AI-generated answer using IBM Granite.")
+
+if __name__ == "__main__":
+    iface.launch()
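
One design note on the file above: find_most_similar_question re-encodes every FAQ question on each request, even though faq_data never changes. A minimal sketch (not part of this commit) of computing the FAQ embeddings once at module load, reusing the embedder, faq_data, cosine_similarity, and np names from app.py:

# Sketch: cache FAQ embeddings at startup instead of re-encoding per call
faq_vecs = embedder.encode([faq["question"] for faq in faq_data])

def find_most_similar_question(user_question, faq_data):
    user_vec = embedder.encode([user_question])
    similarities = cosine_similarity(user_vec, faq_vecs)[0]  # compare against the cached vectors
    best_idx = int(np.argmax(similarities))
    return faq_data[best_idx]["question"], faq_data[best_idx]["answer"]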
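
This commit adds only app.py; for the Space to build, the dependencies would also need to be declared in a requirements.txt. A minimal sketch inferred from the imports (torch and accelerate are assumptions here: device_map="auto" in from_pretrained relies on the accelerate package, and the models run on torch):

gradio
transformers
torch
accelerate
sentence-transformers
scikit-learn
numpy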