|
|
import difflib
import json

import gradio as gr
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load the destination knowledge base shipped alongside this script.
# assumes destinations.json is a list of objects with at least a "name"
# key (plus "summary", "tags", etc. used elsewhere) — TODO confirm schema.
with open("destinations.json", "r", encoding="utf-8") as kb_file:
    DESTS = json.load(kb_file)

# Flat list of destination names, used by the fuzzy-matching fallback.
DEST_NAMES = [entry["name"] for entry in DESTS]
|
|
|
|
|
def retrieve_destinations(query, n=3):
    """Return up to *n* destination records from DESTS that match *query*.

    Two passes:
      1. Exact/substring pass: a destination matches when its name, or any
         of its tags, appears (case-insensitively) inside the query text.
      2. Fuzzy fallback: difflib close-match of the whole query against
         destination names, cutoff 0.4.

    Args:
        query: free-text user message.
        n: maximum number of destinations to return (default 3).

    Returns:
        A list of destination dicts (possibly empty), in DESTS order.
    """
    query = query.lower()

    exact = [
        d for d in DESTS
        if d["name"].lower() in query
        or any(tag.lower() in query for tag in d.get("tags", []))
    ]
    if exact:
        return exact[:n]

    # BUG FIX: the query was lowercased above, but DEST_NAMES keeps original
    # casing — comparing mixed cases degrades difflib's similarity scores.
    # Match against lowercased names, then map hits back to the real names.
    lower_to_name = {name.lower(): name for name in DEST_NAMES}
    close = difflib.get_close_matches(query, list(lower_to_name), n=n, cutoff=0.4)
    matched = {lower_to_name[m] for m in close}
    return [d for d in DESTS if d["name"] in matched]
|
|
|
|
|
|
|
|
def build_prompt(user_message: str, retrieved) -> str:
    """Build the text prompt fed to the seq2seq model.

    Args:
        user_message: the raw user query, quoted verbatim in the prompt.
        retrieved: iterable of destination dicts (may be empty/None); each
            must carry "name", "summary", "top_attractions", "best_months".

    Returns:
        The full prompt: an optional knowledge section, the quoted query,
        and a fixed instruction list for the model.
    """
    sections = []
    for dest in retrieved or []:
        sections.append(
            f"{dest['name']} β {dest['summary']}\n"
            f"Top attractions: {', '.join(dest['top_attractions'])}\n"
            f"Best months: {dest['best_months']}"
        )

    kb_text = ""
    if sections:
        kb_text = "Destination Knowledge:\n" + "\n\n".join(sections) + "\n\n"

    instructions = (
        "You are a helpful travel guide. Provide:\n"
        "- Best suited destination(s)\n"
        "- Why it matches the user's need\n"
        "- Best time to visit\n"
        "- 2β3 activities to do\n"
        "- Short travel/safety tips"
    )
    return f"{kb_text}User Query: \"{user_message}\"\n{instructions}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# HuggingFace model id: a distilled ~400M-parameter seq2seq chat model,
# small enough for CPU-only inference.
MODEL = "facebook/blenderbot-400M-distill"

# NOTE(review): from_pretrained downloads weights on first run — network
# I/O happens at import time; confirm that is acceptable for deployment.
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL)
|
|
|
|
|
|
|
|
def chatbot_reply(message, history):
    """Handle one chat turn: retrieve KB entries, prompt the model, reply.

    Args:
        message: the user's new message (from the Textbox).
        history: list of (user, bot) tuples held in gr.State.

    Returns:
        (history, history) — the updated pair list, duplicated to feed both
        the Chatbot display and the State output.
    """
    retrieved = retrieve_destinations(message)
    prompt = build_prompt(message, retrieved)

    # NOTE(review): blenderbot's input window is short; long prompts are
    # silently truncated here — confirm the instructions survive truncation.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)

    # Inference only: disable autograd so generation doesn't build a graph
    # (saves memory and time on CPU).
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,       # sampled decoding — replies are non-deterministic
            top_p=0.9,
            temperature=0.6,
        )
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Build a new list instead of mutating the caller's state in place —
    # returning a fresh object is the reliable way to update gr.State.
    history = history + [(message, reply)]
    return history, history
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# π Travel Recommendation Chatbot (CPU Only)")

    # NOTE(review): tuple-format Chatbot history; newer Gradio versions
    # prefer type="messages" — confirm the installed version before changing.
    chatbox = gr.Chatbot()
    txt = gr.Textbox(placeholder="Ask: 'Where should I travel in winter for beaches?'", label="Your Message")
    state = gr.State([])  # per-session history as (user, bot) tuples

    # On Enter: pass (message, history) to the handler; its two return
    # values update the chat display and the stored history.
    txt.submit(chatbot_reply, [txt, state], [chatbox, state])

if __name__ == "__main__":
    # Guarded entry point: importing this module no longer starts a server.
    demo.launch()
|
|
|