simran40 committed on
Commit 1a38637 · verified · 1 Parent(s): b337e60

Create app.py

Files changed (1)
  1. app.py +86 -65
app.py CHANGED
@@ -1,70 +1,91 @@
 
 
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
  with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
-     demo.launch()
 
+ import json
+ import difflib
  import gradio as gr
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+ # -------------------------------------------------
+ # Load Knowledge Base
+ # -------------------------------------------------
+ with open("destinations.json", "r", encoding="utf-8") as f:
+     DESTS = json.load(f)
+
+ DEST_NAMES = [d["name"] for d in DESTS]
+
+ def retrieve_destinations(query, n=3):
+     query = query.lower()
+     exact = [
+         d for d in DESTS
+         if d["name"].lower() in query or any(tag.lower() in query for tag in d.get("tags", []))
+     ]
+
+     if exact:
+         return exact[:n]
+
+     matches = difflib.get_close_matches(query, DEST_NAMES, n=n, cutoff=0.4)
+     return [d for d in DESTS if d["name"] in matches]
+
+
+ def build_prompt(user_message: str, retrieved):
+     kb_text = ""
+     if retrieved:
+         details = []
+         for d in retrieved:
+             details.append(
+                 f"{d['name']} — {d['summary']}\n"
+                 f"Top attractions: {', '.join(d['top_attractions'])}\n"
+                 f"Best months: {d['best_months']}"
+             )
+         kb_text = "Destination Knowledge:\n" + "\n\n".join(details) + "\n\n"
+
+     return (
+         f"{kb_text}"
+         f"User Query: \"{user_message}\"\n"
+         "You are a helpful travel guide. Provide:\n"
+         "- Best suited destination(s)\n"
+         "- Why it matches the user's need\n"
+         "- Best time to visit\n"
+         "- 2–3 activities to do\n"
+         "- Short travel/safety tips"
+     )
+
+
+ # -------------------------------------------------
+ # Load CPU-friendly Model
+ # -------------------------------------------------
+ MODEL = "facebook/blenderbot-400M-distill"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
+ model = AutoModelForSeq2SeqLM.from_pretrained(MODEL)  # CPU ONLY → no .to('cuda')
+
+
+ def chatbot_reply(message, history):
+     retrieved = retrieve_destinations(message)
+     prompt = build_prompt(message, retrieved)
+
+     inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=200,
+         do_sample=True,
+         top_p=0.9,
+         temperature=0.6
+     )
+     reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     history.append((message, reply))
+     return history, history
+
+
+ # -------------------------------------------------
+ # Gradio UI
+ # -------------------------------------------------
  with gr.Blocks() as demo:
+     gr.Markdown("# 🌍 Travel Recommendation Chatbot (CPU Only)")
+
+     chatbox = gr.Chatbot()
+     txt = gr.Textbox(placeholder="Ask: 'Where should I travel in winter for beaches?'", label="Your Message")
+     state = gr.State([])
+
+     txt.submit(chatbot_reply, [txt, state], [chatbox, state])
+
+ demo.launch()
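
Note that the new app.py opens destinations.json at import time, and that file is not part of this commit. If it is not already present in the Space repository, the app will raise FileNotFoundError on startup. The sketch below is only an illustration of what such a file could contain: the field names (name, summary, top_attractions, best_months, tags) are the keys the code above actually reads, while the two sample destinations and their values are invented placeholders.

# Hypothetical helper that writes a minimal destinations.json for local testing.
# Field names mirror the keys accessed in app.py; the entries themselves are made-up examples.
import json

sample_destinations = [
    {
        "name": "Bali",
        "summary": "Indonesian island known for beaches, temples and rice terraces.",
        "top_attractions": ["Uluwatu Temple", "Ubud rice terraces", "Seminyak Beach"],
        "best_months": "April to October",
        "tags": ["beach", "tropical", "surfing", "winter sun"],
    },
    {
        "name": "Swiss Alps",
        "summary": "Alpine region for skiing, hiking and scenic rail journeys.",
        "top_attractions": ["Zermatt", "Jungfraujoch", "Lake Lucerne"],
        "best_months": "December to March for snow, June to September for hiking",
        "tags": ["mountains", "skiing", "hiking", "winter"],
    },
]

with open("destinations.json", "w", encoding="utf-8") as f:
    json.dump(sample_destinations, f, ensure_ascii=False, indent=2)

Running this version of the app also presumably requires gradio, transformers and torch to be installed; the commit does not show a requirements.txt, so the exact dependency pins are unknown.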