muddasser committed on
Commit
add1b67
·
verified ·
1 Parent(s): a4f2ddb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -133
app.py CHANGED
@@ -1,134 +1,122 @@
1
- import streamlit as st
2
- import pandas as pd
3
- import pypdf
4
- import docx2txt
5
- from typing import List
6
- from transformers import pipeline
7
- from langchain_community.vectorstores import FAISS
8
- from langchain_community.embeddings import HuggingFaceEmbeddings
9
- from langchain_text_splitters import RecursiveCharacterTextSplitter
10
- from langchain_core.documents import Document
11
- import evaluate
12
-
13
# ======================================
# 🧩 Streamlit Title
# ======================================
st.set_page_config(page_title="RAG Chatbot with Grok 70B", page_icon="📚", layout="centered")
st.title("📚 RAG Chatbot powered by Grok 70B")

# ======================================
# 🔹 Load Model (HF Hosted)
# ======================================
@st.cache_resource
def load_model():
    """Build and cache the text-generation pipeline for the hosted Grok 70B model."""
    # device_map="auto" uses the GPU when one is available, CPU otherwise;
    # trust_remote_code is required for Grok model repositories.
    return pipeline(
        "text-generation",
        model="GrokAI/llama-3.3-70b-versatile",
        device_map="auto",
        trust_remote_code=True,
    )

with st.spinner("🔄 Loading Grok 70B model (hosted)..."):
    generator = load_model()

# ======================================
# 📂 File Upload Section
# ======================================
uploaded_file = st.file_uploader("📤 Upload a file (PDF, DOCX, CSV)", type=["pdf", "docx", "csv"])
43
-
44
def extract_text(file):
    """Return the plain-text content of an uploaded PDF, DOCX, or CSV file.

    Parameters
    ----------
    file :
        Streamlit uploaded file; its MIME type (``file.type``) selects the parser.

    Returns
    -------
    str
        Extracted text, or an empty string for unsupported MIME types.
    """
    if file.type == "application/pdf":
        reader = pypdf.PdfReader(file)
        # Extract each page exactly once (the original called extract_text()
        # twice per page) and skip pages that yield no text (e.g. scanned images).
        page_texts = (page.extract_text() for page in reader.pages)
        return "\n".join(text for text in page_texts if text)
    elif file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        return docx2txt.process(file)
    elif file.type == "text/csv":
        df = pd.read_csv(file)
        return df.to_string(index=False)
    # Unknown MIME type: signal "no text" rather than raising.
    return ""
55
-
56
# ======================================
# 🧠 Build FAISS Vector Store
# ======================================
@st.cache_resource
def build_faiss(_docs: List[Document]):
    """Embed the documents with MiniLM and index them in a FAISS vector store."""
    encoder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return FAISS.from_documents(_docs, encoder)

docs, db = [], None
if uploaded_file:
    text = extract_text(uploaded_file)
    if text:
        st.info("🔍 Splitting and indexing document...")
        # Larger chunks keep more context in each retrieved passage.
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
        pieces = splitter.split_text(text)
        docs = [Document(page_content=piece) for piece in pieces]
        db = build_faiss(docs)
        st.success("✅ Knowledge base indexed successfully!")
73
-
74
# ======================================
# 💬 Chat Interface
# ======================================
query = st.text_input("💬 Ask a question about your document:")

if query and db:
    # Retrieve the three chunks most similar to the question.
    retriever = db.as_retriever(search_kwargs={"k": 3})
    hits = retriever.invoke(query)
    context = "\n".join(doc.page_content for doc in hits)

    prompt = f"""
You are a factual assistant. Use ONLY the provided context to answer.
If the context doesn’t contain enough info, say:
"The document does not provide enough information."
Context:
{context}
Question: {query}
Answer:
"""

    with st.spinner("🤔 Generating answer..."):
        result = generator(prompt, max_new_tokens=200, temperature=0.5, top_p=0.9)
    raw_output = result[0]["generated_text"]
    # The model echoes the prompt; keep only what follows the final "Answer:".
    answer = raw_output.split("Answer:")[-1].strip()

    st.subheader("📝 Answer")
    st.write(answer)

    # ======================================
    # 🔎 Evaluation Section
    # ======================================
    st.divider()
    st.subheader("📊 Evaluate Answer Quality")

    reference = st.text_area("✅ Reference Answer (optional for evaluation):")

    if st.button("Compute BLEU + ROUGE + BERTScore") and reference:
        # BLEU (note the nested reference list: one list of references per prediction).
        bleu = evaluate.load("bleu")
        bleu_score = bleu.compute(predictions=[answer], references=[[reference]])["bleu"]

        # ROUGE
        rouge = evaluate.load("rouge")
        rouge_scores = rouge.compute(predictions=[answer], references=[reference])
        rouge1_f = rouge_scores["rouge1"]
        rouge2_f = rouge_scores["rouge2"]
        rougel_f = rouge_scores["rougeL"]

        # BERTScore returns one F1 per prediction; average them.
        bertscore = evaluate.load("bertscore")
        bert_scores = bertscore.compute(predictions=[answer], references=[reference], lang="en")
        bert_f1 = sum(bert_scores["f1"]) / len(bert_scores["f1"])

        st.markdown(f"""
**🧩 Evaluation Results:**
- **BLEU:** {bleu_score:.4f}
- **ROUGE-1 F1:** {rouge1_f:.4f}
- **ROUGE-2 F1:** {rouge2_f:.4f}
- **ROUGE-L F1:** {rougel_f:.4f}
- **BERTScore F1:** {bert_f1:.4f}
""")
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ from dotenv import load_dotenv
4
+ from groq import Groq
5
+
6
+ load_dotenv()
7
+
8
# ── Agent Runner ─────────────────────────────────────────────────
def run_agent(client, role, backstory, task,
              model="llama-3.3-70b-versatile", temperature=0.7, max_tokens=1024):
    """Run a single chat completion acting as the given agent.

    Parameters
    ----------
    client : Groq
        An initialized Groq API client.
    role, backstory : str
        Combined into the system prompt: "You are a {role}. {backstory}".
    task : str
        The user message describing what the agent must do.
    model, temperature, max_tokens :
        Generation settings. Defaults match the previously hard-coded
        values, so existing callers are unaffected.

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped.
    """
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": f"You are a {role}. {backstory}"},
            {"role": "user", "content": task},
        ],
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return response.choices[0].message.content.strip()
20
+
21
# ── Three Agents Sequentially ────────────────────────────────────
def plan_trip(destination, trip_days, budget_range):
    """Produce a full trip plan by chaining three agents in sequence.

    Researcher → Planner → Budget Analyst: each later agent receives the
    previous agent's output embedded in its task prompt.

    Parameters
    ----------
    destination : str
        Where to travel; leading/trailing whitespace is ignored.
    trip_days : int | float
        Trip length in days (Gradio sliders may deliver a float).
    budget_range : str
        One of "Budget", "Mid-range", "Luxury".

    Returns
    -------
    str
        A Markdown report, or a user-facing error message.
    """
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        return "❌ GROQ_API_KEY not set. Add it in Space Settings → Variables and secrets."
    if not destination.strip():
        return "⚠️ Please enter a destination."

    # Normalize inputs once: the original checked destination.strip() but then
    # used the raw value, so stray whitespace leaked into every prompt; a float
    # day count would render as "5.0-day" in the prompts and report header.
    destination = destination.strip()
    trip_days = int(trip_days)

    client = Groq(api_key=api_key)

    try:
        # Agent 1 — Researcher
        research = run_agent(
            client,
            role="Travel Researcher",
            backstory="You have deep knowledge of global destinations, cultures, and hidden gems.",
            task=(
                f"Research {destination} and provide:\n"
                f"- Top 5 tourist attractions (1-2 lines each)\n"
                f"- 3 hotel recommendations across budget levels\n"
                f"- 5 must-try local foods\n"
                f"- Best time to visit and general travel tips\n"
                f"Be concise and specific."
            )
        )

        # Agent 2 — Planner (receives researcher output)
        itinerary = run_agent(
            client,
            role="Travel Planner",
            backstory="You craft realistic, enjoyable day-by-day itineraries tailored to traveler preferences.",
            task=(
                f"Based on this research about {destination}:\n{research}\n\n"
                f"Create a {trip_days}-day itinerary with Morning, Afternoon, and Evening activities each day. "
                f"Be specific with place names and timings."
            )
        )

        # Agent 3 — Budget Analyst (receives itinerary output)
        budget = run_agent(
            client,
            role="Travel Budget Analyst",
            backstory="You give accurate, realistic travel cost breakdowns in USD.",
            task=(
                f"Based on this {trip_days}-day itinerary for {destination}:\n{itinerary}\n\n"
                f"Provide a full budget for a {budget_range} traveler (1 person) covering:\n"
                f"- Flights (round trip estimate)\n"
                f"- Accommodation (per night × {trip_days} nights)\n"
                f"- Food & Dining\n"
                f"- Activities & Entrance Fees\n"
                f"- Local Transport\n"
                f"- Miscellaneous\n"
                f"- TOTAL ESTIMATED COST\n"
                f"Use USD. Be realistic for the {budget_range} level."
            )
        )

        return (
            f"# ✈️ {destination} — {trip_days}-Day Trip ({budget_range})\n"
            f"{'='*60}\n\n"
            f"## 🔍 Agent 1: Research\n{research}\n\n"
            f"{'='*60}\n\n"
            f"## 🗓️ Agent 2: Itinerary\n{itinerary}\n\n"
            f"{'='*60}\n\n"
            f"## 💰 Agent 3: Budget Breakdown\n{budget}\n"
        )

    except Exception as e:
        # Top-level UI boundary: surface the failure to the user instead of crashing.
        return f"❌ Error: {str(e)}"
90
+
91
+
92
# ── Gradio UI ─────────────────────────────────────────────────────
with gr.Blocks(theme=gr.themes.Soft(), title="Travel Planner AI") as demo:

    gr.Markdown(
        "# ✈️ Travel Planner AI\n"
        "Three AI agents work sequentially: **Researcher → Planner → Budget Analyst** "
        "(powered by Groq + LLaMA3-70b)"
    )

    with gr.Row():
        # Left column: trip parameters.
        with gr.Column(scale=1):
            dest_input = gr.Textbox(label="Destination", placeholder="e.g. Paris, Tokyo, Istanbul")
            days_input = gr.Slider(1, 14, value=5, step=1, label="Number of Days")
            budget_input = gr.Radio(["Budget", "Mid-range", "Luxury"], value="Mid-range", label="Budget Style")
            go_btn = gr.Button("🗺️ Plan My Trip", variant="primary")

        # Right column: the generated plan.
        with gr.Column(scale=2):
            plan_output = gr.Textbox(
                label="Your AI-Generated Trip Plan",
                lines=35,
                placeholder="Your trip plan will appear here once the three agents complete their work..."
            )

    go_btn.click(fn=plan_trip, inputs=[dest_input, days_input, budget_input], outputs=plan_output)

    gr.Markdown(
        "> 🔑 Powered by [Groq](https://groq.com) (free). "
        "Add your `GROQ_API_KEY` in **Space Settings → Variables and secrets**."
    )

# 0.0.0.0:7860 is the standard binding for a Hugging Face Space container.
demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)