# WikiYatra — streamlit_app.py
# (renamed from src/streamlit_app.py)
import streamlit as st
import requests
from PIL import Image
import io
import json
import os
from dotenv import load_dotenv
import googlemaps
import folium
from streamlit_folium import st_folium
import polyline
import time
# Load environment variables from .env file (if it exists)
load_dotenv()

# --- Configuration ---
# Language subdomain (e.g. "en", "hi") is prepended at call time to form the
# full endpoint: f"https://{lang}{WIKIPEDIA_API_BASE_URL_PREFIX}".
WIKIPEDIA_API_BASE_URL_PREFIX = ".wikipedia.org/w/api.php"
ENGLISH_SUMMARY_SENTENCES = 5 # For English, we still want concise default
# Wikimedia API etiquette requires a descriptive User-Agent with contact info.
WIKIMEDIA_HEADERS = {
    'User-Agent': 'WikiYatra/1.0 (vaishnavi.maddali.project@example.com)' # IMPORTANT: Customize this!
}

# --- Mapping & Services Configuration ---
Maps_API_KEY = os.getenv("Maps_API_KEY")
# Initialize Google Maps client IF API key is available
gmaps = googlemaps.Client(key=Maps_API_KEY) if Maps_API_KEY else None

# --- LLM Chatbot Configuration (Hugging Face) ---
HF_API_KEY = os.getenv("HF_API_KEY") # New env var for Hugging Face API Token
LLM_MODEL_TYPE = None # Will indicate if Hugging Face is used
HF_INFERENCE_API_URL = "https://api-inference.huggingface.co/models/"
# Chosen Hugging Face model. Mistral-7B-Instruct-v0.3 is often better for instruction following.
HF_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"
# Note: Larger models might be slower or incur cost.
if HF_API_KEY and HF_MODEL_ID:
    LLM_MODEL_TYPE = "huggingface_inference"
else:
    pass # Warning handled later in UI display

REQUEST_TIMEOUT = 30 # For HTTP requests
API_REQUEST_DELAY = 1.5 # Rate limit delay for free APIs
@st.cache_data(ttl=3600)
def fetch_wikipedia_content(query, lang="en", is_fallback_attempt=False):
    """
    Fetch a summary, thumbnail image URL, and page URL from Wikipedia.

    Lookup strategy, in order:
      1. Direct title lookup on the requested language's Wikipedia.
      2. Full-text search on the same wiki (for non-English requests, or when
         already inside a fallback attempt), recursing on the best match.
      3. Fallback to English Wikipedia.

    For non-English languages the full first extract is requested and, when
    the Hugging Face LLM is configured, condensed via ai_summarize().

    Args:
        query: Page title or free-text place name.
        lang: Wikipedia language (subdomain) code, e.g. "en", "hi".
        is_fallback_attempt: Internal flag marking recursive fallback calls
            so fallback chains terminate.

    Returns:
        Tuple (summary, image_url, full_url); (None, None, None) when no
        usable content was found. Errors are surfaced via st.warning.
    """
    api_url = f"https://{lang}{WIKIPEDIA_API_BASE_URL_PREFIX}"

    # English gets a fixed sentence count; other languages fetch the full
    # first extract so the LLM has enough context to build a paragraph.
    extract_params = {
        "explaintext": True,
        "redirects": 1,
    }
    if lang == "en":
        extract_params["exsentences"] = ENGLISH_SUMMARY_SENTENCES
    else:
        extract_params["exlimit"] = 1  # Fetch the first extract, which is often the introduction

    # --- Attempt 1: Direct title lookup ---
    params_title = {
        "action": "query",
        "format": "json",
        "prop": "extracts|pageimages|info",
        "pithumbsize": 300,
        "inprop": "url",
        "titles": query,
        "uselang": lang,
        **extract_params
    }
    try:
        response = requests.get(api_url, params=params_title, headers=WIKIMEDIA_HEADERS, timeout=REQUEST_TIMEOUT)
        response.raise_for_status()
        data = response.json()
        pages = data.get("query", {}).get("pages", {})
        # The API reports a missing title with the sentinel page id '-1'.
        if pages and list(pages.keys())[0] != '-1':
            page_id = next(iter(pages))
            page = pages[page_id]
            summary = page.get("extract")
            image_url = page.get("thumbnail", {}).get("source")
            full_url = page.get("fullurl")
            if summary:
                # If not English, and summary is available, try to ensure paragraph length via LLM
                if lang != "en" and LLM_MODEL_TYPE == "huggingface_inference":
                    processed_summary = ai_summarize(summary, target_lang=lang)
                else:
                    processed_summary = summary
                return processed_summary, image_url, full_url
    except requests.exceptions.RequestException as e:
        st.warning(f"Network error during direct title lookup for '{query}' in {lang.upper()}: {e}")
    except Exception as e:
        st.warning(f"Error during direct title lookup for '{query}' in {lang.upper()}: {e}")

    # --- Attempt 2: Search for the query within the target language Wikipedia ---
    if lang != "en" or is_fallback_attempt:
        params_search = {
            "action": "query",
            "format": "json",
            "list": "search",
            "srsearch": query,
            "srlimit": 1,
            "srprop": "snippet|titlesnippet",
            "uselang": lang
        }
        try:
            response_search = requests.get(api_url, params=params_search, headers=WIKIMEDIA_HEADERS, timeout=REQUEST_TIMEOUT)
            response_search.raise_for_status()
            data_search = response_search.json()
            search_results = data_search.get("query", {}).get("search", [])
            if search_results:
                found_title = search_results[0].get("title")
                # BUGFIX: only recurse when search surfaced a *different* title.
                # Recursing on the same title would repeat the direct lookup
                # that just failed above and could recurse without bound.
                if found_title and found_title != query:
                    st.info(f"Found '{found_title}' via search in {lang.upper()}. Fetching details...")
                    return fetch_wikipedia_content(found_title, lang=lang, is_fallback_attempt=True)
        except requests.exceptions.RequestException as e:
            st.warning(f"Network error during search lookup for '{query}' in {lang.upper()}: {e}")
        except Exception as e:
            st.warning(f"Error during search lookup for '{query}' in {lang.upper()}: {e}")

    # --- Attempt 3: Fallback to English Wikipedia ---
    if lang != "en" and not is_fallback_attempt:
        st.warning(f"No direct or search results found for '{query}' in {lang.upper()}. Attempting to fetch from English Wikipedia.")
        return fetch_wikipedia_content(query, "en", is_fallback_attempt=True)

    return None, None, None
def query_huggingface_api(payload, model_id=HF_MODEL_ID):
    """Send *payload* to the Hugging Face Inference API and return parsed JSON.

    Returns None after surfacing a UI error on any failure: missing API key,
    request timeout, or an HTTP/network error.
    """
    if not HF_API_KEY:
        st.error("Hugging Face API Key is not set. Cannot query model.")
        return None

    endpoint = f"{HF_INFERENCE_API_URL}{model_id}"
    auth_headers = {"Authorization": f"Bearer {HF_API_KEY}"}
    try:
        # Generous timeout: free-tier models may need time to cold-start.
        resp = requests.post(endpoint, headers=auth_headers, json=payload, timeout=90)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.Timeout:
        st.error(f"Hugging Face API request timed out for model {model_id}. The model might be loading or too busy. Please try again.")
    except requests.exceptions.RequestException as e:
        st.error(f"Error querying Hugging Face API for model {model_id}: {e}")
        if e.response is not None:
            body = e.response.text
            st.error(f"Response content: {body}")
            lowered = body.lower()
            # Cold-start / throttling responses deserve a softer hint.
            if any(marker in lowered for marker in ("loading", "too many requests", "service unavailable")):
                st.warning("Hugging Face model is currently loading or busy. Please try again in a moment. This is common for free tier models.")
    return None
def ai_summarize(text, target_lang="en"):
    """
    Condense *text* into at least one paragraph via the Hugging Face LLM.

    Falls back to returning the input unchanged when no LLM is configured,
    when the API response is malformed, or when any error occurs.
    """
    if not text:
        return "No content available for summarization."
    if LLM_MODEL_TYPE != "huggingface_inference":
        return text

    try:
        # Mistral-7B-Instruct-v0.3 uses specific prompt format for instructions
        prompt_text = f"<s>[INST] Summarize the following text into at least one coherent paragraph in {target_lang}. Ensure the summary is informative and captures the main points. If the text is too short to form a full paragraph, just return the text as is. \nText to summarize:\n{text} [/INST]"
        payload = {
            "inputs": prompt_text,
            "parameters": {
                "max_new_tokens": 300, # Increased tokens for better summaries
                "temperature": 0.7,
                "do_sample": True,
                "return_full_text": False, # This should prevent the prompt from being returned
                "pad_token_id": 2 # Some models like Mistral need this for generation to stop cleanly
            }
        }
        response_data = query_huggingface_api(payload, model_id=HF_MODEL_ID)

        valid = (response_data
                 and isinstance(response_data, list)
                 and response_data[0].get('generated_text'))
        if not valid:
            st.warning("Hugging Face API response unexpected for summarization. Displaying raw content.")
            return text

        summary_text = response_data[0]['generated_text'].strip()
        # Some models echo boilerplate prefixes; strip any that appear.
        for boilerplate in ("summary:", "here's a summary:"):
            if summary_text.lower().startswith(boilerplate):
                summary_text = summary_text[len(boilerplate):].strip()
        return summary_text
    except Exception as e:
        st.warning(f"AI summarization failed: {e}. Displaying raw content.")
        return text
@st.cache_data(ttl=3600)
def get_coordinates(location_name):
    """
    Geocode an Indian location name to (lat, lon) via the Nominatim API.

    Returns a (lat, lon) tuple of floats, or None when the input is empty,
    no match exists, or a network/service error occurs (errors are shown
    in the Streamlit UI).
    """
    location_name = location_name.strip()
    if not location_name:
        st.error("Location name cannot be empty.")
        return None

    search_params = {
        'q': f"{location_name}, India",  # bias the query toward India
        'format': 'json',
        'limit': 1,
        'countrycodes': 'in',
        'addressdetails': 1
    }
    try:
        time.sleep(API_REQUEST_DELAY)  # courtesy delay for the free service
        response = requests.get("https://nominatim.openstreetmap.org/search",
                                params=search_params,
                                headers=WIKIMEDIA_HEADERS,
                                timeout=REQUEST_TIMEOUT)
        response.raise_for_status()
        results = response.json()
        if not results:
            st.warning(f"No results found for '{location_name}'. Please try a more specific name.")
            return None
        top_hit = results[0]
        return float(top_hit['lat']), float(top_hit['lon'])
    except requests.exceptions.Timeout:
        st.error(f"Request timed out for '{location_name}'. Please try again.")
    except requests.exceptions.HTTPError as http_err:
        if response.status_code == 429:
            st.error(f"Rate limit exceeded for geocoding '{location_name}'. Please wait a moment and try again.")
        else:
            st.error(f"HTTP error occurred during geocoding for '{location_name}': {http_err}")
    except requests.exceptions.RequestException as req_err:
        st.error(f"Network error occurred during geocoding for '{location_name}': {req_err}")
    except json.JSONDecodeError:
        st.error(f"Invalid response format from geocoding service for '{location_name}'.")
    except Exception as e:
        st.error(f"Unexpected error geocoding '{location_name}': {e}")
    return None
@st.cache_data(ttl=3600)
def get_route(start_coords, end_coords):
    """
    Fetch a driving route between two (lat, lon) pairs via the public OSRM API.

    Returns (distance_km, duration_hours, encoded_polyline); all three are
    None when either endpoint is missing or the lookup fails (errors are
    shown in the Streamlit UI).
    """
    if not (start_coords and end_coords):
        return None, None, None

    start_lat, start_lon = start_coords
    end_lat, end_lon = end_coords
    # OSRM expects lon,lat ordering in the path segment.
    url = (f"https://router.project-osrm.org/route/v1/driving/"
           f"{start_lon},{start_lat};{end_lon},{end_lat}")
    query_params = {
        'overview': 'full',
        'geometries': 'polyline',
        'steps': 'false'
    }
    try:
        time.sleep(API_REQUEST_DELAY)  # courtesy delay for the free service
        response = requests.get(url, params=query_params, timeout=REQUEST_TIMEOUT)
        response.raise_for_status()
        payload = response.json()
        routes = payload.get('routes')
        if not routes:
            st.warning("No route found between the specified locations.")
            return None, None, None
        best = routes[0]
        # OSRM reports metres and seconds; convert to km and hours.
        return best['distance'] / 1000, best['duration'] / 3600, best.get('geometry')
    except requests.exceptions.Timeout:
        st.error("Route request timed out. Please try again.")
    except requests.exceptions.HTTPError as http_err:
        if response.status_code == 429:
            st.error("Rate limit exceeded for routing. Please wait a moment and try again.")
        else:
            st.error(f"HTTP error occurred fetching route: {http_err}")
    except requests.exceptions.RequestException as req_err:
        st.error(f"Network error occurred fetching route: {req_err}")
    except json.JSONDecodeError:
        st.error("Invalid response format from routing service.")
    except Exception as e:
        st.error(f"Unexpected error fetching route: {e}")
    return None, None, None
@st.cache_data(ttl=3600)
def get_places_nearby(lat, lon, place_type="food", radius=5000):
    """
    Fetch names of nearby places of interest using the Overpass API.

    Args:
        lat, lon: Search center (WGS84 degrees).
        place_type: "food", "hotels", or "fuel"; any other value falls
            back to restaurants.
        radius: Search radius in metres.

    Returns:
        Up to 10 place names (or humanized tag values for unnamed
        elements); an empty list on failure.
    """
    # Map each place type to the OSM tag key and value regex describing it.
    # BUGFIX: lodging is tagged with the "tourism" key in OSM (tourism=hotel,
    # tourism=guest_house, tourism=motel), not "amenity" — the previous
    # amenity-only query returned almost no hotels.
    osm_tag_map = {
        "food": ("amenity", "restaurant|cafe|fast_food"),
        "hotels": ("tourism", "hotel|guest_house|motel"),
        "fuel": ("amenity", "fuel"),
    }
    tag_key, tag_regex = osm_tag_map.get(place_type, ("amenity", "restaurant"))
    overpass_url = "https://overpass-api.de/api/interpreter"
    # "out center" adds a representative point for ways/relations.
    overpass_query = f"""
    [out:json][timeout:30];
    (
      node["{tag_key}"~"{tag_regex}"](around:{radius},{lat},{lon});
      way["{tag_key}"~"{tag_regex}"](around:{radius},{lat},{lon});
      relation["{tag_key}"~"{tag_regex}"](around:{radius},{lat},{lon});
    );
    out center;
    """
    try:
        time.sleep(API_REQUEST_DELAY)  # courtesy delay for the free service
        response = requests.get(overpass_url, params={'data': overpass_query}, timeout=REQUEST_TIMEOUT)
        response.raise_for_status()
        data = response.json()
        places = []
        for el in data.get("elements", []):
            tags = el.get("tags", {})
            name = tags.get("name")
            if name:
                places.append(name)
            elif tags.get(tag_key):
                # No name tag: show a humanized version of the category value.
                places.append(tags[tag_key].replace('_', ' ').title())
            elif tags.get("shop"):
                places.append(tags["shop"].replace('_', ' ').title())
        return places[:10] # Limit to top 10 results for better UX
    except requests.exceptions.Timeout:
        st.error(f"Request timed out while searching for {place_type}. Please try again.")
    except requests.exceptions.HTTPError as http_err:
        if response.status_code == 429:
            st.error(f"Rate limit exceeded while searching for {place_type}. Please wait and try again.")
        else:
            st.error(f"HTTP error occurred finding {place_type}: {http_err}")
    except requests.exceptions.RequestException as req_err:
        st.error(f"Network error occurred finding {place_type}: {req_err}")
    except json.JSONDecodeError:
        st.error(f"Invalid response format while searching for {place_type}.")
    except Exception as e:
        st.error(f"Unexpected error finding {place_type}: {e}")
    return []
def format_duration(hours):
    """Format a duration given in (possibly fractional) hours as 'Xh Ym'.

    Any duration under one minute — including zero and negative values —
    renders as "Less than a minute". (Previously a positive sub-minute
    duration incorrectly rendered as "0m".)

    Args:
        hours: Duration in hours.

    Returns:
        Human-readable string such as "2h 30m", "45m", or
        "Less than a minute".
    """
    total_minutes = int(hours * 60)
    if total_minutes <= 0:
        return "Less than a minute"
    hh, mm = divmod(total_minutes, 60)
    if hh > 0:
        return f"{hh}h {mm}m" if mm > 0 else f"{hh}h"
    return f"{mm}m"
# -------------------------------------------------------
# Streamlit UI Setup
# -------------------------------------------------------
st.set_page_config(
    page_title="WikiYatra - Smart Cultural Travel Assistant",
    page_icon="🧭",
    layout="centered",
    initial_sidebar_state="auto"
)
st.title("🧭 WikiYatra: Your Smart Cultural Travel Assistant")
st.markdown("Explore India's culturally rich destinations with ease and depth.")

# Tabs
tab_info, tab_route, tab_chat = st.tabs(["🏛 Destination Info", "🗺 Route Planner", "💬 AI Chatbot"])

# ----------- Destination Info Tab -----------
with tab_info:
    st.header("🌐 Place Search")
    # Language picker lives in the sidebar; labels show native script + ISO code.
    st.sidebar.header("Settings")
    selected_language = st.sidebar.selectbox(
        "Select Content Language:",
        options=["English", "Hindi", "Telugu", "Tamil", "Kannada", "Malayalam", "Gujarati", "Bengali", "Marathi", "Punjabi"],
        format_func=lambda x: {
            "English": "English (en)",
            "Hindi": "हिन्दी (hi)",
            "Telugu": "తెలుగు (te)",
            "Tamil": "தமிழ் (ta)",
            "Kannada": "ಕನ್ನಡ (kn)",
            "Malayalam": "മലയാളം (ml)",
            "Gujarati": "ગુજરાતી (gu)",
            "Bengali": "বাংলা (bn)",
            "Marathi": "मराठी (mr)",
            "Punjabi": "ਪੰਜਾਬੀ (pa)"
        }[x]
    )
    # Map the display name to the Wikipedia language-subdomain code.
    language_codes = {
        "English": "en",
        "Hindi": "hi",
        "Telugu": "te",
        "Tamil": "ta",
        "Kannada": "kn",
        "Malayalam": "ml",
        "Gujarati": "gu",
        "Bengali": "bn",
        "Marathi": "mr",
        "Punjabi": "pa"
    }
    current_lang_code = language_codes[selected_language]
    place_query = st.text_input("Enter any Indian location (e.g., 'Taj Mahal', 'Hampi', 'Hyderabad'):", "")
    if st.button("Search"):
        if place_query:
            # Non-English summaries depend on the LLM; warn when unconfigured.
            if current_lang_code != "en" and LLM_MODEL_TYPE != "huggingface_inference":
                st.warning("AI summarization for non-English languages requires a configured LLM (Hugging Face API). Please set HF_API_KEY.")
            with st.spinner(f"Fetching information for {place_query} in {selected_language}..."):
                summary, image_url, full_url = fetch_wikipedia_content(place_query, lang=current_lang_code)
                if summary:
                    st.subheader(f"✨ Information about {place_query}")
                    if image_url:
                        try:
                            # Fetch the thumbnail ourselves so the custom
                            # User-Agent header is applied (Wikimedia policy).
                            image_response = requests.get(image_url, headers=WIKIMEDIA_HEADERS, timeout=REQUEST_TIMEOUT)
                            image_response.raise_for_status()
                            image = Image.open(io.BytesIO(image_response.content))
                            st.image(image, caption=f"Image of {place_query}", use_container_width=True)
                        except requests.exceptions.RequestException as e:
                            st.warning(f"Could not load image: {e}. Ensure you comply with Wikimedia's User-Agent policy.")
                        except Exception as e:
                            st.warning(f"Error processing image: {e}")
                    st.markdown("---")
                    st.subheader("📝 Summary:")
                    st.write(summary)
                    if full_url:
                        st.markdown(f"[Read full article on Wikipedia]({full_url})")
                else:
                    st.warning(f"No detailed information found for '{place_query}' in the selected language or English fallback. Please try a different query or language.")
        else:
            st.info("Please enter a location to search.")
# ----------- Route Planner Tab -----------
with tab_route:
    st.header("🗺 India Route Planner")
    st.markdown("""
Plan your journey across India using OpenStreetMap services.
Enter your start and end locations to get route information and nearby points of interest.
""")
    # Side-by-side inputs for origin and destination.
    col1, col2 = st.columns(2)
    with col1:
        start_location = st.text_input("Start Location", value="Hyderabad, Telangana", help="Enter city, state format")
    with col2:
        end_location = st.text_input("Destination", value="Hampi, Karnataka", help="Enter city, state format")
    if st.button("🚗 Calculate Route", type="primary"):
        if not start_location.strip() or not end_location.strip():
            st.error("Please enter both start and destination locations.")
            st.stop()
        with st.spinner("Planning your route... This may take a few moments."):
            # Get coordinates
            start_coords = get_coordinates(start_location)
            if not start_coords:
                st.error(f"Could not find coordinates for '{start_location}'. Please try a different location name.")
                st.stop()
            end_coords = get_coordinates(end_location)
            if not end_coords:
                st.error(f"Could not find coordinates for '{end_location}'. Please try a different location name.")
                st.stop()
            # Get route
            distance, duration, route_geometry = get_route(start_coords, end_coords)
            if distance is None or duration is None or route_geometry is None:
                st.error("Could not calculate route between the specified locations. Please try different locations.")
                st.stop()
        # Display results
        st.success("🎉 Your route is ready!")
        # Route summary
        st.subheader("📍 Route Summary")
        col1s, col2s, col3s = st.columns(3)
        with col1s:
            st.metric("Distance", f"{distance:.1f} km")
        with col2s:
            st.metric("Duration", format_duration(duration))
        with col3s:
            # Guard against division by zero for degenerate routes.
            avg_speed = distance / duration if duration > 0 else 0
            st.metric("Avg Speed", f"{avg_speed:.0f} km/h")
        # Places of interest
        st.subheader("🍽 Services Near Destination")
        st.info(f"Searching within 5km of {end_location.split(',')[0].strip()}...")
        tab1, tab2, tab3 = st.tabs(["🍴 Food", "🏨 Hotels", "⛽ Fuel"])
        with tab1:
            with st.spinner("Finding food places..."):
                food_places = get_places_nearby(end_coords[0], end_coords[1], "food")
            if food_places:
                st.write("*Food places nearby:*")
                for place in food_places:
                    st.write(f"• {place}")
            else:
                st.info("No food places found nearby.")
        with tab2:
            with st.spinner("Finding hotels..."):
                hotel_places = get_places_nearby(end_coords[0], end_coords[1], "hotels")
            if hotel_places:
                st.write("*Hotels nearby:*")
                for place in hotel_places:
                    st.write(f"• {place}")
            else:
                st.info("No hotels found nearby.")
        with tab3:
            with st.spinner("Finding fuel stations..."):
                fuel_places = get_places_nearby(end_coords[0], end_coords[1], "fuel")
            if fuel_places:
                st.write("*Fuel stations nearby:*")
                for place in fuel_places:
                    st.write(f"• {place}")
            else:
                st.info("No fuel stations found nearby.")
        # Map display
        st.subheader("🗺 Route Map")
        try:
            # Decode polyline
            coords = polyline.decode(route_geometry)
            if not coords:
                st.error("Could not decode route geometry for map display.")
                st.stop()
            # Create map centered midway between the two endpoints.
            center_lat = (start_coords[0] + end_coords[0]) / 2
            center_lon = (start_coords[1] + end_coords[1]) / 2
            m = folium.Map(location=[center_lat, center_lon], zoom_start=8)
            # Add markers
            folium.Marker(
                start_coords,
                tooltip=f"Start: {start_location}",
                popup=f"<b>Start:</b><br>{start_location}",
                icon=folium.Icon(color='green', icon='play')
            ).add_to(m)
            folium.Marker(
                end_coords,
                tooltip=f"Destination: {end_location}",
                popup=f"<b>Destination:</b><br>{end_location}",
                icon=folium.Icon(color='red', icon='stop')
            ).add_to(m)
            # Add route line
            folium.PolyLine(
                coords,
                color='blue',
                weight=4,
                opacity=0.8,
                tooltip="Your Route"
            ).add_to(m)
            # Fit bounds to the route's bounding box.
            bounds = [[min(p[0] for p in coords), min(p[1] for p in coords)],
                      [max(p[0] for p in coords), max(p[1] for p in coords)]]
            m.fit_bounds(bounds, padding=(20, 20))
            # Display map
            # NOTE(review): st_folium inside a button branch re-runs on every
            # widget interaction; the map may vanish after events — confirm.
            st_folium(m, width=700, height=500, returned_objects=["last_clicked"])
        except Exception as e:
            st.error(f"Could not display map: {e}")
            st.info("Route calculated successfully, but map display failed.")
    # Footer
    st.markdown("---")
    st.caption("""
*Data Sources:* [OpenStreetMap](https://www.openstreetmap.org/) •
[Nominatim](https://nominatim.openstreetmap.org/) •
[OSRM](http://project-osrm.org/) •
[Overpass API](https://overpass-api.de/)
""")
    # Usage notes
    with st.expander("ℹ Usage Tips"):
        st.markdown("""
- *Location format*: Use "City, State" format for best results (e.g., "Mumbai, Maharashtra")
- *API limitations*: This uses free public APIs which may have rate limits
- *Accuracy*: Routes are approximate and for planning purposes only
- *Coverage*: Works best for locations within India
""")
# ----------- AI Chatbot Tab -----------
with tab_chat:
    st.header("💬 AI Chatbot (Powered by Hugging Face & Wikipedia)")
    st.write("Ask me questions about Indian places! For example: 'Tell me about Taj Mahal', 'What can I see in Hampi?', 'Where is Hyderabad?'")
    # Surface the missing-configuration warning deferred from startup.
    if LLM_MODEL_TYPE != "huggingface_inference":
        st.warning("Chatbot is disabled: Hugging Face API Key or Model ID not set. Please set HF_API_KEY environment variable and ensure HF_MODEL_ID is valid.")
    # Chat history persists across Streamlit reruns in session state.
    if "messages" not in st.session_state:
        st.session_state.messages = []
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    if prompt := st.chat_input("Ask me anything about Indian places..."):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            if LLM_MODEL_TYPE != "huggingface_inference":
                full_response_text = "Chatbot is unavailable due to missing API key or model configuration."
                st.markdown(full_response_text)
                st.session_state.messages.append({"role": "assistant", "content": full_response_text})
            else:
                with st.spinner("Thinking..."):
                    # Default reply used when every later step fails.
                    full_response_text = "I'm sorry, I couldn't find information about that. Please try rephrasing or asking about a well-known Indian place."
                    try:
                        # Step 1: Use HF model for place name extraction
                        place_name_extraction_prompt = (
                            f"Identify the primary Indian tourist destination or place mentioned in the following question. "
                            f"If no specific Indian place is clearly mentioned, respond with 'NONE'. "
                            f"Your response should be only the extracted place name or 'NONE'.\n\n"
                            f"Question: '{prompt}'\n"
                            f"Place Name:"
                        )
                        place_name_payload = {
                            "inputs": f"<s>[INST] {place_name_extraction_prompt} [/INST]", # Mistral instruction format
                            "parameters": {
                                "max_new_tokens": 20,
                                "temperature": 0.1,  # near-deterministic extraction
                                "do_sample": False,
                                "return_full_text": False,
                                "pad_token_id": 2
                            }
                        }
                        place_name_response_data = query_huggingface_api(place_name_payload, model_id=HF_MODEL_ID)
                        place_name = "NONE"
                        if place_name_response_data and isinstance(place_name_response_data, list) and place_name_response_data[0].get('generated_text'):
                            extracted_name = place_name_response_data[0]['generated_text'].strip()
                            # Keep only the first line in case the model rambles.
                            extracted_name = extracted_name.split('\n')[0].strip()
                            if extracted_name.lower() != 'none':
                                place_name = extracted_name
                            else:
                                place_name = "NONE"
                        # Ground the answer in Wikipedia when a place was found.
                        wikipedia_summary = None
                        if place_name and place_name.lower() != 'none':
                            wikipedia_summary, _, _ = fetch_wikipedia_content(place_name, lang="en")
                        # Step 2: Use HF model for general response based on Wikipedia summary
                        if wikipedia_summary:
                            llm_prompt = (
                                f"You are a helpful AI assistant specializing in Indian travel destinations. "
                                f"The user asked: '{prompt}'. "
                                f"Here is some information about '{place_name}' from Wikipedia:\n\n"
                                f"{wikipedia_summary}\n\n"
                                f"Please answer the user's question concisely, using only the provided information if possible. "
                                f"If the information is not directly relevant, provide general known facts about {place_name}. "
                                f"Start with 'Certainly! ' or 'Here's what I know about {place_name}:'. "
                                f"End with a call-to-action or a question to encourage further conversation. "
                                f"Ensure proper grammar, punctuation, and spelling. Avoid jargon.\n"
                                f"Answer:"
                            )
                        else:
                            llm_prompt = (
                                f"You are a helpful AI assistant specializing in Indian travel destinations. "
                                f"The user asked: '{prompt}'. "
                                f"I couldn't find detailed Wikipedia information for the place mentioned, or no specific Indian place was identified. "
                                f"Please provide a polite and helpful response, suggesting that you can answer questions about known Indian landmarks. "
                                f"Keep it concise. Start with 'I'm sorry, but...' or 'I can't find specific details for that...'. "
                                f"End with a friendly prompt for more questions.\n"
                                f"Answer:"
                            )
                        llm_answer_payload = {
                            "inputs": f"<s>[INST] {llm_prompt} [/INST]", # Mistral instruction format
                            "parameters": {
                                "max_new_tokens": 300,
                                "temperature": 0.8,
                                "do_sample": True,
                                "return_full_text": False,
                                "pad_token_id": 2
                            }
                        }
                        llm_answer_response_data = query_huggingface_api(llm_answer_payload, model_id=HF_MODEL_ID)
                        if llm_answer_response_data and isinstance(llm_answer_response_data, list) and llm_answer_response_data[0].get('generated_text'):
                            generated_response = llm_answer_response_data[0]['generated_text'].strip()
                            # Strip a leaked "Answer:" prefix if present.
                            if generated_response.lower().startswith("answer:"):
                                generated_response = generated_response[len("answer:"):].strip()
                            full_response_text = generated_response
                        else:
                            full_response_text = "I couldn't generate a response from the Hugging Face model. The model might be busy or did not provide a valid answer. Please try again or rephrase your question."
                    except Exception as e:
                        full_response_text = f"An unexpected error occurred while processing your request: {e}. Please try again or rephrase your question."
                st.markdown(full_response_text)
                st.session_state.messages.append({"role": "assistant", "content": full_response_text})

st.markdown("---")
st.caption("Powered by Wikimedia APIs, OpenStreetMap services, and Hugging Face Inference API.")